├── utils ├── __init__.py ├── util.py ├── logger.py └── options.py ├── dataset ├── __init__.py ├── dfmm_spotlight_hf │ └── dfmm_spotlight_hf.py └── dfmm_spotlight.py ├── models ├── archs │ ├── __init__.py │ ├── fcn_arch.py │ └── unet_arch.py ├── losses │ ├── __init__.py │ ├── accuracy.py │ └── cross_entropy_loss.py ├── __init__.py └── erlm_model.py ├── docs └── teaser.png ├── .gitignore ├── examples ├── MEN-Denim-id_00000089-03_7_additional.png └── WOMEN-Rompers_Jumpsuits-id_00001211-02_1_front.png ├── environment.yaml ├── train_texfit.sh ├── configs ├── base.yml └── region_gen.yml ├── LICENSE ├── pipeline.py ├── train_erlm.py ├── README.md └── train_texfit.py /utils/__init__.py: -------------------------------------------------------------------------------- 1 | -------------------------------------------------------------------------------- /dataset/__init__.py: -------------------------------------------------------------------------------- 1 | -------------------------------------------------------------------------------- /models/archs/__init__.py: -------------------------------------------------------------------------------- 1 | -------------------------------------------------------------------------------- /models/losses/__init__.py: -------------------------------------------------------------------------------- 1 | -------------------------------------------------------------------------------- /docs/teaser.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/tongxin-wang/TexFit/HEAD/docs/teaser.png -------------------------------------------------------------------------------- /.gitignore: -------------------------------------------------------------------------------- 1 | __pycache__/ 2 | .cache/ 3 | experiments/* 4 | tb_logger/* 5 | sd-model-finetuned/* 6 | *.pth -------------------------------------------------------------------------------- /examples/MEN-Denim-id_00000089-03_7_additional.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/tongxin-wang/TexFit/HEAD/examples/MEN-Denim-id_00000089-03_7_additional.png -------------------------------------------------------------------------------- /examples/WOMEN-Rompers_Jumpsuits-id_00001211-02_1_front.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/tongxin-wang/TexFit/HEAD/examples/WOMEN-Rompers_Jumpsuits-id_00001211-02_1_front.png -------------------------------------------------------------------------------- /environment.yaml: -------------------------------------------------------------------------------- 1 | name: texfit 2 | channels: 3 | - pytorch 4 | - defaults 5 | dependencies: 6 | - python=3.10.10 7 | - pip=23.0.1 8 | - cudatoolkit=11.3 9 | - pytorch=1.12.1 10 | - torchvision=0.13.1 11 | - numpy=1.23.5 12 | - pip: 13 | - jsonlines==3.1.0 14 | - datasets==2.14.6 15 | - transformers==4.27.4 16 | - diffusers==0.17.1 17 | - accelerate==0.17.1 18 | - tensorboard==2.12.0 19 | - opencv-python==4.7.0.72 20 | - git+https://github.com/openai/CLIP.git -------------------------------------------------------------------------------- /train_texfit.sh: -------------------------------------------------------------------------------- 1 | export MODEL_NAME="CompVis/stable-diffusion-v1-4" 2 | export dataset_name="dataset/dfmm_spotlight_hf" 3 | export CUDA_VISIBLE_DEVICES="0" 4 | 5 | python train_texfit.py \ 6 | 
--mixed_precision="fp16" \ 7 | --pretrained_model_name_or_path=$MODEL_NAME \ 8 | --dataset_name=$dataset_name \ 9 | --use_ema \ 10 | --resolution=512 \ 11 | --train_batch_size=1 \ 12 | --gradient_accumulation_steps=4 \ 13 | --gradient_checkpointing \ 14 | --max_train_steps=140000 \ 15 | --checkpointing_steps=20000 \ 16 | --learning_rate=1e-05 \ 17 | --max_grad_norm=1 \ 18 | --lr_scheduler="constant" --lr_warmup_steps=0 \ 19 | --output_dir="sd-model-finetuned/texfit-model" -------------------------------------------------------------------------------- /configs/base.yml: -------------------------------------------------------------------------------- 1 | name: base 2 | use_tb_logger: true 3 | debug_path: False 4 | set_CUDA_VISIBLE_DEVICES: True 5 | gpu_ids: [0] 6 | 7 | # dataset configs 8 | batch_size: 4 9 | num_workers: 4 10 | mask_dir: /path/to/DFMM-Spotlight/mask 11 | train_img_dir: /path/to/DFMM-Spotlight/train_images 12 | test_img_dir: /path/to/DFMM-Spotlight/test_images 13 | train_ann_file: /path/to/DFMM-Spotlight/mask_ann/train_ann_file.jsonl 14 | test_ann_file: /path/to/DFMM-Spotlight/mask_ann/test_ann_file.jsonl 15 | downsample_factor: 2 16 | 17 | model_type: ERLM 18 | text_embedding_dim: 512 19 | encoder_in_channels: 3 20 | fc_in_channels: 64 21 | fc_in_index: 4 22 | fc_channels: 64 23 | fc_num_convs: 1 24 | fc_concat_input: False 25 | fc_dropout_ratio: 0.1 26 | fc_num_classes: 2 27 | fc_align_corners: False 28 | 29 | # training configs 30 | val_freq: 5 31 | print_freq: 100 32 | weight_decay: 0 33 | manual_seed: 2023 34 | num_epochs: 100 35 | lr: !!float 1e-4 36 | lr_decay: step 37 | gamma: 0.1 38 | step: 50 39 | -------------------------------------------------------------------------------- /configs/region_gen.yml: -------------------------------------------------------------------------------- 1 | name: region_gen 2 | use_tb_logger: true 3 | debug_path: False 4 | set_CUDA_VISIBLE_DEVICES: True 5 | gpu_ids: [0] 6 | 7 | # dataset configs 8 | batch_size: 8 9 | num_workers: 4 10 | mask_dir: /path/to/DFMM-Spotlight/mask 11 | train_img_dir: /path/to/DFMM-Spotlight/train_images 12 | test_img_dir: /path/to/DFMM-Spotlight/test_images 13 | train_ann_file: /path/to/DFMM-Spotlight/mask_ann/train_ann_file.jsonl 14 | test_ann_file: /path/to/DFMM-Spotlight/mask_ann/test_ann_file.jsonl 15 | downsample_factor: 2 16 | 17 | model_type: ERLM 18 | text_embedding_dim: 512 19 | encoder_in_channels: 3 20 | fc_in_channels: 64 21 | fc_in_index: 4 22 | fc_channels: 64 23 | fc_num_convs: 1 24 | fc_concat_input: False 25 | fc_dropout_ratio: 0.1 26 | fc_num_classes: 2 27 | fc_align_corners: False 28 | 29 | # training configs 30 | val_freq: 5 31 | print_freq: 100 32 | weight_decay: 0 33 | manual_seed: 2023 34 | num_epochs: 100 35 | lr: !!float 1e-4 36 | lr_decay: step 37 | gamma: 0.1 38 | step: 50 39 | -------------------------------------------------------------------------------- /LICENSE: -------------------------------------------------------------------------------- 1 | MIT License 2 | 3 | Copyright (c) 2023 TongxinWong 4 | 5 | Permission is hereby granted, free of charge, to any person obtaining a copy 6 | of this software and associated documentation files (the "Software"), to deal 7 | in the Software without restriction, including without limitation the rights 8 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell 9 | copies of the Software, and to permit persons to whom the Software is 10 | furnished to do so, subject to the following conditions: 11 | 12 | The above copyright 
notice and this permission notice shall be included in all 13 | copies or substantial portions of the Software. 14 | 15 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 16 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 17 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE 18 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 19 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, 20 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE 21 | SOFTWARE. 22 | -------------------------------------------------------------------------------- /models/__init__.py: -------------------------------------------------------------------------------- 1 | import glob 2 | import importlib 3 | import logging 4 | import os.path as osp 5 | 6 | # automatically scan and import model modules 7 | # scan all the files under the 'models' folder and collect files ending with 8 | # '_model.py' 9 | model_folder = osp.dirname(osp.abspath(__file__)) 10 | model_filenames = [ 11 | osp.splitext(osp.basename(v))[0] 12 | for v in glob.glob(f'{model_folder}/*_model.py') 13 | ] 14 | # import all the model modules 15 | _model_modules = [ 16 | importlib.import_module(f'models.{file_name}') 17 | for file_name in model_filenames 18 | ] 19 | 20 | 21 | def create_model(opt): 22 | """Create model. 23 | 24 | Args: 25 | opt (dict): Configuration. It constains: 26 | model_type (str): Model type. 27 | """ 28 | model_type = opt['model_type'] 29 | 30 | # dynamically instantiation 31 | for module in _model_modules: 32 | model_cls = getattr(module, model_type, None) 33 | if model_cls is not None: 34 | break 35 | if model_cls is None: 36 | raise ValueError(f'Model {model_type} is not found.') 37 | 38 | model = model_cls(opt) 39 | 40 | logger = logging.getLogger('base') 41 | logger.info(f'Model [{model.__class__.__name__}] is created.') 42 | return model 43 | -------------------------------------------------------------------------------- /models/losses/accuracy.py: -------------------------------------------------------------------------------- 1 | def accuracy(pred, target, topk=1, thresh=None): 2 | """Calculate accuracy according to the prediction and target. 3 | 4 | Args: 5 | pred (torch.Tensor): The model prediction, shape (N, num_class, ...) 6 | target (torch.Tensor): The target of each prediction, shape (N, , ...) 7 | topk (int | tuple[int], optional): If the predictions in ``topk`` 8 | matches the target, the predictions will be regarded as 9 | correct ones. Defaults to 1. 10 | thresh (float, optional): If not None, predictions with scores under 11 | this threshold are considered incorrect. Default to None. 12 | 13 | Returns: 14 | float | tuple[float]: If the input ``topk`` is a single integer, 15 | the function will return a single float as accuracy. If 16 | ``topk`` is a tuple containing multiple integers, the 17 | function will return a tuple containing accuracies of 18 | each ``topk`` number. 19 | """ 20 | assert isinstance(topk, (int, tuple)) 21 | if isinstance(topk, int): 22 | topk = (topk, ) 23 | return_single = True 24 | else: 25 | return_single = False 26 | 27 | maxk = max(topk) 28 | if pred.size(0) == 0: 29 | accu = [pred.new_tensor(0.) 
for i in range(len(topk))] 30 | return accu[0] if return_single else accu 31 | assert pred.ndim == target.ndim + 1 32 | assert pred.size(0) == target.size(0) 33 | assert maxk <= pred.size(1), \ 34 | f'maxk {maxk} exceeds pred dimension {pred.size(1)}' 35 | pred_value, pred_label = pred.topk(maxk, dim=1) 36 | # transpose to shape (maxk, N, ...) 37 | pred_label = pred_label.transpose(0, 1) 38 | correct = pred_label.eq(target.unsqueeze(0).expand_as(pred_label)) 39 | if thresh is not None: 40 | # Only prediction values larger than thresh are counted as correct 41 | correct = correct & (pred_value > thresh).t() 42 | res = [] 43 | for k in topk: 44 | correct_k = correct[:k].view(-1).float().sum(0, keepdim=True) 45 | res.append(correct_k.mul_(100.0 / target.numel())) 46 | return res[0] if return_single else res 47 | -------------------------------------------------------------------------------- /pipeline.py: -------------------------------------------------------------------------------- 1 | import argparse 2 | 3 | import torch 4 | import clip 5 | import numpy as np 6 | 7 | from models import create_model 8 | from utils.options import dict_to_nonedict, parse 9 | from PIL import Image 10 | from diffusers import StableDiffusionInpaintPipeline 11 | 12 | def load_image(file_path): 13 | downsample_factor = 2 14 | with open(file_path, 'rb') as f: 15 | image = Image.open(f) 16 | width, height = image.size 17 | width = width // downsample_factor 18 | height = height // downsample_factor 19 | image = image.resize( 20 | size=(width, height), resample=Image.NEAREST) 21 | image = np.array(image).transpose(2, 0, 1) 22 | return image.astype(np.float32) 23 | 24 | def main(): 25 | # options 26 | parser = argparse.ArgumentParser() 27 | parser.add_argument('--opt', type=str, default='./configs/region_gen.yml', help='Path to option YAML file.') 28 | parser.add_argument('--img_path', type=str, help='Path to the fashion image.', required=True) 29 | parser.add_argument('--output_path', type=str, help='Saving path to the edited image.', required=True) 30 | parser.add_argument('--text_prompt', type=str, help='The editing text prompt.', required=True) 31 | parser.add_argument('--erlm_model_path', type=str, help='Path to ERLM model.', required=True) 32 | parser.add_argument('--texfit_model_path', type=str, help='Path to TexFit model.', required=True) 33 | args = parser.parse_args() 34 | opt = parse(args.opt, is_train=True) 35 | opt['pretrained_model_path'] = args.erlm_model_path 36 | 37 | # convert to NoneDict, which returns None for missing keys 38 | opt = dict_to_nonedict(opt) 39 | model = create_model(opt) 40 | model.load_network() 41 | model.encoder.eval() 42 | model.decoder.eval() 43 | 44 | img_path = args.img_path 45 | text = args.text_prompt 46 | 47 | img = load_image(img_path) 48 | img = torch.from_numpy(img) 49 | img = img.unsqueeze(dim=0) 50 | 51 | img = img.to(model.device) 52 | text_inputs = torch.cat([clip.tokenize(text)]).to(model.device) 53 | 54 | with torch.no_grad(): 55 | text_embedding = model.clip.encode_text(text_inputs) 56 | text_enc = model.encoder(img, text_embedding) 57 | seg_logits = model.decoder(text_enc) 58 | seg_pred = seg_logits.argmax(dim=1) 59 | seg_pred = seg_pred.cpu().numpy()[0] 60 | seg_img = Image.fromarray(np.uint8(seg_pred * 255)) 61 | 62 | img = Image.open(img_path).convert("RGB").resize((256, 512)) 63 | 64 | # Load pipeline 65 | pipe = StableDiffusionInpaintPipeline.from_pretrained(args.texfit_model_path, revision="fp16", 66 | torch_dtype=torch.float16, 67 | safety_checker=None, 
68 | requires_safety_checker=False).to("cuda") 69 | prompt = [text] 70 | generator = torch.Generator("cuda").manual_seed(2023) 71 | images = pipe( 72 | height=512, 73 | width=256, 74 | prompt=prompt, 75 | image=img, 76 | mask_image=seg_img, 77 | num_inference_steps=50, 78 | generator=generator, 79 | ).images 80 | 81 | final_img = Image.composite(images[0], img, seg_img) 82 | final_img.save(f'{args.output_path}') 83 | print('Saved edited result to', args.output_path) 84 | 85 | if __name__ == '__main__': 86 | main() 87 | -------------------------------------------------------------------------------- /dataset/dfmm_spotlight_hf/dfmm_spotlight_hf.py: -------------------------------------------------------------------------------- 1 | # Copyright 2020 The HuggingFace Datasets Authors and the current dataset script contributor. 2 | # 3 | # Licensed under the Apache License, Version 2.0 (the "License"); 4 | # you may not use this file except in compliance with the License. 5 | # You may obtain a copy of the License at 6 | # 7 | # http://www.apache.org/licenses/LICENSE-2.0 8 | # 9 | # Unless required by applicable law or agreed to in writing, software 10 | # distributed under the License is distributed on an "AS IS" BASIS, 11 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 | # See the License for the specific language governing permissions and 13 | # limitations under the License. 14 | 15 | 16 | import datasets 17 | import os 18 | from PIL import Image 19 | import jsonlines 20 | 21 | 22 | _CITATION = """\ 23 | @inproceedings{wang2024texfit, 24 | title={TexFit: Text-Driven Fashion Image Editing with Diffusion Models}, 25 | author={Wang, Tongxin and Ye, Mang}, 26 | booktitle={Proceedings of the AAAI Conference on Artificial Intelligence}, 27 | volume={38}, 28 | number={9}, 29 | pages={10198--10206}, 30 | year={2024} 31 | } 32 | """ 33 | 34 | _DESCRIPTION = """\ 35 | A fashion image-region-text pair dataset called DFMM-Spotlight, highlighting local cloth. 
36 | """ 37 | 38 | _HOMEPAGE = "" 39 | 40 | _LICENSE = "" 41 | 42 | 43 | class DFMMSpotlightDataset(datasets.GeneratorBasedBuilder): 44 | 45 | VERSION = datasets.Version("1.0.0") 46 | 47 | def _info(self): 48 | 49 | features = datasets.Features( 50 | { 51 | "image": datasets.Image(), 52 | "mask": datasets.Image(), 53 | "text": datasets.Value("string") 54 | } 55 | ) 56 | 57 | return datasets.DatasetInfo( 58 | description=_DESCRIPTION, 59 | features=features, 60 | homepage=_HOMEPAGE, 61 | license=_LICENSE, 62 | citation=_CITATION, 63 | ) 64 | 65 | def _split_generators(self, dl_manager): 66 | data_dir = '/path/to/DFMM-Spotlight' 67 | return [ 68 | datasets.SplitGenerator( 69 | name=datasets.Split.TRAIN, 70 | # These kwargs will be passed to _generate_examples 71 | gen_kwargs={ 72 | "filepath": data_dir, 73 | "split": "train", 74 | }, 75 | ), 76 | datasets.SplitGenerator( 77 | name=datasets.Split.TEST, 78 | # These kwargs will be passed to _generate_examples 79 | gen_kwargs={ 80 | "filepath": data_dir, 81 | "split": "test", 82 | }, 83 | ), 84 | ] 85 | 86 | # method parameters are unpacked from `gen_kwargs` as given in `_split_generators` 87 | def _generate_examples(self, filepath, split): 88 | img_path = os.path.join(filepath, f'{split}_images') 89 | mask_path = os.path.join(filepath, 'mask') 90 | 91 | images = [] 92 | masks = [] 93 | texts = [] 94 | with jsonlines.open(os.path.join(filepath, 'mask_ann', f'{split}_ann_file.jsonl'), 'r') as reader: 95 | for row in reader: 96 | images.append(row['image']) 97 | masks.append(row['mask']) 98 | texts.append(row['text']) 99 | 100 | dataset_len = len(images) 101 | for i in range(dataset_len): 102 | yield i, { 103 | 'image': Image.open(os.path.join(img_path, images[i])), 104 | 'mask': Image.open(os.path.join(mask_path, masks[i])), 105 | "text": texts[i], 106 | } 107 | -------------------------------------------------------------------------------- /dataset/dfmm_spotlight.py: -------------------------------------------------------------------------------- 1 | import os 2 | import os.path 3 | 4 | import numpy as np 5 | import torch 6 | import jsonlines 7 | import torch.utils.data as data 8 | from PIL import Image 9 | from random import choice 10 | 11 | CLOTH_TYPES = ['tank top', 'tank shirt', 'T-shirt', 'shirt', 'sweater', 'upper clothing', 12 | 'pants', 'shorts', 'trousers', 'skirt', 'lower clothing', 'outer clothing', 13 | 'dress', 'rompers', 'belt', 'sunglasses', 'glasses', 'bag'] 14 | 15 | def check_cloth_type(text): 16 | ret_cloth_type = '' 17 | for cloth_type in CLOTH_TYPES: 18 | if cloth_type in text: 19 | ret_cloth_type = cloth_type 20 | break 21 | return ret_cloth_type 22 | 23 | class DFMMSpotlight(data.Dataset): 24 | 25 | def __init__(self, mask_dir, img_dir, ann_file, downsample_factor=2): 26 | self._mask_path = mask_dir 27 | self._image_path = img_dir 28 | self._mask_fnames = [] 29 | self._image_fnames = [] 30 | self._cloth_texts = [] 31 | self._cloth_text_groups = {} 32 | 33 | for cloth_type in CLOTH_TYPES: 34 | self._cloth_text_groups[cloth_type] = set() 35 | 36 | self.downsample_factor = downsample_factor 37 | 38 | # load text-region pair data 39 | assert os.path.exists(ann_file) 40 | with jsonlines.open(ann_file, 'r') as reader: 41 | for row in reader: 42 | self._mask_fnames.append(row['mask']) 43 | self._image_fnames.append(row['image']) 44 | row_cloth_type = check_cloth_type(row['text']) 45 | if row_cloth_type: 46 | self._cloth_text_groups[row_cloth_type].add(row['text']) 47 | self._cloth_texts.append({'type': row_cloth_type, 'text': 
row['text']}) 48 | 49 | def _open_file(self, path_prefix, fname): 50 | return open(os.path.join(path_prefix, fname), 'rb') 51 | 52 | def _load_image(self, raw_idx): 53 | fname = self._image_fnames[raw_idx] 54 | with self._open_file(self._image_path, fname) as f: 55 | image = Image.open(f) 56 | if self.downsample_factor != 1: 57 | width, height = image.size 58 | width = width // self.downsample_factor 59 | height = height // self.downsample_factor 60 | image = image.resize( 61 | size=(width, height), resample=Image.NEAREST) 62 | image = np.array(image).transpose(2, 0, 1) 63 | return image.astype(np.float32) 64 | 65 | def _load_mask(self, raw_idx): 66 | fname = self._mask_fnames[raw_idx] 67 | with self._open_file(self._mask_path, fname) as f: 68 | mask = Image.open(f) 69 | if self.downsample_factor != 1: 70 | width, height = mask.size 71 | width = width // self.downsample_factor 72 | height = height // self.downsample_factor 73 | mask = mask.resize( 74 | size=(width, height), resample=Image.NEAREST) 75 | mask = np.array(mask) 76 | return mask.astype(np.float32) 77 | 78 | def __getitem__(self, index): 79 | mask = self._load_mask(index) 80 | image = self._load_image(index) 81 | text_info = self._cloth_texts[index] 82 | if text_info['type']: 83 | text = choice(list(self._cloth_text_groups[text_info['type']])) 84 | else: 85 | text = text_info['text'] 86 | 87 | mask = mask / 255 88 | mask = torch.LongTensor(mask) 89 | image = torch.from_numpy(image) 90 | 91 | return_dict = { 92 | 'mask': mask, 93 | 'image': image, 94 | 'text': text, 95 | 'mask_name': self._mask_fnames[index], 96 | 'img_name': self._image_fnames[index] 97 | } 98 | 99 | return return_dict 100 | 101 | def __len__(self): 102 | return len(self._image_fnames) 103 | -------------------------------------------------------------------------------- /train_erlm.py: -------------------------------------------------------------------------------- 1 | import argparse 2 | import logging 3 | import time 4 | import torch 5 | import os 6 | import os.path as osp 7 | from dataset.dfmm_spotlight import DFMMSpotlight 8 | from models import create_model 9 | from utils.logger import MessageLogger, get_root_logger, init_tb_logger 10 | from utils.options import dict2str, dict_to_nonedict, parse 11 | from utils.util import make_exp_dirs 12 | 13 | 14 | def main(): 15 | # options 16 | parser = argparse.ArgumentParser() 17 | parser.add_argument('--opt', type=str, help='Path to option YAML file.') 18 | args = parser.parse_args() 19 | opt = parse(args.opt, is_train=True) 20 | 21 | # mkdir and loggers 22 | make_exp_dirs(opt) 23 | log_file = osp.join(opt['path']['log'], f"train_{opt['name']}.log") 24 | logger = get_root_logger( 25 | logger_name='base', log_level=logging.INFO, log_file=log_file) 26 | logger.info(dict2str(opt)) 27 | # initialize tensorboard logger 28 | tb_logger = None 29 | if opt['use_tb_logger'] and 'debug' not in opt['name']: 30 | tb_logger = init_tb_logger(log_dir='./tb_logger/' + opt['name']) 31 | 32 | # convert to NoneDict, which returns None for missing keys 33 | opt = dict_to_nonedict(opt) 34 | 35 | # set up data loader 36 | train_dataset = DFMMSpotlight( 37 | mask_dir=opt['mask_dir'], 38 | img_dir=opt['train_img_dir'], 39 | ann_file=opt['train_ann_file']) 40 | train_loader = torch.utils.data.DataLoader( 41 | dataset=train_dataset, 42 | batch_size=opt['batch_size'], 43 | shuffle=True, 44 | num_workers=opt['num_workers'], 45 | drop_last=True) 46 | logger.info(f'Number of train set: {len(train_dataset)}.') 47 | opt['max_iters'] = 
opt['num_epochs'] * len( 48 | train_dataset) // opt['batch_size'] 49 | 50 | test_dataset = DFMMSpotlight( 51 | mask_dir=opt['mask_dir'], 52 | img_dir=opt['test_img_dir'], 53 | ann_file=opt['test_ann_file']) 54 | test_loader = torch.utils.data.DataLoader( 55 | dataset=test_dataset, 56 | batch_size=1, 57 | shuffle=False, 58 | num_workers=opt['num_workers']) 59 | logger.info(f'Number of test set: {len(test_dataset)}.') 60 | 61 | current_iter = 0 62 | best_epoch = None 63 | best_acc = 0 64 | 65 | model = create_model(opt) 66 | 67 | data_time, iter_time = 0, 0 68 | current_iter = 0 69 | 70 | # create message logger (formatted outputs) 71 | msg_logger = MessageLogger(opt, current_iter, tb_logger) 72 | 73 | for epoch in range(opt['num_epochs']): 74 | lr = model.update_learning_rate(epoch) 75 | 76 | for _, batch_data in enumerate(train_loader): 77 | data_time = time.time() - data_time 78 | 79 | current_iter += 1 80 | 81 | model.feed_data(batch_data) 82 | model.optimize_parameters() 83 | 84 | iter_time = time.time() - iter_time 85 | if current_iter % opt['print_freq'] == 0: 86 | log_vars = {'epoch': epoch, 'iter': current_iter} 87 | log_vars.update({'lrs': [lr]}) 88 | log_vars.update({'time': iter_time, 'data_time': data_time}) 89 | log_vars.update(model.get_current_log()) 90 | msg_logger(log_vars) 91 | 92 | data_time = time.time() 93 | iter_time = time.time() 94 | 95 | if epoch % opt['val_freq'] == 0: 96 | save_dir = f'{opt["path"]["visualization"]}/testset/epoch_{epoch:03d}' 97 | os.makedirs(save_dir, exist_ok=opt['debug']) 98 | test_acc = model.inference(test_loader, save_dir) 99 | 100 | logger.info(f'Epoch: {epoch}, ' 101 | f'test_acc: {test_acc: .4f}.') 102 | 103 | if test_acc > best_acc: 104 | best_epoch = epoch 105 | best_acc = test_acc 106 | 107 | logger.info(f'Best epoch: {best_epoch}, ' 108 | f'Best test acc: {best_acc: .4f}.') 109 | 110 | # save model 111 | model.save_network( 112 | f'{opt["path"]["models"]}/region_generation_epoch{epoch}.pth') 113 | 114 | 115 | if __name__ == '__main__': 116 | main() 117 | -------------------------------------------------------------------------------- /utils/util.py: -------------------------------------------------------------------------------- 1 | import logging 2 | import os 3 | import random 4 | import sys 5 | import time 6 | from shutil import get_terminal_size 7 | 8 | import numpy as np 9 | import torch 10 | 11 | logger = logging.getLogger('base') 12 | 13 | 14 | def make_exp_dirs(opt): 15 | """Make dirs for experiments.""" 16 | path_opt = opt['path'].copy() 17 | if opt['is_train']: 18 | overwrite = True if 'debug' in opt['name'] else False 19 | os.makedirs(path_opt.pop('experiments_root'), exist_ok=overwrite) 20 | os.makedirs(path_opt.pop('models'), exist_ok=overwrite) 21 | else: 22 | os.makedirs(path_opt.pop('results_root')) 23 | 24 | 25 | def set_random_seed(seed): 26 | """Set random seeds.""" 27 | random.seed(seed) 28 | np.random.seed(seed) 29 | torch.manual_seed(seed) 30 | torch.cuda.manual_seed(seed) 31 | torch.cuda.manual_seed_all(seed) 32 | 33 | 34 | class ProgressBar(object): 35 | """A progress bar which can print the progress. 
36 | 37 | Modified from: 38 | https://github.com/hellock/cvbase/blob/master/cvbase/progress.py 39 | """ 40 | 41 | def __init__(self, task_num=0, bar_width=50, start=True): 42 | self.task_num = task_num 43 | max_bar_width = self._get_max_bar_width() 44 | self.bar_width = ( 45 | bar_width if bar_width <= max_bar_width else max_bar_width) 46 | self.completed = 0 47 | if start: 48 | self.start() 49 | 50 | def _get_max_bar_width(self): 51 | terminal_width, _ = get_terminal_size() 52 | max_bar_width = min(int(terminal_width * 0.6), terminal_width - 50) 53 | if max_bar_width < 10: 54 | print(f'terminal width is too small ({terminal_width}), ' 55 | 'please consider widen the terminal for better ' 56 | 'progressbar visualization') 57 | max_bar_width = 10 58 | return max_bar_width 59 | 60 | def start(self): 61 | if self.task_num > 0: 62 | sys.stdout.write(f"[{' ' * self.bar_width}] 0/{self.task_num}, " 63 | f'elapsed: 0s, ETA:\nStart...\n') 64 | else: 65 | sys.stdout.write('completed: 0, elapsed: 0s') 66 | sys.stdout.flush() 67 | self.start_time = time.time() 68 | 69 | def update(self, msg='In progress...'): 70 | self.completed += 1 71 | elapsed = time.time() - self.start_time 72 | fps = self.completed / elapsed 73 | if self.task_num > 0: 74 | percentage = self.completed / float(self.task_num) 75 | eta = int(elapsed * (1 - percentage) / percentage + 0.5) 76 | mark_width = int(self.bar_width * percentage) 77 | bar_chars = '>' * mark_width + '-' * (self.bar_width - mark_width) 78 | sys.stdout.write('\033[2F') # cursor up 2 lines 79 | sys.stdout.write( 80 | '\033[J' 81 | ) # clean the output (remove extra chars since last display) 82 | sys.stdout.write( 83 | f'[{bar_chars}] {self.completed}/{self.task_num}, ' 84 | f'{fps:.1f} task/s, elapsed: {int(elapsed + 0.5)}s, ' 85 | f'ETA: {eta:5}s\n{msg}\n') 86 | else: 87 | sys.stdout.write( 88 | f'completed: {self.completed}, elapsed: {int(elapsed + 0.5)}s, ' 89 | f'{fps:.1f} tasks/s') 90 | sys.stdout.flush() 91 | 92 | 93 | class AverageMeter(object): 94 | """ 95 | Computes and stores the average and current value 96 | Imported from 97 | https://github.com/pytorch/examples/blob/master/imagenet/main.py#L247-L262 98 | """ 99 | 100 | def __init__(self): 101 | self.reset() 102 | 103 | def reset(self): 104 | self.val = 0 105 | self.avg = 0 # running average = running sum / running count 106 | self.sum = 0 # running sum 107 | self.count = 0 # running count 108 | 109 | def update(self, val, n=1): 110 | # n = batch_size 111 | 112 | # val = batch accuracy for an attribute 113 | # self.val = val 114 | 115 | # sum = 100 * accumulative correct predictions for this attribute 116 | self.sum += val * n 117 | 118 | # count = total samples so far 119 | self.count += n 120 | 121 | # avg = 100 * avg accuracy for this attribute 122 | # for all the batches so far 123 | self.avg = self.sum / self.count 124 | -------------------------------------------------------------------------------- /utils/logger.py: -------------------------------------------------------------------------------- 1 | import datetime 2 | import logging 3 | import time 4 | 5 | 6 | class MessageLogger(): 7 | """Message logger for printing. 8 | 9 | Args: 10 | opt (dict): Config. It contains the following keys: 11 | name (str): Exp name. 12 | logger (dict): Contains 'print_freq' (str) for logger interval. 13 | train (dict): Contains 'niter' (int) for total iters. 14 | use_tb_logger (bool): Use tensorboard logger. 15 | start_iter (int): Start iter. Default: 1. 16 | tb_logger (obj:`tb_logger`): Tensorboard logger. 
Default: None. 17 | """ 18 | 19 | def __init__(self, opt, start_iter=1, tb_logger=None): 20 | self.exp_name = opt['name'] 21 | self.interval = opt['print_freq'] 22 | self.start_iter = start_iter 23 | self.max_iters = opt['max_iters'] 24 | self.use_tb_logger = opt['use_tb_logger'] 25 | self.tb_logger = tb_logger 26 | self.start_time = time.time() 27 | self.logger = get_root_logger() 28 | 29 | def __call__(self, log_vars): 30 | """Format logging message. 31 | 32 | Args: 33 | log_vars (dict): It contains the following keys: 34 | epoch (int): Epoch number. 35 | iter (int): Current iter. 36 | lrs (list): List for learning rates. 37 | 38 | time (float): Iter time. 39 | data_time (float): Data time for each iter. 40 | """ 41 | # epoch, iter, learning rates 42 | epoch = log_vars.pop('epoch') 43 | current_iter = log_vars.pop('iter') 44 | lrs = log_vars.pop('lrs') 45 | 46 | message = (f'[{self.exp_name[:5]}..][epoch:{epoch:3d}, ' 47 | f'iter:{current_iter:8,d}, lr:(') 48 | for v in lrs: 49 | message += f'{v:.3e},' 50 | message += ')] ' 51 | 52 | # time and estimated time 53 | if 'time' in log_vars.keys(): 54 | iter_time = log_vars.pop('time') 55 | data_time = log_vars.pop('data_time') 56 | 57 | total_time = time.time() - self.start_time 58 | time_sec_avg = total_time / (current_iter - self.start_iter + 1) 59 | eta_sec = time_sec_avg * (self.max_iters - current_iter - 1) 60 | eta_str = str(datetime.timedelta(seconds=int(eta_sec))) 61 | message += f'[eta: {eta_str}, ' 62 | message += f'time: {iter_time:.3f}, data_time: {data_time:.3f}] ' 63 | 64 | # other items, especially losses 65 | for k, v in log_vars.items(): 66 | message += f'{k}: {v:.4e} ' 67 | # tensorboard logger 68 | if self.use_tb_logger and 'debug' not in self.exp_name: 69 | self.tb_logger.add_scalar(k, v, current_iter) 70 | 71 | self.logger.info(message) 72 | 73 | 74 | def init_tb_logger(log_dir): 75 | from torch.utils.tensorboard import SummaryWriter 76 | tb_logger = SummaryWriter(log_dir=log_dir) 77 | return tb_logger 78 | 79 | 80 | def get_root_logger(logger_name='base', log_level=logging.INFO, log_file=None): 81 | """Get the root logger. 82 | 83 | The logger will be initialized if it has not been initialized. By default a 84 | StreamHandler will be added. If `log_file` is specified, a FileHandler will 85 | also be added. 86 | 87 | Args: 88 | logger_name (str): root logger name. Default: base. 89 | log_file (str | None): The log filename. If specified, a FileHandler 90 | will be added to the root logger. 91 | log_level (int): The root logger level. Note that only the process of 92 | rank 0 is affected, while other processes will set the level to 93 | "Error" and be silent most of the time. 94 | 95 | Returns: 96 | logging.Logger: The root logger. 
97 | """ 98 | logger = logging.getLogger(logger_name) 99 | # if the logger has been initialized, just return it 100 | if logger.hasHandlers(): 101 | return logger 102 | 103 | format_str = '%(asctime)s.%(msecs)03d - %(levelname)s: %(message)s' 104 | logging.basicConfig(format=format_str, level=log_level) 105 | 106 | if log_file is not None: 107 | file_handler = logging.FileHandler(log_file, 'w') 108 | file_handler.setFormatter(logging.Formatter(format_str)) 109 | file_handler.setLevel(log_level) 110 | logger.addHandler(file_handler) 111 | 112 | return logger 113 | -------------------------------------------------------------------------------- /utils/options.py: -------------------------------------------------------------------------------- 1 | import os 2 | import os.path as osp 3 | from collections import OrderedDict 4 | 5 | import yaml 6 | 7 | 8 | def ordered_yaml(): 9 | """Support OrderedDict for yaml. 10 | 11 | Returns: 12 | yaml Loader and Dumper. 13 | """ 14 | try: 15 | from yaml import CDumper as Dumper 16 | from yaml import CLoader as Loader 17 | except ImportError: 18 | from yaml import Dumper, Loader 19 | 20 | _mapping_tag = yaml.resolver.BaseResolver.DEFAULT_MAPPING_TAG 21 | 22 | def dict_representer(dumper, data): 23 | return dumper.represent_dict(data.items()) 24 | 25 | def dict_constructor(loader, node): 26 | return OrderedDict(loader.construct_pairs(node)) 27 | 28 | Dumper.add_representer(OrderedDict, dict_representer) 29 | Loader.add_constructor(_mapping_tag, dict_constructor) 30 | return Loader, Dumper 31 | 32 | 33 | def parse(opt_path, is_train=True): 34 | """Parse option file. 35 | 36 | Args: 37 | opt_path (str): Option file path. 38 | is_train (str): Indicate whether in training or not. Default: True. 39 | 40 | Returns: 41 | (dict): Options. 42 | """ 43 | with open(opt_path, mode='r') as f: 44 | Loader, _ = ordered_yaml() 45 | opt = yaml.load(f, Loader=Loader) 46 | 47 | gpu_list = ','.join(str(x) for x in opt['gpu_ids']) 48 | if opt.get('set_CUDA_VISIBLE_DEVICES', None): 49 | os.environ['CUDA_VISIBLE_DEVICES'] = gpu_list 50 | print('export CUDA_VISIBLE_DEVICES=' + gpu_list, flush=True) 51 | else: 52 | print('gpu_list: ', gpu_list, flush=True) 53 | 54 | opt['is_train'] = is_train 55 | 56 | # paths 57 | opt['path'] = {} 58 | opt['path']['root'] = osp.abspath( 59 | osp.join(__file__, osp.pardir, osp.pardir)) 60 | if is_train: 61 | if opt.get('debug_path', None): 62 | experiments_path = 'experiments_debug' 63 | else: 64 | experiments_path = 'experiments' 65 | experiments_root = osp.join(opt['path']['root'], experiments_path, 66 | opt['name']) 67 | opt['path']['experiments_root'] = experiments_root 68 | opt['path']['models'] = osp.join(experiments_root, 'models') 69 | opt['path']['log'] = experiments_root 70 | opt['path']['visualization'] = osp.join(experiments_root, 71 | 'visualization') 72 | 73 | # change some options for debug mode 74 | if 'debug' in opt['name']: 75 | opt['debug'] = True 76 | opt['val_freq'] = 1 77 | opt['print_freq'] = 1 78 | opt['save_checkpoint_freq'] = 1 79 | else: # test 80 | results_root = osp.join(opt['path']['root'], 'results', opt['name']) 81 | opt['path']['results_root'] = results_root 82 | opt['path']['log'] = results_root 83 | opt['path']['visualization'] = osp.join(results_root, 'visualization') 84 | 85 | return opt 86 | 87 | 88 | def dict2str(opt, indent_level=1): 89 | """dict to string for printing options. 90 | 91 | Args: 92 | opt (dict): Option dict. 93 | indent_level (int): Indent level. Default: 1. 
94 | 95 | Return: 96 | (str): Option string for printing. 97 | """ 98 | msg = '' 99 | for k, v in opt.items(): 100 | if isinstance(v, dict): 101 | msg += ' ' * (indent_level * 2) + k + ':[\n' 102 | msg += dict2str(v, indent_level + 1) 103 | msg += ' ' * (indent_level * 2) + ']\n' 104 | else: 105 | msg += ' ' * (indent_level * 2) + k + ': ' + str(v) + '\n' 106 | return msg 107 | 108 | 109 | class NoneDict(dict): 110 | """None dict. It will return none if key is not in the dict.""" 111 | 112 | def __missing__(self, key): 113 | return None 114 | 115 | 116 | def dict_to_nonedict(opt): 117 | """Convert to NoneDict, which returns None for missing keys. 118 | 119 | Args: 120 | opt (dict): Option dict. 121 | 122 | Returns: 123 | (dict): NoneDict for options. 124 | """ 125 | if isinstance(opt, dict): 126 | new_opt = dict() 127 | for key, sub_opt in opt.items(): 128 | new_opt[key] = dict_to_nonedict(sub_opt) 129 | return NoneDict(**new_opt) 130 | elif isinstance(opt, list): 131 | return [dict_to_nonedict(sub_opt) for sub_opt in opt] 132 | else: 133 | return opt 134 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | # TexFit: Text-Driven Fashion Image Editing with Diffusion Models 2 | 3 | ![teaser](docs/teaser.png) 4 | ### TexFit: Text-Driven Fashion Image Editing with Diffusion Models (AAAI 2024) 5 |
6 | Abstract: Fashion image editing aims to edit an input image to obtain richer or distinct visual clothing matching effects. Existing global fashion image editing methods struggle to achieve rich outfit combination effects, while local fashion image editing is better aligned with the needs of diverse and personalized outfit matching. Local editing techniques typically depend on text and auxiliary modalities (e.g., human poses, human keypoints, garment sketches, etc.) for image manipulation, where the auxiliary modalities essentially assist in locating the editing region. Since these auxiliary modalities usually involve additional effort in practical application scenarios, text-driven fashion image editing offers high flexibility. In this paper, we propose TexFit, a Text-driven Fashion image Editing method using diffusion models, which performs local image editing with only easily accessible text. Our approach employs a text-based editing region location module to predict a precise editing region in the fashion image. Then, we take the predicted region, together with the text prompt, as the generation condition of diffusion models to achieve precise local editing of fashion images while keeping the rest of the image intact. In addition, previous fashion datasets usually focus on global descriptions, lacking the local descriptive information that can guide precise local editing. Therefore, we develop a new DFMM-Spotlight dataset using region extraction and attribute combination strategies. It focuses locally on clothes and accessories, enabling local editing with text input. Experimental results on the DFMM-Spotlight dataset demonstrate the effectiveness of our model.
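
In short, inference runs in two stages. The sketch below is condensed from `pipeline.py` in this repository; the names `model`, `image_tensor`, `image_pil`, `text`, and `texfit_model_path` are placeholders for a loaded ERLM model, the preprocessed and original input image, the editing prompt, and the fine-tuned TexFit weights.

```python
import torch, clip
from PIL import Image
from diffusers import StableDiffusionInpaintPipeline

# Stage I: the editing region location module (ERLM) predicts a binary mask
# of the region referred to by the text prompt.
tokens = clip.tokenize([text]).to(model.device)
with torch.no_grad():
    text_emb = model.clip.encode_text(tokens)
    seg_logits = model.decoder(model.encoder(image_tensor, text_emb))
region = Image.fromarray((seg_logits.argmax(dim=1)[0].cpu().numpy() * 255).astype('uint8'))

# Stage II: the predicted region and the same prompt condition a fine-tuned
# inpainting diffusion model, so only the located region is regenerated.
pipe = StableDiffusionInpaintPipeline.from_pretrained(
    texfit_model_path, torch_dtype=torch.float16, safety_checker=None,
    requires_safety_checker=False).to('cuda')
edited = pipe(prompt=[text], image=image_pil, mask_image=region,
              height=512, width=256, num_inference_steps=50).images[0]
result = Image.composite(edited, image_pil, region)  # keep the rest of the image intact
```
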
8 | 9 | ### Setup 10 | 11 | Initialize a [conda](https://docs.conda.io/en/latest) environment named texfit by running: 12 | ```shell 13 | conda env create -f environment.yaml 14 | conda activate texfit 15 | 16 | # install mmcv and mmsegmentation 17 | pip install -U openmim 18 | mim install mmcv==1.2.1 19 | mim install mmsegmentation==0.9.0 20 | ``` 21 | 22 | And then initialize an [🤗Accelerate](https://github.com/huggingface/accelerate/) environment with: 23 | 24 | ```shell 25 | accelerate config 26 | ``` 27 | 28 | ### Data Preparation 29 | 30 | You need to download DFMM-Spotlight dataset from [Google Drive](https://drive.google.com/file/d/1AJBWrOENyssJX1zK6VtbT-mMC8_xXbR_/view?usp=sharing) and unzip to your own path `/path/to/DFMM-Spotlight`. The dataset folder structure should be as follows: 31 | 32 | ``` 33 | DFMM-Spotlight 34 | ├── train_images 35 | │   ├── MEN-Denim-id_00000080-01_7_additional.png 36 | │   ├── ....... 37 | │   └── WOMEN-Tees_Tanks-id_00007979-04_4_full.png 38 | ├── test_images 39 | │   ├── MEN-Denim-id_00000089-03_7_additional.png 40 | │   ├── ....... 41 | │   └── WOMEN-Tees_Tanks-id_00007970-01_7_additional.png 42 | ├── mask 43 | │   ├── MEN-Denim-id_00000080-01_7_additional_mask_0.png 44 | │   ├── ....... 45 | │   └── WOMEN-Tees_Tanks-id_00007979-04_4_full_mask_0.png 46 | └── mask_ann 47 |    ├── train_ann_file.jsonl 48 |    └── test_ann_file.jsonl 49 | ``` 50 | 51 | ### Training and Inference 52 | 53 | > [!IMPORTANT] 54 | > Replace all the `/path/to` paths in the code and configuration files with real paths. 55 | > `/path/to` paths exist in all the configuration files under the folder `configs` and `dataset/dfmm_spotlight_hf/dfmm_spotlight_hf.py`. 56 | 57 | #### Train the ERLM (Stage I) 58 | 59 | Train the editing region location module ERLM with the following command: 60 | 61 | ```shell 62 | CUDA_VISIBLE_DEVICES=0 python train_erlm.py --opt ./configs/region_gen.yml 63 | ``` 64 | 65 | #### Train the TexFit (Stage II) 66 | 67 | Train the local fashion image editing model TexFit with the following command: 68 | 69 | ```shell 70 | bash train_texfit.sh 71 | ``` 72 | 73 | #### Local Fashion Image Editing 74 | 75 | Once the ERLM and TexFit are trained, you can edit a fashion image locally by running the following command: 76 | 77 | ```shell 78 | CUDA_VISIBLE_DEVICES=0 python pipeline.py \ 79 | --opt ./configs/region_gen.yml \ 80 | --img_path /path/to/your_fashion_image_path \ 81 | --output_path /path/to/edited_image_saving_path \ 82 | --text_prompt the_editing_text_prompt \ 83 | --erlm_model_path /path/to/trained_erlm_model_path \ 84 | --texfit_model_path /path/to/trained_texfit_model_path 85 | ``` 86 | 87 | For example: 88 | 89 | ```shell 90 | CUDA_VISIBLE_DEVICES=0 python pipeline.py \ 91 | --opt ./configs/region_gen.yml \ 92 | --img_path examples/MEN-Denim-id_00000089-03_7_additional.png \ 93 | --output_path ./example_output.png \ 94 | --text_prompt 'denim blue lower clothing' \ 95 | --erlm_model_path experiments/region_gen/models/region_generation_epoch60.pth \ 96 | --texfit_model_path sd-model-finetuned/texfit-model 97 | ``` 98 | 99 | ### Pre-trained Models 100 | 101 | You can download the pre-trained ERLM and TexFit model from [Google Drive](https://drive.google.com/drive/folders/1-bMjvtbY3X3TGoQXCjw3Bt9-Jv_lJomK?usp=sharing). 
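
If you prepare or repackage DFMM-Spotlight yourself, note that both `dataset/dfmm_spotlight.py` and the Hugging Face loading script in `dataset/dfmm_spotlight_hf/` read `mask_ann/*.jsonl` line by line and expect each row to carry `image`, `mask`, and `text` keys. Below is a minimal, optional sanity-check sketch for a freshly downloaded copy; the example row in the comment is illustrative only.

```python
import os
import jsonlines

root = '/path/to/DFMM-Spotlight'  # same placeholder root as in the config files

# A typical annotation row looks like (values illustrative):
# {"image": "MEN-Denim-id_00000080-01_7_additional.png",
#  "mask": "MEN-Denim-id_00000080-01_7_additional_mask_0.png",
#  "text": "denim blue lower clothing"}
with jsonlines.open(os.path.join(root, 'mask_ann', 'train_ann_file.jsonl'), 'r') as reader:
    for i, row in enumerate(reader):
        assert os.path.isfile(os.path.join(root, 'train_images', row['image']))
        assert os.path.isfile(os.path.join(root, 'mask', row['mask']))
        assert isinstance(row['text'], str) and row['text']
        if i >= 99:  # the first hundred rows are enough for a quick check
            break
print('annotation files look consistent')
```
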
102 | 103 | ### Citation 104 | 105 | If you find this paper or the code useful for your research, please consider citing: 106 | 107 | ```bibtex 108 | @inproceedings{wang2024texfit, 109 | title={TexFit: Text-Driven Fashion Image Editing with Diffusion Models}, 110 | author={Wang, Tongxin and Ye, Mang}, 111 | booktitle={Proceedings of the AAAI Conference on Artificial Intelligence}, 112 | volume={38}, 113 | number={9}, 114 | pages={10198--10206}, 115 | year={2024} 116 | } 117 | ``` 118 | 119 | ### Acknowledgments 120 | 121 | Our code is developed based on [🤗Diffusers](https://github.com/huggingface/diffusers) and [Text2Human](https://github.com/yumingj/Text2Human). Thanks for their open source contributions. -------------------------------------------------------------------------------- /models/erlm_model.py: -------------------------------------------------------------------------------- 1 | import logging 2 | import math 3 | from collections import OrderedDict 4 | 5 | import mmcv 6 | import numpy as np 7 | import torch 8 | import clip 9 | import os 10 | 11 | from models.archs.fcn_arch import FCNHead 12 | from models.archs.unet_arch import AttrUNet 13 | from models.losses.accuracy import accuracy 14 | from models.losses.cross_entropy_loss import CrossEntropyLoss 15 | 16 | logger = logging.getLogger('base') 17 | 18 | 19 | class ERLM(): 20 | """Editing Region Generation model. 21 | """ 22 | 23 | def __init__(self, opt): 24 | self.opt = opt 25 | self.device = torch.device('cuda') 26 | self.is_train = opt['is_train'] 27 | 28 | clip_model, _ = clip.load('ViT-B/32', device=torch.device("cpu")) 29 | self.clip = clip_model.to(self.device) 30 | self.encoder = AttrUNet( 31 | in_channels=opt['encoder_in_channels'], attr_embedding=opt['text_embedding_dim']).to(self.device) 32 | self.decoder = FCNHead( 33 | in_channels=opt['fc_in_channels'], 34 | in_index=opt['fc_in_index'], 35 | channels=opt['fc_channels'], 36 | num_convs=opt['fc_num_convs'], 37 | concat_input=opt['fc_concat_input'], 38 | dropout_ratio=opt['fc_dropout_ratio'], 39 | num_classes=opt['fc_num_classes'], 40 | align_corners=opt['fc_align_corners'], 41 | ).to(self.device) 42 | 43 | self.init_training_settings() 44 | self.palette = [[0, 0, 0], [255, 255, 255]] 45 | 46 | def init_training_settings(self): 47 | optim_params = [] 48 | 49 | for v in self.encoder.parameters(): 50 | if v.requires_grad: 51 | optim_params.append(v) 52 | for v in self.decoder.parameters(): 53 | if v.requires_grad: 54 | optim_params.append(v) 55 | # set up optimizers 56 | self.optimizer = torch.optim.Adam( 57 | optim_params, 58 | self.opt['lr'], 59 | weight_decay=self.opt['weight_decay']) 60 | self.log_dict = OrderedDict() 61 | self.entropy_loss = CrossEntropyLoss().to(self.device) 62 | 63 | def feed_data(self, data): 64 | self.image = data['image'].to(self.device) 65 | self.mask = data['mask'].to(self.device) 66 | text = data['text'] 67 | text_inputs = torch.cat([clip.tokenize(text)]).to(self.device) 68 | with torch.no_grad(): 69 | self.text = self.clip.encode_text(text_inputs) 70 | 71 | def optimize_parameters(self): 72 | self.encoder.train() 73 | self.decoder.train() 74 | 75 | self.text_enc = self.encoder(self.image, self.text) 76 | self.seg_logits = self.decoder(self.text_enc) 77 | 78 | loss = self.entropy_loss(self.seg_logits, self.mask) 79 | 80 | self.optimizer.zero_grad() 81 | loss.backward() 82 | self.optimizer.step() 83 | 84 | self.log_dict['loss_total'] = loss 85 | 86 | def inference(self, data_loader, save_dir): 87 | self.encoder.eval() 88 | 
self.decoder.eval() 89 | 90 | acc = 0 91 | num = 0 92 | 93 | for _, data in enumerate(data_loader): 94 | image = data['image'].to(self.device) 95 | text = data['text'] 96 | text_inputs = torch.cat([clip.tokenize(text)]).to(self.device) 97 | mask = data['mask'].to(self.device) 98 | img_name = data['img_name'] 99 | 100 | num += image.size(0) 101 | with torch.no_grad(): 102 | text_embedding = self.clip.encode_text(text_inputs) 103 | text_enc = self.encoder(image, text_embedding) 104 | seg_logits = self.decoder(text_enc) 105 | seg_pred = seg_logits.argmax(dim=1) 106 | acc += accuracy(seg_logits, mask) 107 | palette_label = self.palette_result(mask.cpu().numpy()) 108 | palette_pred = self.palette_result(seg_pred.cpu().numpy()) 109 | image_numpy = image[0].cpu().numpy().astype(np.uint8).transpose(1, 2, 0) 110 | image_numpy = image_numpy[..., ::-1] 111 | concat_result = np.concatenate( 112 | (image_numpy, palette_pred, palette_label), axis=1) 113 | img_name_base, img_name_ext = os.path.splitext(img_name[0]) 114 | mmcv.imwrite(concat_result, f'{save_dir}/{img_name_base}_{text[0]}{img_name_ext}') 115 | 116 | self.encoder.train() 117 | self.decoder.train() 118 | return (acc / num).item() 119 | 120 | def get_current_log(self): 121 | return self.log_dict 122 | 123 | def update_learning_rate(self, epoch): 124 | """Update learning rate. 125 | 126 | Args: 127 | current_iter (int): Current iteration. 128 | warmup_iter (int): Warmup iter numbers. -1 for no warmup. 129 | Default: -1. 130 | """ 131 | lr = self.optimizer.param_groups[0]['lr'] 132 | 133 | if self.opt['lr_decay'] == 'step': 134 | lr = self.opt['lr'] * ( 135 | self.opt['gamma']**(epoch // self.opt['step'])) 136 | elif self.opt['lr_decay'] == 'cos': 137 | lr = self.opt['lr'] * ( 138 | 1 + math.cos(math.pi * epoch / self.opt['num_epochs'])) / 2 139 | elif self.opt['lr_decay'] == 'linear': 140 | lr = self.opt['lr'] * (1 - epoch / self.opt['num_epochs']) 141 | elif self.opt['lr_decay'] == 'linear2exp': 142 | if epoch < self.opt['turning_point'] + 1: 143 | # learning rate decay as 95% 144 | # at the turning point (1 / 95% = 1.0526) 145 | lr = self.opt['lr'] * ( 146 | 1 - epoch / int(self.opt['turning_point'] * 1.0526)) 147 | else: 148 | lr *= self.opt['gamma'] 149 | elif self.opt['lr_decay'] == 'schedule': 150 | if epoch in self.opt['schedule']: 151 | lr *= self.opt['gamma'] 152 | else: 153 | raise ValueError('Unknown lr mode {}'.format(self.opt['lr_decay'])) 154 | # set learning rate 155 | for param_group in self.optimizer.param_groups: 156 | param_group['lr'] = lr 157 | 158 | return lr 159 | 160 | def save_network(self, save_path): 161 | """Save networks. 
162 | """ 163 | 164 | save_dict = {} 165 | save_dict['encoder'] = self.encoder.state_dict() 166 | save_dict['decoder'] = self.decoder.state_dict() 167 | 168 | torch.save(save_dict, save_path) 169 | 170 | def load_network(self): 171 | checkpoint = torch.load(self.opt['pretrained_model_path']) 172 | 173 | self.encoder.load_state_dict( 174 | checkpoint['encoder'], strict=True) 175 | self.encoder.eval() 176 | 177 | self.decoder.load_state_dict( 178 | checkpoint['decoder'], strict=True) 179 | self.decoder.eval() 180 | 181 | def palette_result(self, result): 182 | seg = result[0] 183 | palette = np.array(self.palette) 184 | assert palette.shape[1] == 3 185 | assert len(palette.shape) == 2 186 | color_seg = np.zeros((seg.shape[0], seg.shape[1], 3), dtype=np.uint8) 187 | for label, color in enumerate(palette): 188 | color_seg[seg == label, :] = color 189 | # convert to BGR 190 | color_seg = color_seg[..., ::-1] 191 | return color_seg 192 | -------------------------------------------------------------------------------- /models/losses/cross_entropy_loss.py: -------------------------------------------------------------------------------- 1 | import torch 2 | import torch.nn as nn 3 | import torch.nn.functional as F 4 | 5 | 6 | def reduce_loss(loss, reduction): 7 | """Reduce loss as specified. 8 | 9 | Args: 10 | loss (Tensor): Elementwise loss tensor. 11 | reduction (str): Options are "none", "mean" and "sum". 12 | 13 | Return: 14 | Tensor: Reduced loss tensor. 15 | """ 16 | reduction_enum = F._Reduction.get_enum(reduction) 17 | # none: 0, elementwise_mean:1, sum: 2 18 | if reduction_enum == 0: 19 | return loss 20 | elif reduction_enum == 1: 21 | return loss.mean() 22 | elif reduction_enum == 2: 23 | return loss.sum() 24 | 25 | 26 | def weight_reduce_loss(loss, weight=None, reduction='mean', avg_factor=None): 27 | """Apply element-wise weight and reduce loss. 28 | 29 | Args: 30 | loss (Tensor): Element-wise loss. 31 | weight (Tensor): Element-wise weights. 32 | reduction (str): Same as built-in losses of PyTorch. 33 | avg_factor (float): Avarage factor when computing the mean of losses. 34 | 35 | Returns: 36 | Tensor: Processed loss values. 37 | """ 38 | # if weight is specified, apply element-wise weight 39 | if weight is not None: 40 | assert weight.dim() == loss.dim() 41 | if weight.dim() > 1: 42 | assert weight.size(1) == 1 or weight.size(1) == loss.size(1) 43 | loss = loss * weight 44 | 45 | # if avg_factor is not specified, just reduce the loss 46 | if avg_factor is None: 47 | loss = reduce_loss(loss, reduction) 48 | else: 49 | # if reduction is mean, then average the loss by avg_factor 50 | if reduction == 'mean': 51 | loss = loss.sum() / avg_factor 52 | # if reduction is 'none', then do nothing, otherwise raise an error 53 | elif reduction != 'none': 54 | raise ValueError('avg_factor can not be used with reduction="sum"') 55 | return loss 56 | 57 | 58 | def cross_entropy(pred, 59 | label, 60 | weight=None, 61 | class_weight=None, 62 | reduction='mean', 63 | avg_factor=None, 64 | ignore_index=-100): 65 | """The wrapper function for :func:`F.cross_entropy`""" 66 | # class_weight is a manual rescaling weight given to each class. 
67 | # If given, has to be a Tensor of size C element-wise losses 68 | loss = F.cross_entropy( 69 | pred, 70 | label, 71 | weight=class_weight, 72 | reduction='none', 73 | ignore_index=ignore_index) 74 | 75 | # apply weights and do the reduction 76 | if weight is not None: 77 | weight = weight.float() 78 | loss = weight_reduce_loss( 79 | loss, weight=weight, reduction=reduction, avg_factor=avg_factor) 80 | 81 | return loss 82 | 83 | 84 | def _expand_onehot_labels(labels, label_weights, target_shape, ignore_index): 85 | """Expand onehot labels to match the size of prediction.""" 86 | bin_labels = labels.new_zeros(target_shape) 87 | valid_mask = (labels >= 0) & (labels != ignore_index) 88 | inds = torch.nonzero(valid_mask, as_tuple=True) 89 | 90 | if inds[0].numel() > 0: 91 | if labels.dim() == 3: 92 | bin_labels[inds[0], labels[valid_mask], inds[1], inds[2]] = 1 93 | else: 94 | bin_labels[inds[0], labels[valid_mask]] = 1 95 | 96 | valid_mask = valid_mask.unsqueeze(1).expand(target_shape).float() 97 | if label_weights is None: 98 | bin_label_weights = valid_mask 99 | else: 100 | bin_label_weights = label_weights.unsqueeze(1).expand(target_shape) 101 | bin_label_weights *= valid_mask 102 | 103 | return bin_labels, bin_label_weights 104 | 105 | 106 | def binary_cross_entropy(pred, 107 | label, 108 | weight=None, 109 | reduction='mean', 110 | avg_factor=None, 111 | class_weight=None, 112 | ignore_index=255): 113 | """Calculate the binary CrossEntropy loss. 114 | 115 | Args: 116 | pred (torch.Tensor): The prediction with shape (N, 1). 117 | label (torch.Tensor): The learning label of the prediction. 118 | weight (torch.Tensor, optional): Sample-wise loss weight. 119 | reduction (str, optional): The method used to reduce the loss. 120 | Options are "none", "mean" and "sum". 121 | avg_factor (int, optional): Average factor that is used to average 122 | the loss. Defaults to None. 123 | class_weight (list[float], optional): The weight for each class. 124 | ignore_index (int | None): The label index to be ignored. Default: 255 125 | 126 | Returns: 127 | torch.Tensor: The calculated loss 128 | """ 129 | if pred.dim() != label.dim(): 130 | assert (pred.dim() == 2 and label.dim() == 1) or ( 131 | pred.dim() == 4 and label.dim() == 3), \ 132 | 'Only pred shape [N, C], label shape [N] or pred shape [N, C, ' \ 133 | 'H, W], label shape [N, H, W] are supported' 134 | label, weight = _expand_onehot_labels(label, weight, pred.shape, 135 | ignore_index) 136 | 137 | # weighted element-wise losses 138 | if weight is not None: 139 | weight = weight.float() 140 | loss = F.binary_cross_entropy_with_logits( 141 | pred, label.float(), pos_weight=class_weight, reduction='none') 142 | # do the reduction for the weighted loss 143 | loss = weight_reduce_loss( 144 | loss, weight, reduction=reduction, avg_factor=avg_factor) 145 | 146 | return loss 147 | 148 | 149 | def mask_cross_entropy(pred, 150 | target, 151 | label, 152 | reduction='mean', 153 | avg_factor=None, 154 | class_weight=None, 155 | ignore_index=None): 156 | """Calculate the CrossEntropy loss for masks. 157 | 158 | Args: 159 | pred (torch.Tensor): The prediction with shape (N, C), C is the number 160 | of classes. 161 | target (torch.Tensor): The learning label of the prediction. 162 | label (torch.Tensor): ``label`` indicates the class label of the mask' 163 | corresponding object. This will be used to select the mask in the 164 | of the class which the object belongs to when the mask prediction 165 | if not class-agnostic. 
166 | reduction (str, optional): The method used to reduce the loss. 167 | Options are "none", "mean" and "sum". 168 | avg_factor (int, optional): Average factor that is used to average 169 | the loss. Defaults to None. 170 | class_weight (list[float], optional): The weight for each class. 171 | ignore_index (None): Placeholder, to be consistent with other loss. 172 | Default: None. 173 | 174 | Returns: 175 | torch.Tensor: The calculated loss 176 | """ 177 | assert ignore_index is None, 'BCE loss does not support ignore_index' 178 | # TODO: handle these two reserved arguments 179 | assert reduction == 'mean' and avg_factor is None 180 | num_rois = pred.size()[0] 181 | inds = torch.arange(0, num_rois, dtype=torch.long, device=pred.device) 182 | pred_slice = pred[inds, label].squeeze(1) 183 | return F.binary_cross_entropy_with_logits( 184 | pred_slice, target, weight=class_weight, reduction='mean')[None] 185 | 186 | 187 | class CrossEntropyLoss(nn.Module): 188 | """CrossEntropyLoss. 189 | 190 | Args: 191 | use_sigmoid (bool, optional): Whether the prediction uses sigmoid 192 | of softmax. Defaults to False. 193 | use_mask (bool, optional): Whether to use mask cross entropy loss. 194 | Defaults to False. 195 | reduction (str, optional): . Defaults to 'mean'. 196 | Options are "none", "mean" and "sum". 197 | class_weight (list[float], optional): Weight of each class. 198 | Defaults to None. 199 | loss_weight (float, optional): Weight of the loss. Defaults to 1.0. 200 | """ 201 | 202 | def __init__(self, 203 | use_sigmoid=False, 204 | use_mask=False, 205 | reduction='mean', 206 | class_weight=None, 207 | loss_weight=1.0): 208 | super(CrossEntropyLoss, self).__init__() 209 | assert (use_sigmoid is False) or (use_mask is False) 210 | self.use_sigmoid = use_sigmoid 211 | self.use_mask = use_mask 212 | self.reduction = reduction 213 | self.loss_weight = loss_weight 214 | self.class_weight = class_weight 215 | 216 | if self.use_sigmoid: 217 | self.cls_criterion = binary_cross_entropy 218 | elif self.use_mask: 219 | self.cls_criterion = mask_cross_entropy 220 | else: 221 | self.cls_criterion = cross_entropy 222 | 223 | def forward(self, 224 | cls_score, 225 | label, 226 | weight=None, 227 | avg_factor=None, 228 | reduction_override=None, 229 | **kwargs): 230 | """Forward function.""" 231 | assert reduction_override in (None, 'none', 'mean', 'sum') 232 | reduction = ( 233 | reduction_override if reduction_override else self.reduction) 234 | if self.class_weight is not None: 235 | class_weight = cls_score.new_tensor(self.class_weight) 236 | else: 237 | class_weight = None 238 | loss_cls = self.loss_weight * self.cls_criterion( 239 | cls_score, 240 | label, 241 | weight, 242 | class_weight=class_weight, 243 | reduction=reduction, 244 | avg_factor=avg_factor, 245 | **kwargs) 246 | return loss_cls 247 | -------------------------------------------------------------------------------- /models/archs/fcn_arch.py: -------------------------------------------------------------------------------- 1 | import torch 2 | import torch.nn as nn 3 | from mmcv.cnn import ConvModule, normal_init 4 | from mmseg.ops import resize 5 | 6 | 7 | class BaseDecodeHead(nn.Module): 8 | """Base class for BaseDecodeHead. 9 | 10 | Args: 11 | in_channels (int|Sequence[int]): Input channels. 12 | channels (int): Channels after modules, before conv_seg. 13 | num_classes (int): Number of classes. 14 | dropout_ratio (float): Ratio of dropout layer. Default: 0.1. 15 | conv_cfg (dict|None): Config of conv layers. Default: None. 
16 | norm_cfg (dict|None): Config of norm layers. Default: None. 17 | act_cfg (dict): Config of activation layers. 18 | Default: dict(type='ReLU') 19 | in_index (int|Sequence[int]): Input feature index. Default: -1 20 | input_transform (str|None): Transformation type of input features. 21 | Options: 'resize_concat', 'multiple_select', None. 22 | 'resize_concat': Multiple feature maps will be resize to the 23 | same size as first one and than concat together. 24 | Usually used in FCN head of HRNet. 25 | 'multiple_select': Multiple feature maps will be bundle into 26 | a list and passed into decode head. 27 | None: Only one select feature map is allowed. 28 | Default: None. 29 | loss_decode (dict): Config of decode loss. 30 | Default: dict(type='CrossEntropyLoss'). 31 | ignore_index (int | None): The label index to be ignored. When using 32 | masked BCE loss, ignore_index should be set to None. Default: 255 33 | sampler (dict|None): The config of segmentation map sampler. 34 | Default: None. 35 | align_corners (bool): align_corners argument of F.interpolate. 36 | Default: False. 37 | """ 38 | 39 | def __init__(self, 40 | in_channels, 41 | channels, 42 | *, 43 | num_classes, 44 | dropout_ratio=0.1, 45 | conv_cfg=None, 46 | norm_cfg=dict(type='BN'), 47 | act_cfg=dict(type='ReLU'), 48 | in_index=-1, 49 | input_transform=None, 50 | ignore_index=255, 51 | align_corners=False): 52 | super(BaseDecodeHead, self).__init__() 53 | self._init_inputs(in_channels, in_index, input_transform) 54 | self.channels = channels 55 | self.num_classes = num_classes 56 | self.dropout_ratio = dropout_ratio 57 | self.conv_cfg = conv_cfg 58 | self.norm_cfg = norm_cfg 59 | self.act_cfg = act_cfg 60 | self.in_index = in_index 61 | 62 | self.ignore_index = ignore_index 63 | self.align_corners = align_corners 64 | 65 | self.conv_seg = nn.Conv2d(channels, num_classes, kernel_size=1) 66 | if dropout_ratio > 0: 67 | self.dropout = nn.Dropout2d(dropout_ratio) 68 | else: 69 | self.dropout = None 70 | 71 | def extra_repr(self): 72 | """Extra repr.""" 73 | s = f'input_transform={self.input_transform}, ' \ 74 | f'ignore_index={self.ignore_index}, ' \ 75 | f'align_corners={self.align_corners}' 76 | return s 77 | 78 | def _init_inputs(self, in_channels, in_index, input_transform): 79 | """Check and initialize input transforms. 80 | 81 | The in_channels, in_index and input_transform must match. 82 | Specifically, when input_transform is None, only single feature map 83 | will be selected. So in_channels and in_index must be of type int. 84 | When input_transform 85 | 86 | Args: 87 | in_channels (int|Sequence[int]): Input channels. 88 | in_index (int|Sequence[int]): Input feature index. 89 | input_transform (str|None): Transformation type of input features. 90 | Options: 'resize_concat', 'multiple_select', None. 91 | 'resize_concat': Multiple feature maps will be resize to the 92 | same size as first one and than concat together. 93 | Usually used in FCN head of HRNet. 94 | 'multiple_select': Multiple feature maps will be bundle into 95 | a list and passed into decode head. 96 | None: Only one select feature map is allowed. 
97 | """ 98 | 99 | if input_transform is not None: 100 | assert input_transform in ['resize_concat', 'multiple_select'] 101 | self.input_transform = input_transform 102 | self.in_index = in_index 103 | if input_transform is not None: 104 | assert isinstance(in_channels, (list, tuple)) 105 | assert isinstance(in_index, (list, tuple)) 106 | assert len(in_channels) == len(in_index) 107 | if input_transform == 'resize_concat': 108 | self.in_channels = sum(in_channels) 109 | else: 110 | self.in_channels = in_channels 111 | else: 112 | assert isinstance(in_channels, int) 113 | assert isinstance(in_index, int) 114 | self.in_channels = in_channels 115 | 116 | def init_weights(self): 117 | """Initialize weights of classification layer.""" 118 | normal_init(self.conv_seg, mean=0, std=0.01) 119 | 120 | def _transform_inputs(self, inputs): 121 | """Transform inputs for decoder. 122 | 123 | Args: 124 | inputs (list[Tensor]): List of multi-level img features. 125 | 126 | Returns: 127 | Tensor: The transformed inputs 128 | """ 129 | 130 | if self.input_transform == 'resize_concat': 131 | inputs = [inputs[i] for i in self.in_index] 132 | upsampled_inputs = [ 133 | resize( 134 | input=x, 135 | size=inputs[0].shape[2:], 136 | mode='bilinear', 137 | align_corners=self.align_corners) for x in inputs 138 | ] 139 | inputs = torch.cat(upsampled_inputs, dim=1) 140 | elif self.input_transform == 'multiple_select': 141 | inputs = [inputs[i] for i in self.in_index] 142 | else: 143 | inputs = inputs[self.in_index] 144 | 145 | return inputs 146 | 147 | def forward(self, inputs): 148 | """Placeholder of forward function.""" 149 | pass 150 | 151 | def cls_seg(self, feat): 152 | """Classify each pixel.""" 153 | if self.dropout is not None: 154 | feat = self.dropout(feat) 155 | output = self.conv_seg(feat) 156 | return output 157 | 158 | 159 | class FCNHead(BaseDecodeHead): 160 | """Fully Convolution Networks for Semantic Segmentation. 161 | 162 | This head is implemented of `FCNNet `_. 163 | 164 | Args: 165 | num_convs (int): Number of convs in the head. Default: 2. 166 | kernel_size (int): The kernel size for convs in the head. Default: 3. 167 | concat_input (bool): Whether concat the input and output of convs 168 | before classification layer. 
169 | """ 170 | 171 | def __init__(self, 172 | num_convs=2, 173 | kernel_size=3, 174 | concat_input=True, 175 | **kwargs): 176 | assert num_convs >= 0 177 | self.num_convs = num_convs 178 | self.concat_input = concat_input 179 | self.kernel_size = kernel_size 180 | super(FCNHead, self).__init__(**kwargs) 181 | if num_convs == 0: 182 | assert self.in_channels == self.channels 183 | 184 | convs = [] 185 | convs.append( 186 | ConvModule( 187 | self.in_channels, 188 | self.channels, 189 | kernel_size=kernel_size, 190 | padding=kernel_size // 2, 191 | conv_cfg=self.conv_cfg, 192 | norm_cfg=self.norm_cfg, 193 | act_cfg=self.act_cfg)) 194 | for i in range(num_convs - 1): 195 | convs.append( 196 | ConvModule( 197 | self.channels, 198 | self.channels, 199 | kernel_size=kernel_size, 200 | padding=kernel_size // 2, 201 | conv_cfg=self.conv_cfg, 202 | norm_cfg=self.norm_cfg, 203 | act_cfg=self.act_cfg)) 204 | if num_convs == 0: 205 | self.convs = nn.Identity() 206 | else: 207 | self.convs = nn.Sequential(*convs) 208 | if self.concat_input: 209 | self.conv_cat = ConvModule( 210 | self.in_channels + self.channels, 211 | self.channels, 212 | kernel_size=kernel_size, 213 | padding=kernel_size // 2, 214 | conv_cfg=self.conv_cfg, 215 | norm_cfg=self.norm_cfg, 216 | act_cfg=self.act_cfg) 217 | 218 | def forward(self, inputs): 219 | """Forward function.""" 220 | x = self._transform_inputs(inputs) 221 | output = self.convs(x) 222 | if self.concat_input: 223 | output = self.conv_cat(torch.cat([x, output], dim=1)) 224 | output = self.cls_seg(output) 225 | return output 226 | 227 | 228 | class MultiHeadFCNHead(nn.Module): 229 | """Fully Convolution Networks for Semantic Segmentation. 230 | 231 | This head is implemented of `FCNNet `_. 232 | 233 | Args: 234 | num_convs (int): Number of convs in the head. Default: 2. 235 | kernel_size (int): The kernel size for convs in the head. Default: 3. 236 | concat_input (bool): Whether concat the input and output of convs 237 | before classification layer. 
238 | """ 239 | 240 | def __init__(self, 241 | in_channels, 242 | channels, 243 | *, 244 | num_classes, 245 | dropout_ratio=0.1, 246 | conv_cfg=None, 247 | norm_cfg=dict(type='BN'), 248 | act_cfg=dict(type='ReLU'), 249 | in_index=-1, 250 | input_transform=None, 251 | ignore_index=255, 252 | align_corners=False, 253 | num_convs=2, 254 | kernel_size=3, 255 | concat_input=True, 256 | num_head=18, 257 | **kwargs): 258 | super(MultiHeadFCNHead, self).__init__() 259 | assert num_convs >= 0 260 | self.num_convs = num_convs 261 | self.concat_input = concat_input 262 | self.kernel_size = kernel_size 263 | self._init_inputs(in_channels, in_index, input_transform) 264 | self.channels = channels 265 | self.num_classes = num_classes 266 | self.dropout_ratio = dropout_ratio 267 | self.conv_cfg = conv_cfg 268 | self.norm_cfg = norm_cfg 269 | self.act_cfg = act_cfg 270 | self.in_index = in_index 271 | self.num_head = num_head 272 | 273 | self.ignore_index = ignore_index 274 | self.align_corners = align_corners 275 | 276 | if dropout_ratio > 0: 277 | self.dropout = nn.Dropout2d(dropout_ratio) 278 | 279 | conv_seg_head_list = [] 280 | for _ in range(self.num_head): 281 | conv_seg_head_list.append( 282 | nn.Conv2d(channels, num_classes, kernel_size=1)) 283 | 284 | self.conv_seg_head_list = nn.ModuleList(conv_seg_head_list) 285 | 286 | self.init_weights() 287 | 288 | if num_convs == 0: 289 | assert self.in_channels == self.channels 290 | 291 | convs_list = [] 292 | conv_cat_list = [] 293 | 294 | for _ in range(self.num_head): 295 | convs = [] 296 | convs.append( 297 | ConvModule( 298 | self.in_channels, 299 | self.channels, 300 | kernel_size=kernel_size, 301 | padding=kernel_size // 2, 302 | conv_cfg=self.conv_cfg, 303 | norm_cfg=self.norm_cfg, 304 | act_cfg=self.act_cfg)) 305 | for _ in range(num_convs - 1): 306 | convs.append( 307 | ConvModule( 308 | self.channels, 309 | self.channels, 310 | kernel_size=kernel_size, 311 | padding=kernel_size // 2, 312 | conv_cfg=self.conv_cfg, 313 | norm_cfg=self.norm_cfg, 314 | act_cfg=self.act_cfg)) 315 | if num_convs == 0: 316 | convs_list.append(nn.Identity()) 317 | else: 318 | convs_list.append(nn.Sequential(*convs)) 319 | if self.concat_input: 320 | conv_cat_list.append( 321 | ConvModule( 322 | self.in_channels + self.channels, 323 | self.channels, 324 | kernel_size=kernel_size, 325 | padding=kernel_size // 2, 326 | conv_cfg=self.conv_cfg, 327 | norm_cfg=self.norm_cfg, 328 | act_cfg=self.act_cfg)) 329 | 330 | self.convs_list = nn.ModuleList(convs_list) 331 | self.conv_cat_list = nn.ModuleList(conv_cat_list) 332 | 333 | def forward(self, inputs): 334 | """Forward function.""" 335 | x = self._transform_inputs(inputs) 336 | 337 | output_list = [] 338 | for head_idx in range(self.num_head): 339 | output = self.convs_list[head_idx](x) 340 | if self.concat_input: 341 | output = self.conv_cat_list[head_idx]( 342 | torch.cat([x, output], dim=1)) 343 | if self.dropout is not None: 344 | output = self.dropout(output) 345 | output = self.conv_seg_head_list[head_idx](output) 346 | output_list.append(output) 347 | 348 | return output_list 349 | 350 | def _init_inputs(self, in_channels, in_index, input_transform): 351 | """Check and initialize input transforms. 352 | 353 | The in_channels, in_index and input_transform must match. 354 | Specifically, when input_transform is None, only single feature map 355 | will be selected. So in_channels and in_index must be of type int. 356 | When input_transform 357 | 358 | Args: 359 | in_channels (int|Sequence[int]): Input channels. 
360 | in_index (int|Sequence[int]): Input feature index. 361 | input_transform (str|None): Transformation type of input features. 362 | Options: 'resize_concat', 'multiple_select', None. 363 | 'resize_concat': Multiple feature maps will be resize to the 364 | same size as first one and than concat together. 365 | Usually used in FCN head of HRNet. 366 | 'multiple_select': Multiple feature maps will be bundle into 367 | a list and passed into decode head. 368 | None: Only one select feature map is allowed. 369 | """ 370 | 371 | if input_transform is not None: 372 | assert input_transform in ['resize_concat', 'multiple_select'] 373 | self.input_transform = input_transform 374 | self.in_index = in_index 375 | if input_transform is not None: 376 | assert isinstance(in_channels, (list, tuple)) 377 | assert isinstance(in_index, (list, tuple)) 378 | assert len(in_channels) == len(in_index) 379 | if input_transform == 'resize_concat': 380 | self.in_channels = sum(in_channels) 381 | else: 382 | self.in_channels = in_channels 383 | else: 384 | assert isinstance(in_channels, int) 385 | assert isinstance(in_index, int) 386 | self.in_channels = in_channels 387 | 388 | def init_weights(self): 389 | """Initialize weights of classification layer.""" 390 | for conv_seg_head in self.conv_seg_head_list: 391 | normal_init(conv_seg_head, mean=0, std=0.01) 392 | 393 | def _transform_inputs(self, inputs): 394 | """Transform inputs for decoder. 395 | 396 | Args: 397 | inputs (list[Tensor]): List of multi-level img features. 398 | 399 | Returns: 400 | Tensor: The transformed inputs 401 | """ 402 | 403 | if self.input_transform == 'resize_concat': 404 | inputs = [inputs[i] for i in self.in_index] 405 | upsampled_inputs = [ 406 | resize( 407 | input=x, 408 | size=inputs[0].shape[2:], 409 | mode='bilinear', 410 | align_corners=self.align_corners) for x in inputs 411 | ] 412 | inputs = torch.cat(upsampled_inputs, dim=1) 413 | elif self.input_transform == 'multiple_select': 414 | inputs = [inputs[i] for i in self.in_index] 415 | else: 416 | inputs = inputs[self.in_index] 417 | 418 | return inputs 419 | -------------------------------------------------------------------------------- /models/archs/unet_arch.py: -------------------------------------------------------------------------------- 1 | import torch 2 | import torch.nn as nn 3 | import torch.utils.checkpoint as cp 4 | from mmcv.cnn import (UPSAMPLE_LAYERS, ConvModule, build_activation_layer, 5 | build_norm_layer, build_upsample_layer, constant_init, 6 | kaiming_init) 7 | from mmcv.runner import load_checkpoint 8 | from mmcv.utils.parrots_wrapper import _BatchNorm 9 | from mmseg.utils import get_root_logger 10 | 11 | 12 | class UpConvBlock(nn.Module): 13 | """Upsample convolution block in decoder for UNet. 14 | 15 | This upsample convolution block consists of one upsample module 16 | followed by one convolution block. The upsample module expands the 17 | high-level low-resolution feature map and the convolution block fuses 18 | the upsampled high-level low-resolution feature map and the low-level 19 | high-resolution feature map from encoder. 20 | 21 | Args: 22 | conv_block (nn.Sequential): Sequential of convolutional layers. 23 | in_channels (int): Number of input channels of the high-level 24 | skip_channels (int): Number of input channels of the low-level 25 | high-resolution feature map from encoder. 26 | out_channels (int): Number of output channels. 27 | num_convs (int): Number of convolutional layers in the conv_block. 28 | Default: 2. 
29 | stride (int): Stride of convolutional layer in conv_block. Default: 1. 30 | dilation (int): Dilation rate of convolutional layer in conv_block. 31 | Default: 1. 32 | with_cp (bool): Use checkpoint or not. Using checkpoint will save some 33 | memory while slowing down the training speed. Default: False. 34 | conv_cfg (dict | None): Config dict for convolution layer. 35 | Default: None. 36 | norm_cfg (dict | None): Config dict for normalization layer. 37 | Default: dict(type='BN'). 38 | act_cfg (dict | None): Config dict for activation layer in ConvModule. 39 | Default: dict(type='ReLU'). 40 | upsample_cfg (dict): The upsample config of the upsample module in 41 | decoder. Default: dict(type='InterpConv'). If the size of 42 | high-level feature map is the same as that of skip feature map 43 | (low-level feature map from encoder), it does not need upsample the 44 | high-level feature map and the upsample_cfg is None. 45 | dcn (bool): Use deformable convoluton in convolutional layer or not. 46 | Default: None. 47 | plugins (dict): plugins for convolutional layers. Default: None. 48 | """ 49 | 50 | def __init__(self, 51 | conv_block, 52 | in_channels, 53 | skip_channels, 54 | out_channels, 55 | num_convs=2, 56 | stride=1, 57 | dilation=1, 58 | with_cp=False, 59 | conv_cfg=None, 60 | norm_cfg=dict(type='BN'), 61 | act_cfg=dict(type='ReLU'), 62 | upsample_cfg=dict(type='InterpConv'), 63 | dcn=None, 64 | plugins=None): 65 | super(UpConvBlock, self).__init__() 66 | assert dcn is None, 'Not implemented yet.' 67 | assert plugins is None, 'Not implemented yet.' 68 | 69 | self.conv_block = conv_block( 70 | in_channels=2 * skip_channels, 71 | out_channels=out_channels, 72 | num_convs=num_convs, 73 | stride=stride, 74 | dilation=dilation, 75 | with_cp=with_cp, 76 | conv_cfg=conv_cfg, 77 | norm_cfg=norm_cfg, 78 | act_cfg=act_cfg, 79 | dcn=None, 80 | plugins=None) 81 | if upsample_cfg is not None: 82 | self.upsample = build_upsample_layer( 83 | cfg=upsample_cfg, 84 | in_channels=in_channels, 85 | out_channels=skip_channels, 86 | with_cp=with_cp, 87 | norm_cfg=norm_cfg, 88 | act_cfg=act_cfg) 89 | else: 90 | self.upsample = ConvModule( 91 | in_channels, 92 | skip_channels, 93 | kernel_size=1, 94 | stride=1, 95 | padding=0, 96 | conv_cfg=conv_cfg, 97 | norm_cfg=norm_cfg, 98 | act_cfg=act_cfg) 99 | 100 | def forward(self, skip, x): 101 | """Forward function.""" 102 | 103 | x = self.upsample(x) 104 | out = torch.cat([skip, x], dim=1) 105 | out = self.conv_block(out) 106 | 107 | return out 108 | 109 | 110 | class BasicConvBlock(nn.Module): 111 | """Basic convolutional block for UNet. 112 | 113 | This module consists of several plain convolutional layers. 114 | 115 | Args: 116 | in_channels (int): Number of input channels. 117 | out_channels (int): Number of output channels. 118 | num_convs (int): Number of convolutional layers. Default: 2. 119 | stride (int): Whether use stride convolution to downsample 120 | the input feature map. If stride=2, it only uses stride convolution 121 | in the first convolutional layer to downsample the input feature 122 | map. Options are 1 or 2. Default: 1. 123 | dilation (int): Whether use dilated convolution to expand the 124 | receptive field. Set dilation rate of each convolutional layer and 125 | the dilation rate of the first convolutional layer is always 1. 126 | Default: 1. 127 | with_cp (bool): Use checkpoint or not. Using checkpoint will save some 128 | memory while slowing down the training speed. Default: False. 
129 | conv_cfg (dict | None): Config dict for convolution layer. 130 | Default: None. 131 | norm_cfg (dict | None): Config dict for normalization layer. 132 | Default: dict(type='BN'). 133 | act_cfg (dict | None): Config dict for activation layer in ConvModule. 134 | Default: dict(type='ReLU'). 135 | dcn (bool): Use deformable convoluton in convolutional layer or not. 136 | Default: None. 137 | plugins (dict): plugins for convolutional layers. Default: None. 138 | """ 139 | 140 | def __init__(self, 141 | in_channels, 142 | out_channels, 143 | num_convs=2, 144 | stride=1, 145 | dilation=1, 146 | with_cp=False, 147 | conv_cfg=None, 148 | norm_cfg=dict(type='BN'), 149 | act_cfg=dict(type='ReLU'), 150 | dcn=None, 151 | plugins=None): 152 | super(BasicConvBlock, self).__init__() 153 | assert dcn is None, 'Not implemented yet.' 154 | assert plugins is None, 'Not implemented yet.' 155 | 156 | self.with_cp = with_cp 157 | convs = [] 158 | for i in range(num_convs): 159 | convs.append( 160 | ConvModule( 161 | in_channels=in_channels if i == 0 else out_channels, 162 | out_channels=out_channels, 163 | kernel_size=3, 164 | stride=stride if i == 0 else 1, 165 | dilation=1 if i == 0 else dilation, 166 | padding=1 if i == 0 else dilation, 167 | conv_cfg=conv_cfg, 168 | norm_cfg=norm_cfg, 169 | act_cfg=act_cfg)) 170 | 171 | self.convs = nn.Sequential(*convs) 172 | 173 | def forward(self, x): 174 | """Forward function.""" 175 | 176 | if self.with_cp and x.requires_grad: 177 | out = cp.checkpoint(self.convs, x) 178 | else: 179 | out = self.convs(x) 180 | return out 181 | 182 | 183 | class DeconvModule(nn.Module): 184 | """Deconvolution upsample module in decoder for UNet (2X upsample). 185 | 186 | This module uses deconvolution to upsample feature map in the decoder 187 | of UNet. 188 | 189 | Args: 190 | in_channels (int): Number of input channels. 191 | out_channels (int): Number of output channels. 192 | with_cp (bool): Use checkpoint or not. Using checkpoint will save some 193 | memory while slowing down the training speed. Default: False. 194 | norm_cfg (dict | None): Config dict for normalization layer. 195 | Default: dict(type='BN'). 196 | act_cfg (dict | None): Config dict for activation layer in ConvModule. 197 | Default: dict(type='ReLU'). 198 | kernel_size (int): Kernel size of the convolutional layer. Default: 4. 199 | """ 200 | 201 | def __init__(self, 202 | in_channels, 203 | out_channels, 204 | with_cp=False, 205 | norm_cfg=dict(type='BN'), 206 | act_cfg=dict(type='ReLU'), 207 | *, 208 | kernel_size=4, 209 | scale_factor=2): 210 | super(DeconvModule, self).__init__() 211 | 212 | assert (kernel_size - scale_factor >= 0) and\ 213 | (kernel_size - scale_factor) % 2 == 0,\ 214 | f'kernel_size should be greater than or equal to scale_factor '\ 215 | f'and (kernel_size - scale_factor) should be even numbers, '\ 216 | f'while the kernel size is {kernel_size} and scale_factor is '\ 217 | f'{scale_factor}.' 
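# Why the assert above guarantees an exact `scale_factor`x upsample:
# ConvTranspose2d output size is (in - 1) * stride - 2 * padding + kernel_size.
# With stride = scale_factor and padding = (kernel_size - scale_factor) // 2
# (computed just below), this simplifies to in * scale_factor; e.g. the default
# kernel_size=4, scale_factor=2 gives stride=2, padding=1 and exactly doubles H and W.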
218 | 219 | stride = scale_factor 220 | padding = (kernel_size - scale_factor) // 2 221 | self.with_cp = with_cp 222 | deconv = nn.ConvTranspose2d( 223 | in_channels, 224 | out_channels, 225 | kernel_size=kernel_size, 226 | stride=stride, 227 | padding=padding) 228 | 229 | norm_name, norm = build_norm_layer(norm_cfg, out_channels) 230 | activate = build_activation_layer(act_cfg) 231 | self.deconv_upsamping = nn.Sequential(deconv, norm, activate) 232 | 233 | def forward(self, x): 234 | """Forward function.""" 235 | 236 | if self.with_cp and x.requires_grad: 237 | out = cp.checkpoint(self.deconv_upsamping, x) 238 | else: 239 | out = self.deconv_upsamping(x) 240 | return out 241 | 242 | 243 | @UPSAMPLE_LAYERS.register_module() 244 | class InterpConv(nn.Module): 245 | """Interpolation upsample module in decoder for UNet. 246 | 247 | This module uses interpolation to upsample feature map in the decoder 248 | of UNet. It consists of one interpolation upsample layer and one 249 | convolutional layer. It can be one interpolation upsample layer followed 250 | by one convolutional layer (conv_first=False) or one convolutional layer 251 | followed by one interpolation upsample layer (conv_first=True). 252 | 253 | Args: 254 | in_channels (int): Number of input channels. 255 | out_channels (int): Number of output channels. 256 | with_cp (bool): Use checkpoint or not. Using checkpoint will save some 257 | memory while slowing down the training speed. Default: False. 258 | norm_cfg (dict | None): Config dict for normalization layer. 259 | Default: dict(type='BN'). 260 | act_cfg (dict | None): Config dict for activation layer in ConvModule. 261 | Default: dict(type='ReLU'). 262 | conv_cfg (dict | None): Config dict for convolution layer. 263 | Default: None. 264 | conv_first (bool): Whether convolutional layer or interpolation 265 | upsample layer first. Default: False. It means interpolation 266 | upsample layer followed by one convolutional layer. 267 | kernel_size (int): Kernel size of the convolutional layer. Default: 1. 268 | stride (int): Stride of the convolutional layer. Default: 1. 269 | padding (int): Padding of the convolutional layer. Default: 1. 270 | upsampe_cfg (dict): Interpolation config of the upsample layer. 271 | Default: dict( 272 | scale_factor=2, mode='bilinear', align_corners=False). 273 | """ 274 | 275 | def __init__(self, 276 | in_channels, 277 | out_channels, 278 | with_cp=False, 279 | norm_cfg=dict(type='BN'), 280 | act_cfg=dict(type='ReLU'), 281 | *, 282 | conv_cfg=None, 283 | conv_first=False, 284 | kernel_size=1, 285 | stride=1, 286 | padding=0, 287 | upsampe_cfg=dict( 288 | scale_factor=2, mode='bilinear', align_corners=False)): 289 | super(InterpConv, self).__init__() 290 | 291 | self.with_cp = with_cp 292 | conv = ConvModule( 293 | in_channels, 294 | out_channels, 295 | kernel_size=kernel_size, 296 | stride=stride, 297 | padding=padding, 298 | conv_cfg=conv_cfg, 299 | norm_cfg=norm_cfg, 300 | act_cfg=act_cfg) 301 | upsample = nn.Upsample(**upsampe_cfg) 302 | if conv_first: 303 | self.interp_upsample = nn.Sequential(conv, upsample) 304 | else: 305 | self.interp_upsample = nn.Sequential(upsample, conv) 306 | 307 | def forward(self, x): 308 | """Forward function.""" 309 | 310 | if self.with_cp and x.requires_grad: 311 | out = cp.checkpoint(self.interp_upsample, x) 312 | else: 313 | out = self.interp_upsample(x) 314 | return out 315 | 316 | 317 | class UNet(nn.Module): 318 | """UNet backbone. 319 | U-Net: Convolutional Networks for Biomedical Image Segmentation. 
320 | https://arxiv.org/pdf/1505.04597.pdf 321 | 322 | Args: 323 | in_channels (int): Number of input image channels. Default" 3. 324 | base_channels (int): Number of base channels of each stage. 325 | The output channels of the first stage. Default: 64. 326 | num_stages (int): Number of stages in encoder, normally 5. Default: 5. 327 | strides (Sequence[int 1 | 2]): Strides of each stage in encoder. 328 | len(strides) is equal to num_stages. Normally the stride of the 329 | first stage in encoder is 1. If strides[i]=2, it uses stride 330 | convolution to downsample in the correspondence encoder stage. 331 | Default: (1, 1, 1, 1, 1). 332 | enc_num_convs (Sequence[int]): Number of convolutional layers in the 333 | convolution block of the correspondence encoder stage. 334 | Default: (2, 2, 2, 2, 2). 335 | dec_num_convs (Sequence[int]): Number of convolutional layers in the 336 | convolution block of the correspondence decoder stage. 337 | Default: (2, 2, 2, 2). 338 | downsamples (Sequence[int]): Whether use MaxPool to downsample the 339 | feature map after the first stage of encoder 340 | (stages: [1, num_stages)). If the correspondence encoder stage use 341 | stride convolution (strides[i]=2), it will never use MaxPool to 342 | downsample, even downsamples[i-1]=True. 343 | Default: (True, True, True, True). 344 | enc_dilations (Sequence[int]): Dilation rate of each stage in encoder. 345 | Default: (1, 1, 1, 1, 1). 346 | dec_dilations (Sequence[int]): Dilation rate of each stage in decoder. 347 | Default: (1, 1, 1, 1). 348 | with_cp (bool): Use checkpoint or not. Using checkpoint will save some 349 | memory while slowing down the training speed. Default: False. 350 | conv_cfg (dict | None): Config dict for convolution layer. 351 | Default: None. 352 | norm_cfg (dict | None): Config dict for normalization layer. 353 | Default: dict(type='BN'). 354 | act_cfg (dict | None): Config dict for activation layer in ConvModule. 355 | Default: dict(type='ReLU'). 356 | upsample_cfg (dict): The upsample config of the upsample module in 357 | decoder. Default: dict(type='InterpConv'). 358 | norm_eval (bool): Whether to set norm layers to eval mode, namely, 359 | freeze running stats (mean and var). Note: Effect on Batch Norm 360 | and its variants only. Default: False. 361 | dcn (bool): Use deformable convolution in convolutional layer or not. 362 | Default: None. 363 | plugins (dict): plugins for convolutional layers. Default: None. 364 | 365 | Notice: 366 | The input image size should be devisible by the whole downsample rate 367 | of the encoder. More detail of the whole downsample rate can be found 368 | in UNet._check_input_devisible. 369 | 370 | """ 371 | 372 | def __init__(self, 373 | in_channels=3, 374 | base_channels=64, 375 | num_stages=5, 376 | strides=(1, 1, 1, 1, 1), 377 | enc_num_convs=(2, 2, 2, 2, 2), 378 | dec_num_convs=(2, 2, 2, 2), 379 | downsamples=(True, True, True, True), 380 | enc_dilations=(1, 1, 1, 1, 1), 381 | dec_dilations=(1, 1, 1, 1), 382 | with_cp=False, 383 | conv_cfg=None, 384 | norm_cfg=dict(type='BN'), 385 | act_cfg=dict(type='ReLU'), 386 | upsample_cfg=dict(type='InterpConv'), 387 | norm_eval=False, 388 | dcn=None, 389 | plugins=None): 390 | super(UNet, self).__init__() 391 | assert dcn is None, 'Not implemented yet.' 392 | assert plugins is None, 'Not implemented yet.' 
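# The six per-stage configs must agree with num_stages, which the asserts below
# enforce: strides, enc_num_convs and enc_dilations have one entry per encoder
# stage (len == num_stages), while dec_num_convs, downsamples and dec_dilations
# describe the decoder stages between them (len == num_stages - 1). The defaults
# num_stages=5, strides=(1, 1, 1, 1, 1), dec_num_convs=(2, 2, 2, 2) satisfy this.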
393 | assert len(strides) == num_stages, \ 394 | 'The length of strides should be equal to num_stages, '\ 395 | f'while the strides is {strides}, the length of '\ 396 | f'strides is {len(strides)}, and the num_stages is '\ 397 | f'{num_stages}.' 398 | assert len(enc_num_convs) == num_stages, \ 399 | 'The length of enc_num_convs should be equal to num_stages, '\ 400 | f'while the enc_num_convs is {enc_num_convs}, the length of '\ 401 | f'enc_num_convs is {len(enc_num_convs)}, and the num_stages is '\ 402 | f'{num_stages}.' 403 | assert len(dec_num_convs) == (num_stages-1), \ 404 | 'The length of dec_num_convs should be equal to (num_stages-1), '\ 405 | f'while the dec_num_convs is {dec_num_convs}, the length of '\ 406 | f'dec_num_convs is {len(dec_num_convs)}, and the num_stages is '\ 407 | f'{num_stages}.' 408 | assert len(downsamples) == (num_stages-1), \ 409 | 'The length of downsamples should be equal to (num_stages-1), '\ 410 | f'while the downsamples is {downsamples}, the length of '\ 411 | f'downsamples is {len(downsamples)}, and the num_stages is '\ 412 | f'{num_stages}.' 413 | assert len(enc_dilations) == num_stages, \ 414 | 'The length of enc_dilations should be equal to num_stages, '\ 415 | f'while the enc_dilations is {enc_dilations}, the length of '\ 416 | f'enc_dilations is {len(enc_dilations)}, and the num_stages is '\ 417 | f'{num_stages}.' 418 | assert len(dec_dilations) == (num_stages-1), \ 419 | 'The length of dec_dilations should be equal to (num_stages-1), '\ 420 | f'while the dec_dilations is {dec_dilations}, the length of '\ 421 | f'dec_dilations is {len(dec_dilations)}, and the num_stages is '\ 422 | f'{num_stages}.' 423 | self.num_stages = num_stages 424 | self.strides = strides 425 | self.downsamples = downsamples 426 | self.norm_eval = norm_eval 427 | 428 | self.encoder = nn.ModuleList() 429 | self.decoder = nn.ModuleList() 430 | 431 | for i in range(num_stages): 432 | enc_conv_block = [] 433 | if i != 0: 434 | if strides[i] == 1 and downsamples[i - 1]: 435 | enc_conv_block.append(nn.MaxPool2d(kernel_size=2)) 436 | upsample = (strides[i] != 1 or downsamples[i - 1]) 437 | self.decoder.append( 438 | UpConvBlock( 439 | conv_block=BasicConvBlock, 440 | in_channels=base_channels * 2**i, 441 | skip_channels=base_channels * 2**(i - 1), 442 | out_channels=base_channels * 2**(i - 1), 443 | num_convs=dec_num_convs[i - 1], 444 | stride=1, 445 | dilation=dec_dilations[i - 1], 446 | with_cp=with_cp, 447 | conv_cfg=conv_cfg, 448 | norm_cfg=norm_cfg, 449 | act_cfg=act_cfg, 450 | upsample_cfg=upsample_cfg if upsample else None, 451 | dcn=None, 452 | plugins=None)) 453 | 454 | enc_conv_block.append( 455 | BasicConvBlock( 456 | in_channels=in_channels, 457 | out_channels=base_channels * 2**i, 458 | num_convs=enc_num_convs[i], 459 | stride=strides[i], 460 | dilation=enc_dilations[i], 461 | with_cp=with_cp, 462 | conv_cfg=conv_cfg, 463 | norm_cfg=norm_cfg, 464 | act_cfg=act_cfg, 465 | dcn=None, 466 | plugins=None)) 467 | self.encoder.append((nn.Sequential(*enc_conv_block))) 468 | in_channels = base_channels * 2**i 469 | 470 | def forward(self, x): 471 | enc_outs = [] 472 | 473 | for enc in self.encoder: 474 | x = enc(x) 475 | enc_outs.append(x) 476 | dec_outs = [x] 477 | for i in reversed(range(len(self.decoder))): 478 | x = self.decoder[i](enc_outs[i], x) 479 | dec_outs.append(x) 480 | 481 | return dec_outs 482 | 483 | def init_weights(self, pretrained=None): 484 | """Initialize the weights in backbone. 485 | 486 | Args: 487 | pretrained (str, optional): Path to pre-trained weights. 
488 | Defaults to None. 489 | """ 490 | if isinstance(pretrained, str): 491 | logger = get_root_logger() 492 | load_checkpoint(self, pretrained, strict=False, logger=logger) 493 | elif pretrained is None: 494 | for m in self.modules(): 495 | if isinstance(m, nn.Conv2d): 496 | kaiming_init(m) 497 | elif isinstance(m, (_BatchNorm, nn.GroupNorm)): 498 | constant_init(m, 1) 499 | else: 500 | raise TypeError('pretrained must be a str or None') 501 | 502 | 503 | class AttrUNet(nn.Module): 504 | """UNet backbone with small modifications for attribute-embedding conditioning. 505 | U-Net: Convolutional Networks for Biomedical Image Segmentation. 506 | https://arxiv.org/pdf/1505.04597.pdf 507 | 508 | Args: 509 | in_channels (int): Number of input image channels. Default: 3. 510 | base_channels (int): Number of base channels of each stage. 511 | The output channels of the first stage. Default: 64. 512 | num_stages (int): Number of stages in encoder, normally 5. Default: 5. 513 | strides (Sequence[int 1 | 2]): Strides of each stage in encoder. 514 | len(strides) is equal to num_stages. Normally the stride of the 515 | first stage in encoder is 1. If strides[i]=2, it uses stride 516 | convolution to downsample in the corresponding encoder stage. 517 | Default: (1, 1, 1, 1, 1). 518 | enc_num_convs (Sequence[int]): Number of convolutional layers in the 519 | convolution block of the corresponding encoder stage. 520 | Default: (2, 2, 2, 2, 2). 521 | dec_num_convs (Sequence[int]): Number of convolutional layers in the 522 | convolution block of the corresponding decoder stage. 523 | Default: (2, 2, 2, 2). 524 | downsamples (Sequence[int]): Whether to use MaxPool to downsample the 525 | feature map after the first stage of encoder 526 | (stages: [1, num_stages)). If the corresponding encoder stage uses 527 | stride convolution (strides[i]=2), it will never use MaxPool to 528 | downsample, even when downsamples[i-1]=True. 529 | Default: (True, True, True, True). 530 | enc_dilations (Sequence[int]): Dilation rate of each stage in encoder. 531 | Default: (1, 1, 1, 1, 1). 532 | dec_dilations (Sequence[int]): Dilation rate of each stage in decoder. 533 | Default: (1, 1, 1, 1). 534 | with_cp (bool): Use checkpoint or not. Using checkpoint will save some 535 | memory while slowing down the training speed. Default: False. 536 | conv_cfg (dict | None): Config dict for convolution layer. 537 | Default: None. 538 | norm_cfg (dict | None): Config dict for normalization layer. 539 | Default: dict(type='BN'). 540 | act_cfg (dict | None): Config dict for activation layer in ConvModule. 541 | Default: dict(type='ReLU'). 542 | upsample_cfg (dict): The upsample config of the upsample module in 543 | decoder. Default: dict(type='InterpConv'). 544 | norm_eval (bool): Whether to set norm layers to eval mode, namely, 545 | freeze running stats (mean and var). Note: Effect on Batch Norm 546 | and its variants only. Default: False. 547 | dcn (bool): Use deformable convolution in convolutional layer or not. 548 | Default: None. 549 | plugins (dict): plugins for convolutional layers. Default: None. 550 | 551 | Notice: 552 | The input image size should be divisible by the whole downsample rate 553 | of the encoder. More detail of the whole downsample rate can be found 554 | in UNet._check_input_devisible. 
555 | 556 | """ 557 | 558 | def __init__(self, 559 | in_channels=3, 560 | base_channels=64, 561 | num_stages=5, 562 | attr_embedding=128, 563 | strides=(1, 1, 1, 1, 1), 564 | enc_num_convs=(2, 2, 2, 2, 2), 565 | dec_num_convs=(2, 2, 2, 2), 566 | downsamples=(True, True, True, True), 567 | enc_dilations=(1, 1, 1, 1, 1), 568 | dec_dilations=(1, 1, 1, 1), 569 | with_cp=False, 570 | conv_cfg=None, 571 | norm_cfg=dict(type='BN'), 572 | act_cfg=dict(type='ReLU'), 573 | upsample_cfg=dict(type='InterpConv'), 574 | norm_eval=False, 575 | dcn=None, 576 | plugins=None): 577 | super(AttrUNet, self).__init__() 578 | assert dcn is None, 'Not implemented yet.' 579 | assert plugins is None, 'Not implemented yet.' 580 | assert len(strides) == num_stages, \ 581 | 'The length of strides should be equal to num_stages, '\ 582 | f'while the strides is {strides}, the length of '\ 583 | f'strides is {len(strides)}, and the num_stages is '\ 584 | f'{num_stages}.' 585 | assert len(enc_num_convs) == num_stages, \ 586 | 'The length of enc_num_convs should be equal to num_stages, '\ 587 | f'while the enc_num_convs is {enc_num_convs}, the length of '\ 588 | f'enc_num_convs is {len(enc_num_convs)}, and the num_stages is '\ 589 | f'{num_stages}.' 590 | assert len(dec_num_convs) == (num_stages-1), \ 591 | 'The length of dec_num_convs should be equal to (num_stages-1), '\ 592 | f'while the dec_num_convs is {dec_num_convs}, the length of '\ 593 | f'dec_num_convs is {len(dec_num_convs)}, and the num_stages is '\ 594 | f'{num_stages}.' 595 | assert len(downsamples) == (num_stages-1), \ 596 | 'The length of downsamples should be equal to (num_stages-1), '\ 597 | f'while the downsamples is {downsamples}, the length of '\ 598 | f'downsamples is {len(downsamples)}, and the num_stages is '\ 599 | f'{num_stages}.' 600 | assert len(enc_dilations) == num_stages, \ 601 | 'The length of enc_dilations should be equal to num_stages, '\ 602 | f'while the enc_dilations is {enc_dilations}, the length of '\ 603 | f'enc_dilations is {len(enc_dilations)}, and the num_stages is '\ 604 | f'{num_stages}.' 605 | assert len(dec_dilations) == (num_stages-1), \ 606 | 'The length of dec_dilations should be equal to (num_stages-1), '\ 607 | f'while the dec_dilations is {dec_dilations}, the length of '\ 608 | f'dec_dilations is {len(dec_dilations)}, and the num_stages is '\ 609 | f'{num_stages}.' 
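# Unlike the plain UNet above, every AttrUNet encoder stage also consumes the
# attribute embedding: forward() broadcasts the (B, attr_embedding) vector to
# (B, attr_embedding, H, W) and concatenates it with the feature map, so each
# stage's BasicConvBlock is built with in_channels + attr_embedding input channels.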
610 | self.num_stages = num_stages 611 | self.strides = strides 612 | self.downsamples = downsamples 613 | self.norm_eval = norm_eval 614 | 615 | self.encoder = nn.ModuleList() 616 | self.decoder = nn.ModuleList() 617 | 618 | for i in range(num_stages): 619 | enc_conv_block = [] 620 | if i != 0: 621 | if strides[i] == 1 and downsamples[i - 1]: 622 | enc_conv_block.append(nn.MaxPool2d(kernel_size=2)) 623 | upsample = (strides[i] != 1 or downsamples[i - 1]) 624 | self.decoder.append( 625 | UpConvBlock( 626 | conv_block=BasicConvBlock, 627 | in_channels=base_channels * 2**i, 628 | skip_channels=base_channels * 2**(i - 1), 629 | out_channels=base_channels * 2**(i - 1), 630 | num_convs=dec_num_convs[i - 1], 631 | stride=1, 632 | dilation=dec_dilations[i - 1], 633 | with_cp=with_cp, 634 | conv_cfg=conv_cfg, 635 | norm_cfg=norm_cfg, 636 | act_cfg=act_cfg, 637 | upsample_cfg=upsample_cfg if upsample else None, 638 | dcn=None, 639 | plugins=None)) 640 | 641 | enc_conv_block.append( 642 | BasicConvBlock( 643 | in_channels=in_channels + attr_embedding, 644 | out_channels=base_channels * 2**i, 645 | num_convs=enc_num_convs[i], 646 | stride=strides[i], 647 | dilation=enc_dilations[i], 648 | with_cp=with_cp, 649 | conv_cfg=conv_cfg, 650 | norm_cfg=norm_cfg, 651 | act_cfg=act_cfg, 652 | dcn=None, 653 | plugins=None)) 654 | self.encoder.append((nn.Sequential(*enc_conv_block))) 655 | in_channels = base_channels * 2**i 656 | 657 | def forward(self, x, attr_embedding): 658 | enc_outs = [] 659 | Be, Ce = attr_embedding.size() 660 | for enc in self.encoder: 661 | _, _, H, W = x.size() 662 | x = enc( 663 | torch.cat([ 664 | x, 665 | attr_embedding.view(Be, Ce, 1, 1).expand((Be, Ce, H, W)) 666 | ], 667 | dim=1)) 668 | enc_outs.append(x) 669 | dec_outs = [x] 670 | for i in reversed(range(len(self.decoder))): 671 | x = self.decoder[i](enc_outs[i], x) 672 | dec_outs.append(x) 673 | 674 | return dec_outs 675 | 676 | def init_weights(self, pretrained=None): 677 | """Initialize the weights in backbone. 678 | 679 | Args: 680 | pretrained (str, optional): Path to pre-trained weights. 681 | Defaults to None. 682 | """ 683 | if isinstance(pretrained, str): 684 | logger = get_root_logger() 685 | load_checkpoint(self, pretrained, strict=False, logger=logger) 686 | elif pretrained is None: 687 | for m in self.modules(): 688 | if isinstance(m, nn.Conv2d): 689 | kaiming_init(m) 690 | elif isinstance(m, (_BatchNorm, nn.GroupNorm)): 691 | constant_init(m, 1) 692 | else: 693 | raise TypeError('pretrained must be a str or None') 694 | -------------------------------------------------------------------------------- /train_texfit.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python 2 | # coding=utf-8 3 | # Copyright 2023 The HuggingFace Inc. team. All rights reserved. 4 | # 5 | # Licensed under the Apache License, Version 2.0 (the "License"); 6 | # you may not use this file except in compliance with the License. 7 | # You may obtain a copy of the License at 8 | # 9 | # http://www.apache.org/licenses/LICENSE-2.0 10 | # 11 | # Unless required by applicable law or agreed to in writing, software 12 | # distributed under the License is distributed on an "AS IS" BASIS, 13 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
14 | # See the License for the specific language governing permissions and 15 | 16 | import argparse 17 | import logging 18 | import math 19 | import os 20 | import random 21 | from pathlib import Path 22 | from typing import Optional 23 | 24 | import accelerate 25 | import datasets 26 | import numpy as np 27 | import torch 28 | import torch.nn.functional as F 29 | import torch.utils.checkpoint 30 | import transformers 31 | from accelerate import Accelerator 32 | from accelerate.logging import get_logger 33 | from accelerate.utils import ProjectConfiguration, set_seed 34 | from datasets import load_dataset 35 | from huggingface_hub import HfFolder, Repository, create_repo, whoami 36 | from packaging import version 37 | from torchvision import transforms 38 | from tqdm.auto import tqdm 39 | from transformers import CLIPTextModel, CLIPTokenizer 40 | 41 | import diffusers 42 | from diffusers import AutoencoderKL, DDPMScheduler, StableDiffusionPipeline, UNet2DConditionModel 43 | from diffusers.optimization import get_scheduler 44 | from diffusers.training_utils import EMAModel 45 | from diffusers.utils import check_min_version, deprecate 46 | from diffusers.utils.import_utils import is_xformers_available 47 | 48 | 49 | # Will error if the minimal version of diffusers is not installed. Remove at your own risks. 50 | check_min_version("0.15.0.dev0") 51 | 52 | logger = get_logger(__name__, log_level="INFO") 53 | 54 | 55 | def parse_args(): 56 | parser = argparse.ArgumentParser(description="Simple example of a training script.") 57 | parser.add_argument( 58 | "--pretrained_model_name_or_path", 59 | type=str, 60 | default=None, 61 | required=True, 62 | help="Path to pretrained model or model identifier from huggingface.co/models.", 63 | ) 64 | parser.add_argument( 65 | "--revision", 66 | type=str, 67 | default=None, 68 | required=False, 69 | help="Revision of pretrained model identifier from huggingface.co/models.", 70 | ) 71 | parser.add_argument( 72 | "--dataset_name", 73 | type=str, 74 | default=None, 75 | help=( 76 | "The name of the Dataset (from the HuggingFace hub) to train on (could be your own, possibly private," 77 | " dataset). It can also be a path pointing to a local copy of a dataset in your filesystem," 78 | " or to a folder containing files that 🤗 Datasets can understand." 79 | ), 80 | ) 81 | parser.add_argument( 82 | "--dataset_config_name", 83 | type=str, 84 | default=None, 85 | help="The config of the Dataset, leave as None if there's only one config.", 86 | ) 87 | parser.add_argument( 88 | "--train_data_dir", 89 | type=str, 90 | default=None, 91 | help=( 92 | "A folder containing the training data. Folder contents must follow the structure described in" 93 | " https://huggingface.co/docs/datasets/image_dataset#imagefolder. In particular, a `metadata.jsonl` file" 94 | " must exist to provide the captions for the images. Ignored if `dataset_name` is specified." 95 | ), 96 | ) 97 | parser.add_argument( 98 | "--image_column", type=str, default="image", help="The column of the dataset containing an image." 99 | ) 100 | parser.add_argument( 101 | "--caption_column", 102 | type=str, 103 | default="text", 104 | help="The column of the dataset containing a caption or a list of captions.", 105 | ) 106 | parser.add_argument( 107 | "--max_train_samples", 108 | type=int, 109 | default=None, 110 | help=( 111 | "For debugging purposes or quicker training, truncate the number of training examples to this " 112 | "value if set." 
113 | ), 114 | ) 115 | parser.add_argument( 116 | "--output_dir", 117 | type=str, 118 | default="sd-model-finetuned", 119 | help="The output directory where the model predictions and checkpoints will be written.", 120 | ) 121 | parser.add_argument( 122 | "--cache_dir", 123 | type=str, 124 | default=None, 125 | help="The directory where the downloaded models and datasets will be stored.", 126 | ) 127 | parser.add_argument("--seed", type=int, default=None, help="A seed for reproducible training.") 128 | parser.add_argument( 129 | "--resolution", 130 | type=int, 131 | default=512, 132 | help=( 133 | "The resolution for input images, all the images in the train/validation dataset will be resized to this" 134 | " resolution" 135 | ), 136 | ) 137 | parser.add_argument( 138 | "--center_crop", 139 | default=False, 140 | action="store_true", 141 | help=( 142 | "Whether to center crop the input images to the resolution. If not set, the images will be randomly" 143 | " cropped. The images will be resized to the resolution first before cropping." 144 | ), 145 | ) 146 | parser.add_argument( 147 | "--random_flip", 148 | action="store_true", 149 | help="whether to randomly flip images horizontally", 150 | ) 151 | parser.add_argument( 152 | "--train_batch_size", type=int, default=16, help="Batch size (per device) for the training dataloader." 153 | ) 154 | parser.add_argument("--num_train_epochs", type=int, default=100) 155 | parser.add_argument( 156 | "--max_train_steps", 157 | type=int, 158 | default=None, 159 | help="Total number of training steps to perform. If provided, overrides num_train_epochs.", 160 | ) 161 | parser.add_argument( 162 | "--gradient_accumulation_steps", 163 | type=int, 164 | default=1, 165 | help="Number of updates steps to accumulate before performing a backward/update pass.", 166 | ) 167 | parser.add_argument( 168 | "--gradient_checkpointing", 169 | action="store_true", 170 | help="Whether or not to use gradient checkpointing to save memory at the expense of slower backward pass.", 171 | ) 172 | parser.add_argument( 173 | "--learning_rate", 174 | type=float, 175 | default=1e-4, 176 | help="Initial learning rate (after the potential warmup period) to use.", 177 | ) 178 | parser.add_argument( 179 | "--scale_lr", 180 | action="store_true", 181 | default=False, 182 | help="Scale the learning rate by the number of GPUs, gradient accumulation steps, and batch size.", 183 | ) 184 | parser.add_argument( 185 | "--lr_scheduler", 186 | type=str, 187 | default="constant", 188 | help=( 189 | 'The scheduler type to use. Choose between ["linear", "cosine", "cosine_with_restarts", "polynomial",' 190 | ' "constant", "constant_with_warmup"]' 191 | ), 192 | ) 193 | parser.add_argument( 194 | "--lr_warmup_steps", type=int, default=500, help="Number of steps for the warmup in the lr scheduler." 195 | ) 196 | parser.add_argument( 197 | "--use_8bit_adam", action="store_true", help="Whether or not to use 8-bit Adam from bitsandbytes." 198 | ) 199 | parser.add_argument( 200 | "--allow_tf32", 201 | action="store_true", 202 | help=( 203 | "Whether or not to allow TF32 on Ampere GPUs. Can be used to speed up training. 
For more information, see" 204 | " https://pytorch.org/docs/stable/notes/cuda.html#tensorfloat-32-tf32-on-ampere-devices" 205 | ), 206 | ) 207 | parser.add_argument("--use_ema", action="store_true", help="Whether to use EMA model.") 208 | parser.add_argument( 209 | "--non_ema_revision", 210 | type=str, 211 | default=None, 212 | required=False, 213 | help=( 214 | "Revision of pretrained non-ema model identifier. Must be a branch, tag or git identifier of the local or" 215 | " remote repository specified with --pretrained_model_name_or_path." 216 | ), 217 | ) 218 | parser.add_argument( 219 | "--dataloader_num_workers", 220 | type=int, 221 | default=0, 222 | help=( 223 | "Number of subprocesses to use for data loading. 0 means that the data will be loaded in the main process." 224 | ), 225 | ) 226 | parser.add_argument("--adam_beta1", type=float, default=0.9, help="The beta1 parameter for the Adam optimizer.") 227 | parser.add_argument("--adam_beta2", type=float, default=0.999, help="The beta2 parameter for the Adam optimizer.") 228 | parser.add_argument("--adam_weight_decay", type=float, default=1e-2, help="Weight decay to use.") 229 | parser.add_argument("--adam_epsilon", type=float, default=1e-08, help="Epsilon value for the Adam optimizer") 230 | parser.add_argument("--max_grad_norm", default=1.0, type=float, help="Max gradient norm.") 231 | parser.add_argument("--push_to_hub", action="store_true", help="Whether or not to push the model to the Hub.") 232 | parser.add_argument("--hub_token", type=str, default=None, help="The token to use to push to the Model Hub.") 233 | parser.add_argument( 234 | "--hub_model_id", 235 | type=str, 236 | default=None, 237 | help="The name of the repository to keep in sync with the local `output_dir`.", 238 | ) 239 | parser.add_argument( 240 | "--logging_dir", 241 | type=str, 242 | default="logs", 243 | help=( 244 | "[TensorBoard](https://www.tensorflow.org/tensorboard) log directory. Will default to" 245 | " *output_dir/runs/**CURRENT_DATETIME_HOSTNAME***." 246 | ), 247 | ) 248 | parser.add_argument( 249 | "--mixed_precision", 250 | type=str, 251 | default=None, 252 | choices=["no", "fp16", "bf16"], 253 | help=( 254 | "Whether to use mixed precision. Choose between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >=" 255 | " 1.10.and an Nvidia Ampere GPU. Default to the value of accelerate config of the current system or the" 256 | " flag passed with the `accelerate.launch` command. Use this argument to override the accelerate config." 257 | ), 258 | ) 259 | parser.add_argument( 260 | "--report_to", 261 | type=str, 262 | default="tensorboard", 263 | help=( 264 | 'The integration to report the results and logs to. Supported platforms are `"tensorboard"`' 265 | ' (default), `"wandb"` and `"comet_ml"`. Use `"all"` to report to all integrations.' 266 | ), 267 | ) 268 | parser.add_argument("--local_rank", type=int, default=-1, help="For distributed training: local_rank") 269 | parser.add_argument( 270 | "--checkpointing_steps", 271 | type=int, 272 | default=500, 273 | help=( 274 | "Save a checkpoint of the training state every X updates. These checkpoints are only suitable for resuming" 275 | " training using `--resume_from_checkpoint`." 276 | ), 277 | ) 278 | parser.add_argument( 279 | "--checkpoints_total_limit", 280 | type=int, 281 | default=None, 282 | help=( 283 | "Max number of checkpoints to store. Passed as `total_limit` to the `Accelerator` `ProjectConfiguration`." 
284 | " See Accelerator::save_state https://huggingface.co/docs/accelerate/package_reference/accelerator#accelerate.Accelerator.save_state" 285 | " for more docs" 286 | ), 287 | ) 288 | parser.add_argument( 289 | "--resume_from_checkpoint", 290 | type=str, 291 | default=None, 292 | help=( 293 | "Whether training should be resumed from a previous checkpoint. Use a path saved by" 294 | ' `--checkpointing_steps`, or `"latest"` to automatically select the last available checkpoint.' 295 | ), 296 | ) 297 | parser.add_argument( 298 | "--enable_xformers_memory_efficient_attention", action="store_true", help="Whether or not to use xformers." 299 | ) 300 | parser.add_argument("--noise_offset", type=float, default=0, help="The scale of noise offset.") 301 | 302 | args = parser.parse_args() 303 | env_local_rank = int(os.environ.get("LOCAL_RANK", -1)) 304 | if env_local_rank != -1 and env_local_rank != args.local_rank: 305 | args.local_rank = env_local_rank 306 | 307 | # Sanity checks 308 | if args.dataset_name is None and args.train_data_dir is None: 309 | raise ValueError("Need either a dataset name or a training folder.") 310 | 311 | # default to using the same revision for the non-ema model if not specified 312 | if args.non_ema_revision is None: 313 | args.non_ema_revision = args.revision 314 | 315 | return args 316 | 317 | 318 | def get_full_repo_name(model_id: str, organization: Optional[str] = None, token: Optional[str] = None): 319 | if token is None: 320 | token = HfFolder.get_token() 321 | if organization is None: 322 | username = whoami(token)["name"] 323 | return f"{username}/{model_id}" 324 | else: 325 | return f"{organization}/{model_id}" 326 | 327 | 328 | dataset_name_mapping = { 329 | "lambdalabs/pokemon-blip-captions": ("image", "text"), 330 | } 331 | 332 | 333 | def main(): 334 | args = parse_args() 335 | 336 | if args.non_ema_revision is not None: 337 | deprecate( 338 | "non_ema_revision!=None", 339 | "0.15.0", 340 | message=( 341 | "Downloading 'non_ema' weights from revision branches of the Hub is deprecated. Please make sure to" 342 | " use `--variant=non_ema` instead." 343 | ), 344 | ) 345 | logging_dir = os.path.join(args.output_dir, args.logging_dir) 346 | 347 | accelerator_project_config = ProjectConfiguration(total_limit=args.checkpoints_total_limit) 348 | 349 | accelerator = Accelerator( 350 | gradient_accumulation_steps=args.gradient_accumulation_steps, 351 | mixed_precision=args.mixed_precision, 352 | log_with=args.report_to, 353 | logging_dir=logging_dir, 354 | project_config=accelerator_project_config, 355 | ) 356 | 357 | # Make one log on every process with the configuration for debugging. 358 | logging.basicConfig( 359 | format="%(asctime)s - %(levelname)s - %(name)s - %(message)s", 360 | datefmt="%m/%d/%Y %H:%M:%S", 361 | level=logging.INFO, 362 | ) 363 | logger.info(accelerator.state, main_process_only=False) 364 | if accelerator.is_local_main_process: 365 | datasets.utils.logging.set_verbosity_warning() 366 | transformers.utils.logging.set_verbosity_warning() 367 | diffusers.utils.logging.set_verbosity_info() 368 | else: 369 | datasets.utils.logging.set_verbosity_error() 370 | transformers.utils.logging.set_verbosity_error() 371 | diffusers.utils.logging.set_verbosity_error() 372 | 373 | # If passed along, set the training seed now. 
374 | if args.seed is not None: 375 | set_seed(args.seed) 376 | 377 | # Handle the repository creation 378 | if accelerator.is_main_process: 379 | if args.push_to_hub: 380 | if args.hub_model_id is None: 381 | repo_name = get_full_repo_name(Path(args.output_dir).name, token=args.hub_token) 382 | else: 383 | repo_name = args.hub_model_id 384 | create_repo(repo_name, exist_ok=True, token=args.hub_token) 385 | repo = Repository(args.output_dir, clone_from=repo_name, token=args.hub_token) 386 | 387 | with open(os.path.join(args.output_dir, ".gitignore"), "w+") as gitignore: 388 | if "step_*" not in gitignore: 389 | gitignore.write("step_*\n") 390 | if "epoch_*" not in gitignore: 391 | gitignore.write("epoch_*\n") 392 | elif args.output_dir is not None: 393 | os.makedirs(args.output_dir, exist_ok=True) 394 | 395 | # Load scheduler, tokenizer and models. 396 | noise_scheduler = DDPMScheduler.from_pretrained(args.pretrained_model_name_or_path, subfolder="scheduler") 397 | tokenizer = CLIPTokenizer.from_pretrained( 398 | args.pretrained_model_name_or_path, subfolder="tokenizer", revision=args.revision 399 | ) 400 | text_encoder = CLIPTextModel.from_pretrained( 401 | args.pretrained_model_name_or_path, subfolder="text_encoder", revision=args.revision 402 | ) 403 | vae = AutoencoderKL.from_pretrained(args.pretrained_model_name_or_path, subfolder="vae", revision=args.revision) 404 | unet = UNet2DConditionModel.from_pretrained( 405 | args.pretrained_model_name_or_path, in_channels=9, low_cpu_mem_usage=False, ignore_mismatched_sizes=True, subfolder="unet", revision=args.non_ema_revision 406 | ) 407 | 408 | # Freeze vae and text_encoder 409 | vae.requires_grad_(False) 410 | text_encoder.requires_grad_(False) 411 | 412 | # Create EMA for the unet. 413 | if args.use_ema: 414 | ema_unet = UNet2DConditionModel.from_pretrained( 415 | args.pretrained_model_name_or_path, in_channels=9, low_cpu_mem_usage=False, ignore_mismatched_sizes=True, subfolder="unet", revision=args.revision 416 | ) 417 | ema_unet = EMAModel(ema_unet.parameters(), model_cls=UNet2DConditionModel, model_config=ema_unet.config) 418 | 419 | if args.enable_xformers_memory_efficient_attention: 420 | if is_xformers_available(): 421 | import xformers 422 | 423 | xformers_version = version.parse(xformers.__version__) 424 | if xformers_version == version.parse("0.0.16"): 425 | logger.warn( 426 | "xFormers 0.0.16 cannot be used for training in some GPUs. If you observe problems during training, please update xFormers to at least 0.0.17. See https://huggingface.co/docs/diffusers/main/en/optimization/xformers for more details." 427 | ) 428 | unet.enable_xformers_memory_efficient_attention() 429 | else: 430 | raise ValueError("xformers is not available. 
Make sure it is installed correctly") 431 | 432 | # `accelerate` 0.16.0 will have better support for customized saving 433 | if version.parse(accelerate.__version__) >= version.parse("0.16.0"): 434 | # create custom saving & loading hooks so that `accelerator.save_state(...)` serializes in a nice format 435 | def save_model_hook(models, weights, output_dir): 436 | if args.use_ema: 437 | ema_unet.save_pretrained(os.path.join(output_dir, "unet_ema")) 438 | 439 | for i, model in enumerate(models): 440 | model.save_pretrained(os.path.join(output_dir, "unet")) 441 | 442 | # make sure to pop weight so that corresponding model is not saved again 443 | weights.pop() 444 | 445 | def load_model_hook(models, input_dir): 446 | if args.use_ema: 447 | load_model = EMAModel.from_pretrained(os.path.join(input_dir, "unet_ema"), UNet2DConditionModel) 448 | ema_unet.load_state_dict(load_model.state_dict()) 449 | ema_unet.to(accelerator.device) 450 | del load_model 451 | 452 | for i in range(len(models)): 453 | # pop models so that they are not loaded again 454 | model = models.pop() 455 | 456 | # load diffusers style into model 457 | load_model = UNet2DConditionModel.from_pretrained(input_dir, subfolder="unet") 458 | model.register_to_config(**load_model.config) 459 | 460 | model.load_state_dict(load_model.state_dict()) 461 | del load_model 462 | 463 | accelerator.register_save_state_pre_hook(save_model_hook) 464 | accelerator.register_load_state_pre_hook(load_model_hook) 465 | 466 | if args.gradient_checkpointing: 467 | unet.enable_gradient_checkpointing() 468 | 469 | # Enable TF32 for faster training on Ampere GPUs, 470 | # cf https://pytorch.org/docs/stable/notes/cuda.html#tensorfloat-32-tf32-on-ampere-devices 471 | if args.allow_tf32: 472 | torch.backends.cuda.matmul.allow_tf32 = True 473 | 474 | if args.scale_lr: 475 | args.learning_rate = ( 476 | args.learning_rate * args.gradient_accumulation_steps * args.train_batch_size * accelerator.num_processes 477 | ) 478 | 479 | # Initialize the optimizer 480 | if args.use_8bit_adam: 481 | try: 482 | import bitsandbytes as bnb 483 | except ImportError: 484 | raise ImportError( 485 | "Please install bitsandbytes to use 8-bit Adam. You can do so by running `pip install bitsandbytes`" 486 | ) 487 | 488 | optimizer_cls = bnb.optim.AdamW8bit 489 | else: 490 | optimizer_cls = torch.optim.AdamW 491 | 492 | optimizer = optimizer_cls( 493 | unet.parameters(), 494 | lr=args.learning_rate, 495 | betas=(args.adam_beta1, args.adam_beta2), 496 | weight_decay=args.adam_weight_decay, 497 | eps=args.adam_epsilon, 498 | ) 499 | 500 | # Get the datasets: you can either provide your own training and evaluation files (see below) 501 | # or specify a Dataset from the hub (the dataset will be downloaded automatically from the datasets Hub). 502 | 503 | # In distributed training, the load_dataset function guarantees that only one local process can concurrently 504 | # download the dataset. 505 | if args.dataset_name is not None: 506 | # Downloading and loading a dataset from the hub. 
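# Note: load_dataset() accepts either a Hub dataset ID or a path to a local
# dataset directory / loading script, so --dataset_name can also point at a
# local 🤗 Datasets builder on disk.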
507 | dataset = load_dataset( 508 | args.dataset_name, 509 | args.dataset_config_name, 510 | cache_dir=args.cache_dir, 511 | ) 512 | else: 513 | data_files = {} 514 | if args.train_data_dir is not None: 515 | data_files["train"] = os.path.join(args.train_data_dir, "**") 516 | dataset = load_dataset( 517 | "imagefolder", 518 | data_files=data_files, 519 | cache_dir=args.cache_dir, 520 | ) 521 | # See more about loading custom images at 522 | # https://huggingface.co/docs/datasets/v2.4.0/en/image_load#imagefolder 523 | 524 | # Preprocessing the datasets. 525 | # We need to tokenize inputs and targets. 526 | column_names = dataset["train"].column_names 527 | 528 | # Get the column names for input/target. 529 | dataset_columns = dataset_name_mapping.get(args.dataset_name, None) 530 | if args.image_column is None: 531 | image_column = dataset_columns[0] if dataset_columns is not None else column_names[0] 532 | else: 533 | image_column = args.image_column 534 | if image_column not in column_names: 535 | raise ValueError( 536 | f"--image_column' value '{args.image_column}' needs to be one of: {', '.join(column_names)}" 537 | ) 538 | if args.caption_column is None: 539 | caption_column = dataset_columns[1] if dataset_columns is not None else column_names[1] 540 | else: 541 | caption_column = args.caption_column 542 | if caption_column not in column_names: 543 | raise ValueError( 544 | f"--caption_column' value '{args.caption_column}' needs to be one of: {', '.join(column_names)}" 545 | ) 546 | 547 | # Preprocessing the datasets. 548 | # We need to tokenize input captions and transform the images. 549 | def tokenize_captions(examples, is_train=True): 550 | captions = [] 551 | for caption in examples[caption_column]: 552 | if isinstance(caption, str): 553 | captions.append(caption) 554 | elif isinstance(caption, (list, np.ndarray)): 555 | # take a random caption if there are multiple 556 | captions.append(random.choice(caption) if is_train else caption[0]) 557 | else: 558 | raise ValueError( 559 | f"Caption column `{caption_column}` should contain either strings or lists of strings." 560 | ) 561 | inputs = tokenizer( 562 | captions, max_length=tokenizer.model_max_length, padding="max_length", truncation=True, return_tensors="pt" 563 | ) 564 | return inputs.input_ids 565 | 566 | # Preprocessing the datasets. 
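# Images are resized to (args.resolution, args.resolution // 2), i.e. a 2:1
# portrait aspect ratio (512x256 when --resolution=512), which presumably
# matches the full-body fashion images in the dataset. Masks use NEAREST
# interpolation so they stay binary and are only converted with ToTensor
# (values in [0, 1]), whereas images are additionally normalized to [-1, 1];
# `preprocess_train` then zeroes out the masked region with
# `torch.masked_fill` to build the masked-image conditioning input.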
567 | train_transforms = transforms.Compose( 568 | [ 569 | transforms.Resize((args.resolution, args.resolution // 2), interpolation=transforms.InterpolationMode.BILINEAR), 570 | transforms.CenterCrop(args.resolution) if args.center_crop else transforms.Lambda(lambda x: x), 571 | transforms.RandomHorizontalFlip() if args.random_flip else transforms.Lambda(lambda x: x), 572 | transforms.ToTensor(), 573 | transforms.Normalize([0.5], [0.5]), 574 | ] 575 | ) 576 | 577 | train_mask_transforms = transforms.Compose( 578 | [ 579 | transforms.Resize((args.resolution, args.resolution // 2), interpolation=transforms.InterpolationMode.NEAREST), 580 | transforms.ToTensor(), 581 | ] 582 | ) 583 | 584 | def preprocess_train(examples): 585 | images = [image.convert("RGB") for image in examples[image_column]] 586 | masks = [mask for mask in examples['mask']] 587 | examples["pixel_values"] = [train_transforms(image) for image in images] 588 | examples["pixel_mask_values"] = [train_mask_transforms(mask) for mask in masks] 589 | examples["masked_pixel_values"] = [torch.masked_fill( 590 | examples["pixel_values"][i], examples["pixel_mask_values"][i].bool(), 0) for i in range(len(images))] 591 | examples["input_ids"] = tokenize_captions(examples) 592 | return examples 593 | 594 | with accelerator.main_process_first(): 595 | if args.max_train_samples is not None: 596 | dataset["train"] = dataset["train"].shuffle(seed=args.seed).select(range(args.max_train_samples)) 597 | # Set the training transforms 598 | train_dataset = dataset["train"].with_transform(preprocess_train) 599 | 600 | def collate_fn(examples): 601 | pixel_values = torch.stack([example["pixel_values"] for example in examples]) 602 | pixel_values = pixel_values.to(memory_format=torch.contiguous_format).float() 603 | pixel_mask_values = torch.stack([example["pixel_mask_values"] for example in examples]) 604 | pixel_mask_values = pixel_mask_values.to(memory_format=torch.contiguous_format) 605 | masked_pixel_values = torch.stack([example["masked_pixel_values"] for example in examples]) 606 | masked_pixel_values = masked_pixel_values.to(memory_format=torch.contiguous_format).float() 607 | input_ids = torch.stack([example["input_ids"] for example in examples]) 608 | return {"pixel_values": pixel_values, "pixel_mask_values": pixel_mask_values, 609 | "masked_pixel_values": masked_pixel_values, "input_ids": input_ids} 610 | 611 | # DataLoaders creation: 612 | train_dataloader = torch.utils.data.DataLoader( 613 | train_dataset, 614 | shuffle=True, 615 | collate_fn=collate_fn, 616 | batch_size=args.train_batch_size, 617 | num_workers=args.dataloader_num_workers, 618 | ) 619 | 620 | # Scheduler and math around the number of training steps. 621 | overrode_max_train_steps = False 622 | num_update_steps_per_epoch = math.ceil(len(train_dataloader) / args.gradient_accumulation_steps) 623 | if args.max_train_steps is None: 624 | args.max_train_steps = args.num_train_epochs * num_update_steps_per_epoch 625 | overrode_max_train_steps = True 626 | 627 | lr_scheduler = get_scheduler( 628 | args.lr_scheduler, 629 | optimizer=optimizer, 630 | num_warmup_steps=args.lr_warmup_steps * args.gradient_accumulation_steps, 631 | num_training_steps=args.max_train_steps * args.gradient_accumulation_steps, 632 | ) 633 | 634 | # Prepare everything with our `accelerator`. 
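# Only the trainable unet, together with the optimizer, dataloader and lr
# scheduler, is passed through `accelerator.prepare`; the frozen vae and
# text_encoder are moved to the device and cast to the inference dtype by
# hand a few lines below, so they are never wrapped for gradient
# synchronization.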
635 | unet, optimizer, train_dataloader, lr_scheduler = accelerator.prepare( 636 | unet, optimizer, train_dataloader, lr_scheduler 637 | ) 638 | 639 | if args.use_ema: 640 | ema_unet.to(accelerator.device) 641 | 642 | # For mixed precision training we cast the text_encoder and vae weights to half-precision 643 | # as these models are only used for inference, keeping weights in full precision is not required. 644 | weight_dtype = torch.float32 645 | if accelerator.mixed_precision == "fp16": 646 | weight_dtype = torch.float16 647 | elif accelerator.mixed_precision == "bf16": 648 | weight_dtype = torch.bfloat16 649 | 650 | # Move text_encode and vae to gpu and cast to weight_dtype 651 | text_encoder.to(accelerator.device, dtype=weight_dtype) 652 | vae.to(accelerator.device, dtype=weight_dtype) 653 | 654 | # We need to recalculate our total training steps as the size of the training dataloader may have changed. 655 | num_update_steps_per_epoch = math.ceil(len(train_dataloader) / args.gradient_accumulation_steps) 656 | if overrode_max_train_steps: 657 | args.max_train_steps = args.num_train_epochs * num_update_steps_per_epoch 658 | # Afterwards we recalculate our number of training epochs 659 | args.num_train_epochs = math.ceil(args.max_train_steps / num_update_steps_per_epoch) 660 | 661 | # We need to initialize the trackers we use, and also store our configuration. 662 | # The trackers initializes automatically on the main process. 663 | if accelerator.is_main_process: 664 | accelerator.init_trackers("text2image-fine-tune", config=vars(args)) 665 | 666 | # Train! 667 | total_batch_size = args.train_batch_size * accelerator.num_processes * args.gradient_accumulation_steps 668 | 669 | logger.info("***** Running training *****") 670 | logger.info(f" Num examples = {len(train_dataset)}") 671 | logger.info(f" Num Epochs = {args.num_train_epochs}") 672 | logger.info(f" Instantaneous batch size per device = {args.train_batch_size}") 673 | logger.info(f" Total train batch size (w. parallel, distributed & accumulation) = {total_batch_size}") 674 | logger.info(f" Gradient Accumulation steps = {args.gradient_accumulation_steps}") 675 | logger.info(f" Total optimization steps = {args.max_train_steps}") 676 | global_step = 0 677 | first_epoch = 0 678 | 679 | # Potentially load in the weights and states from a previous save 680 | if args.resume_from_checkpoint: 681 | if args.resume_from_checkpoint != "latest": 682 | path = os.path.basename(args.resume_from_checkpoint) 683 | else: 684 | # Get the most recent checkpoint 685 | dirs = os.listdir(args.output_dir) 686 | dirs = [d for d in dirs if d.startswith("checkpoint")] 687 | dirs = sorted(dirs, key=lambda x: int(x.split("-")[1])) 688 | path = dirs[-1] if len(dirs) > 0 else None 689 | 690 | if path is None: 691 | accelerator.print( 692 | f"Checkpoint '{args.resume_from_checkpoint}' does not exist. Starting a new training run." 693 | ) 694 | args.resume_from_checkpoint = None 695 | else: 696 | accelerator.print(f"Resuming from checkpoint {path}") 697 | accelerator.load_state(os.path.join(args.output_dir, path)) 698 | global_step = int(path.split("-")[1]) 699 | 700 | resume_global_step = global_step * args.gradient_accumulation_steps 701 | first_epoch = global_step // num_update_steps_per_epoch 702 | resume_step = resume_global_step % (num_update_steps_per_epoch * args.gradient_accumulation_steps) 703 | 704 | # Only show the progress bar once on each machine. 
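# The progress bar counts optimizer (update) steps, so it starts from the
# restored `global_step` when resuming. During the first resumed epoch the
# loop below still iterates the dataloader from the start and skips batches
# until `resume_step`; that is why `resume_step` is expressed in dataloader
# steps (global_step * gradient_accumulation_steps, modulo the per-epoch
# dataloader length) rather than in update steps.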
705 | progress_bar = tqdm(range(global_step, args.max_train_steps), disable=not accelerator.is_local_main_process) 706 | progress_bar.set_description("Steps") 707 | 708 | for epoch in range(first_epoch, args.num_train_epochs): 709 | unet.train() 710 | train_loss = 0.0 711 | for step, batch in enumerate(train_dataloader): 712 | # Skip steps until we reach the resumed step 713 | if args.resume_from_checkpoint and epoch == first_epoch and step < resume_step: 714 | if step % args.gradient_accumulation_steps == 0: 715 | progress_bar.update(1) 716 | continue 717 | 718 | with accelerator.accumulate(unet): 719 | # Convert images to latent space 720 | latents = vae.encode(batch["pixel_values"].to(weight_dtype)).latent_dist.sample() 721 | latents = latents * vae.config.scaling_factor 722 | 723 | masked_latents = vae.encode(batch["masked_pixel_values"].to(weight_dtype)).latent_dist.sample() 724 | masked_latents = masked_latents * vae.config.scaling_factor 725 | 726 | mask = torch.nn.functional.interpolate(batch["pixel_mask_values"], 727 | size=(args.resolution // 8, args.resolution // 16), mode='nearest') 728 | 729 | # Sample noise that we'll add to the latents 730 | noise = torch.randn_like(latents) 731 | if args.noise_offset: 732 | # https://www.crosslabs.org//blog/diffusion-with-offset-noise 733 | noise += args.noise_offset * torch.randn( 734 | (latents.shape[0], latents.shape[1], 1, 1), device=latents.device 735 | ) 736 | 737 | bsz = latents.shape[0] 738 | # Sample a random timestep for each image 739 | timesteps = torch.randint(0, noise_scheduler.num_train_timesteps, (bsz,), device=latents.device) 740 | timesteps = timesteps.long() 741 | 742 | # Add noise to the latents according to the noise magnitude at each timestep 743 | # (this is the forward diffusion process) 744 | noisy_latents = noise_scheduler.add_noise(latents, noise, timesteps) 745 | 746 | # concatenate the noised latents with the mask and the masked latents 747 | latent_model_input = torch.cat([noisy_latents, mask, masked_latents], dim=1) 748 | 749 | # Get the text embedding for conditioning 750 | encoder_hidden_states = text_encoder(batch["input_ids"])[0] 751 | 752 | # Get the target for loss depending on the prediction type 753 | if noise_scheduler.config.prediction_type == "epsilon": 754 | target = noise 755 | elif noise_scheduler.config.prediction_type == "v_prediction": 756 | target = noise_scheduler.get_velocity(latents, noise, timesteps) 757 | else: 758 | raise ValueError(f"Unknown prediction type {noise_scheduler.config.prediction_type}") 759 | 760 | # Predict the noise residual and compute loss 761 | model_pred = unet(latent_model_input, timesteps, encoder_hidden_states).sample 762 | loss = F.mse_loss(model_pred.float(), target.float(), reduction="mean") 763 | 764 | # Gather the losses across all processes for logging (if we use distributed training). 
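# The gathered, averaged loss only feeds `train_loss`, which is logged once
# per optimizer step and then reset; backpropagation below still uses the
# local, per-process `loss`. Dividing by gradient_accumulation_steps makes
# the logged value the mean loss over the whole accumulation window.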
765 | avg_loss = accelerator.gather(loss.repeat(args.train_batch_size)).mean() 766 | train_loss += avg_loss.item() / args.gradient_accumulation_steps 767 | 768 | # Backpropagate 769 | accelerator.backward(loss) 770 | if accelerator.sync_gradients: 771 | accelerator.clip_grad_norm_(unet.parameters(), args.max_grad_norm) 772 | optimizer.step() 773 | lr_scheduler.step() 774 | optimizer.zero_grad() 775 | 776 | # Checks if the accelerator has performed an optimization step behind the scenes 777 | if accelerator.sync_gradients: 778 | if args.use_ema: 779 | ema_unet.step(unet.parameters()) 780 | progress_bar.update(1) 781 | global_step += 1 782 | accelerator.log({"train_loss": train_loss}, step=global_step) 783 | train_loss = 0.0 784 | 785 | if global_step % args.checkpointing_steps == 0: 786 | if accelerator.is_main_process: 787 | save_path = os.path.join(args.output_dir, f"checkpoint-{global_step}") 788 | accelerator.save_state(save_path) 789 | logger.info(f"Saved state to {save_path}") 790 | 791 | logs = {"step_loss": loss.detach().item(), "lr": lr_scheduler.get_last_lr()[0]} 792 | progress_bar.set_postfix(**logs) 793 | 794 | if global_step >= args.max_train_steps: 795 | break 796 | 797 | # Create the pipeline using the trained modules and save it. 798 | accelerator.wait_for_everyone() 799 | if accelerator.is_main_process: 800 | unet = accelerator.unwrap_model(unet) 801 | if args.use_ema: 802 | ema_unet.copy_to(unet.parameters()) 803 | 804 | pipeline = StableDiffusionPipeline.from_pretrained( 805 | args.pretrained_model_name_or_path, 806 | text_encoder=text_encoder, 807 | vae=vae, 808 | unet=unet, 809 | revision=args.revision, 810 | ) 811 | pipeline.save_pretrained(args.output_dir) 812 | 813 | if args.push_to_hub: 814 | repo.push_to_hub(commit_message="End of training", blocking=False, auto_lfs_prune=True) 815 | 816 | accelerator.end_training() 817 | 818 | 819 | if __name__ == "__main__": 820 | main() 821 | --------------------------------------------------------------------------------
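For reference, the inpainting-style conditioning assembled inside the training loop of train_texfit.py can be reproduced in isolation. The short sketch below only mirrors the tensor bookkeeping: a 512x256 image corresponds to a 4-channel 64x32 latent, the pixel-space mask is downsampled to the same spatial size, and noisy latents, mask and masked-image latents are concatenated into the 9-channel UNet input. It is an illustration under stated assumptions, using random tensors in place of the real VAE, noise scheduler and UNet; none of the names below exist in the repository.

import torch
import torch.nn.functional as F

# Stand-ins for one training batch at --resolution=512 (images are 512x256).
batch, resolution = 2, 512
latents = torch.randn(batch, 4, resolution // 8, resolution // 16)         # VAE latent of the image
masked_latents = torch.randn(batch, 4, resolution // 8, resolution // 16)  # latent of the mask-zeroed image
pixel_mask = (torch.rand(batch, 1, resolution, resolution // 2) > 0.5).float()

# Downsample the pixel-space mask to the latent resolution, as in the training loop.
mask = F.interpolate(pixel_mask, size=(resolution // 8, resolution // 16), mode="nearest")

# Placeholder for noise_scheduler.add_noise(latents, noise, timesteps).
noise = torch.randn_like(latents)
noisy_latents = latents + noise

# Noised latents (4) + mask (1) + masked latents (4) -> the 9-channel input
# expected by the UNet that was loaded with in_channels=9.
latent_model_input = torch.cat([noisy_latents, mask, masked_latents], dim=1)
print(latent_model_input.shape)  # torch.Size([2, 9, 64, 32])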