├── .gitattributes ├── .github └── ISSUE_TEMPLATE │ └── bug_report.md ├── .gitignore ├── LICENSE ├── README.md ├── __init__.py ├── annotator ├── annotator_path.py ├── binary │ └── __init__.py ├── lama │ ├── __init__.py │ ├── config.yaml │ └── saicinpainting │ │ ├── __init__.py │ │ ├── __pycache__ │ │ ├── __init__.cpython-310.pyc │ │ └── utils.cpython-310.pyc │ │ ├── training │ │ ├── __init__.py │ │ ├── __pycache__ │ │ │ └── __init__.cpython-310.pyc │ │ ├── data │ │ │ ├── __init__.py │ │ │ └── masks.py │ │ ├── losses │ │ │ ├── __init__.py │ │ │ ├── __pycache__ │ │ │ │ ├── __init__.cpython-310.pyc │ │ │ │ ├── distance_weighting.cpython-310.pyc │ │ │ │ ├── feature_matching.cpython-310.pyc │ │ │ │ └── perceptual.cpython-310.pyc │ │ │ ├── adversarial.py │ │ │ ├── constants.py │ │ │ ├── distance_weighting.py │ │ │ ├── feature_matching.py │ │ │ ├── perceptual.py │ │ │ ├── segmentation.py │ │ │ └── style_loss.py │ │ ├── modules │ │ │ ├── __init__.py │ │ │ ├── __pycache__ │ │ │ │ ├── __init__.cpython-310.pyc │ │ │ │ ├── base.cpython-310.pyc │ │ │ │ ├── depthwise_sep_conv.cpython-310.pyc │ │ │ │ ├── ffc.cpython-310.pyc │ │ │ │ ├── multidilated_conv.cpython-310.pyc │ │ │ │ ├── pix2pixhd.cpython-310.pyc │ │ │ │ ├── spatial_transform.cpython-310.pyc │ │ │ │ └── squeeze_excitation.cpython-310.pyc │ │ │ ├── base.py │ │ │ ├── depthwise_sep_conv.py │ │ │ ├── fake_fakes.py │ │ │ ├── ffc.py │ │ │ ├── multidilated_conv.py │ │ │ ├── multiscale.py │ │ │ ├── pix2pixhd.py │ │ │ ├── spatial_transform.py │ │ │ └── squeeze_excitation.py │ │ ├── trainers │ │ │ ├── __init__.py │ │ │ ├── __pycache__ │ │ │ │ ├── __init__.cpython-310.pyc │ │ │ │ ├── base.cpython-310.pyc │ │ │ │ └── default.cpython-310.pyc │ │ │ ├── base.py │ │ │ └── default.py │ │ └── visualizers │ │ │ ├── __init__.py │ │ │ ├── base.py │ │ │ ├── colors.py │ │ │ ├── directory.py │ │ │ └── noop.py │ │ └── utils.py ├── uniformer │ ├── LICENSE │ ├── __init__.py │ ├── configs │ │ └── _base_ │ │ │ ├── datasets │ │ │ ├── ade20k.py │ │ │ ├── chase_db1.py │ │ │ ├── cityscapes.py │ │ │ ├── cityscapes_769x769.py │ │ │ ├── drive.py │ │ │ ├── hrf.py │ │ │ ├── pascal_context.py │ │ │ ├── pascal_context_59.py │ │ │ ├── pascal_voc12.py │ │ │ ├── pascal_voc12_aug.py │ │ │ └── stare.py │ │ │ ├── default_runtime.py │ │ │ ├── models │ │ │ ├── ann_r50-d8.py │ │ │ ├── apcnet_r50-d8.py │ │ │ ├── ccnet_r50-d8.py │ │ │ ├── cgnet.py │ │ │ ├── danet_r50-d8.py │ │ │ ├── deeplabv3_r50-d8.py │ │ │ ├── deeplabv3_unet_s5-d16.py │ │ │ ├── deeplabv3plus_r50-d8.py │ │ │ ├── dmnet_r50-d8.py │ │ │ ├── dnl_r50-d8.py │ │ │ ├── emanet_r50-d8.py │ │ │ ├── encnet_r50-d8.py │ │ │ ├── fast_scnn.py │ │ │ ├── fcn_hr18.py │ │ │ ├── fcn_r50-d8.py │ │ │ ├── fcn_unet_s5-d16.py │ │ │ ├── fpn_r50.py │ │ │ ├── fpn_uniformer.py │ │ │ ├── gcnet_r50-d8.py │ │ │ ├── lraspp_m-v3-d8.py │ │ │ ├── nonlocal_r50-d8.py │ │ │ ├── ocrnet_hr18.py │ │ │ ├── ocrnet_r50-d8.py │ │ │ ├── pointrend_r50.py │ │ │ ├── psanet_r50-d8.py │ │ │ ├── pspnet_r50-d8.py │ │ │ ├── pspnet_unet_s5-d16.py │ │ │ ├── upernet_r50.py │ │ │ └── upernet_uniformer.py │ │ │ └── schedules │ │ │ ├── schedule_160k.py │ │ │ ├── schedule_20k.py │ │ │ ├── schedule_40k.py │ │ │ └── schedule_80k.py │ ├── inference.py │ ├── mmcv_custom │ │ ├── __init__.py │ │ └── checkpoint.py │ ├── uniformer.py │ └── upernet_global_small.py └── util.py ├── inpaint_Lama.py ├── requirements.txt └── workflows └── workflow_lama.json /.gitattributes: -------------------------------------------------------------------------------- 1 | # Auto detect text files and perform LF 
normalization 2 | * text=auto 3 | *.bin filter=lfs diff=lfs merge=lfs -text 4 | *.pth filter=lfs diff=lfs merge=lfs -text 5 | -------------------------------------------------------------------------------- /.github/ISSUE_TEMPLATE/bug_report.md: -------------------------------------------------------------------------------- 1 | --- 2 | name: Bug report 3 | about: Create a report to help us improve 4 | title: '' 5 | labels: bug 6 | assignees: '' 7 | 8 | --- 9 | 10 | **Describe the bug** 11 | A clear and concise description of what the bug is. 12 | 13 | **To Reproduce** 14 | Complete Workflow: 15 | 16 | Input Image: 17 | 18 | **Console Traceback** 19 | The complete console error traceback 20 | 21 | 22 | **Additional context** 23 | Add any other context about the problem here. 24 | -------------------------------------------------------------------------------- /.gitignore: -------------------------------------------------------------------------------- 1 | # Byte-compiled / optimized / DLL files 2 | __pycache__/ 3 | *.py[cod] 4 | *$py.class 5 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | # ⚠️⚠️⚠️ Due to lack of bandwidth this repo is being archived in favor of actively maintained repos like [comfyui-inpaint-nodes](https://github.com/Acly/comfyui-inpaint-nodes) ⚠️⚠️⚠️ 2 | # LaMa Preprocessor 3 | 4 | Currently only supports NVIDIA GPUs 5 | 6 | This preprocessor finally enables users to generate coherent inpainting and outpainting **prompt-free** 7 | 8 | For inpainting tasks, it's recommended to use the 'outpaint' function. Although the 'inpaint' function is still in the development phase, the results from the 'outpaint' function remain quite satisfactory. 9 | 10 | The best results are obtained on landscapes; good results can still be achieved on drawings by lowering the ControlNet end percentage to 0.7-0.8 11 | 12 | ## Installation 13 | 14 | 1. **Manager installation (suggested)**: make sure [ComfyUI Manager](https://github.com/ltdrdata/ComfyUI-Manager) is installed, then just search for "LaMa preprocessor" 15 | 16 | 2. **Manual installation**: clone this repo inside the custom_nodes folder 17 | 18 | ## A LaMa preprocessor for ComfyUI 19 | 20 | This is a simple workflow example. To use it, download workflows/workflow_lama.json and then drop it into a ComfyUI tab 21 | 22 | 23 | --- 24 | 25 | These are some non-cherry-picked results, all obtained starting from this image 26 | ![startingImage](https://github.com/mlinmg/ComfyUI-LaMA-Preprocessor/assets/121761685/18b937d6-bcda-4606-a3b0-b24af55d27dd) 27 | 28 | 29 | --- 30 | 31 | ![ComfyUI_01581_](https://github.com/mlinmg/ComfyUI-LaMA-Preprocessor/assets/121761685/3adbc1f8-bb3e-4ae5-b31b-d7fb8624f0ae) 32 | ![ComfyUI_01580_](https://github.com/mlinmg/ComfyUI-LaMA-Preprocessor/assets/121761685/77f73d96-2612-431d-bd3c-7e6f4c2503c0) 33 | ![ComfyUI_01579_](https://github.com/mlinmg/ComfyUI-LaMA-Preprocessor/assets/121761685/5715229b-6b6e-4f2e-917c-97b09758c805) 34 | ![ComfyUI_01578_](https://github.com/mlinmg/ComfyUI-LaMA-Preprocessor/assets/121761685/dbe1a705-7574-4b2e-a2c6-d06708a38261) 35 | 36 | You can find the preprocessor node under image/preprocessors 37 | 38 | ## Contributing 39 | 40 | Everyone is invited to contribute; my school/work schedule seriously limits the time I can spend improving this project, so any help is welcome.
41 | 42 | If you would like to help with the development of this repo, there are some missing features that still need to be implemented: 43 | 44 | - [x] An unusual behavior is observed when providing an empty prompt to the drawing/cartoon outpainting system. Interestingly, the results appear to be significantly better when the two conditionings are prompted with "positive" and "negative" respectively. This is probably because, when prompted with a blank prompt, the controlnet adds too much weight. 45 | - [ ] This workflow exhibits some image darkening/color shifting; this should be further investigated in order to apply a color-fix method 46 | - [ ] More consistent results. ~~One of the problems might be in [this function](https://github.com/mlinmg/ComfyUI-LaMA-Preprocessor/blob/main/inpaint_Lama.py#L179): it seems that sometimes the image does not match the mask, and if you pass this image to the LaMa model it makes a noisy greyish mess~~ this has been ruled out, since the auto1111 preprocessor gives approximately the same image as in ComfyUI. But the consistency problem remains, and the results are really different when compared to the automatic1111 repo. For anyone interested in contributing, I have already implemented a soft injection mechanism; you should start from there to see where it goes south. One of the problems I have seen is the difference in the CLIP embeddings 47 | - [x] [soft injection](https://github.com/Mikubill/sd-webui-controlnet/blob/7a4805c8ea3256a0eab3512280bd4f84ca0c8182/scripts/hook.py#L620), ~~since I cannot understand how to set different weights inside the sampling steps in ComfyUI. At the moment the control seems too high all around, and this causes artifacts in more complex in/outpainting~~ while I've been able to successfully implement this, it seems to lower the weights of the controlnet too much, and using a soft injection will lead to broken results 48 | - [x] LaMa preprocessing node 49 | - [x] Basic workflow 50 | 51 | ## Thanks 52 | 53 | This code is inspired by Mikubill/sd-webui-controlnet 54 | -------------------------------------------------------------------------------- /__init__.py: -------------------------------------------------------------------------------- 1 | from .inpaint_Lama import ( 2 | lamaPreprocessor, 3 | 4 | ) 5 | 6 | 7 | NODE_CLASS_MAPPINGS = { 8 | "lamaPreprocessor": lamaPreprocessor, 9 | 10 | } 11 | -------------------------------------------------------------------------------- /annotator/annotator_path.py: -------------------------------------------------------------------------------- 1 | import os 2 | from modules import shared 3 | 4 | models_path = shared.opts.data.get('control_net_modules_path', None) 5 | if not models_path: 6 | models_path = getattr(shared.cmd_opts, 'controlnet_annotator_models_path', None) 7 | if not models_path: 8 | models_path = os.path.join(os.path.dirname(os.path.abspath(__file__)), 'downloads') 9 | 10 | if not os.path.isabs(models_path): 11 | models_path = os.path.join(shared.data_path, models_path) 12 | 13 | clip_vision_path = os.path.join(os.path.dirname(os.path.abspath(__file__)), 'clip_vision') 14 | # clip vision is always inside controlnet "extensions\sd-webui-controlnet" 15 | # and any problem can be solved by removing controlnet and reinstalling it 16 | 17 | models_path = os.path.realpath(models_path) 18 | os.makedirs(models_path, exist_ok=True) 19 | print(f'ControlNet preprocessor location: {models_path}') 20 | # Make sure that the default location is inside controlnet "extensions\sd-webui-controlnet" 21 | # so that any
problem can be solved by removing controlnet and reinstalling it 22 | # if users do not change configs on their own (otherwise users will know what is wrong) 23 | -------------------------------------------------------------------------------- /annotator/binary/__init__.py: -------------------------------------------------------------------------------- 1 | import cv2 2 | 3 | 4 | def apply_binary(img, bin_threshold): 5 | img_gray = cv2.cvtColor(img, cv2.COLOR_RGB2GRAY) 6 | 7 | if bin_threshold == 0 or bin_threshold == 255: 8 | # Otsu's threshold 9 | otsu_threshold, img_bin = cv2.threshold(img_gray, 0, 255, cv2.THRESH_BINARY_INV + cv2.THRESH_OTSU) 10 | print("Otsu threshold:", otsu_threshold) 11 | else: 12 | _, img_bin = cv2.threshold(img_gray, bin_threshold, 255, cv2.THRESH_BINARY_INV) 13 | 14 | return cv2.cvtColor(img_bin, cv2.COLOR_GRAY2RGB) 15 | -------------------------------------------------------------------------------- /annotator/lama/__init__.py: -------------------------------------------------------------------------------- 1 | # https://github.com/advimman/lama 2 | import sys 3 | 4 | import yaml 5 | import torch 6 | from omegaconf import OmegaConf 7 | import numpy as np 8 | import urllib.request 9 | 10 | from einops import rearrange 11 | import os 12 | percorso_radice_progetto = os.path.abspath(os.path.dirname(__file__))  # project root path 13 | from .saicinpainting.training.trainers import load_checkpoint 14 | 15 | # Add the project root path to sys.path (PYTHONPATH) so the vendored saicinpainting package resolves 16 | sys.path.append(percorso_radice_progetto) 17 | 18 | 19 | devices = 'cuda' if torch.cuda.is_available() else 'cpu' 20 | models_path = os.path.join(os.path.dirname(os.path.abspath(__file__)), 'models','lama') 21 | os.makedirs(models_path, exist_ok=True) 22 | 23 | class LamaInpainting: 24 | model_dir = os.path.join(models_path, "lama") 25 | 26 | def __init__(self): 27 | self.model = None 28 | self.device = devices 29 | 30 | 31 | 32 | def _load_file_from_url(self, model_path: str, model_dir: str) -> None: 33 | os.makedirs(os.path.dirname(model_dir), exist_ok=True) 34 | urllib.request.urlretrieve(model_path, os.path.join(model_dir, model_path.split("/")[-1])) 35 | 36 | def load_model(self): 37 | remote_model_path = "https://huggingface.co/lllyasviel/Annotators/resolve/main/ControlNetLama.pth" 38 | modelpath = os.path.join(self.model_dir, "ControlNetLama.pth") 39 | if not os.path.exists(modelpath): 40 | self._load_file_from_url(remote_model_path, model_dir=self.model_dir) 41 | config_path = os.path.join(os.path.dirname(os.path.abspath(__file__)), 'config.yaml') 42 | cfg = yaml.safe_load(open(config_path, 'rt')) 43 | cfg = OmegaConf.create(cfg) 44 | cfg.training_model.predict_only = True 45 | cfg.visualizer.kind = 'noop' 46 | self.model = load_checkpoint(cfg, os.path.abspath(modelpath), strict=False, map_location='cpu') 47 | self.model = self.model.to(self.device) 48 | self.model.eval() 49 | 50 | def unload_model(self): 51 | if self.model is not None: 52 | self.model.cpu() 53 | 54 | def __call__(self, input_image): 55 | from PIL import Image 56 | if self.model is None: 57 | self.load_model() 58 | self.model.to(self.device) 59 | color = np.ascontiguousarray(input_image[:, :, 0:3]).astype(np.float32) / 255 60 | 61 | mask = np.ascontiguousarray(input_image[:, :, 3:4]).astype(np.float32) / 255 62 | 63 | with torch.no_grad(): 64 | color = torch.from_numpy(color).float().to(self.device) 65 | mask = torch.from_numpy(mask).float().to(self.device) 66 | mask = (mask > 0.5).float() 67 | color = color * (1 - mask) 68 | image_feed =
torch.cat([color, mask], dim=2) 69 | 70 | image_feed = rearrange(image_feed, 'h w c -> 1 c h w') 71 | result = self.model(image_feed)[0] 72 | result = rearrange(result, 'c h w -> h w c') 73 | result = result * mask + color * (1 - mask) 74 | result *= 255.0 75 | return result.detach().cpu().numpy().clip(0, 255).astype(np.uint8) 76 | -------------------------------------------------------------------------------- /annotator/lama/config.yaml: -------------------------------------------------------------------------------- 1 | run_title: b18_ffc075_batch8x15 2 | training_model: 3 | kind: default 4 | visualize_each_iters: 1000 5 | concat_mask: true 6 | store_discr_outputs_for_vis: true 7 | losses: 8 | l1: 9 | weight_missing: 0 10 | weight_known: 10 11 | perceptual: 12 | weight: 0 13 | adversarial: 14 | kind: r1 15 | weight: 10 16 | gp_coef: 0.001 17 | mask_as_fake_target: true 18 | allow_scale_mask: true 19 | feature_matching: 20 | weight: 100 21 | resnet_pl: 22 | weight: 30 23 | weights_path: ${env:TORCH_HOME} 24 | 25 | optimizers: 26 | generator: 27 | kind: adam 28 | lr: 0.001 29 | discriminator: 30 | kind: adam 31 | lr: 0.0001 32 | visualizer: 33 | key_order: 34 | - image 35 | - predicted_image 36 | - discr_output_fake 37 | - discr_output_real 38 | - inpainted 39 | rescale_keys: 40 | - discr_output_fake 41 | - discr_output_real 42 | kind: directory 43 | outdir: /group-volume/User-Driven-Content-Generation/r.suvorov/inpainting/experiments/r.suvorov_2021-04-30_14-41-12_train_simple_pix2pix2_gap_sdpl_novgg_large_b18_ffc075_batch8x15/samples 44 | location: 45 | data_root_dir: /group-volume/User-Driven-Content-Generation/datasets/inpainting_data_root_large 46 | out_root_dir: /group-volume/User-Driven-Content-Generation/${env:USER}/inpainting/experiments 47 | tb_dir: /group-volume/User-Driven-Content-Generation/${env:USER}/inpainting/tb_logs 48 | data: 49 | batch_size: 15 50 | val_batch_size: 2 51 | num_workers: 3 52 | train: 53 | indir: ${location.data_root_dir}/train 54 | out_size: 256 55 | mask_gen_kwargs: 56 | irregular_proba: 1 57 | irregular_kwargs: 58 | max_angle: 4 59 | max_len: 200 60 | max_width: 100 61 | max_times: 5 62 | min_times: 1 63 | box_proba: 1 64 | box_kwargs: 65 | margin: 10 66 | bbox_min_size: 30 67 | bbox_max_size: 150 68 | max_times: 3 69 | min_times: 1 70 | segm_proba: 0 71 | segm_kwargs: 72 | confidence_threshold: 0.5 73 | max_object_area: 0.5 74 | min_mask_area: 0.07 75 | downsample_levels: 6 76 | num_variants_per_mask: 1 77 | rigidness_mode: 1 78 | max_foreground_coverage: 0.3 79 | max_foreground_intersection: 0.7 80 | max_mask_intersection: 0.1 81 | max_hidden_area: 0.1 82 | max_scale_change: 0.25 83 | horizontal_flip: true 84 | max_vertical_shift: 0.2 85 | position_shuffle: true 86 | transform_variant: distortions 87 | dataloader_kwargs: 88 | batch_size: ${data.batch_size} 89 | shuffle: true 90 | num_workers: ${data.num_workers} 91 | val: 92 | indir: ${location.data_root_dir}/val 93 | img_suffix: .png 94 | dataloader_kwargs: 95 | batch_size: ${data.val_batch_size} 96 | shuffle: false 97 | num_workers: ${data.num_workers} 98 | visual_test: 99 | indir: ${location.data_root_dir}/korean_test 100 | img_suffix: _input.png 101 | pad_out_to_modulo: 32 102 | dataloader_kwargs: 103 | batch_size: 1 104 | shuffle: false 105 | num_workers: ${data.num_workers} 106 | generator: 107 | kind: ffc_resnet 108 | input_nc: 4 109 | output_nc: 3 110 | ngf: 64 111 | n_downsampling: 3 112 | n_blocks: 18 113 | add_out_act: sigmoid 114 | init_conv_kwargs: 115 | ratio_gin: 0 116 | ratio_gout: 0 
117 | enable_lfu: false 118 | downsample_conv_kwargs: 119 | ratio_gin: ${generator.init_conv_kwargs.ratio_gout} 120 | ratio_gout: ${generator.downsample_conv_kwargs.ratio_gin} 121 | enable_lfu: false 122 | resnet_conv_kwargs: 123 | ratio_gin: 0.75 124 | ratio_gout: ${generator.resnet_conv_kwargs.ratio_gin} 125 | enable_lfu: false 126 | discriminator: 127 | kind: pix2pixhd_nlayer 128 | input_nc: 3 129 | ndf: 64 130 | n_layers: 4 131 | evaluator: 132 | kind: default 133 | inpainted_key: inpainted 134 | integral_kind: ssim_fid100_f1 135 | trainer: 136 | kwargs: 137 | gpus: -1 138 | accelerator: ddp 139 | max_epochs: 200 140 | gradient_clip_val: 1 141 | log_gpu_memory: None 142 | limit_train_batches: 25000 143 | val_check_interval: ${trainer.kwargs.limit_train_batches} 144 | log_every_n_steps: 1000 145 | precision: 32 146 | terminate_on_nan: false 147 | check_val_every_n_epoch: 1 148 | num_sanity_val_steps: 8 149 | limit_val_batches: 1000 150 | replace_sampler_ddp: false 151 | checkpoint_kwargs: 152 | verbose: true 153 | save_top_k: 5 154 | save_last: true 155 | period: 1 156 | monitor: val_ssim_fid100_f1_total_mean 157 | mode: max 158 | -------------------------------------------------------------------------------- /annotator/lama/saicinpainting/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/mlinmg/ComfyUI-LaMA-Preprocessor/7c302f6175bcb77f0947b47e2629ec2f00aee346/annotator/lama/saicinpainting/__init__.py -------------------------------------------------------------------------------- /annotator/lama/saicinpainting/__pycache__/__init__.cpython-310.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/mlinmg/ComfyUI-LaMA-Preprocessor/7c302f6175bcb77f0947b47e2629ec2f00aee346/annotator/lama/saicinpainting/__pycache__/__init__.cpython-310.pyc -------------------------------------------------------------------------------- /annotator/lama/saicinpainting/__pycache__/utils.cpython-310.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/mlinmg/ComfyUI-LaMA-Preprocessor/7c302f6175bcb77f0947b47e2629ec2f00aee346/annotator/lama/saicinpainting/__pycache__/utils.cpython-310.pyc -------------------------------------------------------------------------------- /annotator/lama/saicinpainting/training/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/mlinmg/ComfyUI-LaMA-Preprocessor/7c302f6175bcb77f0947b47e2629ec2f00aee346/annotator/lama/saicinpainting/training/__init__.py -------------------------------------------------------------------------------- /annotator/lama/saicinpainting/training/__pycache__/__init__.cpython-310.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/mlinmg/ComfyUI-LaMA-Preprocessor/7c302f6175bcb77f0947b47e2629ec2f00aee346/annotator/lama/saicinpainting/training/__pycache__/__init__.cpython-310.pyc -------------------------------------------------------------------------------- /annotator/lama/saicinpainting/training/data/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/mlinmg/ComfyUI-LaMA-Preprocessor/7c302f6175bcb77f0947b47e2629ec2f00aee346/annotator/lama/saicinpainting/training/data/__init__.py 
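For reference, below is a minimal sketch of how a config like annotator/lama/config.yaml above is loaded for inference, mirroring what LamaInpainting.load_model in annotator/lama/__init__.py does before calling load_checkpoint; the literal 'config.yaml' path and the printed keys are illustrative only.

import yaml
from omegaconf import OmegaConf

# Load the YAML training config and switch it into inference mode,
# the same two overrides load_model() applies.
with open('config.yaml', 'rt') as f:  # illustrative path
    cfg = OmegaConf.create(yaml.safe_load(f))

cfg.training_model.predict_only = True   # skip training-only code paths
cfg.visualizer.kind = 'noop'             # disable sample dumping during inference

print(cfg.generator.kind)                          # -> ffc_resnet
print(cfg.generator.resnet_conv_kwargs.ratio_gin)  # -> 0.75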
-------------------------------------------------------------------------------- /annotator/lama/saicinpainting/training/data/masks.py: -------------------------------------------------------------------------------- 1 | import math 2 | import random 3 | import hashlib 4 | import logging 5 | from enum import Enum 6 | 7 | import cv2 8 | import numpy as np 9 | 10 | # from annotator.lama.saicinpainting.evaluation.masks.mask import SegmentationMask 11 | from annotator.lama.saicinpainting.utils import LinearRamp 12 | 13 | LOGGER = logging.getLogger(__name__) 14 | 15 | 16 | class DrawMethod(Enum): 17 | LINE = 'line' 18 | CIRCLE = 'circle' 19 | SQUARE = 'square' 20 | 21 | 22 | def make_random_irregular_mask(shape, max_angle=4, max_len=60, max_width=20, min_times=0, max_times=10, 23 | draw_method=DrawMethod.LINE): 24 | draw_method = DrawMethod(draw_method) 25 | 26 | height, width = shape 27 | mask = np.zeros((height, width), np.float32) 28 | times = np.random.randint(min_times, max_times + 1) 29 | for i in range(times): 30 | start_x = np.random.randint(width) 31 | start_y = np.random.randint(height) 32 | for j in range(1 + np.random.randint(5)): 33 | angle = 0.01 + np.random.randint(max_angle) 34 | if i % 2 == 0: 35 | angle = 2 * 3.1415926 - angle 36 | length = 10 + np.random.randint(max_len) 37 | brush_w = 5 + np.random.randint(max_width) 38 | end_x = np.clip((start_x + length * np.sin(angle)).astype(np.int32), 0, width) 39 | end_y = np.clip((start_y + length * np.cos(angle)).astype(np.int32), 0, height) 40 | if draw_method == DrawMethod.LINE: 41 | cv2.line(mask, (start_x, start_y), (end_x, end_y), 1.0, brush_w) 42 | elif draw_method == DrawMethod.CIRCLE: 43 | cv2.circle(mask, (start_x, start_y), radius=brush_w, color=1., thickness=-1) 44 | elif draw_method == DrawMethod.SQUARE: 45 | radius = brush_w // 2 46 | mask[start_y - radius:start_y + radius, start_x - radius:start_x + radius] = 1 47 | start_x, start_y = end_x, end_y 48 | return mask[None, ...] 
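# --- Illustrative usage sketch (hypothetical values) --------------------------------
# make_random_irregular_mask() above draws random brush strokes and returns a
# float32 mask of shape (1, H, W) where 1.0 marks the region to be inpainted.
# A minimal call could look like:
#
#     mask = make_random_irregular_mask((256, 256), max_angle=4, max_len=60,
#                                       max_width=20, min_times=1, max_times=5,
#                                       draw_method=DrawMethod.LINE)
#     assert mask.shape == (1, 256, 256) and mask.dtype == np.float32
# -------------------------------------------------------------------------------------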
49 | 50 | 51 | class RandomIrregularMaskGenerator: 52 | def __init__(self, max_angle=4, max_len=60, max_width=20, min_times=0, max_times=10, ramp_kwargs=None, 53 | draw_method=DrawMethod.LINE): 54 | self.max_angle = max_angle 55 | self.max_len = max_len 56 | self.max_width = max_width 57 | self.min_times = min_times 58 | self.max_times = max_times 59 | self.draw_method = draw_method 60 | self.ramp = LinearRamp(**ramp_kwargs) if ramp_kwargs is not None else None 61 | 62 | def __call__(self, img, iter_i=None, raw_image=None): 63 | coef = self.ramp(iter_i) if (self.ramp is not None) and (iter_i is not None) else 1 64 | cur_max_len = int(max(1, self.max_len * coef)) 65 | cur_max_width = int(max(1, self.max_width * coef)) 66 | cur_max_times = int(self.min_times + 1 + (self.max_times - self.min_times) * coef) 67 | return make_random_irregular_mask(img.shape[1:], max_angle=self.max_angle, max_len=cur_max_len, 68 | max_width=cur_max_width, min_times=self.min_times, max_times=cur_max_times, 69 | draw_method=self.draw_method) 70 | 71 | 72 | def make_random_rectangle_mask(shape, margin=10, bbox_min_size=30, bbox_max_size=100, min_times=0, max_times=3): 73 | height, width = shape 74 | mask = np.zeros((height, width), np.float32) 75 | bbox_max_size = min(bbox_max_size, height - margin * 2, width - margin * 2) 76 | times = np.random.randint(min_times, max_times + 1) 77 | for i in range(times): 78 | box_width = np.random.randint(bbox_min_size, bbox_max_size) 79 | box_height = np.random.randint(bbox_min_size, bbox_max_size) 80 | start_x = np.random.randint(margin, width - margin - box_width + 1) 81 | start_y = np.random.randint(margin, height - margin - box_height + 1) 82 | mask[start_y:start_y + box_height, start_x:start_x + box_width] = 1 83 | return mask[None, ...] 
84 | 85 | 86 | class RandomRectangleMaskGenerator: 87 | def __init__(self, margin=10, bbox_min_size=30, bbox_max_size=100, min_times=0, max_times=3, ramp_kwargs=None): 88 | self.margin = margin 89 | self.bbox_min_size = bbox_min_size 90 | self.bbox_max_size = bbox_max_size 91 | self.min_times = min_times 92 | self.max_times = max_times 93 | self.ramp = LinearRamp(**ramp_kwargs) if ramp_kwargs is not None else None 94 | 95 | def __call__(self, img, iter_i=None, raw_image=None): 96 | coef = self.ramp(iter_i) if (self.ramp is not None) and (iter_i is not None) else 1 97 | cur_bbox_max_size = int(self.bbox_min_size + 1 + (self.bbox_max_size - self.bbox_min_size) * coef) 98 | cur_max_times = int(self.min_times + (self.max_times - self.min_times) * coef) 99 | return make_random_rectangle_mask(img.shape[1:], margin=self.margin, bbox_min_size=self.bbox_min_size, 100 | bbox_max_size=cur_bbox_max_size, min_times=self.min_times, 101 | max_times=cur_max_times) 102 | 103 | 104 | class RandomSegmentationMaskGenerator: 105 | def __init__(self, **kwargs): 106 | self.impl = None # will be instantiated in first call (effectively in subprocess) 107 | self.kwargs = kwargs 108 | 109 | def __call__(self, img, iter_i=None, raw_image=None): 110 | if self.impl is None: 111 | self.impl = SegmentationMask(**self.kwargs) 112 | 113 | masks = self.impl.get_masks(np.transpose(img, (1, 2, 0))) 114 | masks = [m for m in masks if len(np.unique(m)) > 1] 115 | return np.random.choice(masks) 116 | 117 | 118 | def make_random_superres_mask(shape, min_step=2, max_step=4, min_width=1, max_width=3): 119 | height, width = shape 120 | mask = np.zeros((height, width), np.float32) 121 | step_x = np.random.randint(min_step, max_step + 1) 122 | width_x = np.random.randint(min_width, min(step_x, max_width + 1)) 123 | offset_x = np.random.randint(0, step_x) 124 | 125 | step_y = np.random.randint(min_step, max_step + 1) 126 | width_y = np.random.randint(min_width, min(step_y, max_width + 1)) 127 | offset_y = np.random.randint(0, step_y) 128 | 129 | for dy in range(width_y): 130 | mask[offset_y + dy::step_y] = 1 131 | for dx in range(width_x): 132 | mask[:, offset_x + dx::step_x] = 1 133 | return mask[None, ...] 134 | 135 | 136 | class RandomSuperresMaskGenerator: 137 | def __init__(self, **kwargs): 138 | self.kwargs = kwargs 139 | 140 | def __call__(self, img, iter_i=None): 141 | return make_random_superres_mask(img.shape[1:], **self.kwargs) 142 | 143 | 144 | class DumbAreaMaskGenerator: 145 | min_ratio = 0.1 146 | max_ratio = 0.35 147 | default_ratio = 0.225 148 | 149 | def __init__(self, is_training): 150 | #Parameters: 151 | # is_training(bool): If true - random rectangular mask, if false - central square mask 152 | self.is_training = is_training 153 | 154 | def _random_vector(self, dimension): 155 | if self.is_training: 156 | lower_limit = math.sqrt(self.min_ratio) 157 | upper_limit = math.sqrt(self.max_ratio) 158 | mask_side = round((random.random() * (upper_limit - lower_limit) + lower_limit) * dimension) 159 | u = random.randint(0, dimension-mask_side-1) 160 | v = u+mask_side 161 | else: 162 | margin = (math.sqrt(self.default_ratio) / 2) * dimension 163 | u = round(dimension/2 - margin) 164 | v = round(dimension/2 + margin) 165 | return u, v 166 | 167 | def __call__(self, img, iter_i=None, raw_image=None): 168 | c, height, width = img.shape 169 | mask = np.zeros((height, width), np.float32) 170 | x1, x2 = self._random_vector(width) 171 | y1, y2 = self._random_vector(height) 172 | mask[x1:x2, y1:y2] = 1 173 | return mask[None, ...] 
174 | 175 | 176 | class OutpaintingMaskGenerator: 177 | def __init__(self, min_padding_percent:float=0.04, max_padding_percent:int=0.25, left_padding_prob:float=0.5, top_padding_prob:float=0.5, 178 | right_padding_prob:float=0.5, bottom_padding_prob:float=0.5, is_fixed_randomness:bool=False): 179 | """ 180 | is_fixed_randomness - get identical paddings for the same image if args are the same 181 | """ 182 | self.min_padding_percent = min_padding_percent 183 | self.max_padding_percent = max_padding_percent 184 | self.probs = [left_padding_prob, top_padding_prob, right_padding_prob, bottom_padding_prob] 185 | self.is_fixed_randomness = is_fixed_randomness 186 | 187 | assert self.min_padding_percent <= self.max_padding_percent 188 | assert self.max_padding_percent > 0 189 | assert len([x for x in [self.min_padding_percent, self.max_padding_percent] if (x>=0 and x<=1)]) == 2, f"Padding percentage should be in [0,1]" 190 | assert sum(self.probs) > 0, f"At least one of the padding probs should be greater than 0 - {self.probs}" 191 | assert len([x for x in self.probs if (x >= 0) and (x <= 1)]) == 4, f"At least one of padding probs is not in [0,1] - {self.probs}" 192 | if len([x for x in self.probs if x > 0]) == 1: 193 | LOGGER.warning(f"Only one padding prob is greater than zero - {self.probs}. That means that the outpainting masks will be always on the same side") 194 | 195 | def apply_padding(self, mask, coord): 196 | mask[int(coord[0][0]*self.img_h):int(coord[1][0]*self.img_h), 197 | int(coord[0][1]*self.img_w):int(coord[1][1]*self.img_w)] = 1 198 | return mask 199 | 200 | def get_padding(self, size): 201 | n1 = int(self.min_padding_percent*size) 202 | n2 = int(self.max_padding_percent*size) 203 | return self.rnd.randint(n1, n2) / size 204 | 205 | @staticmethod 206 | def _img2rs(img): 207 | arr = np.ascontiguousarray(img.astype(np.uint8)) 208 | str_hash = hashlib.sha1(arr).hexdigest() 209 | res = hash(str_hash)%(2**32) 210 | return res 211 | 212 | def __call__(self, img, iter_i=None, raw_image=None): 213 | c, self.img_h, self.img_w = img.shape 214 | mask = np.zeros((self.img_h, self.img_w), np.float32) 215 | at_least_one_mask_applied = False 216 | 217 | if self.is_fixed_randomness: 218 | assert raw_image is not None, f"Cant calculate hash on raw_image=None" 219 | rs = self._img2rs(raw_image) 220 | self.rnd = np.random.RandomState(rs) 221 | else: 222 | self.rnd = np.random 223 | 224 | coords = [[ 225 | (0,0), 226 | (1,self.get_padding(size=self.img_h)) 227 | ], 228 | [ 229 | (0,0), 230 | (self.get_padding(size=self.img_w),1) 231 | ], 232 | [ 233 | (0,1-self.get_padding(size=self.img_h)), 234 | (1,1) 235 | ], 236 | [ 237 | (1-self.get_padding(size=self.img_w),0), 238 | (1,1) 239 | ]] 240 | 241 | for pp, coord in zip(self.probs, coords): 242 | if self.rnd.random() < pp: 243 | at_least_one_mask_applied = True 244 | mask = self.apply_padding(mask=mask, coord=coord) 245 | 246 | if not at_least_one_mask_applied: 247 | idx = self.rnd.choice(range(len(coords)), p=np.array(self.probs)/sum(self.probs)) 248 | mask = self.apply_padding(mask=mask, coord=coords[idx]) 249 | return mask[None, ...] 
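# --- Illustrative usage sketch (hypothetical values) --------------------------------
# OutpaintingMaskGenerator marks padding bands along the image borders. With
# is_fixed_randomness=True it seeds a RandomState from a hash of raw_image, so
# repeated calls on the same image within a run yield the same padding mask.
#
#     gen = OutpaintingMaskGenerator(min_padding_percent=0.1,
#                                    max_padding_percent=0.25,
#                                    is_fixed_randomness=True)
#     img = np.zeros((3, 256, 256), dtype=np.float32)   # CHW layout, as __call__ expects
#     raw = img.transpose(1, 2, 0) * 255                # raw_image is only used for hashing
#     m1 = gen(img, raw_image=raw)
#     m2 = gen(img, raw_image=raw)
#     assert m1.shape == (1, 256, 256) and np.array_equal(m1, m2)
# -------------------------------------------------------------------------------------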
250 | 251 | 252 | class MixedMaskGenerator: 253 | def __init__(self, irregular_proba=1/3, irregular_kwargs=None, 254 | box_proba=1/3, box_kwargs=None, 255 | segm_proba=1/3, segm_kwargs=None, 256 | squares_proba=0, squares_kwargs=None, 257 | superres_proba=0, superres_kwargs=None, 258 | outpainting_proba=0, outpainting_kwargs=None, 259 | invert_proba=0): 260 | self.probas = [] 261 | self.gens = [] 262 | 263 | if irregular_proba > 0: 264 | self.probas.append(irregular_proba) 265 | if irregular_kwargs is None: 266 | irregular_kwargs = {} 267 | else: 268 | irregular_kwargs = dict(irregular_kwargs) 269 | irregular_kwargs['draw_method'] = DrawMethod.LINE 270 | self.gens.append(RandomIrregularMaskGenerator(**irregular_kwargs)) 271 | 272 | if box_proba > 0: 273 | self.probas.append(box_proba) 274 | if box_kwargs is None: 275 | box_kwargs = {} 276 | self.gens.append(RandomRectangleMaskGenerator(**box_kwargs)) 277 | 278 | if segm_proba > 0: 279 | self.probas.append(segm_proba) 280 | if segm_kwargs is None: 281 | segm_kwargs = {} 282 | self.gens.append(RandomSegmentationMaskGenerator(**segm_kwargs)) 283 | 284 | if squares_proba > 0: 285 | self.probas.append(squares_proba) 286 | if squares_kwargs is None: 287 | squares_kwargs = {} 288 | else: 289 | squares_kwargs = dict(squares_kwargs) 290 | squares_kwargs['draw_method'] = DrawMethod.SQUARE 291 | self.gens.append(RandomIrregularMaskGenerator(**squares_kwargs)) 292 | 293 | if superres_proba > 0: 294 | self.probas.append(superres_proba) 295 | if superres_kwargs is None: 296 | superres_kwargs = {} 297 | self.gens.append(RandomSuperresMaskGenerator(**superres_kwargs)) 298 | 299 | if outpainting_proba > 0: 300 | self.probas.append(outpainting_proba) 301 | if outpainting_kwargs is None: 302 | outpainting_kwargs = {} 303 | self.gens.append(OutpaintingMaskGenerator(**outpainting_kwargs)) 304 | 305 | self.probas = np.array(self.probas, dtype='float32') 306 | self.probas /= self.probas.sum() 307 | self.invert_proba = invert_proba 308 | 309 | def __call__(self, img, iter_i=None, raw_image=None): 310 | kind = np.random.choice(len(self.probas), p=self.probas) 311 | gen = self.gens[kind] 312 | result = gen(img, iter_i=iter_i, raw_image=raw_image) 313 | if self.invert_proba > 0 and random.random() < self.invert_proba: 314 | result = 1 - result 315 | return result 316 | 317 | 318 | def get_mask_generator(kind, kwargs): 319 | if kind is None: 320 | kind = "mixed" 321 | if kwargs is None: 322 | kwargs = {} 323 | 324 | if kind == "mixed": 325 | cl = MixedMaskGenerator 326 | elif kind == "outpainting": 327 | cl = OutpaintingMaskGenerator 328 | elif kind == "dumb": 329 | cl = DumbAreaMaskGenerator 330 | else: 331 | raise NotImplementedError(f"No such generator kind = {kind}") 332 | return cl(**kwargs) 333 | -------------------------------------------------------------------------------- /annotator/lama/saicinpainting/training/losses/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/mlinmg/ComfyUI-LaMA-Preprocessor/7c302f6175bcb77f0947b47e2629ec2f00aee346/annotator/lama/saicinpainting/training/losses/__init__.py -------------------------------------------------------------------------------- /annotator/lama/saicinpainting/training/losses/__pycache__/__init__.cpython-310.pyc: -------------------------------------------------------------------------------- 
https://raw.githubusercontent.com/mlinmg/ComfyUI-LaMA-Preprocessor/7c302f6175bcb77f0947b47e2629ec2f00aee346/annotator/lama/saicinpainting/training/losses/__pycache__/__init__.cpython-310.pyc -------------------------------------------------------------------------------- /annotator/lama/saicinpainting/training/losses/__pycache__/distance_weighting.cpython-310.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/mlinmg/ComfyUI-LaMA-Preprocessor/7c302f6175bcb77f0947b47e2629ec2f00aee346/annotator/lama/saicinpainting/training/losses/__pycache__/distance_weighting.cpython-310.pyc -------------------------------------------------------------------------------- /annotator/lama/saicinpainting/training/losses/__pycache__/feature_matching.cpython-310.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/mlinmg/ComfyUI-LaMA-Preprocessor/7c302f6175bcb77f0947b47e2629ec2f00aee346/annotator/lama/saicinpainting/training/losses/__pycache__/feature_matching.cpython-310.pyc -------------------------------------------------------------------------------- /annotator/lama/saicinpainting/training/losses/__pycache__/perceptual.cpython-310.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/mlinmg/ComfyUI-LaMA-Preprocessor/7c302f6175bcb77f0947b47e2629ec2f00aee346/annotator/lama/saicinpainting/training/losses/__pycache__/perceptual.cpython-310.pyc -------------------------------------------------------------------------------- /annotator/lama/saicinpainting/training/losses/adversarial.py: -------------------------------------------------------------------------------- 1 | from typing import Tuple, Dict, Optional 2 | 3 | import torch 4 | import torch.nn as nn 5 | import torch.nn.functional as F 6 | 7 | 8 | class BaseAdversarialLoss: 9 | def pre_generator_step(self, real_batch: torch.Tensor, fake_batch: torch.Tensor, 10 | generator: nn.Module, discriminator: nn.Module): 11 | """ 12 | Prepare for generator step 13 | :param real_batch: Tensor, a batch of real samples 14 | :param fake_batch: Tensor, a batch of samples produced by generator 15 | :param generator: 16 | :param discriminator: 17 | :return: None 18 | """ 19 | 20 | def pre_discriminator_step(self, real_batch: torch.Tensor, fake_batch: torch.Tensor, 21 | generator: nn.Module, discriminator: nn.Module): 22 | """ 23 | Prepare for discriminator step 24 | :param real_batch: Tensor, a batch of real samples 25 | :param fake_batch: Tensor, a batch of samples produced by generator 26 | :param generator: 27 | :param discriminator: 28 | :return: None 29 | """ 30 | 31 | def generator_loss(self, real_batch: torch.Tensor, fake_batch: torch.Tensor, 32 | discr_real_pred: torch.Tensor, discr_fake_pred: torch.Tensor, 33 | mask: Optional[torch.Tensor] = None) \ 34 | -> Tuple[torch.Tensor, Dict[str, torch.Tensor]]: 35 | """ 36 | Calculate generator loss 37 | :param real_batch: Tensor, a batch of real samples 38 | :param fake_batch: Tensor, a batch of samples produced by generator 39 | :param discr_real_pred: Tensor, discriminator output for real_batch 40 | :param discr_fake_pred: Tensor, discriminator output for fake_batch 41 | :param mask: Tensor, actual mask, which was at input of generator when making fake_batch 42 | :return: total generator loss along with some values that might be interesting to log 43 | """ 44 | raise NotImplemented() 45 | 46 | def discriminator_loss(self, 
real_batch: torch.Tensor, fake_batch: torch.Tensor, 47 | discr_real_pred: torch.Tensor, discr_fake_pred: torch.Tensor, 48 | mask: Optional[torch.Tensor] = None) \ 49 | -> Tuple[torch.Tensor, Dict[str, torch.Tensor]]: 50 | """ 51 | Calculate discriminator loss and call .backward() on it 52 | :param real_batch: Tensor, a batch of real samples 53 | :param fake_batch: Tensor, a batch of samples produced by generator 54 | :param discr_real_pred: Tensor, discriminator output for real_batch 55 | :param discr_fake_pred: Tensor, discriminator output for fake_batch 56 | :param mask: Tensor, actual mask, which was at input of generator when making fake_batch 57 | :return: total discriminator loss along with some values that might be interesting to log 58 | """ 59 | raise NotImplemented() 60 | 61 | def interpolate_mask(self, mask, shape): 62 | assert mask is not None 63 | assert self.allow_scale_mask or shape == mask.shape[-2:] 64 | if shape != mask.shape[-2:] and self.allow_scale_mask: 65 | if self.mask_scale_mode == 'maxpool': 66 | mask = F.adaptive_max_pool2d(mask, shape) 67 | else: 68 | mask = F.interpolate(mask, size=shape, mode=self.mask_scale_mode) 69 | return mask 70 | 71 | def make_r1_gp(discr_real_pred, real_batch): 72 | if torch.is_grad_enabled(): 73 | grad_real = torch.autograd.grad(outputs=discr_real_pred.sum(), inputs=real_batch, create_graph=True)[0] 74 | grad_penalty = (grad_real.view(grad_real.shape[0], -1).norm(2, dim=1) ** 2).mean() 75 | else: 76 | grad_penalty = 0 77 | real_batch.requires_grad = False 78 | 79 | return grad_penalty 80 | 81 | class NonSaturatingWithR1(BaseAdversarialLoss): 82 | def __init__(self, gp_coef=5, weight=1, mask_as_fake_target=False, allow_scale_mask=False, 83 | mask_scale_mode='nearest', extra_mask_weight_for_gen=0, 84 | use_unmasked_for_gen=True, use_unmasked_for_discr=True): 85 | self.gp_coef = gp_coef 86 | self.weight = weight 87 | # use for discr => use for gen; 88 | # otherwise we teach only the discr to pay attention to very small difference 89 | assert use_unmasked_for_gen or (not use_unmasked_for_discr) 90 | # mask as target => use unmasked for discr: 91 | # if we don't care about unmasked regions at all 92 | # then it doesn't matter if the value of mask_as_fake_target is true or false 93 | assert use_unmasked_for_discr or (not mask_as_fake_target) 94 | self.use_unmasked_for_gen = use_unmasked_for_gen 95 | self.use_unmasked_for_discr = use_unmasked_for_discr 96 | self.mask_as_fake_target = mask_as_fake_target 97 | self.allow_scale_mask = allow_scale_mask 98 | self.mask_scale_mode = mask_scale_mode 99 | self.extra_mask_weight_for_gen = extra_mask_weight_for_gen 100 | 101 | def generator_loss(self, real_batch: torch.Tensor, fake_batch: torch.Tensor, 102 | discr_real_pred: torch.Tensor, discr_fake_pred: torch.Tensor, 103 | mask=None) \ 104 | -> Tuple[torch.Tensor, Dict[str, torch.Tensor]]: 105 | fake_loss = F.softplus(-discr_fake_pred) 106 | if (self.mask_as_fake_target and self.extra_mask_weight_for_gen > 0) or \ 107 | not self.use_unmasked_for_gen: # == if masked region should be treated differently 108 | mask = self.interpolate_mask(mask, discr_fake_pred.shape[-2:]) 109 | if not self.use_unmasked_for_gen: 110 | fake_loss = fake_loss * mask 111 | else: 112 | pixel_weights = 1 + mask * self.extra_mask_weight_for_gen 113 | fake_loss = fake_loss * pixel_weights 114 | 115 | return fake_loss.mean() * self.weight, dict() 116 | 117 | def pre_discriminator_step(self, real_batch: torch.Tensor, fake_batch: torch.Tensor, 118 | generator: nn.Module, 
discriminator: nn.Module): 119 | real_batch.requires_grad = True 120 | 121 | def discriminator_loss(self, real_batch: torch.Tensor, fake_batch: torch.Tensor, 122 | discr_real_pred: torch.Tensor, discr_fake_pred: torch.Tensor, 123 | mask=None) \ 124 | -> Tuple[torch.Tensor, Dict[str, torch.Tensor]]: 125 | 126 | real_loss = F.softplus(-discr_real_pred) 127 | grad_penalty = make_r1_gp(discr_real_pred, real_batch) * self.gp_coef 128 | fake_loss = F.softplus(discr_fake_pred) 129 | 130 | if not self.use_unmasked_for_discr or self.mask_as_fake_target: 131 | # == if masked region should be treated differently 132 | mask = self.interpolate_mask(mask, discr_fake_pred.shape[-2:]) 133 | # use_unmasked_for_discr=False only makes sense for fakes; 134 | # for reals there is no difference beetween two regions 135 | fake_loss = fake_loss * mask 136 | if self.mask_as_fake_target: 137 | fake_loss = fake_loss + (1 - mask) * F.softplus(-discr_fake_pred) 138 | 139 | sum_discr_loss = real_loss + grad_penalty + fake_loss 140 | metrics = dict(discr_real_out=discr_real_pred.mean(), 141 | discr_fake_out=discr_fake_pred.mean(), 142 | discr_real_gp=grad_penalty) 143 | return sum_discr_loss.mean(), metrics 144 | 145 | class BCELoss(BaseAdversarialLoss): 146 | def __init__(self, weight): 147 | self.weight = weight 148 | self.bce_loss = nn.BCEWithLogitsLoss() 149 | 150 | def generator_loss(self, discr_fake_pred: torch.Tensor) -> Tuple[torch.Tensor, Dict[str, torch.Tensor]]: 151 | real_mask_gt = torch.zeros(discr_fake_pred.shape).to(discr_fake_pred.device) 152 | fake_loss = self.bce_loss(discr_fake_pred, real_mask_gt) * self.weight 153 | return fake_loss, dict() 154 | 155 | def pre_discriminator_step(self, real_batch: torch.Tensor, fake_batch: torch.Tensor, 156 | generator: nn.Module, discriminator: nn.Module): 157 | real_batch.requires_grad = True 158 | 159 | def discriminator_loss(self, 160 | mask: torch.Tensor, 161 | discr_real_pred: torch.Tensor, 162 | discr_fake_pred: torch.Tensor) -> Tuple[torch.Tensor, Dict[str, torch.Tensor]]: 163 | 164 | real_mask_gt = torch.zeros(discr_real_pred.shape).to(discr_real_pred.device) 165 | sum_discr_loss = (self.bce_loss(discr_real_pred, real_mask_gt) + self.bce_loss(discr_fake_pred, mask)) / 2 166 | metrics = dict(discr_real_out=discr_real_pred.mean(), 167 | discr_fake_out=discr_fake_pred.mean(), 168 | discr_real_gp=0) 169 | return sum_discr_loss, metrics 170 | 171 | 172 | def make_discrim_loss(kind, **kwargs): 173 | if kind == 'r1': 174 | return NonSaturatingWithR1(**kwargs) 175 | elif kind == 'bce': 176 | return BCELoss(**kwargs) 177 | raise ValueError(f'Unknown adversarial loss kind {kind}') 178 | -------------------------------------------------------------------------------- /annotator/lama/saicinpainting/training/losses/constants.py: -------------------------------------------------------------------------------- 1 | weights = {"ade20k": 2 | [6.34517766497462, 3 | 9.328358208955224, 4 | 11.389521640091116, 5 | 16.10305958132045, 6 | 20.833333333333332, 7 | 22.22222222222222, 8 | 25.125628140703515, 9 | 43.29004329004329, 10 | 50.5050505050505, 11 | 54.6448087431694, 12 | 55.24861878453038, 13 | 60.24096385542168, 14 | 62.5, 15 | 66.2251655629139, 16 | 84.74576271186442, 17 | 90.90909090909092, 18 | 91.74311926605505, 19 | 96.15384615384616, 20 | 96.15384615384616, 21 | 97.08737864077669, 22 | 102.04081632653062, 23 | 135.13513513513513, 24 | 149.2537313432836, 25 | 153.84615384615384, 26 | 163.93442622950818, 27 | 166.66666666666666, 28 | 188.67924528301887, 29 | 
192.30769230769232, 30 | 217.3913043478261, 31 | 227.27272727272725, 32 | 227.27272727272725, 33 | 227.27272727272725, 34 | 303.03030303030306, 35 | 322.5806451612903, 36 | 333.3333333333333, 37 | 370.3703703703703, 38 | 384.61538461538464, 39 | 416.6666666666667, 40 | 416.6666666666667, 41 | 434.7826086956522, 42 | 434.7826086956522, 43 | 454.5454545454545, 44 | 454.5454545454545, 45 | 500.0, 46 | 526.3157894736842, 47 | 526.3157894736842, 48 | 555.5555555555555, 49 | 555.5555555555555, 50 | 555.5555555555555, 51 | 555.5555555555555, 52 | 555.5555555555555, 53 | 555.5555555555555, 54 | 555.5555555555555, 55 | 588.2352941176471, 56 | 588.2352941176471, 57 | 588.2352941176471, 58 | 588.2352941176471, 59 | 588.2352941176471, 60 | 666.6666666666666, 61 | 666.6666666666666, 62 | 666.6666666666666, 63 | 666.6666666666666, 64 | 714.2857142857143, 65 | 714.2857142857143, 66 | 714.2857142857143, 67 | 714.2857142857143, 68 | 714.2857142857143, 69 | 769.2307692307693, 70 | 769.2307692307693, 71 | 769.2307692307693, 72 | 833.3333333333334, 73 | 833.3333333333334, 74 | 833.3333333333334, 75 | 833.3333333333334, 76 | 909.090909090909, 77 | 1000.0, 78 | 1111.111111111111, 79 | 1111.111111111111, 80 | 1111.111111111111, 81 | 1111.111111111111, 82 | 1111.111111111111, 83 | 1250.0, 84 | 1250.0, 85 | 1250.0, 86 | 1250.0, 87 | 1250.0, 88 | 1428.5714285714287, 89 | 1428.5714285714287, 90 | 1428.5714285714287, 91 | 1428.5714285714287, 92 | 1428.5714285714287, 93 | 1428.5714285714287, 94 | 1428.5714285714287, 95 | 1666.6666666666667, 96 | 1666.6666666666667, 97 | 1666.6666666666667, 98 | 1666.6666666666667, 99 | 1666.6666666666667, 100 | 1666.6666666666667, 101 | 1666.6666666666667, 102 | 1666.6666666666667, 103 | 1666.6666666666667, 104 | 1666.6666666666667, 105 | 1666.6666666666667, 106 | 2000.0, 107 | 2000.0, 108 | 2000.0, 109 | 2000.0, 110 | 2000.0, 111 | 2000.0, 112 | 2000.0, 113 | 2000.0, 114 | 2000.0, 115 | 2000.0, 116 | 2000.0, 117 | 2000.0, 118 | 2000.0, 119 | 2000.0, 120 | 2000.0, 121 | 2000.0, 122 | 2000.0, 123 | 2500.0, 124 | 2500.0, 125 | 2500.0, 126 | 2500.0, 127 | 2500.0, 128 | 2500.0, 129 | 2500.0, 130 | 2500.0, 131 | 2500.0, 132 | 2500.0, 133 | 2500.0, 134 | 2500.0, 135 | 2500.0, 136 | 3333.3333333333335, 137 | 3333.3333333333335, 138 | 3333.3333333333335, 139 | 3333.3333333333335, 140 | 3333.3333333333335, 141 | 3333.3333333333335, 142 | 3333.3333333333335, 143 | 3333.3333333333335, 144 | 3333.3333333333335, 145 | 3333.3333333333335, 146 | 3333.3333333333335, 147 | 3333.3333333333335, 148 | 3333.3333333333335, 149 | 5000.0, 150 | 5000.0, 151 | 5000.0] 152 | } -------------------------------------------------------------------------------- /annotator/lama/saicinpainting/training/losses/distance_weighting.py: -------------------------------------------------------------------------------- 1 | import torch 2 | import torch.nn as nn 3 | import torch.nn.functional as F 4 | import torchvision 5 | 6 | from annotator.lama.saicinpainting.training.losses.perceptual import IMAGENET_STD, IMAGENET_MEAN 7 | 8 | 9 | def dummy_distance_weighter(real_img, pred_img, mask): 10 | return mask 11 | 12 | 13 | def get_gauss_kernel(kernel_size, width_factor=1): 14 | coords = torch.stack(torch.meshgrid(torch.arange(kernel_size), 15 | torch.arange(kernel_size)), 16 | dim=0).float() 17 | diff = torch.exp(-((coords - kernel_size // 2) ** 2).sum(0) / kernel_size / width_factor) 18 | diff /= diff.sum() 19 | return diff 20 | 21 | 22 | class BlurMask(nn.Module): 23 | def __init__(self, kernel_size=5, width_factor=1): 24 | 
super().__init__() 25 | self.filter = nn.Conv2d(1, 1, kernel_size, padding=kernel_size // 2, padding_mode='replicate', bias=False) 26 | self.filter.weight.data.copy_(get_gauss_kernel(kernel_size, width_factor=width_factor)) 27 | 28 | def forward(self, real_img, pred_img, mask): 29 | with torch.no_grad(): 30 | result = self.filter(mask) * mask 31 | return result 32 | 33 | 34 | class EmulatedEDTMask(nn.Module): 35 | def __init__(self, dilate_kernel_size=5, blur_kernel_size=5, width_factor=1): 36 | super().__init__() 37 | self.dilate_filter = nn.Conv2d(1, 1, dilate_kernel_size, padding=dilate_kernel_size// 2, padding_mode='replicate', 38 | bias=False) 39 | self.dilate_filter.weight.data.copy_(torch.ones(1, 1, dilate_kernel_size, dilate_kernel_size, dtype=torch.float)) 40 | self.blur_filter = nn.Conv2d(1, 1, blur_kernel_size, padding=blur_kernel_size // 2, padding_mode='replicate', bias=False) 41 | self.blur_filter.weight.data.copy_(get_gauss_kernel(blur_kernel_size, width_factor=width_factor)) 42 | 43 | def forward(self, real_img, pred_img, mask): 44 | with torch.no_grad(): 45 | known_mask = 1 - mask 46 | dilated_known_mask = (self.dilate_filter(known_mask) > 1).float() 47 | result = self.blur_filter(1 - dilated_known_mask) * mask 48 | return result 49 | 50 | 51 | class PropagatePerceptualSim(nn.Module): 52 | def __init__(self, level=2, max_iters=10, temperature=500, erode_mask_size=3): 53 | super().__init__() 54 | vgg = torchvision.models.vgg19(pretrained=True).features 55 | vgg_avg_pooling = [] 56 | 57 | for weights in vgg.parameters(): 58 | weights.requires_grad = False 59 | 60 | cur_level_i = 0 61 | for module in vgg.modules(): 62 | if module.__class__.__name__ == 'Sequential': 63 | continue 64 | elif module.__class__.__name__ == 'MaxPool2d': 65 | vgg_avg_pooling.append(nn.AvgPool2d(kernel_size=2, stride=2, padding=0)) 66 | else: 67 | vgg_avg_pooling.append(module) 68 | if module.__class__.__name__ == 'ReLU': 69 | cur_level_i += 1 70 | if cur_level_i == level: 71 | break 72 | 73 | self.features = nn.Sequential(*vgg_avg_pooling) 74 | 75 | self.max_iters = max_iters 76 | self.temperature = temperature 77 | self.do_erode = erode_mask_size > 0 78 | if self.do_erode: 79 | self.erode_mask = nn.Conv2d(1, 1, erode_mask_size, padding=erode_mask_size // 2, bias=False) 80 | self.erode_mask.weight.data.fill_(1) 81 | 82 | def forward(self, real_img, pred_img, mask): 83 | with torch.no_grad(): 84 | real_img = (real_img - IMAGENET_MEAN.to(real_img)) / IMAGENET_STD.to(real_img) 85 | real_feats = self.features(real_img) 86 | 87 | vertical_sim = torch.exp(-(real_feats[:, :, 1:] - real_feats[:, :, :-1]).pow(2).sum(1, keepdim=True) 88 | / self.temperature) 89 | horizontal_sim = torch.exp(-(real_feats[:, :, :, 1:] - real_feats[:, :, :, :-1]).pow(2).sum(1, keepdim=True) 90 | / self.temperature) 91 | 92 | mask_scaled = F.interpolate(mask, size=real_feats.shape[-2:], mode='bilinear', align_corners=False) 93 | if self.do_erode: 94 | mask_scaled = (self.erode_mask(mask_scaled) > 1).float() 95 | 96 | cur_knowness = 1 - mask_scaled 97 | 98 | for iter_i in range(self.max_iters): 99 | new_top_knowness = F.pad(cur_knowness[:, :, :-1] * vertical_sim, (0, 0, 1, 0), mode='replicate') 100 | new_bottom_knowness = F.pad(cur_knowness[:, :, 1:] * vertical_sim, (0, 0, 0, 1), mode='replicate') 101 | 102 | new_left_knowness = F.pad(cur_knowness[:, :, :, :-1] * horizontal_sim, (1, 0, 0, 0), mode='replicate') 103 | new_right_knowness = F.pad(cur_knowness[:, :, :, 1:] * horizontal_sim, (0, 1, 0, 0), mode='replicate') 104 | 105 | 
new_knowness = torch.stack([new_top_knowness, new_bottom_knowness, 106 | new_left_knowness, new_right_knowness], 107 | dim=0).max(0).values 108 | 109 | cur_knowness = torch.max(cur_knowness, new_knowness) 110 | 111 | cur_knowness = F.interpolate(cur_knowness, size=mask.shape[-2:], mode='bilinear') 112 | result = torch.min(mask, 1 - cur_knowness) 113 | 114 | return result 115 | 116 | 117 | def make_mask_distance_weighter(kind='none', **kwargs): 118 | if kind == 'none': 119 | return dummy_distance_weighter 120 | if kind == 'blur': 121 | return BlurMask(**kwargs) 122 | if kind == 'edt': 123 | return EmulatedEDTMask(**kwargs) 124 | if kind == 'pps': 125 | return PropagatePerceptualSim(**kwargs) 126 | raise ValueError(f'Unknown mask distance weighter kind {kind}') 127 | -------------------------------------------------------------------------------- /annotator/lama/saicinpainting/training/losses/feature_matching.py: -------------------------------------------------------------------------------- 1 | from typing import List 2 | 3 | import torch 4 | import torch.nn.functional as F 5 | 6 | 7 | def masked_l2_loss(pred, target, mask, weight_known, weight_missing): 8 | per_pixel_l2 = F.mse_loss(pred, target, reduction='none') 9 | pixel_weights = mask * weight_missing + (1 - mask) * weight_known 10 | return (pixel_weights * per_pixel_l2).mean() 11 | 12 | 13 | def masked_l1_loss(pred, target, mask, weight_known, weight_missing): 14 | per_pixel_l1 = F.l1_loss(pred, target, reduction='none') 15 | pixel_weights = mask * weight_missing + (1 - mask) * weight_known 16 | return (pixel_weights * per_pixel_l1).mean() 17 | 18 | 19 | def feature_matching_loss(fake_features: List[torch.Tensor], target_features: List[torch.Tensor], mask=None): 20 | if mask is None: 21 | res = torch.stack([F.mse_loss(fake_feat, target_feat) 22 | for fake_feat, target_feat in zip(fake_features, target_features)]).mean() 23 | else: 24 | res = 0 25 | norm = 0 26 | for fake_feat, target_feat in zip(fake_features, target_features): 27 | cur_mask = F.interpolate(mask, size=fake_feat.shape[-2:], mode='bilinear', align_corners=False) 28 | error_weights = 1 - cur_mask 29 | cur_val = ((fake_feat - target_feat).pow(2) * error_weights).mean() 30 | res = res + cur_val 31 | norm += 1 32 | res = res / norm 33 | return res 34 | -------------------------------------------------------------------------------- /annotator/lama/saicinpainting/training/losses/perceptual.py: -------------------------------------------------------------------------------- 1 | import torch 2 | import torch.nn as nn 3 | import torch.nn.functional as F 4 | import torchvision 5 | 6 | # from models.ade20k import ModelBuilder 7 | from annotator.lama.saicinpainting.utils import check_and_warn_input_range 8 | 9 | 10 | IMAGENET_MEAN = torch.FloatTensor([0.485, 0.456, 0.406])[None, :, None, None] 11 | IMAGENET_STD = torch.FloatTensor([0.229, 0.224, 0.225])[None, :, None, None] 12 | 13 | 14 | class PerceptualLoss(nn.Module): 15 | def __init__(self, normalize_inputs=True): 16 | super(PerceptualLoss, self).__init__() 17 | 18 | self.normalize_inputs = normalize_inputs 19 | self.mean_ = IMAGENET_MEAN 20 | self.std_ = IMAGENET_STD 21 | 22 | vgg = torchvision.models.vgg19(pretrained=True).features 23 | vgg_avg_pooling = [] 24 | 25 | for weights in vgg.parameters(): 26 | weights.requires_grad = False 27 | 28 | for module in vgg.modules(): 29 | if module.__class__.__name__ == 'Sequential': 30 | continue 31 | elif module.__class__.__name__ == 'MaxPool2d': 32 | 
vgg_avg_pooling.append(nn.AvgPool2d(kernel_size=2, stride=2, padding=0)) 33 | else: 34 | vgg_avg_pooling.append(module) 35 | 36 | self.vgg = nn.Sequential(*vgg_avg_pooling) 37 | 38 | def do_normalize_inputs(self, x): 39 | return (x - self.mean_.to(x.device)) / self.std_.to(x.device) 40 | 41 | def partial_losses(self, input, target, mask=None): 42 | check_and_warn_input_range(target, 0, 1, 'PerceptualLoss target in partial_losses') 43 | 44 | # we expect input and target to be in [0, 1] range 45 | losses = [] 46 | 47 | if self.normalize_inputs: 48 | features_input = self.do_normalize_inputs(input) 49 | features_target = self.do_normalize_inputs(target) 50 | else: 51 | features_input = input 52 | features_target = target 53 | 54 | for layer in self.vgg[:30]: 55 | 56 | features_input = layer(features_input) 57 | features_target = layer(features_target) 58 | 59 | if layer.__class__.__name__ == 'ReLU': 60 | loss = F.mse_loss(features_input, features_target, reduction='none') 61 | 62 | if mask is not None: 63 | cur_mask = F.interpolate(mask, size=features_input.shape[-2:], 64 | mode='bilinear', align_corners=False) 65 | loss = loss * (1 - cur_mask) 66 | 67 | loss = loss.mean(dim=tuple(range(1, len(loss.shape)))) 68 | losses.append(loss) 69 | 70 | return losses 71 | 72 | def forward(self, input, target, mask=None): 73 | losses = self.partial_losses(input, target, mask=mask) 74 | return torch.stack(losses).sum(dim=0) 75 | 76 | def get_global_features(self, input): 77 | check_and_warn_input_range(input, 0, 1, 'PerceptualLoss input in get_global_features') 78 | 79 | if self.normalize_inputs: 80 | features_input = self.do_normalize_inputs(input) 81 | else: 82 | features_input = input 83 | 84 | features_input = self.vgg(features_input) 85 | return features_input 86 | 87 | 88 | class ResNetPL(nn.Module): 89 | def __init__(self, weight=1, 90 | weights_path=None, arch_encoder='resnet50dilated', segmentation=True): 91 | super().__init__() 92 | self.impl = ModelBuilder.get_encoder(weights_path=weights_path, 93 | arch_encoder=arch_encoder, 94 | arch_decoder='ppm_deepsup', 95 | fc_dim=2048, 96 | segmentation=segmentation) 97 | self.impl.eval() 98 | for w in self.impl.parameters(): 99 | w.requires_grad_(False) 100 | 101 | self.weight = weight 102 | 103 | def forward(self, pred, target): 104 | pred = (pred - IMAGENET_MEAN.to(pred)) / IMAGENET_STD.to(pred) 105 | target = (target - IMAGENET_MEAN.to(target)) / IMAGENET_STD.to(target) 106 | 107 | pred_feats = self.impl(pred, return_feature_maps=True) 108 | target_feats = self.impl(target, return_feature_maps=True) 109 | 110 | result = torch.stack([F.mse_loss(cur_pred, cur_target) 111 | for cur_pred, cur_target 112 | in zip(pred_feats, target_feats)]).sum() * self.weight 113 | return result 114 | -------------------------------------------------------------------------------- /annotator/lama/saicinpainting/training/losses/segmentation.py: -------------------------------------------------------------------------------- 1 | import torch 2 | import torch.nn as nn 3 | import torch.nn.functional as F 4 | 5 | from .constants import weights as constant_weights 6 | 7 | 8 | class CrossEntropy2d(nn.Module): 9 | def __init__(self, reduction="mean", ignore_label=255, weights=None, *args, **kwargs): 10 | """ 11 | weight (Tensor, optional): a manual rescaling weight given to each class. 
12 | If given, has to be a Tensor of size "nclasses" 13 | """ 14 | super(CrossEntropy2d, self).__init__() 15 | self.reduction = reduction 16 | self.ignore_label = ignore_label 17 | self.weights = weights 18 | if self.weights is not None: 19 | device = torch.device('cuda' if torch.cuda.is_available() else 'cpu') 20 | self.weights = torch.FloatTensor(constant_weights[weights]).to(device) 21 | 22 | def forward(self, predict, target): 23 | """ 24 | Args: 25 | predict:(n, c, h, w) 26 | target:(n, 1, h, w) 27 | """ 28 | target = target.long() 29 | assert not target.requires_grad 30 | assert predict.dim() == 4, "{0}".format(predict.size()) 31 | assert target.dim() == 4, "{0}".format(target.size()) 32 | assert predict.size(0) == target.size(0), "{0} vs {1} ".format(predict.size(0), target.size(0)) 33 | assert target.size(1) == 1, "{0}".format(target.size(1)) 34 | assert predict.size(2) == target.size(2), "{0} vs {1} ".format(predict.size(2), target.size(2)) 35 | assert predict.size(3) == target.size(3), "{0} vs {1} ".format(predict.size(3), target.size(3)) 36 | target = target.squeeze(1) 37 | n, c, h, w = predict.size() 38 | target_mask = (target >= 0) * (target != self.ignore_label) 39 | target = target[target_mask] 40 | predict = predict.transpose(1, 2).transpose(2, 3).contiguous() 41 | predict = predict[target_mask.view(n, h, w, 1).repeat(1, 1, 1, c)].view(-1, c) 42 | loss = F.cross_entropy(predict, target, weight=self.weights, reduction=self.reduction) 43 | return loss 44 | -------------------------------------------------------------------------------- /annotator/lama/saicinpainting/training/losses/style_loss.py: -------------------------------------------------------------------------------- 1 | import torch 2 | import torch.nn as nn 3 | import torchvision.models as models 4 | 5 | 6 | class PerceptualLoss(nn.Module): 7 | r""" 8 | Perceptual loss, VGG-based 9 | https://arxiv.org/abs/1603.08155 10 | https://github.com/dxyang/StyleTransfer/blob/master/utils.py 11 | """ 12 | 13 | def __init__(self, weights=[1.0, 1.0, 1.0, 1.0, 1.0]): 14 | super(PerceptualLoss, self).__init__() 15 | self.add_module('vgg', VGG19()) 16 | self.criterion = torch.nn.L1Loss() 17 | self.weights = weights 18 | 19 | def __call__(self, x, y): 20 | # Compute features 21 | x_vgg, y_vgg = self.vgg(x), self.vgg(y) 22 | 23 | content_loss = 0.0 24 | content_loss += self.weights[0] * self.criterion(x_vgg['relu1_1'], y_vgg['relu1_1']) 25 | content_loss += self.weights[1] * self.criterion(x_vgg['relu2_1'], y_vgg['relu2_1']) 26 | content_loss += self.weights[2] * self.criterion(x_vgg['relu3_1'], y_vgg['relu3_1']) 27 | content_loss += self.weights[3] * self.criterion(x_vgg['relu4_1'], y_vgg['relu4_1']) 28 | content_loss += self.weights[4] * self.criterion(x_vgg['relu5_1'], y_vgg['relu5_1']) 29 | 30 | 31 | return content_loss 32 | 33 | 34 | class VGG19(torch.nn.Module): 35 | def __init__(self): 36 | super(VGG19, self).__init__() 37 | features = models.vgg19(pretrained=True).features 38 | self.relu1_1 = torch.nn.Sequential() 39 | self.relu1_2 = torch.nn.Sequential() 40 | 41 | self.relu2_1 = torch.nn.Sequential() 42 | self.relu2_2 = torch.nn.Sequential() 43 | 44 | self.relu3_1 = torch.nn.Sequential() 45 | self.relu3_2 = torch.nn.Sequential() 46 | self.relu3_3 = torch.nn.Sequential() 47 | self.relu3_4 = torch.nn.Sequential() 48 | 49 | self.relu4_1 = torch.nn.Sequential() 50 | self.relu4_2 = torch.nn.Sequential() 51 | self.relu4_3 = torch.nn.Sequential() 52 | self.relu4_4 = torch.nn.Sequential() 53 | 54 | self.relu5_1 = 
torch.nn.Sequential() 55 | self.relu5_2 = torch.nn.Sequential() 56 | self.relu5_3 = torch.nn.Sequential() 57 | self.relu5_4 = torch.nn.Sequential() 58 | 59 | for x in range(2): 60 | self.relu1_1.add_module(str(x), features[x]) 61 | 62 | for x in range(2, 4): 63 | self.relu1_2.add_module(str(x), features[x]) 64 | 65 | for x in range(4, 7): 66 | self.relu2_1.add_module(str(x), features[x]) 67 | 68 | for x in range(7, 9): 69 | self.relu2_2.add_module(str(x), features[x]) 70 | 71 | for x in range(9, 12): 72 | self.relu3_1.add_module(str(x), features[x]) 73 | 74 | for x in range(12, 14): 75 | self.relu3_2.add_module(str(x), features[x]) 76 | 77 | for x in range(14, 16): 78 | self.relu3_2.add_module(str(x), features[x]) 79 | 80 | for x in range(16, 18): 81 | self.relu3_4.add_module(str(x), features[x]) 82 | 83 | for x in range(18, 21): 84 | self.relu4_1.add_module(str(x), features[x]) 85 | 86 | for x in range(21, 23): 87 | self.relu4_2.add_module(str(x), features[x]) 88 | 89 | for x in range(23, 25): 90 | self.relu4_3.add_module(str(x), features[x]) 91 | 92 | for x in range(25, 27): 93 | self.relu4_4.add_module(str(x), features[x]) 94 | 95 | for x in range(27, 30): 96 | self.relu5_1.add_module(str(x), features[x]) 97 | 98 | for x in range(30, 32): 99 | self.relu5_2.add_module(str(x), features[x]) 100 | 101 | for x in range(32, 34): 102 | self.relu5_3.add_module(str(x), features[x]) 103 | 104 | for x in range(34, 36): 105 | self.relu5_4.add_module(str(x), features[x]) 106 | 107 | # don't need the gradients, just want the features 108 | for param in self.parameters(): 109 | param.requires_grad = False 110 | 111 | def forward(self, x): 112 | relu1_1 = self.relu1_1(x) 113 | relu1_2 = self.relu1_2(relu1_1) 114 | 115 | relu2_1 = self.relu2_1(relu1_2) 116 | relu2_2 = self.relu2_2(relu2_1) 117 | 118 | relu3_1 = self.relu3_1(relu2_2) 119 | relu3_2 = self.relu3_2(relu3_1) 120 | relu3_3 = self.relu3_3(relu3_2) 121 | relu3_4 = self.relu3_4(relu3_3) 122 | 123 | relu4_1 = self.relu4_1(relu3_4) 124 | relu4_2 = self.relu4_2(relu4_1) 125 | relu4_3 = self.relu4_3(relu4_2) 126 | relu4_4 = self.relu4_4(relu4_3) 127 | 128 | relu5_1 = self.relu5_1(relu4_4) 129 | relu5_2 = self.relu5_2(relu5_1) 130 | relu5_3 = self.relu5_3(relu5_2) 131 | relu5_4 = self.relu5_4(relu5_3) 132 | 133 | out = { 134 | 'relu1_1': relu1_1, 135 | 'relu1_2': relu1_2, 136 | 137 | 'relu2_1': relu2_1, 138 | 'relu2_2': relu2_2, 139 | 140 | 'relu3_1': relu3_1, 141 | 'relu3_2': relu3_2, 142 | 'relu3_3': relu3_3, 143 | 'relu3_4': relu3_4, 144 | 145 | 'relu4_1': relu4_1, 146 | 'relu4_2': relu4_2, 147 | 'relu4_3': relu4_3, 148 | 'relu4_4': relu4_4, 149 | 150 | 'relu5_1': relu5_1, 151 | 'relu5_2': relu5_2, 152 | 'relu5_3': relu5_3, 153 | 'relu5_4': relu5_4, 154 | } 155 | return out 156 | -------------------------------------------------------------------------------- /annotator/lama/saicinpainting/training/modules/__init__.py: -------------------------------------------------------------------------------- 1 | import logging 2 | 3 | from annotator.lama.saicinpainting.training.modules.ffc import FFCResNetGenerator 4 | from annotator.lama.saicinpainting.training.modules.pix2pixhd import GlobalGenerator, MultiDilatedGlobalGenerator, \ 5 | NLayerDiscriminator, MultidilatedNLayerDiscriminator 6 | 7 | def make_generator(config, kind, **kwargs): 8 | logging.info(f'Make generator {kind}') 9 | 10 | if kind == 'pix2pixhd_multidilated': 11 | return MultiDilatedGlobalGenerator(**kwargs) 12 | 13 | if kind == 'pix2pixhd_global': 14 | return GlobalGenerator(**kwargs) 15 
| 16 | if kind == 'ffc_resnet': 17 | return FFCResNetGenerator(**kwargs) 18 | 19 | raise ValueError(f'Unknown generator kind {kind}') 20 | 21 | 22 | def make_discriminator(kind, **kwargs): 23 | logging.info(f'Make discriminator {kind}') 24 | 25 | if kind == 'pix2pixhd_nlayer_multidilated': 26 | return MultidilatedNLayerDiscriminator(**kwargs) 27 | 28 | if kind == 'pix2pixhd_nlayer': 29 | return NLayerDiscriminator(**kwargs) 30 | 31 | raise ValueError(f'Unknown discriminator kind {kind}') 32 | -------------------------------------------------------------------------------- /annotator/lama/saicinpainting/training/modules/__pycache__/__init__.cpython-310.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/mlinmg/ComfyUI-LaMA-Preprocessor/7c302f6175bcb77f0947b47e2629ec2f00aee346/annotator/lama/saicinpainting/training/modules/__pycache__/__init__.cpython-310.pyc -------------------------------------------------------------------------------- /annotator/lama/saicinpainting/training/modules/__pycache__/base.cpython-310.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/mlinmg/ComfyUI-LaMA-Preprocessor/7c302f6175bcb77f0947b47e2629ec2f00aee346/annotator/lama/saicinpainting/training/modules/__pycache__/base.cpython-310.pyc -------------------------------------------------------------------------------- /annotator/lama/saicinpainting/training/modules/__pycache__/depthwise_sep_conv.cpython-310.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/mlinmg/ComfyUI-LaMA-Preprocessor/7c302f6175bcb77f0947b47e2629ec2f00aee346/annotator/lama/saicinpainting/training/modules/__pycache__/depthwise_sep_conv.cpython-310.pyc -------------------------------------------------------------------------------- /annotator/lama/saicinpainting/training/modules/__pycache__/ffc.cpython-310.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/mlinmg/ComfyUI-LaMA-Preprocessor/7c302f6175bcb77f0947b47e2629ec2f00aee346/annotator/lama/saicinpainting/training/modules/__pycache__/ffc.cpython-310.pyc -------------------------------------------------------------------------------- /annotator/lama/saicinpainting/training/modules/__pycache__/multidilated_conv.cpython-310.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/mlinmg/ComfyUI-LaMA-Preprocessor/7c302f6175bcb77f0947b47e2629ec2f00aee346/annotator/lama/saicinpainting/training/modules/__pycache__/multidilated_conv.cpython-310.pyc -------------------------------------------------------------------------------- /annotator/lama/saicinpainting/training/modules/__pycache__/pix2pixhd.cpython-310.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/mlinmg/ComfyUI-LaMA-Preprocessor/7c302f6175bcb77f0947b47e2629ec2f00aee346/annotator/lama/saicinpainting/training/modules/__pycache__/pix2pixhd.cpython-310.pyc -------------------------------------------------------------------------------- /annotator/lama/saicinpainting/training/modules/__pycache__/spatial_transform.cpython-310.pyc: -------------------------------------------------------------------------------- 
https://raw.githubusercontent.com/mlinmg/ComfyUI-LaMA-Preprocessor/7c302f6175bcb77f0947b47e2629ec2f00aee346/annotator/lama/saicinpainting/training/modules/__pycache__/spatial_transform.cpython-310.pyc -------------------------------------------------------------------------------- /annotator/lama/saicinpainting/training/modules/__pycache__/squeeze_excitation.cpython-310.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/mlinmg/ComfyUI-LaMA-Preprocessor/7c302f6175bcb77f0947b47e2629ec2f00aee346/annotator/lama/saicinpainting/training/modules/__pycache__/squeeze_excitation.cpython-310.pyc -------------------------------------------------------------------------------- /annotator/lama/saicinpainting/training/modules/base.py: -------------------------------------------------------------------------------- 1 | import abc 2 | from typing import Tuple, List 3 | 4 | import torch 5 | import torch.nn as nn 6 | 7 | from annotator.lama.saicinpainting.training.modules.depthwise_sep_conv import DepthWiseSeperableConv 8 | from annotator.lama.saicinpainting.training.modules.multidilated_conv import MultidilatedConv 9 | 10 | 11 | class BaseDiscriminator(nn.Module): 12 | @abc.abstractmethod 13 | def forward(self, x: torch.Tensor) -> Tuple[torch.Tensor, List[torch.Tensor]]: 14 | """ 15 | Predict scores and get intermediate activations. Useful for feature matching loss 16 | :return tuple (scores, list of intermediate activations) 17 | """ 18 | raise NotImplemented() 19 | 20 | 21 | def get_conv_block_ctor(kind='default'): 22 | if not isinstance(kind, str): 23 | return kind 24 | if kind == 'default': 25 | return nn.Conv2d 26 | if kind == 'depthwise': 27 | return DepthWiseSeperableConv 28 | if kind == 'multidilated': 29 | return MultidilatedConv 30 | raise ValueError(f'Unknown convolutional block kind {kind}') 31 | 32 | 33 | def get_norm_layer(kind='bn'): 34 | if not isinstance(kind, str): 35 | return kind 36 | if kind == 'bn': 37 | return nn.BatchNorm2d 38 | if kind == 'in': 39 | return nn.InstanceNorm2d 40 | raise ValueError(f'Unknown norm block kind {kind}') 41 | 42 | 43 | def get_activation(kind='tanh'): 44 | if kind == 'tanh': 45 | return nn.Tanh() 46 | if kind == 'sigmoid': 47 | return nn.Sigmoid() 48 | if kind is False: 49 | return nn.Identity() 50 | raise ValueError(f'Unknown activation kind {kind}') 51 | 52 | 53 | class SimpleMultiStepGenerator(nn.Module): 54 | def __init__(self, steps: List[nn.Module]): 55 | super().__init__() 56 | self.steps = nn.ModuleList(steps) 57 | 58 | def forward(self, x): 59 | cur_in = x 60 | outs = [] 61 | for step in self.steps: 62 | cur_out = step(cur_in) 63 | outs.append(cur_out) 64 | cur_in = torch.cat((cur_in, cur_out), dim=1) 65 | return torch.cat(outs[::-1], dim=1) 66 | 67 | def deconv_factory(kind, ngf, mult, norm_layer, activation, max_features): 68 | if kind == 'convtranspose': 69 | return [nn.ConvTranspose2d(min(max_features, ngf * mult), 70 | min(max_features, int(ngf * mult / 2)), 71 | kernel_size=3, stride=2, padding=1, output_padding=1), 72 | norm_layer(min(max_features, int(ngf * mult / 2))), activation] 73 | elif kind == 'bilinear': 74 | return [nn.Upsample(scale_factor=2, mode='bilinear'), 75 | DepthWiseSeperableConv(min(max_features, ngf * mult), 76 | min(max_features, int(ngf * mult / 2)), 77 | kernel_size=3, stride=1, padding=1), 78 | norm_layer(min(max_features, int(ngf * mult / 2))), activation] 79 | else: 80 | raise Exception(f"Invalid deconv kind: {kind}") 
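
The factories above (get_conv_block_ctor, get_norm_layer, get_activation, deconv_factory) let the generator definitions in this package select convolution, normalisation, activation and upsampling variants by name. A minimal usage sketch follows; it is not part of the original file, the channel sizes are illustrative, and it assumes the custom node's root folder is importable:

import torch
import torch.nn as nn

from annotator.lama.saicinpainting.training.modules.base import (
    get_conv_block_ctor, get_norm_layer, get_activation, deconv_factory)

conv_layer = get_conv_block_ctor('default')   # nn.Conv2d; 'depthwise' / 'multidilated' pick the variants above
norm_layer = get_norm_layer('bn')             # nn.BatchNorm2d
block = nn.Sequential(
    conv_layer(3, 64, kernel_size=3, padding=1),
    norm_layer(64),
    nn.ReLU(True),
    # deconv_factory returns an [upsample, norm, activation] list and halves the channel count
    *deconv_factory('convtranspose', ngf=64, mult=1, norm_layer=norm_layer,
                    activation=nn.ReLU(True), max_features=1024),
    get_activation('tanh'),
)
out = block(torch.randn(1, 3, 64, 64))        # -> torch.Size([1, 32, 128, 128])
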
-------------------------------------------------------------------------------- /annotator/lama/saicinpainting/training/modules/depthwise_sep_conv.py: -------------------------------------------------------------------------------- 1 | import torch 2 | import torch.nn as nn 3 | 4 | class DepthWiseSeperableConv(nn.Module): 5 | def __init__(self, in_dim, out_dim, *args, **kwargs): 6 | super().__init__() 7 | if 'groups' in kwargs: 8 | # ignoring groups for Depthwise Sep Conv 9 | del kwargs['groups'] 10 | 11 | self.depthwise = nn.Conv2d(in_dim, in_dim, *args, groups=in_dim, **kwargs) 12 | self.pointwise = nn.Conv2d(in_dim, out_dim, kernel_size=1) 13 | 14 | def forward(self, x): 15 | out = self.depthwise(x) 16 | out = self.pointwise(out) 17 | return out -------------------------------------------------------------------------------- /annotator/lama/saicinpainting/training/modules/fake_fakes.py: -------------------------------------------------------------------------------- 1 | import torch 2 | from kornia import SamplePadding 3 | from kornia.augmentation import RandomAffine, CenterCrop 4 | 5 | 6 | class FakeFakesGenerator: 7 | def __init__(self, aug_proba=0.5, img_aug_degree=30, img_aug_translate=0.2): 8 | self.grad_aug = RandomAffine(degrees=360, 9 | translate=0.2, 10 | padding_mode=SamplePadding.REFLECTION, 11 | keepdim=False, 12 | p=1) 13 | self.img_aug = RandomAffine(degrees=img_aug_degree, 14 | translate=img_aug_translate, 15 | padding_mode=SamplePadding.REFLECTION, 16 | keepdim=True, 17 | p=1) 18 | self.aug_proba = aug_proba 19 | 20 | def __call__(self, input_images, masks): 21 | blend_masks = self._fill_masks_with_gradient(masks) 22 | blend_target = self._make_blend_target(input_images) 23 | result = input_images * (1 - blend_masks) + blend_target * blend_masks 24 | return result, blend_masks 25 | 26 | def _make_blend_target(self, input_images): 27 | batch_size = input_images.shape[0] 28 | permuted = input_images[torch.randperm(batch_size)] 29 | augmented = self.img_aug(input_images) 30 | is_aug = (torch.rand(batch_size, device=input_images.device)[:, None, None, None] < self.aug_proba).float() 31 | result = augmented * is_aug + permuted * (1 - is_aug) 32 | return result 33 | 34 | def _fill_masks_with_gradient(self, masks): 35 | batch_size, _, height, width = masks.shape 36 | grad = torch.linspace(0, 1, steps=width * 2, device=masks.device, dtype=masks.dtype) \ 37 | .view(1, 1, 1, -1).expand(batch_size, 1, height * 2, width * 2) 38 | grad = self.grad_aug(grad) 39 | grad = CenterCrop((height, width))(grad) 40 | grad *= masks 41 | 42 | grad_for_min = grad + (1 - masks) * 10 43 | grad -= grad_for_min.view(batch_size, -1).min(-1).values[:, None, None, None] 44 | grad /= grad.view(batch_size, -1).max(-1).values[:, None, None, None] + 1e-6 45 | grad.clamp_(min=0, max=1) 46 | 47 | return grad 48 | -------------------------------------------------------------------------------- /annotator/lama/saicinpainting/training/modules/multidilated_conv.py: -------------------------------------------------------------------------------- 1 | import torch 2 | import torch.nn as nn 3 | import random 4 | from annotator.lama.saicinpainting.training.modules.depthwise_sep_conv import DepthWiseSeperableConv 5 | 6 | class MultidilatedConv(nn.Module): 7 | def __init__(self, in_dim, out_dim, kernel_size, dilation_num=3, comb_mode='sum', equal_dim=True, 8 | shared_weights=False, padding=1, min_dilation=1, shuffle_in_channels=False, use_depthwise=False, **kwargs): 9 | super().__init__() 10 | convs = [] 11 | 
self.equal_dim = equal_dim 12 | assert comb_mode in ('cat_out', 'sum', 'cat_in', 'cat_both'), comb_mode 13 | if comb_mode in ('cat_out', 'cat_both'): 14 | self.cat_out = True 15 | if equal_dim: 16 | assert out_dim % dilation_num == 0 17 | out_dims = [out_dim // dilation_num] * dilation_num 18 | self.index = sum([[i + j * (out_dims[0]) for j in range(dilation_num)] for i in range(out_dims[0])], []) 19 | else: 20 | out_dims = [out_dim // 2 ** (i + 1) for i in range(dilation_num - 1)] 21 | out_dims.append(out_dim - sum(out_dims)) 22 | index = [] 23 | starts = [0] + out_dims[:-1] 24 | lengths = [out_dims[i] // out_dims[-1] for i in range(dilation_num)] 25 | for i in range(out_dims[-1]): 26 | for j in range(dilation_num): 27 | index += list(range(starts[j], starts[j] + lengths[j])) 28 | starts[j] += lengths[j] 29 | self.index = index 30 | assert(len(index) == out_dim) 31 | self.out_dims = out_dims 32 | else: 33 | self.cat_out = False 34 | self.out_dims = [out_dim] * dilation_num 35 | 36 | if comb_mode in ('cat_in', 'cat_both'): 37 | if equal_dim: 38 | assert in_dim % dilation_num == 0 39 | in_dims = [in_dim // dilation_num] * dilation_num 40 | else: 41 | in_dims = [in_dim // 2 ** (i + 1) for i in range(dilation_num - 1)] 42 | in_dims.append(in_dim - sum(in_dims)) 43 | self.in_dims = in_dims 44 | self.cat_in = True 45 | else: 46 | self.cat_in = False 47 | self.in_dims = [in_dim] * dilation_num 48 | 49 | conv_type = DepthWiseSeperableConv if use_depthwise else nn.Conv2d 50 | dilation = min_dilation 51 | for i in range(dilation_num): 52 | if isinstance(padding, int): 53 | cur_padding = padding * dilation 54 | else: 55 | cur_padding = padding[i] 56 | convs.append(conv_type( 57 | self.in_dims[i], self.out_dims[i], kernel_size, padding=cur_padding, dilation=dilation, **kwargs 58 | )) 59 | if i > 0 and shared_weights: 60 | convs[-1].weight = convs[0].weight 61 | convs[-1].bias = convs[0].bias 62 | dilation *= 2 63 | self.convs = nn.ModuleList(convs) 64 | 65 | self.shuffle_in_channels = shuffle_in_channels 66 | if self.shuffle_in_channels: 67 | # shuffle list as shuffling of tensors is nondeterministic 68 | in_channels_permute = list(range(in_dim)) 69 | random.shuffle(in_channels_permute) 70 | # save as buffer so it is saved and loaded with checkpoint 71 | self.register_buffer('in_channels_permute', torch.tensor(in_channels_permute)) 72 | 73 | def forward(self, x): 74 | if self.shuffle_in_channels: 75 | x = x[:, self.in_channels_permute] 76 | 77 | outs = [] 78 | if self.cat_in: 79 | if self.equal_dim: 80 | x = x.chunk(len(self.convs), dim=1) 81 | else: 82 | new_x = [] 83 | start = 0 84 | for dim in self.in_dims: 85 | new_x.append(x[:, start:start+dim]) 86 | start += dim 87 | x = new_x 88 | for i, conv in enumerate(self.convs): 89 | if self.cat_in: 90 | input = x[i] 91 | else: 92 | input = x 93 | outs.append(conv(input)) 94 | if self.cat_out: 95 | out = torch.cat(outs, dim=1)[:, self.index] 96 | else: 97 | out = sum(outs) 98 | return out 99 | -------------------------------------------------------------------------------- /annotator/lama/saicinpainting/training/modules/multiscale.py: -------------------------------------------------------------------------------- 1 | from typing import List, Tuple, Union, Optional 2 | 3 | import torch 4 | import torch.nn as nn 5 | import torch.nn.functional as F 6 | 7 | from annotator.lama.saicinpainting.training.modules.base import get_conv_block_ctor, get_activation 8 | from annotator.lama.saicinpainting.training.modules.pix2pixhd import ResnetBlock 9 | 10 | 11 | 
class ResNetHead(nn.Module): 12 | def __init__(self, input_nc, ngf=64, n_downsampling=3, n_blocks=9, norm_layer=nn.BatchNorm2d, 13 | padding_type='reflect', conv_kind='default', activation=nn.ReLU(True)): 14 | assert (n_blocks >= 0) 15 | super(ResNetHead, self).__init__() 16 | 17 | conv_layer = get_conv_block_ctor(conv_kind) 18 | 19 | model = [nn.ReflectionPad2d(3), 20 | conv_layer(input_nc, ngf, kernel_size=7, padding=0), 21 | norm_layer(ngf), 22 | activation] 23 | 24 | ### downsample 25 | for i in range(n_downsampling): 26 | mult = 2 ** i 27 | model += [conv_layer(ngf * mult, ngf * mult * 2, kernel_size=3, stride=2, padding=1), 28 | norm_layer(ngf * mult * 2), 29 | activation] 30 | 31 | mult = 2 ** n_downsampling 32 | 33 | ### resnet blocks 34 | for i in range(n_blocks): 35 | model += [ResnetBlock(ngf * mult, padding_type=padding_type, activation=activation, norm_layer=norm_layer, 36 | conv_kind=conv_kind)] 37 | 38 | self.model = nn.Sequential(*model) 39 | 40 | def forward(self, input): 41 | return self.model(input) 42 | 43 | 44 | class ResNetTail(nn.Module): 45 | def __init__(self, output_nc, ngf=64, n_downsampling=3, n_blocks=9, norm_layer=nn.BatchNorm2d, 46 | padding_type='reflect', conv_kind='default', activation=nn.ReLU(True), 47 | up_norm_layer=nn.BatchNorm2d, up_activation=nn.ReLU(True), add_out_act=False, out_extra_layers_n=0, 48 | add_in_proj=None): 49 | assert (n_blocks >= 0) 50 | super(ResNetTail, self).__init__() 51 | 52 | mult = 2 ** n_downsampling 53 | 54 | model = [] 55 | 56 | if add_in_proj is not None: 57 | model.append(nn.Conv2d(add_in_proj, ngf * mult, kernel_size=1)) 58 | 59 | ### resnet blocks 60 | for i in range(n_blocks): 61 | model += [ResnetBlock(ngf * mult, padding_type=padding_type, activation=activation, norm_layer=norm_layer, 62 | conv_kind=conv_kind)] 63 | 64 | ### upsample 65 | for i in range(n_downsampling): 66 | mult = 2 ** (n_downsampling - i) 67 | model += [nn.ConvTranspose2d(ngf * mult, int(ngf * mult / 2), kernel_size=3, stride=2, padding=1, 68 | output_padding=1), 69 | up_norm_layer(int(ngf * mult / 2)), 70 | up_activation] 71 | self.model = nn.Sequential(*model) 72 | 73 | out_layers = [] 74 | for _ in range(out_extra_layers_n): 75 | out_layers += [nn.Conv2d(ngf, ngf, kernel_size=1, padding=0), 76 | up_norm_layer(ngf), 77 | up_activation] 78 | out_layers += [nn.ReflectionPad2d(3), 79 | nn.Conv2d(ngf, output_nc, kernel_size=7, padding=0)] 80 | 81 | if add_out_act: 82 | out_layers.append(get_activation('tanh' if add_out_act is True else add_out_act)) 83 | 84 | self.out_proj = nn.Sequential(*out_layers) 85 | 86 | def forward(self, input, return_last_act=False): 87 | features = self.model(input) 88 | out = self.out_proj(features) 89 | if return_last_act: 90 | return out, features 91 | else: 92 | return out 93 | 94 | 95 | class MultiscaleResNet(nn.Module): 96 | def __init__(self, input_nc, output_nc, ngf=64, n_downsampling=2, n_blocks_head=2, n_blocks_tail=6, n_scales=3, 97 | norm_layer=nn.BatchNorm2d, padding_type='reflect', conv_kind='default', activation=nn.ReLU(True), 98 | up_norm_layer=nn.BatchNorm2d, up_activation=nn.ReLU(True), add_out_act=False, out_extra_layers_n=0, 99 | out_cumulative=False, return_only_hr=False): 100 | super().__init__() 101 | 102 | self.heads = nn.ModuleList([ResNetHead(input_nc, ngf=ngf, n_downsampling=n_downsampling, 103 | n_blocks=n_blocks_head, norm_layer=norm_layer, padding_type=padding_type, 104 | conv_kind=conv_kind, activation=activation) 105 | for i in range(n_scales)]) 106 | tail_in_feats = ngf * (2 ** 
n_downsampling) + ngf 107 | self.tails = nn.ModuleList([ResNetTail(output_nc, 108 | ngf=ngf, n_downsampling=n_downsampling, 109 | n_blocks=n_blocks_tail, norm_layer=norm_layer, padding_type=padding_type, 110 | conv_kind=conv_kind, activation=activation, up_norm_layer=up_norm_layer, 111 | up_activation=up_activation, add_out_act=add_out_act, 112 | out_extra_layers_n=out_extra_layers_n, 113 | add_in_proj=None if (i == n_scales - 1) else tail_in_feats) 114 | for i in range(n_scales)]) 115 | 116 | self.out_cumulative = out_cumulative 117 | self.return_only_hr = return_only_hr 118 | 119 | @property 120 | def num_scales(self): 121 | return len(self.heads) 122 | 123 | def forward(self, ms_inputs: List[torch.Tensor], smallest_scales_num: Optional[int] = None) \ 124 | -> Union[torch.Tensor, List[torch.Tensor]]: 125 | """ 126 | :param ms_inputs: List of inputs of different resolutions from HR to LR 127 | :param smallest_scales_num: int or None, number of smallest scales to take at input 128 | :return: Depending on return_only_hr: 129 | True: Only the most HR output 130 | False: List of outputs of different resolutions from HR to LR 131 | """ 132 | if smallest_scales_num is None: 133 | assert len(self.heads) == len(ms_inputs), (len(self.heads), len(ms_inputs), smallest_scales_num) 134 | smallest_scales_num = len(self.heads) 135 | else: 136 | assert smallest_scales_num == len(ms_inputs) <= len(self.heads), (len(self.heads), len(ms_inputs), smallest_scales_num) 137 | 138 | cur_heads = self.heads[-smallest_scales_num:] 139 | ms_features = [cur_head(cur_inp) for cur_head, cur_inp in zip(cur_heads, ms_inputs)] 140 | 141 | all_outputs = [] 142 | prev_tail_features = None 143 | for i in range(len(ms_features)): 144 | scale_i = -i - 1 145 | 146 | cur_tail_input = ms_features[-i - 1] 147 | if prev_tail_features is not None: 148 | if prev_tail_features.shape != cur_tail_input.shape: 149 | prev_tail_features = F.interpolate(prev_tail_features, size=cur_tail_input.shape[2:], 150 | mode='bilinear', align_corners=False) 151 | cur_tail_input = torch.cat((cur_tail_input, prev_tail_features), dim=1) 152 | 153 | cur_out, cur_tail_feats = self.tails[scale_i](cur_tail_input, return_last_act=True) 154 | 155 | prev_tail_features = cur_tail_feats 156 | all_outputs.append(cur_out) 157 | 158 | if self.out_cumulative: 159 | all_outputs_cum = [all_outputs[0]] 160 | for i in range(1, len(ms_features)): 161 | cur_out = all_outputs[i] 162 | cur_out_cum = cur_out + F.interpolate(all_outputs_cum[-1], size=cur_out.shape[2:], 163 | mode='bilinear', align_corners=False) 164 | all_outputs_cum.append(cur_out_cum) 165 | all_outputs = all_outputs_cum 166 | 167 | if self.return_only_hr: 168 | return all_outputs[-1] 169 | else: 170 | return all_outputs[::-1] 171 | 172 | 173 | class MultiscaleDiscriminatorSimple(nn.Module): 174 | def __init__(self, ms_impl): 175 | super().__init__() 176 | self.ms_impl = nn.ModuleList(ms_impl) 177 | 178 | @property 179 | def num_scales(self): 180 | return len(self.ms_impl) 181 | 182 | def forward(self, ms_inputs: List[torch.Tensor], smallest_scales_num: Optional[int] = None) \ 183 | -> List[Tuple[torch.Tensor, List[torch.Tensor]]]: 184 | """ 185 | :param ms_inputs: List of inputs of different resolutions from HR to LR 186 | :param smallest_scales_num: int or None, number of smallest scales to take at input 187 | :return: List of pairs (prediction, features) for different resolutions from HR to LR 188 | """ 189 | if smallest_scales_num is None: 190 | assert len(self.ms_impl) == len(ms_inputs), 
(len(self.ms_impl), len(ms_inputs), smallest_scales_num) 191 | smallest_scales_num = len(self.heads) 192 | else: 193 | assert smallest_scales_num == len(ms_inputs) <= len(self.ms_impl), \ 194 | (len(self.ms_impl), len(ms_inputs), smallest_scales_num) 195 | 196 | return [cur_discr(cur_input) for cur_discr, cur_input in zip(self.ms_impl[-smallest_scales_num:], ms_inputs)] 197 | 198 | 199 | class SingleToMultiScaleInputMixin: 200 | def forward(self, x: torch.Tensor) -> List: 201 | orig_height, orig_width = x.shape[2:] 202 | factors = [2 ** i for i in range(self.num_scales)] 203 | ms_inputs = [F.interpolate(x, size=(orig_height // f, orig_width // f), mode='bilinear', align_corners=False) 204 | for f in factors] 205 | return super().forward(ms_inputs) 206 | 207 | 208 | class GeneratorMultiToSingleOutputMixin: 209 | def forward(self, x): 210 | return super().forward(x)[0] 211 | 212 | 213 | class DiscriminatorMultiToSingleOutputMixin: 214 | def forward(self, x): 215 | out_feat_tuples = super().forward(x) 216 | return out_feat_tuples[0][0], [f for _, flist in out_feat_tuples for f in flist] 217 | 218 | 219 | class DiscriminatorMultiToSingleOutputStackedMixin: 220 | def __init__(self, *args, return_feats_only_levels=None, **kwargs): 221 | super().__init__(*args, **kwargs) 222 | self.return_feats_only_levels = return_feats_only_levels 223 | 224 | def forward(self, x): 225 | out_feat_tuples = super().forward(x) 226 | outs = [out for out, _ in out_feat_tuples] 227 | scaled_outs = [outs[0]] + [F.interpolate(cur_out, size=outs[0].shape[-2:], 228 | mode='bilinear', align_corners=False) 229 | for cur_out in outs[1:]] 230 | out = torch.cat(scaled_outs, dim=1) 231 | if self.return_feats_only_levels is not None: 232 | feat_lists = [out_feat_tuples[i][1] for i in self.return_feats_only_levels] 233 | else: 234 | feat_lists = [flist for _, flist in out_feat_tuples] 235 | feats = [f for flist in feat_lists for f in flist] 236 | return out, feats 237 | 238 | 239 | class MultiscaleDiscrSingleInput(SingleToMultiScaleInputMixin, DiscriminatorMultiToSingleOutputStackedMixin, MultiscaleDiscriminatorSimple): 240 | pass 241 | 242 | 243 | class MultiscaleResNetSingle(GeneratorMultiToSingleOutputMixin, SingleToMultiScaleInputMixin, MultiscaleResNet): 244 | pass 245 | -------------------------------------------------------------------------------- /annotator/lama/saicinpainting/training/modules/spatial_transform.py: -------------------------------------------------------------------------------- 1 | import torch 2 | import torch.nn as nn 3 | import torch.nn.functional as F 4 | from kornia.geometry.transform import rotate 5 | 6 | 7 | class LearnableSpatialTransformWrapper(nn.Module): 8 | def __init__(self, impl, pad_coef=0.5, angle_init_range=80, train_angle=True): 9 | super().__init__() 10 | self.impl = impl 11 | self.angle = torch.rand(1) * angle_init_range 12 | if train_angle: 13 | self.angle = nn.Parameter(self.angle, requires_grad=True) 14 | self.pad_coef = pad_coef 15 | 16 | def forward(self, x): 17 | if torch.is_tensor(x): 18 | return self.inverse_transform(self.impl(self.transform(x)), x) 19 | elif isinstance(x, tuple): 20 | x_trans = tuple(self.transform(elem) for elem in x) 21 | y_trans = self.impl(x_trans) 22 | return tuple(self.inverse_transform(elem, orig_x) for elem, orig_x in zip(y_trans, x)) 23 | else: 24 | raise ValueError(f'Unexpected input type {type(x)}') 25 | 26 | def transform(self, x): 27 | height, width = x.shape[2:] 28 | pad_h, pad_w = int(height * self.pad_coef), int(width * self.pad_coef) 29 | 
x_padded = F.pad(x, [pad_w, pad_w, pad_h, pad_h], mode='reflect') 30 | x_padded_rotated = rotate(x_padded, angle=self.angle.to(x_padded)) 31 | return x_padded_rotated 32 | 33 | def inverse_transform(self, y_padded_rotated, orig_x): 34 | height, width = orig_x.shape[2:] 35 | pad_h, pad_w = int(height * self.pad_coef), int(width * self.pad_coef) 36 | 37 | y_padded = rotate(y_padded_rotated, angle=-self.angle.to(y_padded_rotated)) 38 | y_height, y_width = y_padded.shape[2:] 39 | y = y_padded[:, :, pad_h : y_height - pad_h, pad_w : y_width - pad_w] 40 | return y 41 | 42 | 43 | if __name__ == '__main__': 44 | layer = LearnableSpatialTransformWrapper(nn.Identity()) 45 | x = torch.arange(2* 3 * 15 * 15).view(2, 3, 15, 15).float() 46 | y = layer(x) 47 | assert x.shape == y.shape 48 | assert torch.allclose(x[:, :, 1:, 1:][:, :, :-1, :-1], y[:, :, 1:, 1:][:, :, :-1, :-1]) 49 | print('all ok') 50 | -------------------------------------------------------------------------------- /annotator/lama/saicinpainting/training/modules/squeeze_excitation.py: -------------------------------------------------------------------------------- 1 | import torch.nn as nn 2 | 3 | 4 | class SELayer(nn.Module): 5 | def __init__(self, channel, reduction=16): 6 | super(SELayer, self).__init__() 7 | self.avg_pool = nn.AdaptiveAvgPool2d(1) 8 | self.fc = nn.Sequential( 9 | nn.Linear(channel, channel // reduction, bias=False), 10 | nn.ReLU(inplace=True), 11 | nn.Linear(channel // reduction, channel, bias=False), 12 | nn.Sigmoid() 13 | ) 14 | 15 | def forward(self, x): 16 | b, c, _, _ = x.size() 17 | y = self.avg_pool(x).view(b, c) 18 | y = self.fc(y).view(b, c, 1, 1) 19 | res = x * y.expand_as(x) 20 | return res 21 | -------------------------------------------------------------------------------- /annotator/lama/saicinpainting/training/trainers/__init__.py: -------------------------------------------------------------------------------- 1 | import logging 2 | import torch 3 | from annotator.lama.saicinpainting.training.trainers.default import DefaultInpaintingTrainingModule 4 | 5 | 6 | def get_training_model_class(kind): 7 | if kind == 'default': 8 | return DefaultInpaintingTrainingModule 9 | 10 | raise ValueError(f'Unknown trainer module {kind}') 11 | 12 | 13 | def make_training_model(config): 14 | kind = config.training_model.kind 15 | kwargs = dict(config.training_model) 16 | kwargs.pop('kind') 17 | kwargs['use_ddp'] = config.trainer.kwargs.get('accelerator', None) == 'ddp' 18 | 19 | logging.info(f'Make training model {kind}') 20 | 21 | cls = get_training_model_class(kind) 22 | return cls(config, **kwargs) 23 | 24 | 25 | def load_checkpoint(train_config, path, map_location='cuda', strict=True): 26 | model = make_training_model(train_config).generator 27 | state = torch.load(path, map_location=map_location) 28 | model.load_state_dict(state, strict=strict) 29 | return model 30 | -------------------------------------------------------------------------------- /annotator/lama/saicinpainting/training/trainers/__pycache__/__init__.cpython-310.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/mlinmg/ComfyUI-LaMA-Preprocessor/7c302f6175bcb77f0947b47e2629ec2f00aee346/annotator/lama/saicinpainting/training/trainers/__pycache__/__init__.cpython-310.pyc -------------------------------------------------------------------------------- /annotator/lama/saicinpainting/training/trainers/__pycache__/base.cpython-310.pyc: 
-------------------------------------------------------------------------------- https://raw.githubusercontent.com/mlinmg/ComfyUI-LaMA-Preprocessor/7c302f6175bcb77f0947b47e2629ec2f00aee346/annotator/lama/saicinpainting/training/trainers/__pycache__/base.cpython-310.pyc -------------------------------------------------------------------------------- /annotator/lama/saicinpainting/training/trainers/__pycache__/default.cpython-310.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/mlinmg/ComfyUI-LaMA-Preprocessor/7c302f6175bcb77f0947b47e2629ec2f00aee346/annotator/lama/saicinpainting/training/trainers/__pycache__/default.cpython-310.pyc -------------------------------------------------------------------------------- /annotator/lama/saicinpainting/training/trainers/default.py: -------------------------------------------------------------------------------- 1 | import logging 2 | 3 | import torch 4 | import torch.nn.functional as F 5 | from omegaconf import OmegaConf 6 | 7 | # from annotator.lama.saicinpainting.training.data.datasets import make_constant_area_crop_params 8 | from annotator.lama.saicinpainting.training.losses.distance_weighting import make_mask_distance_weighter 9 | from annotator.lama.saicinpainting.training.losses.feature_matching import feature_matching_loss, masked_l1_loss 10 | # from annotator.lama.saicinpainting.training.modules.fake_fakes import FakeFakesGenerator 11 | from annotator.lama.saicinpainting.training.trainers.base import BaseInpaintingTrainingModule, make_multiscale_noise 12 | from annotator.lama.saicinpainting.utils import add_prefix_to_keys, get_ramp 13 | 14 | LOGGER = logging.getLogger(__name__) 15 | 16 | 17 | def make_constant_area_crop_batch(batch, **kwargs): 18 | crop_y, crop_x, crop_height, crop_width = make_constant_area_crop_params(img_height=batch['image'].shape[2], 19 | img_width=batch['image'].shape[3], 20 | **kwargs) 21 | batch['image'] = batch['image'][:, :, crop_y : crop_y + crop_height, crop_x : crop_x + crop_width] 22 | batch['mask'] = batch['mask'][:, :, crop_y: crop_y + crop_height, crop_x: crop_x + crop_width] 23 | return batch 24 | 25 | 26 | class DefaultInpaintingTrainingModule(BaseInpaintingTrainingModule): 27 | def __init__(self, *args, concat_mask=True, rescale_scheduler_kwargs=None, image_to_discriminator='predicted_image', 28 | add_noise_kwargs=None, noise_fill_hole=False, const_area_crop_kwargs=None, 29 | distance_weighter_kwargs=None, distance_weighted_mask_for_discr=False, 30 | fake_fakes_proba=0, fake_fakes_generator_kwargs=None, 31 | **kwargs): 32 | super().__init__(*args, **kwargs) 33 | self.concat_mask = concat_mask 34 | self.rescale_size_getter = get_ramp(**rescale_scheduler_kwargs) if rescale_scheduler_kwargs is not None else None 35 | self.image_to_discriminator = image_to_discriminator 36 | self.add_noise_kwargs = add_noise_kwargs 37 | self.noise_fill_hole = noise_fill_hole 38 | self.const_area_crop_kwargs = const_area_crop_kwargs 39 | self.refine_mask_for_losses = make_mask_distance_weighter(**distance_weighter_kwargs) \ 40 | if distance_weighter_kwargs is not None else None 41 | self.distance_weighted_mask_for_discr = distance_weighted_mask_for_discr 42 | 43 | self.fake_fakes_proba = fake_fakes_proba 44 | if self.fake_fakes_proba > 1e-3: 45 | self.fake_fakes_gen = FakeFakesGenerator(**(fake_fakes_generator_kwargs or {})) 46 | 47 | def forward(self, batch): 48 | if self.training and self.rescale_size_getter is not None: 49 | cur_size = 
self.rescale_size_getter(self.global_step) 50 | batch['image'] = F.interpolate(batch['image'], size=cur_size, mode='bilinear', align_corners=False) 51 | batch['mask'] = F.interpolate(batch['mask'], size=cur_size, mode='nearest') 52 | 53 | if self.training and self.const_area_crop_kwargs is not None: 54 | batch = make_constant_area_crop_batch(batch, **self.const_area_crop_kwargs) 55 | 56 | img = batch['image'] 57 | mask = batch['mask'] 58 | 59 | masked_img = img * (1 - mask) 60 | 61 | if self.add_noise_kwargs is not None: 62 | noise = make_multiscale_noise(masked_img, **self.add_noise_kwargs) 63 | if self.noise_fill_hole: 64 | masked_img = masked_img + mask * noise[:, :masked_img.shape[1]] 65 | masked_img = torch.cat([masked_img, noise], dim=1) 66 | 67 | if self.concat_mask: 68 | masked_img = torch.cat([masked_img, mask], dim=1) 69 | 70 | batch['predicted_image'] = self.generator(masked_img) 71 | batch['inpainted'] = mask * batch['predicted_image'] + (1 - mask) * batch['image'] 72 | 73 | if self.fake_fakes_proba > 1e-3: 74 | if self.training and torch.rand(1).item() < self.fake_fakes_proba: 75 | batch['fake_fakes'], batch['fake_fakes_masks'] = self.fake_fakes_gen(img, mask) 76 | batch['use_fake_fakes'] = True 77 | else: 78 | batch['fake_fakes'] = torch.zeros_like(img) 79 | batch['fake_fakes_masks'] = torch.zeros_like(mask) 80 | batch['use_fake_fakes'] = False 81 | 82 | batch['mask_for_losses'] = self.refine_mask_for_losses(img, batch['predicted_image'], mask) \ 83 | if self.refine_mask_for_losses is not None and self.training \ 84 | else mask 85 | 86 | return batch 87 | 88 | def generator_loss(self, batch): 89 | img = batch['image'] 90 | predicted_img = batch[self.image_to_discriminator] 91 | original_mask = batch['mask'] 92 | supervised_mask = batch['mask_for_losses'] 93 | 94 | # L1 95 | l1_value = masked_l1_loss(predicted_img, img, supervised_mask, 96 | self.config.losses.l1.weight_known, 97 | self.config.losses.l1.weight_missing) 98 | 99 | total_loss = l1_value 100 | metrics = dict(gen_l1=l1_value) 101 | 102 | # vgg-based perceptual loss 103 | if self.config.losses.perceptual.weight > 0: 104 | pl_value = self.loss_pl(predicted_img, img, mask=supervised_mask).sum() * self.config.losses.perceptual.weight 105 | total_loss = total_loss + pl_value 106 | metrics['gen_pl'] = pl_value 107 | 108 | # discriminator 109 | # adversarial_loss calls backward by itself 110 | mask_for_discr = supervised_mask if self.distance_weighted_mask_for_discr else original_mask 111 | self.adversarial_loss.pre_generator_step(real_batch=img, fake_batch=predicted_img, 112 | generator=self.generator, discriminator=self.discriminator) 113 | discr_real_pred, discr_real_features = self.discriminator(img) 114 | discr_fake_pred, discr_fake_features = self.discriminator(predicted_img) 115 | adv_gen_loss, adv_metrics = self.adversarial_loss.generator_loss(real_batch=img, 116 | fake_batch=predicted_img, 117 | discr_real_pred=discr_real_pred, 118 | discr_fake_pred=discr_fake_pred, 119 | mask=mask_for_discr) 120 | total_loss = total_loss + adv_gen_loss 121 | metrics['gen_adv'] = adv_gen_loss 122 | metrics.update(add_prefix_to_keys(adv_metrics, 'adv_')) 123 | 124 | # feature matching 125 | if self.config.losses.feature_matching.weight > 0: 126 | need_mask_in_fm = OmegaConf.to_container(self.config.losses.feature_matching).get('pass_mask', False) 127 | mask_for_fm = supervised_mask if need_mask_in_fm else None 128 | fm_value = feature_matching_loss(discr_fake_features, discr_real_features, 129 | mask=mask_for_fm) * 
self.config.losses.feature_matching.weight 130 | total_loss = total_loss + fm_value 131 | metrics['gen_fm'] = fm_value 132 | 133 | if self.loss_resnet_pl is not None: 134 | resnet_pl_value = self.loss_resnet_pl(predicted_img, img) 135 | total_loss = total_loss + resnet_pl_value 136 | metrics['gen_resnet_pl'] = resnet_pl_value 137 | 138 | return total_loss, metrics 139 | 140 | def discriminator_loss(self, batch): 141 | total_loss = 0 142 | metrics = {} 143 | 144 | predicted_img = batch[self.image_to_discriminator].detach() 145 | self.adversarial_loss.pre_discriminator_step(real_batch=batch['image'], fake_batch=predicted_img, 146 | generator=self.generator, discriminator=self.discriminator) 147 | discr_real_pred, discr_real_features = self.discriminator(batch['image']) 148 | discr_fake_pred, discr_fake_features = self.discriminator(predicted_img) 149 | adv_discr_loss, adv_metrics = self.adversarial_loss.discriminator_loss(real_batch=batch['image'], 150 | fake_batch=predicted_img, 151 | discr_real_pred=discr_real_pred, 152 | discr_fake_pred=discr_fake_pred, 153 | mask=batch['mask']) 154 | total_loss = total_loss + adv_discr_loss 155 | metrics['discr_adv'] = adv_discr_loss 156 | metrics.update(add_prefix_to_keys(adv_metrics, 'adv_')) 157 | 158 | 159 | if batch.get('use_fake_fakes', False): 160 | fake_fakes = batch['fake_fakes'] 161 | self.adversarial_loss.pre_discriminator_step(real_batch=batch['image'], fake_batch=fake_fakes, 162 | generator=self.generator, discriminator=self.discriminator) 163 | discr_fake_fakes_pred, _ = self.discriminator(fake_fakes) 164 | fake_fakes_adv_discr_loss, fake_fakes_adv_metrics = self.adversarial_loss.discriminator_loss( 165 | real_batch=batch['image'], 166 | fake_batch=fake_fakes, 167 | discr_real_pred=discr_real_pred, 168 | discr_fake_pred=discr_fake_fakes_pred, 169 | mask=batch['mask'] 170 | ) 171 | total_loss = total_loss + fake_fakes_adv_discr_loss 172 | metrics['discr_adv_fake_fakes'] = fake_fakes_adv_discr_loss 173 | metrics.update(add_prefix_to_keys(fake_fakes_adv_metrics, 'adv_')) 174 | 175 | return total_loss, metrics 176 | -------------------------------------------------------------------------------- /annotator/lama/saicinpainting/training/visualizers/__init__.py: -------------------------------------------------------------------------------- 1 | import logging 2 | 3 | from annotator.lama.saicinpainting.training.visualizers.directory import DirectoryVisualizer 4 | from annotator.lama.saicinpainting.training.visualizers.noop import NoopVisualizer 5 | 6 | 7 | def make_visualizer(kind, **kwargs): 8 | logging.info(f'Make visualizer {kind}') 9 | 10 | if kind == 'directory': 11 | return DirectoryVisualizer(**kwargs) 12 | if kind == 'noop': 13 | return NoopVisualizer() 14 | 15 | raise ValueError(f'Unknown visualizer kind {kind}') 16 | -------------------------------------------------------------------------------- /annotator/lama/saicinpainting/training/visualizers/base.py: -------------------------------------------------------------------------------- 1 | import abc 2 | from typing import Dict, List 3 | 4 | import numpy as np 5 | import torch 6 | from skimage import color 7 | from skimage.segmentation import mark_boundaries 8 | 9 | from . 
import colors 10 | 11 | COLORS, _ = colors.generate_colors(151) # 151 - max classes for semantic segmentation 12 | 13 | 14 | class BaseVisualizer: 15 | @abc.abstractmethod 16 | def __call__(self, epoch_i, batch_i, batch, suffix='', rank=None): 17 | """ 18 | Take a batch, make an image from it and visualize 19 | """ 20 | raise NotImplementedError() 21 | 22 | 23 | def visualize_mask_and_images(images_dict: Dict[str, np.ndarray], keys: List[str], 24 | last_without_mask=True, rescale_keys=None, mask_only_first=None, 25 | black_mask=False) -> np.ndarray: 26 | mask = images_dict['mask'] > 0.5 27 | result = [] 28 | for i, k in enumerate(keys): 29 | img = images_dict[k] 30 | img = np.transpose(img, (1, 2, 0)) 31 | 32 | if rescale_keys is not None and k in rescale_keys: 33 | img = img - img.min() 34 | img /= img.max() + 1e-5 35 | if len(img.shape) == 2: 36 | img = np.expand_dims(img, 2) 37 | 38 | if img.shape[2] == 1: 39 | img = np.repeat(img, 3, axis=2) 40 | elif (img.shape[2] > 3): 41 | img_classes = img.argmax(2) 42 | img = color.label2rgb(img_classes, colors=COLORS) 43 | 44 | if mask_only_first: 45 | need_mark_boundaries = i == 0 46 | else: 47 | need_mark_boundaries = i < len(keys) - 1 or not last_without_mask 48 | 49 | if need_mark_boundaries: 50 | if black_mask: 51 | img = img * (1 - mask[0][..., None]) 52 | img = mark_boundaries(img, 53 | mask[0], 54 | color=(1., 0., 0.), 55 | outline_color=(1., 1., 1.), 56 | mode='thick') 57 | result.append(img) 58 | return np.concatenate(result, axis=1) 59 | 60 | 61 | def visualize_mask_and_images_batch(batch: Dict[str, torch.Tensor], keys: List[str], max_items=10, 62 | last_without_mask=True, rescale_keys=None) -> np.ndarray: 63 | batch = {k: tens.detach().cpu().numpy() for k, tens in batch.items() 64 | if k in keys or k == 'mask'} 65 | 66 | batch_size = next(iter(batch.values())).shape[0] 67 | items_to_vis = min(batch_size, max_items) 68 | result = [] 69 | for i in range(items_to_vis): 70 | cur_dct = {k: tens[i] for k, tens in batch.items()} 71 | result.append(visualize_mask_and_images(cur_dct, keys, last_without_mask=last_without_mask, 72 | rescale_keys=rescale_keys)) 73 | return np.concatenate(result, axis=0) 74 | -------------------------------------------------------------------------------- /annotator/lama/saicinpainting/training/visualizers/colors.py: -------------------------------------------------------------------------------- 1 | import random 2 | import colorsys 3 | 4 | import numpy as np 5 | import matplotlib 6 | matplotlib.use('agg') 7 | import matplotlib.pyplot as plt 8 | from matplotlib.colors import LinearSegmentedColormap 9 | 10 | 11 | def generate_colors(nlabels, type='bright', first_color_black=False, last_color_black=True, verbose=False): 12 | # https://stackoverflow.com/questions/14720331/how-to-generate-random-colors-in-matplotlib 13 | """ 14 | Creates a random colormap to be used together with matplotlib. Useful for segmentation tasks 15 | :param nlabels: Number of labels (size of colormap) 16 | :param type: 'bright' for strong colors, 'soft' for pastel colors 17 | :param first_color_black: Option to use first color as black, True or False 18 | :param last_color_black: Option to use last color as black, True or False 19 | :param verbose: Prints the number of labels and shows the colormap. 
True or False 20 | :return: colormap for matplotlib 21 | """ 22 | if type not in ('bright', 'soft'): 23 | print ('Please choose "bright" or "soft" for type') 24 | return 25 | 26 | if verbose: 27 | print('Number of labels: ' + str(nlabels)) 28 | 29 | # Generate color map for bright colors, based on hsv 30 | if type == 'bright': 31 | randHSVcolors = [(np.random.uniform(low=0.0, high=1), 32 | np.random.uniform(low=0.2, high=1), 33 | np.random.uniform(low=0.9, high=1)) for i in range(nlabels)] 34 | 35 | # Convert HSV list to RGB 36 | randRGBcolors = [] 37 | for HSVcolor in randHSVcolors: 38 | randRGBcolors.append(colorsys.hsv_to_rgb(HSVcolor[0], HSVcolor[1], HSVcolor[2])) 39 | 40 | if first_color_black: 41 | randRGBcolors[0] = [0, 0, 0] 42 | 43 | if last_color_black: 44 | randRGBcolors[-1] = [0, 0, 0] 45 | 46 | random_colormap = LinearSegmentedColormap.from_list('new_map', randRGBcolors, N=nlabels) 47 | 48 | # Generate soft pastel colors, by limiting the RGB spectrum 49 | if type == 'soft': 50 | low = 0.6 51 | high = 0.95 52 | randRGBcolors = [(np.random.uniform(low=low, high=high), 53 | np.random.uniform(low=low, high=high), 54 | np.random.uniform(low=low, high=high)) for i in range(nlabels)] 55 | 56 | if first_color_black: 57 | randRGBcolors[0] = [0, 0, 0] 58 | 59 | if last_color_black: 60 | randRGBcolors[-1] = [0, 0, 0] 61 | random_colormap = LinearSegmentedColormap.from_list('new_map', randRGBcolors, N=nlabels) 62 | 63 | # Display colorbar 64 | if verbose: 65 | from matplotlib import colors, colorbar 66 | from matplotlib import pyplot as plt 67 | fig, ax = plt.subplots(1, 1, figsize=(15, 0.5)) 68 | 69 | bounds = np.linspace(0, nlabels, nlabels + 1) 70 | norm = colors.BoundaryNorm(bounds, nlabels) 71 | 72 | cb = colorbar.ColorbarBase(ax, cmap=random_colormap, norm=norm, spacing='proportional', ticks=None, 73 | boundaries=bounds, format='%1i', orientation=u'horizontal') 74 | 75 | return randRGBcolors, random_colormap 76 | 77 | -------------------------------------------------------------------------------- /annotator/lama/saicinpainting/training/visualizers/directory.py: -------------------------------------------------------------------------------- 1 | import os 2 | 3 | import cv2 4 | import numpy as np 5 | 6 | from annotator.lama.saicinpainting.training.visualizers.base import BaseVisualizer, visualize_mask_and_images_batch 7 | from annotator.lama.saicinpainting.utils import check_and_warn_input_range 8 | 9 | 10 | class DirectoryVisualizer(BaseVisualizer): 11 | DEFAULT_KEY_ORDER = 'image predicted_image inpainted'.split(' ') 12 | 13 | def __init__(self, outdir, key_order=DEFAULT_KEY_ORDER, max_items_in_batch=10, 14 | last_without_mask=True, rescale_keys=None): 15 | self.outdir = outdir 16 | os.makedirs(self.outdir, exist_ok=True) 17 | self.key_order = key_order 18 | self.max_items_in_batch = max_items_in_batch 19 | self.last_without_mask = last_without_mask 20 | self.rescale_keys = rescale_keys 21 | 22 | def __call__(self, epoch_i, batch_i, batch, suffix='', rank=None): 23 | check_and_warn_input_range(batch['image'], 0, 1, 'DirectoryVisualizer target image') 24 | vis_img = visualize_mask_and_images_batch(batch, self.key_order, max_items=self.max_items_in_batch, 25 | last_without_mask=self.last_without_mask, 26 | rescale_keys=self.rescale_keys) 27 | 28 | vis_img = np.clip(vis_img * 255, 0, 255).astype('uint8') 29 | 30 | curoutdir = os.path.join(self.outdir, f'epoch{epoch_i:04d}{suffix}') 31 | os.makedirs(curoutdir, exist_ok=True) 32 | rank_suffix = f'_r{rank}' if rank is not None else '' 
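        # Annotation, not in the original file: each call writes one composite grid to
        #   <outdir>/epoch<NNNN><suffix>/batch<NNNNNNN><rank_suffix>.jpg
        # with one row per batch item (up to max_items_in_batch) and one column per key
        # in key_order (mask boundaries overlaid via mark_boundaries in visualize_mask_and_images).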
33 | out_fname = os.path.join(curoutdir, f'batch{batch_i:07d}{rank_suffix}.jpg') 34 | 35 | vis_img = cv2.cvtColor(vis_img, cv2.COLOR_RGB2BGR) 36 | cv2.imwrite(out_fname, vis_img) 37 | -------------------------------------------------------------------------------- /annotator/lama/saicinpainting/training/visualizers/noop.py: -------------------------------------------------------------------------------- 1 | from annotator.lama.saicinpainting.training.visualizers.base import BaseVisualizer 2 | 3 | 4 | class NoopVisualizer(BaseVisualizer): 5 | def __init__(self, *args, **kwargs): 6 | pass 7 | 8 | def __call__(self, epoch_i, batch_i, batch, suffix='', rank=None): 9 | pass 10 | -------------------------------------------------------------------------------- /annotator/lama/saicinpainting/utils.py: -------------------------------------------------------------------------------- 1 | import bisect 2 | import functools 3 | import logging 4 | import numbers 5 | import os 6 | import signal 7 | import sys 8 | import traceback 9 | import warnings 10 | 11 | import torch 12 | from pytorch_lightning import seed_everything 13 | 14 | LOGGER = logging.getLogger(__name__) 15 | 16 | 17 | def check_and_warn_input_range(tensor, min_value, max_value, name): 18 | actual_min = tensor.min() 19 | actual_max = tensor.max() 20 | if actual_min < min_value or actual_max > max_value: 21 | warnings.warn(f"{name} must be in {min_value}..{max_value} range, but it ranges {actual_min}..{actual_max}") 22 | 23 | 24 | def sum_dict_with_prefix(target, cur_dict, prefix, default=0): 25 | for k, v in cur_dict.items(): 26 | target_key = prefix + k 27 | target[target_key] = target.get(target_key, default) + v 28 | 29 | 30 | def average_dicts(dict_list): 31 | result = {} 32 | norm = 1e-3 33 | for dct in dict_list: 34 | sum_dict_with_prefix(result, dct, '') 35 | norm += 1 36 | for k in list(result): 37 | result[k] /= norm 38 | return result 39 | 40 | 41 | def add_prefix_to_keys(dct, prefix): 42 | return {prefix + k: v for k, v in dct.items()} 43 | 44 | 45 | def set_requires_grad(module, value): 46 | for param in module.parameters(): 47 | param.requires_grad = value 48 | 49 | 50 | def flatten_dict(dct): 51 | result = {} 52 | for k, v in dct.items(): 53 | if isinstance(k, tuple): 54 | k = '_'.join(k) 55 | if isinstance(v, dict): 56 | for sub_k, sub_v in flatten_dict(v).items(): 57 | result[f'{k}_{sub_k}'] = sub_v 58 | else: 59 | result[k] = v 60 | return result 61 | 62 | 63 | class LinearRamp: 64 | def __init__(self, start_value=0, end_value=1, start_iter=-1, end_iter=0): 65 | self.start_value = start_value 66 | self.end_value = end_value 67 | self.start_iter = start_iter 68 | self.end_iter = end_iter 69 | 70 | def __call__(self, i): 71 | if i < self.start_iter: 72 | return self.start_value 73 | if i >= self.end_iter: 74 | return self.end_value 75 | part = (i - self.start_iter) / (self.end_iter - self.start_iter) 76 | return self.start_value * (1 - part) + self.end_value * part 77 | 78 | 79 | class LadderRamp: 80 | def __init__(self, start_iters, values): 81 | self.start_iters = start_iters 82 | self.values = values 83 | assert len(values) == len(start_iters) + 1, (len(values), len(start_iters)) 84 | 85 | def __call__(self, i): 86 | segment_i = bisect.bisect_right(self.start_iters, i) 87 | return self.values[segment_i] 88 | 89 | 90 | def get_ramp(kind='ladder', **kwargs): 91 | if kind == 'linear': 92 | return LinearRamp(**kwargs) 93 | if kind == 'ladder': 94 | return LadderRamp(**kwargs) 95 | raise ValueError(f'Unexpected ramp kind: 
{kind}') 96 | 97 | 98 | def print_traceback_handler(sig, frame): 99 | LOGGER.warning(f'Received signal {sig}') 100 | bt = ''.join(traceback.format_stack()) 101 | LOGGER.warning(f'Requested stack trace:\n{bt}') 102 | 103 | 104 | def register_debug_signal_handlers(sig=None, handler=print_traceback_handler): 105 | LOGGER.warning(f'Setting signal {sig} handler {handler}') 106 | signal.signal(sig, handler) 107 | 108 | 109 | def handle_deterministic_config(config): 110 | seed = dict(config).get('seed', None) 111 | if seed is None: 112 | return False 113 | 114 | seed_everything(seed) 115 | return True 116 | 117 | 118 | def get_shape(t): 119 | if torch.is_tensor(t): 120 | return tuple(t.shape) 121 | elif isinstance(t, dict): 122 | return {n: get_shape(q) for n, q in t.items()} 123 | elif isinstance(t, (list, tuple)): 124 | return [get_shape(q) for q in t] 125 | elif isinstance(t, numbers.Number): 126 | return type(t) 127 | else: 128 | raise ValueError('unexpected type {}'.format(type(t))) 129 | 130 | 131 | def get_has_ddp_rank(): 132 | master_port = os.environ.get('MASTER_PORT', None) 133 | node_rank = os.environ.get('NODE_RANK', None) 134 | local_rank = os.environ.get('LOCAL_RANK', None) 135 | world_size = os.environ.get('WORLD_SIZE', None) 136 | has_rank = master_port is not None or node_rank is not None or local_rank is not None or world_size is not None 137 | return has_rank 138 | 139 | 140 | def handle_ddp_subprocess(): 141 | def main_decorator(main_func): 142 | @functools.wraps(main_func) 143 | def new_main(*args, **kwargs): 144 | # Trainer sets MASTER_PORT, NODE_RANK, LOCAL_RANK, WORLD_SIZE 145 | parent_cwd = os.environ.get('TRAINING_PARENT_WORK_DIR', None) 146 | has_parent = parent_cwd is not None 147 | has_rank = get_has_ddp_rank() 148 | assert has_parent == has_rank, f'Inconsistent state: has_parent={has_parent}, has_rank={has_rank}' 149 | 150 | if has_parent: 151 | # we are in the worker 152 | sys.argv.extend([ 153 | f'hydra.run.dir={parent_cwd}', 154 | # 'hydra/hydra_logging=disabled', 155 | # 'hydra/job_logging=disabled' 156 | ]) 157 | # do nothing if this is a top-level process 158 | # TRAINING_PARENT_WORK_DIR is set in handle_ddp_parent_process after hydra initialization 159 | 160 | main_func(*args, **kwargs) 161 | return new_main 162 | return main_decorator 163 | 164 | 165 | def handle_ddp_parent_process(): 166 | parent_cwd = os.environ.get('TRAINING_PARENT_WORK_DIR', None) 167 | has_parent = parent_cwd is not None 168 | has_rank = get_has_ddp_rank() 169 | assert has_parent == has_rank, f'Inconsistent state: has_parent={has_parent}, has_rank={has_rank}' 170 | 171 | if parent_cwd is None: 172 | os.environ['TRAINING_PARENT_WORK_DIR'] = os.getcwd() 173 | 174 | return has_parent 175 | -------------------------------------------------------------------------------- /annotator/uniformer/LICENSE: -------------------------------------------------------------------------------- 1 | Copyright 2022 SenseTime X-Lab. All rights reserved. 2 | 3 | Apache License 4 | Version 2.0, January 2004 5 | http://www.apache.org/licenses/ 6 | 7 | TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION 8 | 9 | 1. Definitions. 10 | 11 | "License" shall mean the terms and conditions for use, reproduction, 12 | and distribution as defined by Sections 1 through 9 of this document. 13 | 14 | "Licensor" shall mean the copyright owner or entity authorized by 15 | the copyright owner that is granting the License. 
16 | 17 | "Legal Entity" shall mean the union of the acting entity and all 18 | other entities that control, are controlled by, or are under common 19 | control with that entity. For the purposes of this definition, 20 | "control" means (i) the power, direct or indirect, to cause the 21 | direction or management of such entity, whether by contract or 22 | otherwise, or (ii) ownership of fifty percent (50%) or more of the 23 | outstanding shares, or (iii) beneficial ownership of such entity. 24 | 25 | "You" (or "Your") shall mean an individual or Legal Entity 26 | exercising permissions granted by this License. 27 | 28 | "Source" form shall mean the preferred form for making modifications, 29 | including but not limited to software source code, documentation 30 | source, and configuration files. 31 | 32 | "Object" form shall mean any form resulting from mechanical 33 | transformation or translation of a Source form, including but 34 | not limited to compiled object code, generated documentation, 35 | and conversions to other media types. 36 | 37 | "Work" shall mean the work of authorship, whether in Source or 38 | Object form, made available under the License, as indicated by a 39 | copyright notice that is included in or attached to the work 40 | (an example is provided in the Appendix below). 41 | 42 | "Derivative Works" shall mean any work, whether in Source or Object 43 | form, that is based on (or derived from) the Work and for which the 44 | editorial revisions, annotations, elaborations, or other modifications 45 | represent, as a whole, an original work of authorship. For the purposes 46 | of this License, Derivative Works shall not include works that remain 47 | separable from, or merely link (or bind by name) to the interfaces of, 48 | the Work and Derivative Works thereof. 49 | 50 | "Contribution" shall mean any work of authorship, including 51 | the original version of the Work and any modifications or additions 52 | to that Work or Derivative Works thereof, that is intentionally 53 | submitted to Licensor for inclusion in the Work by the copyright owner 54 | or by an individual or Legal Entity authorized to submit on behalf of 55 | the copyright owner. For the purposes of this definition, "submitted" 56 | means any form of electronic, verbal, or written communication sent 57 | to the Licensor or its representatives, including but not limited to 58 | communication on electronic mailing lists, source code control systems, 59 | and issue tracking systems that are managed by, or on behalf of, the 60 | Licensor for the purpose of discussing and improving the Work, but 61 | excluding communication that is conspicuously marked or otherwise 62 | designated in writing by the copyright owner as "Not a Contribution." 63 | 64 | "Contributor" shall mean Licensor and any individual or Legal Entity 65 | on behalf of whom a Contribution has been received by Licensor and 66 | subsequently incorporated within the Work. 67 | 68 | 2. Grant of Copyright License. Subject to the terms and conditions of 69 | this License, each Contributor hereby grants to You a perpetual, 70 | worldwide, non-exclusive, no-charge, royalty-free, irrevocable 71 | copyright license to reproduce, prepare Derivative Works of, 72 | publicly display, publicly perform, sublicense, and distribute the 73 | Work and such Derivative Works in Source or Object form. 74 | 75 | 3. Grant of Patent License. 
Subject to the terms and conditions of 76 | this License, each Contributor hereby grants to You a perpetual, 77 | worldwide, non-exclusive, no-charge, royalty-free, irrevocable 78 | (except as stated in this section) patent license to make, have made, 79 | use, offer to sell, sell, import, and otherwise transfer the Work, 80 | where such license applies only to those patent claims licensable 81 | by such Contributor that are necessarily infringed by their 82 | Contribution(s) alone or by combination of their Contribution(s) 83 | with the Work to which such Contribution(s) was submitted. If You 84 | institute patent litigation against any entity (including a 85 | cross-claim or counterclaim in a lawsuit) alleging that the Work 86 | or a Contribution incorporated within the Work constitutes direct 87 | or contributory patent infringement, then any patent licenses 88 | granted to You under this License for that Work shall terminate 89 | as of the date such litigation is filed. 90 | 91 | 4. Redistribution. You may reproduce and distribute copies of the 92 | Work or Derivative Works thereof in any medium, with or without 93 | modifications, and in Source or Object form, provided that You 94 | meet the following conditions: 95 | 96 | (a) You must give any other recipients of the Work or 97 | Derivative Works a copy of this License; and 98 | 99 | (b) You must cause any modified files to carry prominent notices 100 | stating that You changed the files; and 101 | 102 | (c) You must retain, in the Source form of any Derivative Works 103 | that You distribute, all copyright, patent, trademark, and 104 | attribution notices from the Source form of the Work, 105 | excluding those notices that do not pertain to any part of 106 | the Derivative Works; and 107 | 108 | (d) If the Work includes a "NOTICE" text file as part of its 109 | distribution, then any Derivative Works that You distribute must 110 | include a readable copy of the attribution notices contained 111 | within such NOTICE file, excluding those notices that do not 112 | pertain to any part of the Derivative Works, in at least one 113 | of the following places: within a NOTICE text file distributed 114 | as part of the Derivative Works; within the Source form or 115 | documentation, if provided along with the Derivative Works; or, 116 | within a display generated by the Derivative Works, if and 117 | wherever such third-party notices normally appear. The contents 118 | of the NOTICE file are for informational purposes only and 119 | do not modify the License. You may add Your own attribution 120 | notices within Derivative Works that You distribute, alongside 121 | or as an addendum to the NOTICE text from the Work, provided 122 | that such additional attribution notices cannot be construed 123 | as modifying the License. 124 | 125 | You may add Your own copyright statement to Your modifications and 126 | may provide additional or different license terms and conditions 127 | for use, reproduction, or distribution of Your modifications, or 128 | for any such Derivative Works as a whole, provided Your use, 129 | reproduction, and distribution of the Work otherwise complies with 130 | the conditions stated in this License. 131 | 132 | 5. Submission of Contributions. Unless You explicitly state otherwise, 133 | any Contribution intentionally submitted for inclusion in the Work 134 | by You to the Licensor shall be under the terms and conditions of 135 | this License, without any additional terms or conditions. 
136 | Notwithstanding the above, nothing herein shall supersede or modify 137 | the terms of any separate license agreement you may have executed 138 | with Licensor regarding such Contributions. 139 | 140 | 6. Trademarks. This License does not grant permission to use the trade 141 | names, trademarks, service marks, or product names of the Licensor, 142 | except as required for reasonable and customary use in describing the 143 | origin of the Work and reproducing the content of the NOTICE file. 144 | 145 | 7. Disclaimer of Warranty. Unless required by applicable law or 146 | agreed to in writing, Licensor provides the Work (and each 147 | Contributor provides its Contributions) on an "AS IS" BASIS, 148 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or 149 | implied, including, without limitation, any warranties or conditions 150 | of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A 151 | PARTICULAR PURPOSE. You are solely responsible for determining the 152 | appropriateness of using or redistributing the Work and assume any 153 | risks associated with Your exercise of permissions under this License. 154 | 155 | 8. Limitation of Liability. In no event and under no legal theory, 156 | whether in tort (including negligence), contract, or otherwise, 157 | unless required by applicable law (such as deliberate and grossly 158 | negligent acts) or agreed to in writing, shall any Contributor be 159 | liable to You for damages, including any direct, indirect, special, 160 | incidental, or consequential damages of any character arising as a 161 | result of this License or out of the use or inability to use the 162 | Work (including but not limited to damages for loss of goodwill, 163 | work stoppage, computer failure or malfunction, or any and all 164 | other commercial damages or losses), even if such Contributor 165 | has been advised of the possibility of such damages. 166 | 167 | 9. Accepting Warranty or Additional Liability. While redistributing 168 | the Work or Derivative Works thereof, You may choose to offer, 169 | and charge a fee for, acceptance of support, warranty, indemnity, 170 | or other liability obligations and/or rights consistent with this 171 | License. However, in accepting such obligations, You may act only 172 | on Your own behalf and on Your sole responsibility, not on behalf 173 | of any other Contributor, and only if You agree to indemnify, 174 | defend, and hold each Contributor harmless for any liability 175 | incurred by, or claims asserted against, such Contributor by reason 176 | of your accepting any such warranty or additional liability. 177 | 178 | END OF TERMS AND CONDITIONS 179 | 180 | APPENDIX: How to apply the Apache License to your work. 181 | 182 | To apply the Apache License to your work, attach the following 183 | boilerplate notice, with the fields enclosed by brackets "[]" 184 | replaced with your own identifying information. (Don't include 185 | the brackets!) The text should be enclosed in the appropriate 186 | comment syntax for the file format. We also recommend that a 187 | file or class name and description of purpose be included on the 188 | same "printed page" as the copyright notice for easier 189 | identification within third-party archives. 190 | 191 | Copyright 2022 SenseTime X-Lab. 192 | 193 | Licensed under the Apache License, Version 2.0 (the "License"); 194 | you may not use this file except in compliance with the License. 
195 | You may obtain a copy of the License at 196 | 197 | http://www.apache.org/licenses/LICENSE-2.0 198 | 199 | Unless required by applicable law or agreed to in writing, software 200 | distributed under the License is distributed on an "AS IS" BASIS, 201 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 202 | See the License for the specific language governing permissions and 203 | limitations under the License. -------------------------------------------------------------------------------- /annotator/uniformer/__init__.py: -------------------------------------------------------------------------------- 1 | import os 2 | from annotator.annotator_path import models_path 3 | from modules import devices 4 | from annotator.uniformer.inference import init_segmentor, inference_segmentor, show_result_pyplot 5 | import urllib.request 6 | 7 | try: 8 | from mmseg.core.evaluation import get_palette 9 | except ImportError: 10 | from annotator.mmpkg.mmseg.core.evaluation import get_palette 11 | 12 | modeldir = os.path.join(models_path, "uniformer") 13 | checkpoint_file = "https://huggingface.co/lllyasviel/ControlNet/resolve/main/annotator/ckpts/upernet_global_small.pth" 14 | config_file = os.path.join(os.path.dirname(os.path.realpath(__file__)), "upernet_global_small.py") 15 | old_modeldir = os.path.dirname(os.path.realpath(__file__)) 16 | model = None 17 | 18 | def unload_uniformer_model(): 19 | global model 20 | if model is not None: 21 | model = model.cpu() 22 | def _load_file_from_url(model_path: str, model_dir: str) -> None: 23 | os.makedirs(model_dir, exist_ok=True)  # create the download target directory itself, not just its parent 24 | urllib.request.urlretrieve(model_path, os.path.join(model_dir, model_path.split("/")[-1])) 25 | 26 | def apply_uniformer(img): 27 | global model 28 | if model is None: 29 | modelpath = os.path.join(modeldir, "upernet_global_small.pth") 30 | old_modelpath = os.path.join(old_modeldir, "upernet_global_small.pth") 31 | if os.path.exists(old_modelpath): 32 | modelpath = old_modelpath 33 | elif not os.path.exists(modelpath): 34 | _load_file_from_url(checkpoint_file, model_dir=modeldir) 35 | 36 | model = init_segmentor(config_file, modelpath, device=devices.get_device_for("controlnet")) 37 | model = model.to(devices.get_device_for("controlnet")) 38 | 39 | if devices.get_device_for("controlnet").type == 'mps': 40 | # adaptive_avg_pool2d can fail on MPS, workaround with CPU 41 | import torch.nn.functional 42 | 43 | orig_adaptive_avg_pool2d = torch.nn.functional.adaptive_avg_pool2d 44 | def cpu_if_exception(input, *args, **kwargs): 45 | try: 46 | return orig_adaptive_avg_pool2d(input, *args, **kwargs) 47 | except Exception: 48 | return orig_adaptive_avg_pool2d(input.cpu(), *args, **kwargs).to(input.device) 49 | 50 | try: 51 | torch.nn.functional.adaptive_avg_pool2d = cpu_if_exception 52 | result = inference_segmentor(model, img) 53 | finally: 54 | torch.nn.functional.adaptive_avg_pool2d = orig_adaptive_avg_pool2d 55 | else: 56 | result = inference_segmentor(model, img) 57 | 58 | res_img = show_result_pyplot(model, img, result, get_palette('ade'), opacity=1) 59 | return res_img 60 | -------------------------------------------------------------------------------- /annotator/uniformer/configs/_base_/datasets/ade20k.py: -------------------------------------------------------------------------------- 1 | # dataset settings 2 | dataset_type = 'ADE20KDataset' 3 | data_root = 'data/ade/ADEChallengeData2016' 4 | img_norm_cfg = dict( 5 | mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True) 6 | 
crop_size = (512, 512) 7 | train_pipeline = [ 8 | dict(type='LoadImageFromFile'), 9 | dict(type='LoadAnnotations', reduce_zero_label=True), 10 | dict(type='Resize', img_scale=(2048, 512), ratio_range=(0.5, 2.0)), 11 | dict(type='RandomCrop', crop_size=crop_size, cat_max_ratio=0.75), 12 | dict(type='RandomFlip', prob=0.5), 13 | dict(type='PhotoMetricDistortion'), 14 | dict(type='Normalize', **img_norm_cfg), 15 | dict(type='Pad', size=crop_size, pad_val=0, seg_pad_val=255), 16 | dict(type='DefaultFormatBundle'), 17 | dict(type='Collect', keys=['img', 'gt_semantic_seg']), 18 | ] 19 | test_pipeline = [ 20 | dict(type='LoadImageFromFile'), 21 | dict( 22 | type='MultiScaleFlipAug', 23 | img_scale=(2048, 512), 24 | # img_ratios=[0.5, 0.75, 1.0, 1.25, 1.5, 1.75], 25 | flip=False, 26 | transforms=[ 27 | dict(type='Resize', keep_ratio=True), 28 | dict(type='RandomFlip'), 29 | dict(type='Normalize', **img_norm_cfg), 30 | dict(type='ImageToTensor', keys=['img']), 31 | dict(type='Collect', keys=['img']), 32 | ]) 33 | ] 34 | data = dict( 35 | samples_per_gpu=4, 36 | workers_per_gpu=4, 37 | train=dict( 38 | type=dataset_type, 39 | data_root=data_root, 40 | img_dir='images/training', 41 | ann_dir='annotations/training', 42 | pipeline=train_pipeline), 43 | val=dict( 44 | type=dataset_type, 45 | data_root=data_root, 46 | img_dir='images/validation', 47 | ann_dir='annotations/validation', 48 | pipeline=test_pipeline), 49 | test=dict( 50 | type=dataset_type, 51 | data_root=data_root, 52 | img_dir='images/validation', 53 | ann_dir='annotations/validation', 54 | pipeline=test_pipeline)) 55 | -------------------------------------------------------------------------------- /annotator/uniformer/configs/_base_/datasets/chase_db1.py: -------------------------------------------------------------------------------- 1 | # dataset settings 2 | dataset_type = 'ChaseDB1Dataset' 3 | data_root = 'data/CHASE_DB1' 4 | img_norm_cfg = dict( 5 | mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True) 6 | img_scale = (960, 999) 7 | crop_size = (128, 128) 8 | train_pipeline = [ 9 | dict(type='LoadImageFromFile'), 10 | dict(type='LoadAnnotations'), 11 | dict(type='Resize', img_scale=img_scale, ratio_range=(0.5, 2.0)), 12 | dict(type='RandomCrop', crop_size=crop_size, cat_max_ratio=0.75), 13 | dict(type='RandomFlip', prob=0.5), 14 | dict(type='PhotoMetricDistortion'), 15 | dict(type='Normalize', **img_norm_cfg), 16 | dict(type='Pad', size=crop_size, pad_val=0, seg_pad_val=255), 17 | dict(type='DefaultFormatBundle'), 18 | dict(type='Collect', keys=['img', 'gt_semantic_seg']) 19 | ] 20 | test_pipeline = [ 21 | dict(type='LoadImageFromFile'), 22 | dict( 23 | type='MultiScaleFlipAug', 24 | img_scale=img_scale, 25 | # img_ratios=[0.5, 0.75, 1.0, 1.25, 1.5, 1.75, 2.0], 26 | flip=False, 27 | transforms=[ 28 | dict(type='Resize', keep_ratio=True), 29 | dict(type='RandomFlip'), 30 | dict(type='Normalize', **img_norm_cfg), 31 | dict(type='ImageToTensor', keys=['img']), 32 | dict(type='Collect', keys=['img']) 33 | ]) 34 | ] 35 | 36 | data = dict( 37 | samples_per_gpu=4, 38 | workers_per_gpu=4, 39 | train=dict( 40 | type='RepeatDataset', 41 | times=40000, 42 | dataset=dict( 43 | type=dataset_type, 44 | data_root=data_root, 45 | img_dir='images/training', 46 | ann_dir='annotations/training', 47 | pipeline=train_pipeline)), 48 | val=dict( 49 | type=dataset_type, 50 | data_root=data_root, 51 | img_dir='images/validation', 52 | ann_dir='annotations/validation', 53 | pipeline=test_pipeline), 54 | test=dict( 55 | type=dataset_type, 
56 | data_root=data_root, 57 | img_dir='images/validation', 58 | ann_dir='annotations/validation', 59 | pipeline=test_pipeline)) 60 | -------------------------------------------------------------------------------- /annotator/uniformer/configs/_base_/datasets/cityscapes.py: -------------------------------------------------------------------------------- 1 | # dataset settings 2 | dataset_type = 'CityscapesDataset' 3 | data_root = 'data/cityscapes/' 4 | img_norm_cfg = dict( 5 | mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True) 6 | crop_size = (512, 1024) 7 | train_pipeline = [ 8 | dict(type='LoadImageFromFile'), 9 | dict(type='LoadAnnotations'), 10 | dict(type='Resize', img_scale=(2048, 1024), ratio_range=(0.5, 2.0)), 11 | dict(type='RandomCrop', crop_size=crop_size, cat_max_ratio=0.75), 12 | dict(type='RandomFlip', prob=0.5), 13 | dict(type='PhotoMetricDistortion'), 14 | dict(type='Normalize', **img_norm_cfg), 15 | dict(type='Pad', size=crop_size, pad_val=0, seg_pad_val=255), 16 | dict(type='DefaultFormatBundle'), 17 | dict(type='Collect', keys=['img', 'gt_semantic_seg']), 18 | ] 19 | test_pipeline = [ 20 | dict(type='LoadImageFromFile'), 21 | dict( 22 | type='MultiScaleFlipAug', 23 | img_scale=(2048, 1024), 24 | # img_ratios=[0.5, 0.75, 1.0, 1.25, 1.5, 1.75], 25 | flip=False, 26 | transforms=[ 27 | dict(type='Resize', keep_ratio=True), 28 | dict(type='RandomFlip'), 29 | dict(type='Normalize', **img_norm_cfg), 30 | dict(type='ImageToTensor', keys=['img']), 31 | dict(type='Collect', keys=['img']), 32 | ]) 33 | ] 34 | data = dict( 35 | samples_per_gpu=2, 36 | workers_per_gpu=2, 37 | train=dict( 38 | type=dataset_type, 39 | data_root=data_root, 40 | img_dir='leftImg8bit/train', 41 | ann_dir='gtFine/train', 42 | pipeline=train_pipeline), 43 | val=dict( 44 | type=dataset_type, 45 | data_root=data_root, 46 | img_dir='leftImg8bit/val', 47 | ann_dir='gtFine/val', 48 | pipeline=test_pipeline), 49 | test=dict( 50 | type=dataset_type, 51 | data_root=data_root, 52 | img_dir='leftImg8bit/val', 53 | ann_dir='gtFine/val', 54 | pipeline=test_pipeline)) 55 | -------------------------------------------------------------------------------- /annotator/uniformer/configs/_base_/datasets/cityscapes_769x769.py: -------------------------------------------------------------------------------- 1 | _base_ = './cityscapes.py' 2 | img_norm_cfg = dict( 3 | mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True) 4 | crop_size = (769, 769) 5 | train_pipeline = [ 6 | dict(type='LoadImageFromFile'), 7 | dict(type='LoadAnnotations'), 8 | dict(type='Resize', img_scale=(2049, 1025), ratio_range=(0.5, 2.0)), 9 | dict(type='RandomCrop', crop_size=crop_size, cat_max_ratio=0.75), 10 | dict(type='RandomFlip', prob=0.5), 11 | dict(type='PhotoMetricDistortion'), 12 | dict(type='Normalize', **img_norm_cfg), 13 | dict(type='Pad', size=crop_size, pad_val=0, seg_pad_val=255), 14 | dict(type='DefaultFormatBundle'), 15 | dict(type='Collect', keys=['img', 'gt_semantic_seg']), 16 | ] 17 | test_pipeline = [ 18 | dict(type='LoadImageFromFile'), 19 | dict( 20 | type='MultiScaleFlipAug', 21 | img_scale=(2049, 1025), 22 | # img_ratios=[0.5, 0.75, 1.0, 1.25, 1.5, 1.75], 23 | flip=False, 24 | transforms=[ 25 | dict(type='Resize', keep_ratio=True), 26 | dict(type='RandomFlip'), 27 | dict(type='Normalize', **img_norm_cfg), 28 | dict(type='ImageToTensor', keys=['img']), 29 | dict(type='Collect', keys=['img']), 30 | ]) 31 | ] 32 | data = dict( 33 | train=dict(pipeline=train_pipeline), 34 | 
val=dict(pipeline=test_pipeline), 35 | test=dict(pipeline=test_pipeline)) 36 | -------------------------------------------------------------------------------- /annotator/uniformer/configs/_base_/datasets/drive.py: -------------------------------------------------------------------------------- 1 | # dataset settings 2 | dataset_type = 'DRIVEDataset' 3 | data_root = 'data/DRIVE' 4 | img_norm_cfg = dict( 5 | mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True) 6 | img_scale = (584, 565) 7 | crop_size = (64, 64) 8 | train_pipeline = [ 9 | dict(type='LoadImageFromFile'), 10 | dict(type='LoadAnnotations'), 11 | dict(type='Resize', img_scale=img_scale, ratio_range=(0.5, 2.0)), 12 | dict(type='RandomCrop', crop_size=crop_size, cat_max_ratio=0.75), 13 | dict(type='RandomFlip', prob=0.5), 14 | dict(type='PhotoMetricDistortion'), 15 | dict(type='Normalize', **img_norm_cfg), 16 | dict(type='Pad', size=crop_size, pad_val=0, seg_pad_val=255), 17 | dict(type='DefaultFormatBundle'), 18 | dict(type='Collect', keys=['img', 'gt_semantic_seg']) 19 | ] 20 | test_pipeline = [ 21 | dict(type='LoadImageFromFile'), 22 | dict( 23 | type='MultiScaleFlipAug', 24 | img_scale=img_scale, 25 | # img_ratios=[0.5, 0.75, 1.0, 1.25, 1.5, 1.75, 2.0], 26 | flip=False, 27 | transforms=[ 28 | dict(type='Resize', keep_ratio=True), 29 | dict(type='RandomFlip'), 30 | dict(type='Normalize', **img_norm_cfg), 31 | dict(type='ImageToTensor', keys=['img']), 32 | dict(type='Collect', keys=['img']) 33 | ]) 34 | ] 35 | 36 | data = dict( 37 | samples_per_gpu=4, 38 | workers_per_gpu=4, 39 | train=dict( 40 | type='RepeatDataset', 41 | times=40000, 42 | dataset=dict( 43 | type=dataset_type, 44 | data_root=data_root, 45 | img_dir='images/training', 46 | ann_dir='annotations/training', 47 | pipeline=train_pipeline)), 48 | val=dict( 49 | type=dataset_type, 50 | data_root=data_root, 51 | img_dir='images/validation', 52 | ann_dir='annotations/validation', 53 | pipeline=test_pipeline), 54 | test=dict( 55 | type=dataset_type, 56 | data_root=data_root, 57 | img_dir='images/validation', 58 | ann_dir='annotations/validation', 59 | pipeline=test_pipeline)) 60 | -------------------------------------------------------------------------------- /annotator/uniformer/configs/_base_/datasets/hrf.py: -------------------------------------------------------------------------------- 1 | # dataset settings 2 | dataset_type = 'HRFDataset' 3 | data_root = 'data/HRF' 4 | img_norm_cfg = dict( 5 | mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True) 6 | img_scale = (2336, 3504) 7 | crop_size = (256, 256) 8 | train_pipeline = [ 9 | dict(type='LoadImageFromFile'), 10 | dict(type='LoadAnnotations'), 11 | dict(type='Resize', img_scale=img_scale, ratio_range=(0.5, 2.0)), 12 | dict(type='RandomCrop', crop_size=crop_size, cat_max_ratio=0.75), 13 | dict(type='RandomFlip', prob=0.5), 14 | dict(type='PhotoMetricDistortion'), 15 | dict(type='Normalize', **img_norm_cfg), 16 | dict(type='Pad', size=crop_size, pad_val=0, seg_pad_val=255), 17 | dict(type='DefaultFormatBundle'), 18 | dict(type='Collect', keys=['img', 'gt_semantic_seg']) 19 | ] 20 | test_pipeline = [ 21 | dict(type='LoadImageFromFile'), 22 | dict( 23 | type='MultiScaleFlipAug', 24 | img_scale=img_scale, 25 | # img_ratios=[0.5, 0.75, 1.0, 1.25, 1.5, 1.75, 2.0], 26 | flip=False, 27 | transforms=[ 28 | dict(type='Resize', keep_ratio=True), 29 | dict(type='RandomFlip'), 30 | dict(type='Normalize', **img_norm_cfg), 31 | dict(type='ImageToTensor', keys=['img']), 32 | 
dict(type='Collect', keys=['img']) 33 | ]) 34 | ] 35 | 36 | data = dict( 37 | samples_per_gpu=4, 38 | workers_per_gpu=4, 39 | train=dict( 40 | type='RepeatDataset', 41 | times=40000, 42 | dataset=dict( 43 | type=dataset_type, 44 | data_root=data_root, 45 | img_dir='images/training', 46 | ann_dir='annotations/training', 47 | pipeline=train_pipeline)), 48 | val=dict( 49 | type=dataset_type, 50 | data_root=data_root, 51 | img_dir='images/validation', 52 | ann_dir='annotations/validation', 53 | pipeline=test_pipeline), 54 | test=dict( 55 | type=dataset_type, 56 | data_root=data_root, 57 | img_dir='images/validation', 58 | ann_dir='annotations/validation', 59 | pipeline=test_pipeline)) 60 | -------------------------------------------------------------------------------- /annotator/uniformer/configs/_base_/datasets/pascal_context.py: -------------------------------------------------------------------------------- 1 | # dataset settings 2 | dataset_type = 'PascalContextDataset' 3 | data_root = 'data/VOCdevkit/VOC2010/' 4 | img_norm_cfg = dict( 5 | mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True) 6 | 7 | img_scale = (520, 520) 8 | crop_size = (480, 480) 9 | 10 | train_pipeline = [ 11 | dict(type='LoadImageFromFile'), 12 | dict(type='LoadAnnotations'), 13 | dict(type='Resize', img_scale=img_scale, ratio_range=(0.5, 2.0)), 14 | dict(type='RandomCrop', crop_size=crop_size, cat_max_ratio=0.75), 15 | dict(type='RandomFlip', prob=0.5), 16 | dict(type='PhotoMetricDistortion'), 17 | dict(type='Normalize', **img_norm_cfg), 18 | dict(type='Pad', size=crop_size, pad_val=0, seg_pad_val=255), 19 | dict(type='DefaultFormatBundle'), 20 | dict(type='Collect', keys=['img', 'gt_semantic_seg']), 21 | ] 22 | test_pipeline = [ 23 | dict(type='LoadImageFromFile'), 24 | dict( 25 | type='MultiScaleFlipAug', 26 | img_scale=img_scale, 27 | # img_ratios=[0.5, 0.75, 1.0, 1.25, 1.5, 1.75], 28 | flip=False, 29 | transforms=[ 30 | dict(type='Resize', keep_ratio=True), 31 | dict(type='RandomFlip'), 32 | dict(type='Normalize', **img_norm_cfg), 33 | dict(type='ImageToTensor', keys=['img']), 34 | dict(type='Collect', keys=['img']), 35 | ]) 36 | ] 37 | data = dict( 38 | samples_per_gpu=4, 39 | workers_per_gpu=4, 40 | train=dict( 41 | type=dataset_type, 42 | data_root=data_root, 43 | img_dir='JPEGImages', 44 | ann_dir='SegmentationClassContext', 45 | split='ImageSets/SegmentationContext/train.txt', 46 | pipeline=train_pipeline), 47 | val=dict( 48 | type=dataset_type, 49 | data_root=data_root, 50 | img_dir='JPEGImages', 51 | ann_dir='SegmentationClassContext', 52 | split='ImageSets/SegmentationContext/val.txt', 53 | pipeline=test_pipeline), 54 | test=dict( 55 | type=dataset_type, 56 | data_root=data_root, 57 | img_dir='JPEGImages', 58 | ann_dir='SegmentationClassContext', 59 | split='ImageSets/SegmentationContext/val.txt', 60 | pipeline=test_pipeline)) 61 | -------------------------------------------------------------------------------- /annotator/uniformer/configs/_base_/datasets/pascal_context_59.py: -------------------------------------------------------------------------------- 1 | # dataset settings 2 | dataset_type = 'PascalContextDataset59' 3 | data_root = 'data/VOCdevkit/VOC2010/' 4 | img_norm_cfg = dict( 5 | mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True) 6 | 7 | img_scale = (520, 520) 8 | crop_size = (480, 480) 9 | 10 | train_pipeline = [ 11 | dict(type='LoadImageFromFile'), 12 | dict(type='LoadAnnotations', reduce_zero_label=True), 13 | dict(type='Resize', 
img_scale=img_scale, ratio_range=(0.5, 2.0)), 14 | dict(type='RandomCrop', crop_size=crop_size, cat_max_ratio=0.75), 15 | dict(type='RandomFlip', prob=0.5), 16 | dict(type='PhotoMetricDistortion'), 17 | dict(type='Normalize', **img_norm_cfg), 18 | dict(type='Pad', size=crop_size, pad_val=0, seg_pad_val=255), 19 | dict(type='DefaultFormatBundle'), 20 | dict(type='Collect', keys=['img', 'gt_semantic_seg']), 21 | ] 22 | test_pipeline = [ 23 | dict(type='LoadImageFromFile'), 24 | dict( 25 | type='MultiScaleFlipAug', 26 | img_scale=img_scale, 27 | # img_ratios=[0.5, 0.75, 1.0, 1.25, 1.5, 1.75], 28 | flip=False, 29 | transforms=[ 30 | dict(type='Resize', keep_ratio=True), 31 | dict(type='RandomFlip'), 32 | dict(type='Normalize', **img_norm_cfg), 33 | dict(type='ImageToTensor', keys=['img']), 34 | dict(type='Collect', keys=['img']), 35 | ]) 36 | ] 37 | data = dict( 38 | samples_per_gpu=4, 39 | workers_per_gpu=4, 40 | train=dict( 41 | type=dataset_type, 42 | data_root=data_root, 43 | img_dir='JPEGImages', 44 | ann_dir='SegmentationClassContext', 45 | split='ImageSets/SegmentationContext/train.txt', 46 | pipeline=train_pipeline), 47 | val=dict( 48 | type=dataset_type, 49 | data_root=data_root, 50 | img_dir='JPEGImages', 51 | ann_dir='SegmentationClassContext', 52 | split='ImageSets/SegmentationContext/val.txt', 53 | pipeline=test_pipeline), 54 | test=dict( 55 | type=dataset_type, 56 | data_root=data_root, 57 | img_dir='JPEGImages', 58 | ann_dir='SegmentationClassContext', 59 | split='ImageSets/SegmentationContext/val.txt', 60 | pipeline=test_pipeline)) 61 | -------------------------------------------------------------------------------- /annotator/uniformer/configs/_base_/datasets/pascal_voc12.py: -------------------------------------------------------------------------------- 1 | # dataset settings 2 | dataset_type = 'PascalVOCDataset' 3 | data_root = 'data/VOCdevkit/VOC2012' 4 | img_norm_cfg = dict( 5 | mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True) 6 | crop_size = (512, 512) 7 | train_pipeline = [ 8 | dict(type='LoadImageFromFile'), 9 | dict(type='LoadAnnotations'), 10 | dict(type='Resize', img_scale=(2048, 512), ratio_range=(0.5, 2.0)), 11 | dict(type='RandomCrop', crop_size=crop_size, cat_max_ratio=0.75), 12 | dict(type='RandomFlip', prob=0.5), 13 | dict(type='PhotoMetricDistortion'), 14 | dict(type='Normalize', **img_norm_cfg), 15 | dict(type='Pad', size=crop_size, pad_val=0, seg_pad_val=255), 16 | dict(type='DefaultFormatBundle'), 17 | dict(type='Collect', keys=['img', 'gt_semantic_seg']), 18 | ] 19 | test_pipeline = [ 20 | dict(type='LoadImageFromFile'), 21 | dict( 22 | type='MultiScaleFlipAug', 23 | img_scale=(2048, 512), 24 | # img_ratios=[0.5, 0.75, 1.0, 1.25, 1.5, 1.75], 25 | flip=False, 26 | transforms=[ 27 | dict(type='Resize', keep_ratio=True), 28 | dict(type='RandomFlip'), 29 | dict(type='Normalize', **img_norm_cfg), 30 | dict(type='ImageToTensor', keys=['img']), 31 | dict(type='Collect', keys=['img']), 32 | ]) 33 | ] 34 | data = dict( 35 | samples_per_gpu=4, 36 | workers_per_gpu=4, 37 | train=dict( 38 | type=dataset_type, 39 | data_root=data_root, 40 | img_dir='JPEGImages', 41 | ann_dir='SegmentationClass', 42 | split='ImageSets/Segmentation/train.txt', 43 | pipeline=train_pipeline), 44 | val=dict( 45 | type=dataset_type, 46 | data_root=data_root, 47 | img_dir='JPEGImages', 48 | ann_dir='SegmentationClass', 49 | split='ImageSets/Segmentation/val.txt', 50 | pipeline=test_pipeline), 51 | test=dict( 52 | type=dataset_type, 53 | data_root=data_root, 54 | 
img_dir='JPEGImages', 55 | ann_dir='SegmentationClass', 56 | split='ImageSets/Segmentation/val.txt', 57 | pipeline=test_pipeline)) 58 | -------------------------------------------------------------------------------- /annotator/uniformer/configs/_base_/datasets/pascal_voc12_aug.py: -------------------------------------------------------------------------------- 1 | _base_ = './pascal_voc12.py' 2 | # dataset settings 3 | data = dict( 4 | train=dict( 5 | ann_dir=['SegmentationClass', 'SegmentationClassAug'], 6 | split=[ 7 | 'ImageSets/Segmentation/train.txt', 8 | 'ImageSets/Segmentation/aug.txt' 9 | ])) 10 | -------------------------------------------------------------------------------- /annotator/uniformer/configs/_base_/datasets/stare.py: -------------------------------------------------------------------------------- 1 | # dataset settings 2 | dataset_type = 'STAREDataset' 3 | data_root = 'data/STARE' 4 | img_norm_cfg = dict( 5 | mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True) 6 | img_scale = (605, 700) 7 | crop_size = (128, 128) 8 | train_pipeline = [ 9 | dict(type='LoadImageFromFile'), 10 | dict(type='LoadAnnotations'), 11 | dict(type='Resize', img_scale=img_scale, ratio_range=(0.5, 2.0)), 12 | dict(type='RandomCrop', crop_size=crop_size, cat_max_ratio=0.75), 13 | dict(type='RandomFlip', prob=0.5), 14 | dict(type='PhotoMetricDistortion'), 15 | dict(type='Normalize', **img_norm_cfg), 16 | dict(type='Pad', size=crop_size, pad_val=0, seg_pad_val=255), 17 | dict(type='DefaultFormatBundle'), 18 | dict(type='Collect', keys=['img', 'gt_semantic_seg']) 19 | ] 20 | test_pipeline = [ 21 | dict(type='LoadImageFromFile'), 22 | dict( 23 | type='MultiScaleFlipAug', 24 | img_scale=img_scale, 25 | # img_ratios=[0.5, 0.75, 1.0, 1.25, 1.5, 1.75, 2.0], 26 | flip=False, 27 | transforms=[ 28 | dict(type='Resize', keep_ratio=True), 29 | dict(type='RandomFlip'), 30 | dict(type='Normalize', **img_norm_cfg), 31 | dict(type='ImageToTensor', keys=['img']), 32 | dict(type='Collect', keys=['img']) 33 | ]) 34 | ] 35 | 36 | data = dict( 37 | samples_per_gpu=4, 38 | workers_per_gpu=4, 39 | train=dict( 40 | type='RepeatDataset', 41 | times=40000, 42 | dataset=dict( 43 | type=dataset_type, 44 | data_root=data_root, 45 | img_dir='images/training', 46 | ann_dir='annotations/training', 47 | pipeline=train_pipeline)), 48 | val=dict( 49 | type=dataset_type, 50 | data_root=data_root, 51 | img_dir='images/validation', 52 | ann_dir='annotations/validation', 53 | pipeline=test_pipeline), 54 | test=dict( 55 | type=dataset_type, 56 | data_root=data_root, 57 | img_dir='images/validation', 58 | ann_dir='annotations/validation', 59 | pipeline=test_pipeline)) 60 | -------------------------------------------------------------------------------- /annotator/uniformer/configs/_base_/default_runtime.py: -------------------------------------------------------------------------------- 1 | # yapf:disable 2 | log_config = dict( 3 | interval=50, 4 | hooks=[ 5 | dict(type='TextLoggerHook', by_epoch=False), 6 | # dict(type='TensorboardLoggerHook') 7 | ]) 8 | # yapf:enable 9 | dist_params = dict(backend='nccl') 10 | log_level = 'INFO' 11 | load_from = None 12 | resume_from = None 13 | workflow = [('train', 1)] 14 | cudnn_benchmark = True 15 | -------------------------------------------------------------------------------- /annotator/uniformer/configs/_base_/models/ann_r50-d8.py: -------------------------------------------------------------------------------- 1 | # model settings 2 | norm_cfg = dict(type='SyncBN', 
requires_grad=True) 3 | model = dict( 4 | type='EncoderDecoder', 5 | pretrained='open-mmlab://resnet50_v1c', 6 | backbone=dict( 7 | type='ResNetV1c', 8 | depth=50, 9 | num_stages=4, 10 | out_indices=(0, 1, 2, 3), 11 | dilations=(1, 1, 2, 4), 12 | strides=(1, 2, 1, 1), 13 | norm_cfg=norm_cfg, 14 | norm_eval=False, 15 | style='pytorch', 16 | contract_dilation=True), 17 | decode_head=dict( 18 | type='ANNHead', 19 | in_channels=[1024, 2048], 20 | in_index=[2, 3], 21 | channels=512, 22 | project_channels=256, 23 | query_scales=(1, ), 24 | key_pool_scales=(1, 3, 6, 8), 25 | dropout_ratio=0.1, 26 | num_classes=19, 27 | norm_cfg=norm_cfg, 28 | align_corners=False, 29 | loss_decode=dict( 30 | type='CrossEntropyLoss', use_sigmoid=False, loss_weight=1.0)), 31 | auxiliary_head=dict( 32 | type='FCNHead', 33 | in_channels=1024, 34 | in_index=2, 35 | channels=256, 36 | num_convs=1, 37 | concat_input=False, 38 | dropout_ratio=0.1, 39 | num_classes=19, 40 | norm_cfg=norm_cfg, 41 | align_corners=False, 42 | loss_decode=dict( 43 | type='CrossEntropyLoss', use_sigmoid=False, loss_weight=0.4)), 44 | # model training and testing settings 45 | train_cfg=dict(), 46 | test_cfg=dict(mode='whole')) 47 | -------------------------------------------------------------------------------- /annotator/uniformer/configs/_base_/models/apcnet_r50-d8.py: -------------------------------------------------------------------------------- 1 | # model settings 2 | norm_cfg = dict(type='SyncBN', requires_grad=True) 3 | model = dict( 4 | type='EncoderDecoder', 5 | pretrained='open-mmlab://resnet50_v1c', 6 | backbone=dict( 7 | type='ResNetV1c', 8 | depth=50, 9 | num_stages=4, 10 | out_indices=(0, 1, 2, 3), 11 | dilations=(1, 1, 2, 4), 12 | strides=(1, 2, 1, 1), 13 | norm_cfg=norm_cfg, 14 | norm_eval=False, 15 | style='pytorch', 16 | contract_dilation=True), 17 | decode_head=dict( 18 | type='APCHead', 19 | in_channels=2048, 20 | in_index=3, 21 | channels=512, 22 | pool_scales=(1, 2, 3, 6), 23 | dropout_ratio=0.1, 24 | num_classes=19, 25 | norm_cfg=dict(type='SyncBN', requires_grad=True), 26 | align_corners=False, 27 | loss_decode=dict( 28 | type='CrossEntropyLoss', use_sigmoid=False, loss_weight=1.0)), 29 | auxiliary_head=dict( 30 | type='FCNHead', 31 | in_channels=1024, 32 | in_index=2, 33 | channels=256, 34 | num_convs=1, 35 | concat_input=False, 36 | dropout_ratio=0.1, 37 | num_classes=19, 38 | norm_cfg=norm_cfg, 39 | align_corners=False, 40 | loss_decode=dict( 41 | type='CrossEntropyLoss', use_sigmoid=False, loss_weight=0.4)), 42 | # model training and testing settings 43 | train_cfg=dict(), 44 | test_cfg=dict(mode='whole')) 45 | -------------------------------------------------------------------------------- /annotator/uniformer/configs/_base_/models/ccnet_r50-d8.py: -------------------------------------------------------------------------------- 1 | # model settings 2 | norm_cfg = dict(type='SyncBN', requires_grad=True) 3 | model = dict( 4 | type='EncoderDecoder', 5 | pretrained='open-mmlab://resnet50_v1c', 6 | backbone=dict( 7 | type='ResNetV1c', 8 | depth=50, 9 | num_stages=4, 10 | out_indices=(0, 1, 2, 3), 11 | dilations=(1, 1, 2, 4), 12 | strides=(1, 2, 1, 1), 13 | norm_cfg=norm_cfg, 14 | norm_eval=False, 15 | style='pytorch', 16 | contract_dilation=True), 17 | decode_head=dict( 18 | type='CCHead', 19 | in_channels=2048, 20 | in_index=3, 21 | channels=512, 22 | recurrence=2, 23 | dropout_ratio=0.1, 24 | num_classes=19, 25 | norm_cfg=norm_cfg, 26 | align_corners=False, 27 | loss_decode=dict( 28 | type='CrossEntropyLoss', 
use_sigmoid=False, loss_weight=1.0)), 29 | auxiliary_head=dict( 30 | type='FCNHead', 31 | in_channels=1024, 32 | in_index=2, 33 | channels=256, 34 | num_convs=1, 35 | concat_input=False, 36 | dropout_ratio=0.1, 37 | num_classes=19, 38 | norm_cfg=norm_cfg, 39 | align_corners=False, 40 | loss_decode=dict( 41 | type='CrossEntropyLoss', use_sigmoid=False, loss_weight=0.4)), 42 | # model training and testing settings 43 | train_cfg=dict(), 44 | test_cfg=dict(mode='whole')) 45 | -------------------------------------------------------------------------------- /annotator/uniformer/configs/_base_/models/cgnet.py: -------------------------------------------------------------------------------- 1 | # model settings 2 | norm_cfg = dict(type='SyncBN', eps=1e-03, requires_grad=True) 3 | model = dict( 4 | type='EncoderDecoder', 5 | backbone=dict( 6 | type='CGNet', 7 | norm_cfg=norm_cfg, 8 | in_channels=3, 9 | num_channels=(32, 64, 128), 10 | num_blocks=(3, 21), 11 | dilations=(2, 4), 12 | reductions=(8, 16)), 13 | decode_head=dict( 14 | type='FCNHead', 15 | in_channels=256, 16 | in_index=2, 17 | channels=256, 18 | num_convs=0, 19 | concat_input=False, 20 | dropout_ratio=0, 21 | num_classes=19, 22 | norm_cfg=norm_cfg, 23 | loss_decode=dict( 24 | type='CrossEntropyLoss', 25 | use_sigmoid=False, 26 | loss_weight=1.0, 27 | class_weight=[ 28 | 2.5959933, 6.7415504, 3.5354059, 9.8663225, 9.690899, 9.369352, 29 | 10.289121, 9.953208, 4.3097677, 9.490387, 7.674431, 9.396905, 30 | 10.347791, 6.3927646, 10.226669, 10.241062, 10.280587, 31 | 10.396974, 10.055647 32 | ])), 33 | # model training and testing settings 34 | train_cfg=dict(sampler=None), 35 | test_cfg=dict(mode='whole')) 36 | -------------------------------------------------------------------------------- /annotator/uniformer/configs/_base_/models/danet_r50-d8.py: -------------------------------------------------------------------------------- 1 | # model settings 2 | norm_cfg = dict(type='SyncBN', requires_grad=True) 3 | model = dict( 4 | type='EncoderDecoder', 5 | pretrained='open-mmlab://resnet50_v1c', 6 | backbone=dict( 7 | type='ResNetV1c', 8 | depth=50, 9 | num_stages=4, 10 | out_indices=(0, 1, 2, 3), 11 | dilations=(1, 1, 2, 4), 12 | strides=(1, 2, 1, 1), 13 | norm_cfg=norm_cfg, 14 | norm_eval=False, 15 | style='pytorch', 16 | contract_dilation=True), 17 | decode_head=dict( 18 | type='DAHead', 19 | in_channels=2048, 20 | in_index=3, 21 | channels=512, 22 | pam_channels=64, 23 | dropout_ratio=0.1, 24 | num_classes=19, 25 | norm_cfg=norm_cfg, 26 | align_corners=False, 27 | loss_decode=dict( 28 | type='CrossEntropyLoss', use_sigmoid=False, loss_weight=1.0)), 29 | auxiliary_head=dict( 30 | type='FCNHead', 31 | in_channels=1024, 32 | in_index=2, 33 | channels=256, 34 | num_convs=1, 35 | concat_input=False, 36 | dropout_ratio=0.1, 37 | num_classes=19, 38 | norm_cfg=norm_cfg, 39 | align_corners=False, 40 | loss_decode=dict( 41 | type='CrossEntropyLoss', use_sigmoid=False, loss_weight=0.4)), 42 | # model training and testing settings 43 | train_cfg=dict(), 44 | test_cfg=dict(mode='whole')) 45 | -------------------------------------------------------------------------------- /annotator/uniformer/configs/_base_/models/deeplabv3_r50-d8.py: -------------------------------------------------------------------------------- 1 | # model settings 2 | norm_cfg = dict(type='SyncBN', requires_grad=True) 3 | model = dict( 4 | type='EncoderDecoder', 5 | pretrained='open-mmlab://resnet50_v1c', 6 | backbone=dict( 7 | type='ResNetV1c', 8 | depth=50, 9 | num_stages=4, 10 | 
out_indices=(0, 1, 2, 3), 11 | dilations=(1, 1, 2, 4), 12 | strides=(1, 2, 1, 1), 13 | norm_cfg=norm_cfg, 14 | norm_eval=False, 15 | style='pytorch', 16 | contract_dilation=True), 17 | decode_head=dict( 18 | type='ASPPHead', 19 | in_channels=2048, 20 | in_index=3, 21 | channels=512, 22 | dilations=(1, 12, 24, 36), 23 | dropout_ratio=0.1, 24 | num_classes=19, 25 | norm_cfg=norm_cfg, 26 | align_corners=False, 27 | loss_decode=dict( 28 | type='CrossEntropyLoss', use_sigmoid=False, loss_weight=1.0)), 29 | auxiliary_head=dict( 30 | type='FCNHead', 31 | in_channels=1024, 32 | in_index=2, 33 | channels=256, 34 | num_convs=1, 35 | concat_input=False, 36 | dropout_ratio=0.1, 37 | num_classes=19, 38 | norm_cfg=norm_cfg, 39 | align_corners=False, 40 | loss_decode=dict( 41 | type='CrossEntropyLoss', use_sigmoid=False, loss_weight=0.4)), 42 | # model training and testing settings 43 | train_cfg=dict(), 44 | test_cfg=dict(mode='whole')) 45 | -------------------------------------------------------------------------------- /annotator/uniformer/configs/_base_/models/deeplabv3_unet_s5-d16.py: -------------------------------------------------------------------------------- 1 | # model settings 2 | norm_cfg = dict(type='SyncBN', requires_grad=True) 3 | model = dict( 4 | type='EncoderDecoder', 5 | pretrained=None, 6 | backbone=dict( 7 | type='UNet', 8 | in_channels=3, 9 | base_channels=64, 10 | num_stages=5, 11 | strides=(1, 1, 1, 1, 1), 12 | enc_num_convs=(2, 2, 2, 2, 2), 13 | dec_num_convs=(2, 2, 2, 2), 14 | downsamples=(True, True, True, True), 15 | enc_dilations=(1, 1, 1, 1, 1), 16 | dec_dilations=(1, 1, 1, 1), 17 | with_cp=False, 18 | conv_cfg=None, 19 | norm_cfg=norm_cfg, 20 | act_cfg=dict(type='ReLU'), 21 | upsample_cfg=dict(type='InterpConv'), 22 | norm_eval=False), 23 | decode_head=dict( 24 | type='ASPPHead', 25 | in_channels=64, 26 | in_index=4, 27 | channels=16, 28 | dilations=(1, 12, 24, 36), 29 | dropout_ratio=0.1, 30 | num_classes=2, 31 | norm_cfg=norm_cfg, 32 | align_corners=False, 33 | loss_decode=dict( 34 | type='CrossEntropyLoss', use_sigmoid=False, loss_weight=1.0)), 35 | auxiliary_head=dict( 36 | type='FCNHead', 37 | in_channels=128, 38 | in_index=3, 39 | channels=64, 40 | num_convs=1, 41 | concat_input=False, 42 | dropout_ratio=0.1, 43 | num_classes=2, 44 | norm_cfg=norm_cfg, 45 | align_corners=False, 46 | loss_decode=dict( 47 | type='CrossEntropyLoss', use_sigmoid=False, loss_weight=0.4)), 48 | # model training and testing settings 49 | train_cfg=dict(), 50 | test_cfg=dict(mode='slide', crop_size=256, stride=170)) 51 | -------------------------------------------------------------------------------- /annotator/uniformer/configs/_base_/models/deeplabv3plus_r50-d8.py: -------------------------------------------------------------------------------- 1 | # model settings 2 | norm_cfg = dict(type='SyncBN', requires_grad=True) 3 | model = dict( 4 | type='EncoderDecoder', 5 | pretrained='open-mmlab://resnet50_v1c', 6 | backbone=dict( 7 | type='ResNetV1c', 8 | depth=50, 9 | num_stages=4, 10 | out_indices=(0, 1, 2, 3), 11 | dilations=(1, 1, 2, 4), 12 | strides=(1, 2, 1, 1), 13 | norm_cfg=norm_cfg, 14 | norm_eval=False, 15 | style='pytorch', 16 | contract_dilation=True), 17 | decode_head=dict( 18 | type='DepthwiseSeparableASPPHead', 19 | in_channels=2048, 20 | in_index=3, 21 | channels=512, 22 | dilations=(1, 12, 24, 36), 23 | c1_in_channels=256, 24 | c1_channels=48, 25 | dropout_ratio=0.1, 26 | num_classes=19, 27 | norm_cfg=norm_cfg, 28 | align_corners=False, 29 | loss_decode=dict( 30 | 
type='CrossEntropyLoss', use_sigmoid=False, loss_weight=1.0)), 31 | auxiliary_head=dict( 32 | type='FCNHead', 33 | in_channels=1024, 34 | in_index=2, 35 | channels=256, 36 | num_convs=1, 37 | concat_input=False, 38 | dropout_ratio=0.1, 39 | num_classes=19, 40 | norm_cfg=norm_cfg, 41 | align_corners=False, 42 | loss_decode=dict( 43 | type='CrossEntropyLoss', use_sigmoid=False, loss_weight=0.4)), 44 | # model training and testing settings 45 | train_cfg=dict(), 46 | test_cfg=dict(mode='whole')) 47 | -------------------------------------------------------------------------------- /annotator/uniformer/configs/_base_/models/dmnet_r50-d8.py: -------------------------------------------------------------------------------- 1 | # model settings 2 | norm_cfg = dict(type='SyncBN', requires_grad=True) 3 | model = dict( 4 | type='EncoderDecoder', 5 | pretrained='open-mmlab://resnet50_v1c', 6 | backbone=dict( 7 | type='ResNetV1c', 8 | depth=50, 9 | num_stages=4, 10 | out_indices=(0, 1, 2, 3), 11 | dilations=(1, 1, 2, 4), 12 | strides=(1, 2, 1, 1), 13 | norm_cfg=norm_cfg, 14 | norm_eval=False, 15 | style='pytorch', 16 | contract_dilation=True), 17 | decode_head=dict( 18 | type='DMHead', 19 | in_channels=2048, 20 | in_index=3, 21 | channels=512, 22 | filter_sizes=(1, 3, 5, 7), 23 | dropout_ratio=0.1, 24 | num_classes=19, 25 | norm_cfg=dict(type='SyncBN', requires_grad=True), 26 | align_corners=False, 27 | loss_decode=dict( 28 | type='CrossEntropyLoss', use_sigmoid=False, loss_weight=1.0)), 29 | auxiliary_head=dict( 30 | type='FCNHead', 31 | in_channels=1024, 32 | in_index=2, 33 | channels=256, 34 | num_convs=1, 35 | concat_input=False, 36 | dropout_ratio=0.1, 37 | num_classes=19, 38 | norm_cfg=norm_cfg, 39 | align_corners=False, 40 | loss_decode=dict( 41 | type='CrossEntropyLoss', use_sigmoid=False, loss_weight=0.4)), 42 | # model training and testing settings 43 | train_cfg=dict(), 44 | test_cfg=dict(mode='whole')) 45 | -------------------------------------------------------------------------------- /annotator/uniformer/configs/_base_/models/dnl_r50-d8.py: -------------------------------------------------------------------------------- 1 | # model settings 2 | norm_cfg = dict(type='SyncBN', requires_grad=True) 3 | model = dict( 4 | type='EncoderDecoder', 5 | pretrained='open-mmlab://resnet50_v1c', 6 | backbone=dict( 7 | type='ResNetV1c', 8 | depth=50, 9 | num_stages=4, 10 | out_indices=(0, 1, 2, 3), 11 | dilations=(1, 1, 2, 4), 12 | strides=(1, 2, 1, 1), 13 | norm_cfg=norm_cfg, 14 | norm_eval=False, 15 | style='pytorch', 16 | contract_dilation=True), 17 | decode_head=dict( 18 | type='DNLHead', 19 | in_channels=2048, 20 | in_index=3, 21 | channels=512, 22 | dropout_ratio=0.1, 23 | reduction=2, 24 | use_scale=True, 25 | mode='embedded_gaussian', 26 | num_classes=19, 27 | norm_cfg=norm_cfg, 28 | align_corners=False, 29 | loss_decode=dict( 30 | type='CrossEntropyLoss', use_sigmoid=False, loss_weight=1.0)), 31 | auxiliary_head=dict( 32 | type='FCNHead', 33 | in_channels=1024, 34 | in_index=2, 35 | channels=256, 36 | num_convs=1, 37 | concat_input=False, 38 | dropout_ratio=0.1, 39 | num_classes=19, 40 | norm_cfg=norm_cfg, 41 | align_corners=False, 42 | loss_decode=dict( 43 | type='CrossEntropyLoss', use_sigmoid=False, loss_weight=0.4)), 44 | # model training and testing settings 45 | train_cfg=dict(), 46 | test_cfg=dict(mode='whole')) 47 | -------------------------------------------------------------------------------- /annotator/uniformer/configs/_base_/models/emanet_r50-d8.py: 
-------------------------------------------------------------------------------- 1 | # model settings 2 | norm_cfg = dict(type='SyncBN', requires_grad=True) 3 | model = dict( 4 | type='EncoderDecoder', 5 | pretrained='open-mmlab://resnet50_v1c', 6 | backbone=dict( 7 | type='ResNetV1c', 8 | depth=50, 9 | num_stages=4, 10 | out_indices=(0, 1, 2, 3), 11 | dilations=(1, 1, 2, 4), 12 | strides=(1, 2, 1, 1), 13 | norm_cfg=norm_cfg, 14 | norm_eval=False, 15 | style='pytorch', 16 | contract_dilation=True), 17 | decode_head=dict( 18 | type='EMAHead', 19 | in_channels=2048, 20 | in_index=3, 21 | channels=256, 22 | ema_channels=512, 23 | num_bases=64, 24 | num_stages=3, 25 | momentum=0.1, 26 | dropout_ratio=0.1, 27 | num_classes=19, 28 | norm_cfg=norm_cfg, 29 | align_corners=False, 30 | loss_decode=dict( 31 | type='CrossEntropyLoss', use_sigmoid=False, loss_weight=1.0)), 32 | auxiliary_head=dict( 33 | type='FCNHead', 34 | in_channels=1024, 35 | in_index=2, 36 | channels=256, 37 | num_convs=1, 38 | concat_input=False, 39 | dropout_ratio=0.1, 40 | num_classes=19, 41 | norm_cfg=norm_cfg, 42 | align_corners=False, 43 | loss_decode=dict( 44 | type='CrossEntropyLoss', use_sigmoid=False, loss_weight=0.4)), 45 | # model training and testing settings 46 | train_cfg=dict(), 47 | test_cfg=dict(mode='whole')) 48 | -------------------------------------------------------------------------------- /annotator/uniformer/configs/_base_/models/encnet_r50-d8.py: -------------------------------------------------------------------------------- 1 | # model settings 2 | norm_cfg = dict(type='SyncBN', requires_grad=True) 3 | model = dict( 4 | type='EncoderDecoder', 5 | pretrained='open-mmlab://resnet50_v1c', 6 | backbone=dict( 7 | type='ResNetV1c', 8 | depth=50, 9 | num_stages=4, 10 | out_indices=(0, 1, 2, 3), 11 | dilations=(1, 1, 2, 4), 12 | strides=(1, 2, 1, 1), 13 | norm_cfg=norm_cfg, 14 | norm_eval=False, 15 | style='pytorch', 16 | contract_dilation=True), 17 | decode_head=dict( 18 | type='EncHead', 19 | in_channels=[512, 1024, 2048], 20 | in_index=(1, 2, 3), 21 | channels=512, 22 | num_codes=32, 23 | use_se_loss=True, 24 | add_lateral=False, 25 | dropout_ratio=0.1, 26 | num_classes=19, 27 | norm_cfg=norm_cfg, 28 | align_corners=False, 29 | loss_decode=dict( 30 | type='CrossEntropyLoss', use_sigmoid=False, loss_weight=1.0), 31 | loss_se_decode=dict( 32 | type='CrossEntropyLoss', use_sigmoid=True, loss_weight=0.2)), 33 | auxiliary_head=dict( 34 | type='FCNHead', 35 | in_channels=1024, 36 | in_index=2, 37 | channels=256, 38 | num_convs=1, 39 | concat_input=False, 40 | dropout_ratio=0.1, 41 | num_classes=19, 42 | norm_cfg=norm_cfg, 43 | align_corners=False, 44 | loss_decode=dict( 45 | type='CrossEntropyLoss', use_sigmoid=False, loss_weight=0.4)), 46 | # model training and testing settings 47 | train_cfg=dict(), 48 | test_cfg=dict(mode='whole')) 49 | -------------------------------------------------------------------------------- /annotator/uniformer/configs/_base_/models/fast_scnn.py: -------------------------------------------------------------------------------- 1 | # model settings 2 | norm_cfg = dict(type='SyncBN', requires_grad=True, momentum=0.01) 3 | model = dict( 4 | type='EncoderDecoder', 5 | backbone=dict( 6 | type='FastSCNN', 7 | downsample_dw_channels=(32, 48), 8 | global_in_channels=64, 9 | global_block_channels=(64, 96, 128), 10 | global_block_strides=(2, 2, 1), 11 | global_out_channels=128, 12 | higher_in_channels=64, 13 | lower_in_channels=128, 14 | fusion_out_channels=128, 15 | out_indices=(0, 1, 2), 16 | 
norm_cfg=norm_cfg, 17 | align_corners=False), 18 | decode_head=dict( 19 | type='DepthwiseSeparableFCNHead', 20 | in_channels=128, 21 | channels=128, 22 | concat_input=False, 23 | num_classes=19, 24 | in_index=-1, 25 | norm_cfg=norm_cfg, 26 | align_corners=False, 27 | loss_decode=dict( 28 | type='CrossEntropyLoss', use_sigmoid=True, loss_weight=0.4)), 29 | auxiliary_head=[ 30 | dict( 31 | type='FCNHead', 32 | in_channels=128, 33 | channels=32, 34 | num_convs=1, 35 | num_classes=19, 36 | in_index=-2, 37 | norm_cfg=norm_cfg, 38 | concat_input=False, 39 | align_corners=False, 40 | loss_decode=dict( 41 | type='CrossEntropyLoss', use_sigmoid=True, loss_weight=0.4)), 42 | dict( 43 | type='FCNHead', 44 | in_channels=64, 45 | channels=32, 46 | num_convs=1, 47 | num_classes=19, 48 | in_index=-3, 49 | norm_cfg=norm_cfg, 50 | concat_input=False, 51 | align_corners=False, 52 | loss_decode=dict( 53 | type='CrossEntropyLoss', use_sigmoid=True, loss_weight=0.4)), 54 | ], 55 | # model training and testing settings 56 | train_cfg=dict(), 57 | test_cfg=dict(mode='whole')) 58 | -------------------------------------------------------------------------------- /annotator/uniformer/configs/_base_/models/fcn_hr18.py: -------------------------------------------------------------------------------- 1 | # model settings 2 | norm_cfg = dict(type='SyncBN', requires_grad=True) 3 | model = dict( 4 | type='EncoderDecoder', 5 | pretrained='open-mmlab://msra/hrnetv2_w18', 6 | backbone=dict( 7 | type='HRNet', 8 | norm_cfg=norm_cfg, 9 | norm_eval=False, 10 | extra=dict( 11 | stage1=dict( 12 | num_modules=1, 13 | num_branches=1, 14 | block='BOTTLENECK', 15 | num_blocks=(4, ), 16 | num_channels=(64, )), 17 | stage2=dict( 18 | num_modules=1, 19 | num_branches=2, 20 | block='BASIC', 21 | num_blocks=(4, 4), 22 | num_channels=(18, 36)), 23 | stage3=dict( 24 | num_modules=4, 25 | num_branches=3, 26 | block='BASIC', 27 | num_blocks=(4, 4, 4), 28 | num_channels=(18, 36, 72)), 29 | stage4=dict( 30 | num_modules=3, 31 | num_branches=4, 32 | block='BASIC', 33 | num_blocks=(4, 4, 4, 4), 34 | num_channels=(18, 36, 72, 144)))), 35 | decode_head=dict( 36 | type='FCNHead', 37 | in_channels=[18, 36, 72, 144], 38 | in_index=(0, 1, 2, 3), 39 | channels=sum([18, 36, 72, 144]), 40 | input_transform='resize_concat', 41 | kernel_size=1, 42 | num_convs=1, 43 | concat_input=False, 44 | dropout_ratio=-1, 45 | num_classes=19, 46 | norm_cfg=norm_cfg, 47 | align_corners=False, 48 | loss_decode=dict( 49 | type='CrossEntropyLoss', use_sigmoid=False, loss_weight=1.0)), 50 | # model training and testing settings 51 | train_cfg=dict(), 52 | test_cfg=dict(mode='whole')) 53 | -------------------------------------------------------------------------------- /annotator/uniformer/configs/_base_/models/fcn_r50-d8.py: -------------------------------------------------------------------------------- 1 | # model settings 2 | norm_cfg = dict(type='SyncBN', requires_grad=True) 3 | model = dict( 4 | type='EncoderDecoder', 5 | pretrained='open-mmlab://resnet50_v1c', 6 | backbone=dict( 7 | type='ResNetV1c', 8 | depth=50, 9 | num_stages=4, 10 | out_indices=(0, 1, 2, 3), 11 | dilations=(1, 1, 2, 4), 12 | strides=(1, 2, 1, 1), 13 | norm_cfg=norm_cfg, 14 | norm_eval=False, 15 | style='pytorch', 16 | contract_dilation=True), 17 | decode_head=dict( 18 | type='FCNHead', 19 | in_channels=2048, 20 | in_index=3, 21 | channels=512, 22 | num_convs=2, 23 | concat_input=True, 24 | dropout_ratio=0.1, 25 | num_classes=19, 26 | norm_cfg=norm_cfg, 27 | align_corners=False, 28 | 
loss_decode=dict( 29 | type='CrossEntropyLoss', use_sigmoid=False, loss_weight=1.0)), 30 | auxiliary_head=dict( 31 | type='FCNHead', 32 | in_channels=1024, 33 | in_index=2, 34 | channels=256, 35 | num_convs=1, 36 | concat_input=False, 37 | dropout_ratio=0.1, 38 | num_classes=19, 39 | norm_cfg=norm_cfg, 40 | align_corners=False, 41 | loss_decode=dict( 42 | type='CrossEntropyLoss', use_sigmoid=False, loss_weight=0.4)), 43 | # model training and testing settings 44 | train_cfg=dict(), 45 | test_cfg=dict(mode='whole')) 46 | -------------------------------------------------------------------------------- /annotator/uniformer/configs/_base_/models/fcn_unet_s5-d16.py: -------------------------------------------------------------------------------- 1 | # model settings 2 | norm_cfg = dict(type='SyncBN', requires_grad=True) 3 | model = dict( 4 | type='EncoderDecoder', 5 | pretrained=None, 6 | backbone=dict( 7 | type='UNet', 8 | in_channels=3, 9 | base_channels=64, 10 | num_stages=5, 11 | strides=(1, 1, 1, 1, 1), 12 | enc_num_convs=(2, 2, 2, 2, 2), 13 | dec_num_convs=(2, 2, 2, 2), 14 | downsamples=(True, True, True, True), 15 | enc_dilations=(1, 1, 1, 1, 1), 16 | dec_dilations=(1, 1, 1, 1), 17 | with_cp=False, 18 | conv_cfg=None, 19 | norm_cfg=norm_cfg, 20 | act_cfg=dict(type='ReLU'), 21 | upsample_cfg=dict(type='InterpConv'), 22 | norm_eval=False), 23 | decode_head=dict( 24 | type='FCNHead', 25 | in_channels=64, 26 | in_index=4, 27 | channels=64, 28 | num_convs=1, 29 | concat_input=False, 30 | dropout_ratio=0.1, 31 | num_classes=2, 32 | norm_cfg=norm_cfg, 33 | align_corners=False, 34 | loss_decode=dict( 35 | type='CrossEntropyLoss', use_sigmoid=False, loss_weight=1.0)), 36 | auxiliary_head=dict( 37 | type='FCNHead', 38 | in_channels=128, 39 | in_index=3, 40 | channels=64, 41 | num_convs=1, 42 | concat_input=False, 43 | dropout_ratio=0.1, 44 | num_classes=2, 45 | norm_cfg=norm_cfg, 46 | align_corners=False, 47 | loss_decode=dict( 48 | type='CrossEntropyLoss', use_sigmoid=False, loss_weight=0.4)), 49 | # model training and testing settings 50 | train_cfg=dict(), 51 | test_cfg=dict(mode='slide', crop_size=256, stride=170)) 52 | -------------------------------------------------------------------------------- /annotator/uniformer/configs/_base_/models/fpn_r50.py: -------------------------------------------------------------------------------- 1 | # model settings 2 | norm_cfg = dict(type='SyncBN', requires_grad=True) 3 | model = dict( 4 | type='EncoderDecoder', 5 | pretrained='open-mmlab://resnet50_v1c', 6 | backbone=dict( 7 | type='ResNetV1c', 8 | depth=50, 9 | num_stages=4, 10 | out_indices=(0, 1, 2, 3), 11 | dilations=(1, 1, 1, 1), 12 | strides=(1, 2, 2, 2), 13 | norm_cfg=norm_cfg, 14 | norm_eval=False, 15 | style='pytorch', 16 | contract_dilation=True), 17 | neck=dict( 18 | type='FPN', 19 | in_channels=[256, 512, 1024, 2048], 20 | out_channels=256, 21 | num_outs=4), 22 | decode_head=dict( 23 | type='FPNHead', 24 | in_channels=[256, 256, 256, 256], 25 | in_index=[0, 1, 2, 3], 26 | feature_strides=[4, 8, 16, 32], 27 | channels=128, 28 | dropout_ratio=0.1, 29 | num_classes=19, 30 | norm_cfg=norm_cfg, 31 | align_corners=False, 32 | loss_decode=dict( 33 | type='CrossEntropyLoss', use_sigmoid=False, loss_weight=1.0)), 34 | # model training and testing settings 35 | train_cfg=dict(), 36 | test_cfg=dict(mode='whole')) 37 | -------------------------------------------------------------------------------- /annotator/uniformer/configs/_base_/models/fpn_uniformer.py: 
-------------------------------------------------------------------------------- 1 | # model settings 2 | norm_cfg = dict(type='SyncBN', requires_grad=True) 3 | model = dict( 4 | type='EncoderDecoder', 5 | backbone=dict( 6 | type='UniFormer', 7 | embed_dim=[64, 128, 320, 512], 8 | layers=[3, 4, 8, 3], 9 | head_dim=64, 10 | mlp_ratio=4., 11 | qkv_bias=True, 12 | drop_rate=0., 13 | attn_drop_rate=0., 14 | drop_path_rate=0.1), 15 | neck=dict( 16 | type='FPN', 17 | in_channels=[64, 128, 320, 512], 18 | out_channels=256, 19 | num_outs=4), 20 | decode_head=dict( 21 | type='FPNHead', 22 | in_channels=[256, 256, 256, 256], 23 | in_index=[0, 1, 2, 3], 24 | feature_strides=[4, 8, 16, 32], 25 | channels=128, 26 | dropout_ratio=0.1, 27 | num_classes=150, 28 | norm_cfg=norm_cfg, 29 | align_corners=False, 30 | loss_decode=dict( 31 | type='CrossEntropyLoss', use_sigmoid=False, loss_weight=1.0)), 32 | # model training and testing settings 33 | train_cfg=dict(), 34 | test_cfg=dict(mode='whole') 35 | ) 36 | -------------------------------------------------------------------------------- /annotator/uniformer/configs/_base_/models/gcnet_r50-d8.py: -------------------------------------------------------------------------------- 1 | # model settings 2 | norm_cfg = dict(type='SyncBN', requires_grad=True) 3 | model = dict( 4 | type='EncoderDecoder', 5 | pretrained='open-mmlab://resnet50_v1c', 6 | backbone=dict( 7 | type='ResNetV1c', 8 | depth=50, 9 | num_stages=4, 10 | out_indices=(0, 1, 2, 3), 11 | dilations=(1, 1, 2, 4), 12 | strides=(1, 2, 1, 1), 13 | norm_cfg=norm_cfg, 14 | norm_eval=False, 15 | style='pytorch', 16 | contract_dilation=True), 17 | decode_head=dict( 18 | type='GCHead', 19 | in_channels=2048, 20 | in_index=3, 21 | channels=512, 22 | ratio=1 / 4., 23 | pooling_type='att', 24 | fusion_types=('channel_add', ), 25 | dropout_ratio=0.1, 26 | num_classes=19, 27 | norm_cfg=norm_cfg, 28 | align_corners=False, 29 | loss_decode=dict( 30 | type='CrossEntropyLoss', use_sigmoid=False, loss_weight=1.0)), 31 | auxiliary_head=dict( 32 | type='FCNHead', 33 | in_channels=1024, 34 | in_index=2, 35 | channels=256, 36 | num_convs=1, 37 | concat_input=False, 38 | dropout_ratio=0.1, 39 | num_classes=19, 40 | norm_cfg=norm_cfg, 41 | align_corners=False, 42 | loss_decode=dict( 43 | type='CrossEntropyLoss', use_sigmoid=False, loss_weight=0.4)), 44 | # model training and testing settings 45 | train_cfg=dict(), 46 | test_cfg=dict(mode='whole')) 47 | -------------------------------------------------------------------------------- /annotator/uniformer/configs/_base_/models/lraspp_m-v3-d8.py: -------------------------------------------------------------------------------- 1 | # model settings 2 | norm_cfg = dict(type='SyncBN', eps=0.001, requires_grad=True) 3 | model = dict( 4 | type='EncoderDecoder', 5 | backbone=dict( 6 | type='MobileNetV3', 7 | arch='large', 8 | out_indices=(1, 3, 16), 9 | norm_cfg=norm_cfg), 10 | decode_head=dict( 11 | type='LRASPPHead', 12 | in_channels=(16, 24, 960), 13 | in_index=(0, 1, 2), 14 | channels=128, 15 | input_transform='multiple_select', 16 | dropout_ratio=0.1, 17 | num_classes=19, 18 | norm_cfg=norm_cfg, 19 | act_cfg=dict(type='ReLU'), 20 | align_corners=False, 21 | loss_decode=dict( 22 | type='CrossEntropyLoss', use_sigmoid=False, loss_weight=1.0)), 23 | # model training and testing settings 24 | train_cfg=dict(), 25 | test_cfg=dict(mode='whole')) 26 | -------------------------------------------------------------------------------- 
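Note: these `_base_` model definitions are not meant to be used on their own. A top-level config composes one of them with a dataset, runtime, and schedule file via mmcv-style `_base_` inheritance and then overrides individual fields, exactly as `annotator/uniformer/upernet_global_small.py` later in this listing does for UPerNet. The sketch below is illustrative only: the file name and the particular overrides are assumptions, not files shipped in this repo.

# fpn_uniformer_cityscapes_demo.py -- hypothetical composed config, shown for illustration only
_base_ = [
    'configs/_base_/models/fpn_uniformer.py',    # model definition from this folder
    'configs/_base_/datasets/cityscapes.py',     # dataset settings
    'configs/_base_/default_runtime.py',
    'configs/_base_/schedules/schedule_40k.py'   # optimizer and poly lr schedule
]

# Keys declared here override the values inherited from the _base_ files.
model = dict(
    decode_head=dict(num_classes=19))            # Cityscapes has 19 classes, overriding the ADE20K default of 150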
/annotator/uniformer/configs/_base_/models/nonlocal_r50-d8.py: -------------------------------------------------------------------------------- 1 | # model settings 2 | norm_cfg = dict(type='SyncBN', requires_grad=True) 3 | model = dict( 4 | type='EncoderDecoder', 5 | pretrained='open-mmlab://resnet50_v1c', 6 | backbone=dict( 7 | type='ResNetV1c', 8 | depth=50, 9 | num_stages=4, 10 | out_indices=(0, 1, 2, 3), 11 | dilations=(1, 1, 2, 4), 12 | strides=(1, 2, 1, 1), 13 | norm_cfg=norm_cfg, 14 | norm_eval=False, 15 | style='pytorch', 16 | contract_dilation=True), 17 | decode_head=dict( 18 | type='NLHead', 19 | in_channels=2048, 20 | in_index=3, 21 | channels=512, 22 | dropout_ratio=0.1, 23 | reduction=2, 24 | use_scale=True, 25 | mode='embedded_gaussian', 26 | num_classes=19, 27 | norm_cfg=norm_cfg, 28 | align_corners=False, 29 | loss_decode=dict( 30 | type='CrossEntropyLoss', use_sigmoid=False, loss_weight=1.0)), 31 | auxiliary_head=dict( 32 | type='FCNHead', 33 | in_channels=1024, 34 | in_index=2, 35 | channels=256, 36 | num_convs=1, 37 | concat_input=False, 38 | dropout_ratio=0.1, 39 | num_classes=19, 40 | norm_cfg=norm_cfg, 41 | align_corners=False, 42 | loss_decode=dict( 43 | type='CrossEntropyLoss', use_sigmoid=False, loss_weight=0.4)), 44 | # model training and testing settings 45 | train_cfg=dict(), 46 | test_cfg=dict(mode='whole')) 47 | -------------------------------------------------------------------------------- /annotator/uniformer/configs/_base_/models/ocrnet_hr18.py: -------------------------------------------------------------------------------- 1 | # model settings 2 | norm_cfg = dict(type='SyncBN', requires_grad=True) 3 | model = dict( 4 | type='CascadeEncoderDecoder', 5 | num_stages=2, 6 | pretrained='open-mmlab://msra/hrnetv2_w18', 7 | backbone=dict( 8 | type='HRNet', 9 | norm_cfg=norm_cfg, 10 | norm_eval=False, 11 | extra=dict( 12 | stage1=dict( 13 | num_modules=1, 14 | num_branches=1, 15 | block='BOTTLENECK', 16 | num_blocks=(4, ), 17 | num_channels=(64, )), 18 | stage2=dict( 19 | num_modules=1, 20 | num_branches=2, 21 | block='BASIC', 22 | num_blocks=(4, 4), 23 | num_channels=(18, 36)), 24 | stage3=dict( 25 | num_modules=4, 26 | num_branches=3, 27 | block='BASIC', 28 | num_blocks=(4, 4, 4), 29 | num_channels=(18, 36, 72)), 30 | stage4=dict( 31 | num_modules=3, 32 | num_branches=4, 33 | block='BASIC', 34 | num_blocks=(4, 4, 4, 4), 35 | num_channels=(18, 36, 72, 144)))), 36 | decode_head=[ 37 | dict( 38 | type='FCNHead', 39 | in_channels=[18, 36, 72, 144], 40 | channels=sum([18, 36, 72, 144]), 41 | in_index=(0, 1, 2, 3), 42 | input_transform='resize_concat', 43 | kernel_size=1, 44 | num_convs=1, 45 | concat_input=False, 46 | dropout_ratio=-1, 47 | num_classes=19, 48 | norm_cfg=norm_cfg, 49 | align_corners=False, 50 | loss_decode=dict( 51 | type='CrossEntropyLoss', use_sigmoid=False, loss_weight=0.4)), 52 | dict( 53 | type='OCRHead', 54 | in_channels=[18, 36, 72, 144], 55 | in_index=(0, 1, 2, 3), 56 | input_transform='resize_concat', 57 | channels=512, 58 | ocr_channels=256, 59 | dropout_ratio=-1, 60 | num_classes=19, 61 | norm_cfg=norm_cfg, 62 | align_corners=False, 63 | loss_decode=dict( 64 | type='CrossEntropyLoss', use_sigmoid=False, loss_weight=1.0)), 65 | ], 66 | # model training and testing settings 67 | train_cfg=dict(), 68 | test_cfg=dict(mode='whole')) 69 | -------------------------------------------------------------------------------- /annotator/uniformer/configs/_base_/models/ocrnet_r50-d8.py: 
-------------------------------------------------------------------------------- 1 | # model settings 2 | norm_cfg = dict(type='SyncBN', requires_grad=True) 3 | model = dict( 4 | type='CascadeEncoderDecoder', 5 | num_stages=2, 6 | pretrained='open-mmlab://resnet50_v1c', 7 | backbone=dict( 8 | type='ResNetV1c', 9 | depth=50, 10 | num_stages=4, 11 | out_indices=(0, 1, 2, 3), 12 | dilations=(1, 1, 2, 4), 13 | strides=(1, 2, 1, 1), 14 | norm_cfg=norm_cfg, 15 | norm_eval=False, 16 | style='pytorch', 17 | contract_dilation=True), 18 | decode_head=[ 19 | dict( 20 | type='FCNHead', 21 | in_channels=1024, 22 | in_index=2, 23 | channels=256, 24 | num_convs=1, 25 | concat_input=False, 26 | dropout_ratio=0.1, 27 | num_classes=19, 28 | norm_cfg=norm_cfg, 29 | align_corners=False, 30 | loss_decode=dict( 31 | type='CrossEntropyLoss', use_sigmoid=False, loss_weight=0.4)), 32 | dict( 33 | type='OCRHead', 34 | in_channels=2048, 35 | in_index=3, 36 | channels=512, 37 | ocr_channels=256, 38 | dropout_ratio=0.1, 39 | num_classes=19, 40 | norm_cfg=norm_cfg, 41 | align_corners=False, 42 | loss_decode=dict( 43 | type='CrossEntropyLoss', use_sigmoid=False, loss_weight=1.0)) 44 | ], 45 | # model training and testing settings 46 | train_cfg=dict(), 47 | test_cfg=dict(mode='whole')) 48 | -------------------------------------------------------------------------------- /annotator/uniformer/configs/_base_/models/pointrend_r50.py: -------------------------------------------------------------------------------- 1 | # model settings 2 | norm_cfg = dict(type='SyncBN', requires_grad=True) 3 | model = dict( 4 | type='CascadeEncoderDecoder', 5 | num_stages=2, 6 | pretrained='open-mmlab://resnet50_v1c', 7 | backbone=dict( 8 | type='ResNetV1c', 9 | depth=50, 10 | num_stages=4, 11 | out_indices=(0, 1, 2, 3), 12 | dilations=(1, 1, 1, 1), 13 | strides=(1, 2, 2, 2), 14 | norm_cfg=norm_cfg, 15 | norm_eval=False, 16 | style='pytorch', 17 | contract_dilation=True), 18 | neck=dict( 19 | type='FPN', 20 | in_channels=[256, 512, 1024, 2048], 21 | out_channels=256, 22 | num_outs=4), 23 | decode_head=[ 24 | dict( 25 | type='FPNHead', 26 | in_channels=[256, 256, 256, 256], 27 | in_index=[0, 1, 2, 3], 28 | feature_strides=[4, 8, 16, 32], 29 | channels=128, 30 | dropout_ratio=-1, 31 | num_classes=19, 32 | norm_cfg=norm_cfg, 33 | align_corners=False, 34 | loss_decode=dict( 35 | type='CrossEntropyLoss', use_sigmoid=False, loss_weight=1.0)), 36 | dict( 37 | type='PointHead', 38 | in_channels=[256], 39 | in_index=[0], 40 | channels=256, 41 | num_fcs=3, 42 | coarse_pred_each_layer=True, 43 | dropout_ratio=-1, 44 | num_classes=19, 45 | align_corners=False, 46 | loss_decode=dict( 47 | type='CrossEntropyLoss', use_sigmoid=False, loss_weight=1.0)) 48 | ], 49 | # model training and testing settings 50 | train_cfg=dict( 51 | num_points=2048, oversample_ratio=3, importance_sample_ratio=0.75), 52 | test_cfg=dict( 53 | mode='whole', 54 | subdivision_steps=2, 55 | subdivision_num_points=8196, 56 | scale_factor=2)) 57 | -------------------------------------------------------------------------------- /annotator/uniformer/configs/_base_/models/psanet_r50-d8.py: -------------------------------------------------------------------------------- 1 | # model settings 2 | norm_cfg = dict(type='SyncBN', requires_grad=True) 3 | model = dict( 4 | type='EncoderDecoder', 5 | pretrained='open-mmlab://resnet50_v1c', 6 | backbone=dict( 7 | type='ResNetV1c', 8 | depth=50, 9 | num_stages=4, 10 | out_indices=(0, 1, 2, 3), 11 | dilations=(1, 1, 2, 4), 12 | strides=(1, 2, 1, 1), 13 
| norm_cfg=norm_cfg, 14 | norm_eval=False, 15 | style='pytorch', 16 | contract_dilation=True), 17 | decode_head=dict( 18 | type='PSAHead', 19 | in_channels=2048, 20 | in_index=3, 21 | channels=512, 22 | mask_size=(97, 97), 23 | psa_type='bi-direction', 24 | compact=False, 25 | shrink_factor=2, 26 | normalization_factor=1.0, 27 | psa_softmax=True, 28 | dropout_ratio=0.1, 29 | num_classes=19, 30 | norm_cfg=norm_cfg, 31 | align_corners=False, 32 | loss_decode=dict( 33 | type='CrossEntropyLoss', use_sigmoid=False, loss_weight=1.0)), 34 | auxiliary_head=dict( 35 | type='FCNHead', 36 | in_channels=1024, 37 | in_index=2, 38 | channels=256, 39 | num_convs=1, 40 | concat_input=False, 41 | dropout_ratio=0.1, 42 | num_classes=19, 43 | norm_cfg=norm_cfg, 44 | align_corners=False, 45 | loss_decode=dict( 46 | type='CrossEntropyLoss', use_sigmoid=False, loss_weight=0.4)), 47 | # model training and testing settings 48 | train_cfg=dict(), 49 | test_cfg=dict(mode='whole')) 50 | -------------------------------------------------------------------------------- /annotator/uniformer/configs/_base_/models/pspnet_r50-d8.py: -------------------------------------------------------------------------------- 1 | # model settings 2 | norm_cfg = dict(type='SyncBN', requires_grad=True) 3 | model = dict( 4 | type='EncoderDecoder', 5 | pretrained='open-mmlab://resnet50_v1c', 6 | backbone=dict( 7 | type='ResNetV1c', 8 | depth=50, 9 | num_stages=4, 10 | out_indices=(0, 1, 2, 3), 11 | dilations=(1, 1, 2, 4), 12 | strides=(1, 2, 1, 1), 13 | norm_cfg=norm_cfg, 14 | norm_eval=False, 15 | style='pytorch', 16 | contract_dilation=True), 17 | decode_head=dict( 18 | type='PSPHead', 19 | in_channels=2048, 20 | in_index=3, 21 | channels=512, 22 | pool_scales=(1, 2, 3, 6), 23 | dropout_ratio=0.1, 24 | num_classes=19, 25 | norm_cfg=norm_cfg, 26 | align_corners=False, 27 | loss_decode=dict( 28 | type='CrossEntropyLoss', use_sigmoid=False, loss_weight=1.0)), 29 | auxiliary_head=dict( 30 | type='FCNHead', 31 | in_channels=1024, 32 | in_index=2, 33 | channels=256, 34 | num_convs=1, 35 | concat_input=False, 36 | dropout_ratio=0.1, 37 | num_classes=19, 38 | norm_cfg=norm_cfg, 39 | align_corners=False, 40 | loss_decode=dict( 41 | type='CrossEntropyLoss', use_sigmoid=False, loss_weight=0.4)), 42 | # model training and testing settings 43 | train_cfg=dict(), 44 | test_cfg=dict(mode='whole')) 45 | -------------------------------------------------------------------------------- /annotator/uniformer/configs/_base_/models/pspnet_unet_s5-d16.py: -------------------------------------------------------------------------------- 1 | # model settings 2 | norm_cfg = dict(type='SyncBN', requires_grad=True) 3 | model = dict( 4 | type='EncoderDecoder', 5 | pretrained=None, 6 | backbone=dict( 7 | type='UNet', 8 | in_channels=3, 9 | base_channels=64, 10 | num_stages=5, 11 | strides=(1, 1, 1, 1, 1), 12 | enc_num_convs=(2, 2, 2, 2, 2), 13 | dec_num_convs=(2, 2, 2, 2), 14 | downsamples=(True, True, True, True), 15 | enc_dilations=(1, 1, 1, 1, 1), 16 | dec_dilations=(1, 1, 1, 1), 17 | with_cp=False, 18 | conv_cfg=None, 19 | norm_cfg=norm_cfg, 20 | act_cfg=dict(type='ReLU'), 21 | upsample_cfg=dict(type='InterpConv'), 22 | norm_eval=False), 23 | decode_head=dict( 24 | type='PSPHead', 25 | in_channels=64, 26 | in_index=4, 27 | channels=16, 28 | pool_scales=(1, 2, 3, 6), 29 | dropout_ratio=0.1, 30 | num_classes=2, 31 | norm_cfg=norm_cfg, 32 | align_corners=False, 33 | loss_decode=dict( 34 | type='CrossEntropyLoss', use_sigmoid=False, loss_weight=1.0)), 35 | 
auxiliary_head=dict( 36 | type='FCNHead', 37 | in_channels=128, 38 | in_index=3, 39 | channels=64, 40 | num_convs=1, 41 | concat_input=False, 42 | dropout_ratio=0.1, 43 | num_classes=2, 44 | norm_cfg=norm_cfg, 45 | align_corners=False, 46 | loss_decode=dict( 47 | type='CrossEntropyLoss', use_sigmoid=False, loss_weight=0.4)), 48 | # model training and testing settings 49 | train_cfg=dict(), 50 | test_cfg=dict(mode='slide', crop_size=256, stride=170)) 51 | -------------------------------------------------------------------------------- /annotator/uniformer/configs/_base_/models/upernet_r50.py: -------------------------------------------------------------------------------- 1 | # model settings 2 | norm_cfg = dict(type='SyncBN', requires_grad=True) 3 | model = dict( 4 | type='EncoderDecoder', 5 | pretrained='open-mmlab://resnet50_v1c', 6 | backbone=dict( 7 | type='ResNetV1c', 8 | depth=50, 9 | num_stages=4, 10 | out_indices=(0, 1, 2, 3), 11 | dilations=(1, 1, 1, 1), 12 | strides=(1, 2, 2, 2), 13 | norm_cfg=norm_cfg, 14 | norm_eval=False, 15 | style='pytorch', 16 | contract_dilation=True), 17 | decode_head=dict( 18 | type='UPerHead', 19 | in_channels=[256, 512, 1024, 2048], 20 | in_index=[0, 1, 2, 3], 21 | pool_scales=(1, 2, 3, 6), 22 | channels=512, 23 | dropout_ratio=0.1, 24 | num_classes=19, 25 | norm_cfg=norm_cfg, 26 | align_corners=False, 27 | loss_decode=dict( 28 | type='CrossEntropyLoss', use_sigmoid=False, loss_weight=1.0)), 29 | auxiliary_head=dict( 30 | type='FCNHead', 31 | in_channels=1024, 32 | in_index=2, 33 | channels=256, 34 | num_convs=1, 35 | concat_input=False, 36 | dropout_ratio=0.1, 37 | num_classes=19, 38 | norm_cfg=norm_cfg, 39 | align_corners=False, 40 | loss_decode=dict( 41 | type='CrossEntropyLoss', use_sigmoid=False, loss_weight=0.4)), 42 | # model training and testing settings 43 | train_cfg=dict(), 44 | test_cfg=dict(mode='whole')) 45 | -------------------------------------------------------------------------------- /annotator/uniformer/configs/_base_/models/upernet_uniformer.py: -------------------------------------------------------------------------------- 1 | # model settings 2 | norm_cfg = dict(type='BN', requires_grad=True) 3 | model = dict( 4 | type='EncoderDecoder', 5 | pretrained=None, 6 | backbone=dict( 7 | type='UniFormer', 8 | embed_dim=[64, 128, 320, 512], 9 | layers=[3, 4, 8, 3], 10 | head_dim=64, 11 | mlp_ratio=4., 12 | qkv_bias=True, 13 | drop_rate=0., 14 | attn_drop_rate=0., 15 | drop_path_rate=0.1), 16 | decode_head=dict( 17 | type='UPerHead', 18 | in_channels=[64, 128, 320, 512], 19 | in_index=[0, 1, 2, 3], 20 | pool_scales=(1, 2, 3, 6), 21 | channels=512, 22 | dropout_ratio=0.1, 23 | num_classes=19, 24 | norm_cfg=norm_cfg, 25 | align_corners=False, 26 | loss_decode=dict( 27 | type='CrossEntropyLoss', use_sigmoid=False, loss_weight=1.0)), 28 | auxiliary_head=dict( 29 | type='FCNHead', 30 | in_channels=320, 31 | in_index=2, 32 | channels=256, 33 | num_convs=1, 34 | concat_input=False, 35 | dropout_ratio=0.1, 36 | num_classes=19, 37 | norm_cfg=norm_cfg, 38 | align_corners=False, 39 | loss_decode=dict( 40 | type='CrossEntropyLoss', use_sigmoid=False, loss_weight=0.4)), 41 | # model training and testing settings 42 | train_cfg=dict(), 43 | test_cfg=dict(mode='whole')) -------------------------------------------------------------------------------- /annotator/uniformer/configs/_base_/schedules/schedule_160k.py: -------------------------------------------------------------------------------- 1 | # optimizer 2 | optimizer = dict(type='SGD', lr=0.01, 
momentum=0.9, weight_decay=0.0005) 3 | optimizer_config = dict() 4 | # learning policy 5 | lr_config = dict(policy='poly', power=0.9, min_lr=1e-4, by_epoch=False) 6 | # runtime settings 7 | runner = dict(type='IterBasedRunner', max_iters=160000) 8 | checkpoint_config = dict(by_epoch=False, interval=16000) 9 | evaluation = dict(interval=16000, metric='mIoU') 10 | -------------------------------------------------------------------------------- /annotator/uniformer/configs/_base_/schedules/schedule_20k.py: -------------------------------------------------------------------------------- 1 | # optimizer 2 | optimizer = dict(type='SGD', lr=0.01, momentum=0.9, weight_decay=0.0005) 3 | optimizer_config = dict() 4 | # learning policy 5 | lr_config = dict(policy='poly', power=0.9, min_lr=1e-4, by_epoch=False) 6 | # runtime settings 7 | runner = dict(type='IterBasedRunner', max_iters=20000) 8 | checkpoint_config = dict(by_epoch=False, interval=2000) 9 | evaluation = dict(interval=2000, metric='mIoU') 10 | -------------------------------------------------------------------------------- /annotator/uniformer/configs/_base_/schedules/schedule_40k.py: -------------------------------------------------------------------------------- 1 | # optimizer 2 | optimizer = dict(type='SGD', lr=0.01, momentum=0.9, weight_decay=0.0005) 3 | optimizer_config = dict() 4 | # learning policy 5 | lr_config = dict(policy='poly', power=0.9, min_lr=1e-4, by_epoch=False) 6 | # runtime settings 7 | runner = dict(type='IterBasedRunner', max_iters=40000) 8 | checkpoint_config = dict(by_epoch=False, interval=4000) 9 | evaluation = dict(interval=4000, metric='mIoU') 10 | -------------------------------------------------------------------------------- /annotator/uniformer/configs/_base_/schedules/schedule_80k.py: -------------------------------------------------------------------------------- 1 | # optimizer 2 | optimizer = dict(type='SGD', lr=0.01, momentum=0.9, weight_decay=0.0005) 3 | optimizer_config = dict() 4 | # learning policy 5 | lr_config = dict(policy='poly', power=0.9, min_lr=1e-4, by_epoch=False) 6 | # runtime settings 7 | runner = dict(type='IterBasedRunner', max_iters=80000) 8 | checkpoint_config = dict(by_epoch=False, interval=8000) 9 | evaluation = dict(interval=8000, metric='mIoU') 10 | -------------------------------------------------------------------------------- /annotator/uniformer/inference.py: -------------------------------------------------------------------------------- 1 | 2 | import torch 3 | 4 | try: 5 | import mmcv as mmcv 6 | from mmcv.parallel import collate, scatter 7 | from mmcv.runner import load_checkpoint 8 | from mmseg.datasets.pipelines import Compose 9 | from mmseg.models import build_segmentor 10 | except ImportError: 11 | import annotator.mmpkg.mmcv as mmcv 12 | from annotator.mmpkg.mmcv.parallel import collate, scatter 13 | from annotator.mmpkg.mmcv.runner import load_checkpoint 14 | from annotator.mmpkg.mmseg.datasets.pipelines import Compose 15 | from annotator.mmpkg.mmseg.models import build_segmentor 16 | 17 | def init_segmentor(config, checkpoint=None, device='cuda:0'): 18 | """Initialize a segmentor from config file. 19 | 20 | Args: 21 | config (str or :obj:`mmcv.Config`): Config file path or the config 22 | object. 23 | checkpoint (str, optional): Checkpoint path. If left as None, the model 24 | will not load any weights. 25 | device (str, optional) CPU/CUDA device option. Default 'cuda:0'. 26 | Use 'cpu' for loading model on CPU. 
27 | Returns: 28 | nn.Module: The constructed segmentor. 29 | """ 30 | if isinstance(config, str): 31 | config = mmcv.Config.fromfile(config) 32 | elif not isinstance(config, mmcv.Config): 33 | raise TypeError('config must be a filename or Config object, ' 34 | 'but got {}'.format(type(config))) 35 | config.model.pretrained = None 36 | config.model.train_cfg = None 37 | model = build_segmentor(config.model, test_cfg=config.get('test_cfg')) 38 | if checkpoint is not None: 39 | checkpoint = load_checkpoint(model, checkpoint, map_location='cpu') 40 | model.CLASSES = checkpoint['meta']['CLASSES'] 41 | model.PALETTE = checkpoint['meta']['PALETTE'] 42 | model.cfg = config # save the config in the model for convenience 43 | model.to(device) 44 | model.eval() 45 | return model 46 | 47 | 48 | class LoadImage: 49 | """A simple pipeline to load image.""" 50 | 51 | def __call__(self, results): 52 | """Call function to load images into results. 53 | 54 | Args: 55 | results (dict): A result dict contains the file name 56 | of the image to be read. 57 | 58 | Returns: 59 | dict: ``results`` will be returned containing loaded image. 60 | """ 61 | 62 | if isinstance(results['img'], str): 63 | results['filename'] = results['img'] 64 | results['ori_filename'] = results['img'] 65 | else: 66 | results['filename'] = None 67 | results['ori_filename'] = None 68 | img = mmcv.imread(results['img']) 69 | results['img'] = img 70 | results['img_shape'] = img.shape 71 | results['ori_shape'] = img.shape 72 | return results 73 | 74 | 75 | def inference_segmentor(model, img): 76 | """Inference image(s) with the segmentor. 77 | 78 | Args: 79 | model (nn.Module): The loaded segmentor. 80 | imgs (str/ndarray or list[str/ndarray]): Either image files or loaded 81 | images. 82 | 83 | Returns: 84 | (list[Tensor]): The segmentation result. 85 | """ 86 | cfg = model.cfg 87 | device = next(model.parameters()).device # model device 88 | # build the data pipeline 89 | test_pipeline = [LoadImage()] + cfg.data.test.pipeline[1:] 90 | test_pipeline = Compose(test_pipeline) 91 | # prepare data 92 | data = dict(img=img) 93 | data = test_pipeline(data) 94 | data = collate([data], samples_per_gpu=1) 95 | if next(model.parameters()).is_cuda: 96 | # scatter to specified GPU 97 | data = scatter(data, [device])[0] 98 | else: 99 | data['img_metas'] = [i.data[0] for i in data['img_metas']] 100 | 101 | data['img'] = [x.to(device) for x in data['img']] 102 | 103 | # forward the model 104 | with torch.no_grad(): 105 | result = model(return_loss=False, rescale=True, **data) 106 | return result 107 | 108 | 109 | def show_result_pyplot(model, 110 | img, 111 | result, 112 | palette=None, 113 | fig_size=(15, 10), 114 | opacity=0.5, 115 | title='', 116 | block=True): 117 | """Visualize the segmentation results on the image. 118 | 119 | Args: 120 | model (nn.Module): The loaded segmentor. 121 | img (str or np.ndarray): Image filename or loaded image. 122 | result (list): The segmentation result. 123 | palette (list[list[int]]] | None): The palette of segmentation 124 | map. If None is given, random palette will be generated. 125 | Default: None 126 | fig_size (tuple): Figure size of the pyplot figure. 127 | opacity(float): Opacity of painted segmentation map. 128 | Default 0.5. 129 | Must be in (0, 1] range. 130 | title (str): The title of pyplot figure. 131 | Default is ''. 132 | block (bool): Whether to block the pyplot figure. 133 | Default is True. 
134 | """ 135 | if hasattr(model, 'module'): 136 | model = model.module 137 | img = model.show_result( 138 | img, result, palette=palette, show=False, opacity=opacity) 139 | # plt.figure(figsize=fig_size) 140 | # plt.imshow(mmcv.bgr2rgb(img)) 141 | # plt.title(title) 142 | # plt.tight_layout() 143 | # plt.show(block=block) 144 | return mmcv.bgr2rgb(img) 145 | -------------------------------------------------------------------------------- /annotator/uniformer/mmcv_custom/__init__.py: -------------------------------------------------------------------------------- 1 | # -*- coding: utf-8 -*- 2 | 3 | from .checkpoint import load_checkpoint 4 | 5 | __all__ = ['load_checkpoint'] -------------------------------------------------------------------------------- /annotator/uniformer/upernet_global_small.py: -------------------------------------------------------------------------------- 1 | _base_ = [ 2 | 'configs/_base_/models/upernet_uniformer.py', 3 | 'configs/_base_/datasets/ade20k.py', 4 | 'configs/_base_/default_runtime.py', 5 | 'configs/_base_/schedules/schedule_160k.py' 6 | ] 7 | 8 | custom_imports = dict( 9 | imports=['annotator.uniformer.uniformer'], 10 | allow_failed_imports=False 11 | ) 12 | 13 | model = dict( 14 | backbone=dict( 15 | type='UniFormer', 16 | embed_dim=[64, 128, 320, 512], 17 | layers=[3, 4, 8, 3], 18 | head_dim=64, 19 | drop_path_rate=0.25, 20 | windows=False, 21 | hybrid=False 22 | ), 23 | decode_head=dict( 24 | in_channels=[64, 128, 320, 512], 25 | num_classes=150 26 | ), 27 | auxiliary_head=dict( 28 | in_channels=320, 29 | num_classes=150 30 | )) 31 | 32 | # AdamW optimizer, no weight decay for position embedding & layer norm in backbone 33 | optimizer = dict(_delete_=True, type='AdamW', lr=0.00006, betas=(0.9, 0.999), weight_decay=0.01, 34 | paramwise_cfg=dict(custom_keys={'absolute_pos_embed': dict(decay_mult=0.), 35 | 'relative_position_bias_table': dict(decay_mult=0.), 36 | 'norm': dict(decay_mult=0.)})) 37 | 38 | lr_config = dict(_delete_=True, policy='poly', 39 | warmup='linear', 40 | warmup_iters=1500, 41 | warmup_ratio=1e-6, 42 | power=1.0, min_lr=0.0, by_epoch=False) 43 | 44 | data=dict(samples_per_gpu=2) -------------------------------------------------------------------------------- /annotator/util.py: -------------------------------------------------------------------------------- 1 | import numpy as np 2 | import cv2 3 | 4 | 5 | def HWC3(x): 6 | assert x.dtype == np.uint8 7 | if x.ndim == 2: 8 | x = x[:, :, None] 9 | assert x.ndim == 3 10 | H, W, C = x.shape 11 | assert C == 1 or C == 3 or C == 4 12 | if C == 3: 13 | return x 14 | if C == 1: 15 | return np.concatenate([x, x, x], axis=2) 16 | if C == 4: 17 | color = x[:, :, 0:3].astype(np.float32) 18 | alpha = x[:, :, 3:4].astype(np.float32) / 255.0 19 | y = color * alpha + 255.0 * (1.0 - alpha) 20 | y = y.clip(0, 255).astype(np.uint8) 21 | return y 22 | 23 | 24 | def make_noise_disk(H, W, C, F): 25 | noise = np.random.uniform(low=0, high=1, size=((H // F) + 2, (W // F) + 2, C)) 26 | noise = cv2.resize(noise, (W + 2 * F, H + 2 * F), interpolation=cv2.INTER_CUBIC) 27 | noise = noise[F: F + H, F: F + W] 28 | noise -= np.min(noise) 29 | noise /= np.max(noise) 30 | if C == 1: 31 | noise = noise[:, :, None] 32 | return noise 33 | 34 | 35 | def nms(x, t, s): 36 | x = cv2.GaussianBlur(x.astype(np.float32), (0, 0), s) 37 | 38 | f1 = np.array([[0, 0, 0], [1, 1, 1], [0, 0, 0]], dtype=np.uint8) 39 | f2 = np.array([[0, 1, 0], [0, 1, 0], [0, 1, 0]], dtype=np.uint8) 40 | f3 = np.array([[1, 0, 0], [0, 1, 0], [0, 0, 1]], 
dtype=np.uint8) 41 | f4 = np.array([[0, 0, 1], [0, 1, 0], [1, 0, 0]], dtype=np.uint8) 42 | 43 | y = np.zeros_like(x) 44 | 45 | for f in [f1, f2, f3, f4]: 46 | np.putmask(y, cv2.dilate(x, kernel=f) == x, x) 47 | 48 | z = np.zeros_like(y, dtype=np.uint8) 49 | z[y > t] = 255 50 | return z 51 | 52 | 53 | def min_max_norm(x): 54 | x -= np.min(x) 55 | x /= np.maximum(np.max(x), 1e-5) 56 | return x 57 | 58 | 59 | def safe_step(x, step=2): 60 | y = x.astype(np.float32) * float(step + 1) 61 | y = y.astype(np.int32).astype(np.float32) / float(step) 62 | return y 63 | -------------------------------------------------------------------------------- /requirements.txt: -------------------------------------------------------------------------------- 1 | pytorch_lightning 2 | pandas 3 | kornia 4 | --------------------------------------------------------------------------------
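Finally, a minimal usage sketch for the segmentation helpers defined in `annotator/uniformer/inference.py` above. The checkpoint path and input image name are placeholders (no weights ship with this repo), and registration of the UniFormer backbone is assumed to happen through the `custom_imports` entry in `upernet_global_small.py`.

# Hypothetical example -- 'upernet_global_small.pth' and 'input.png' are placeholder paths.
from annotator.uniformer.inference import init_segmentor, inference_segmentor, show_result_pyplot

config = 'annotator/uniformer/upernet_global_small.py'    # config included in this repo
checkpoint = 'upernet_global_small.pth'                   # placeholder checkpoint path

model = init_segmentor(config, checkpoint, device='cuda:0')
result = inference_segmentor(model, 'input.png')          # list with one H x W label map
overlay = show_result_pyplot(model, 'input.png', result)  # returns the blended RGB ndarray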