├── .dockerignore ├── .gitattributes ├── .gitignore ├── CHANGELOG.md ├── CITATION.cff ├── CODEOWNERS ├── LICENSE.txt ├── README.md ├── cache.json ├── cog.yaml ├── configs └── v1-inference.yaml ├── download_weights.py ├── environment-wsl2.yaml ├── example.gif ├── extensions-builtin ├── LDSR │ ├── ldsr_model_arch.py │ ├── preload.py │ ├── scripts │ │ └── ldsr_model.py │ ├── sd_hijack_autoencoder.py │ ├── sd_hijack_ddpm_v1.py │ └── vqvae_quantize.py ├── Lora │ ├── extra_networks_lora.py │ ├── lora.py │ ├── lora_logger.py │ ├── lora_patches.py │ ├── lyco_helpers.py │ ├── network.py │ ├── network_full.py │ ├── network_glora.py │ ├── network_hada.py │ ├── network_ia3.py │ ├── network_lokr.py │ ├── network_lora.py │ ├── network_norm.py │ ├── network_oft.py │ ├── networks.py │ ├── preload.py │ ├── scripts │ │ └── lora_script.py │ ├── ui_edit_user_metadata.py │ └── ui_extra_networks_lora.py ├── ScuNET │ ├── preload.py │ ├── scripts │ │ └── scunet_model.py │ └── scunet_model_arch.py ├── SwinIR │ ├── preload.py │ ├── scripts │ │ └── swinir_model.py │ ├── swinir_model_arch.py │ └── swinir_model_arch_v2.py ├── canvas-zoom-and-pan │ ├── javascript │ │ └── zoom.js │ ├── scripts │ │ └── hotkey_config.py │ └── style.css ├── extra-options-section │ └── scripts │ │ └── extra_options_section.py ├── hypertile │ ├── hypertile.py │ └── scripts │ │ ├── hypertile_script.py │ │ └── hypertile_xyz.py ├── mobile │ └── javascript │ │ └── mobile.js └── prompt-bracket-checker │ └── javascript │ └── prompt-bracket-checker.js ├── extensions └── put extensions here.txt ├── handfix └── handfix.py ├── html ├── card-no-preview.png ├── extra-networks-card.html ├── extra-networks-no-cards.html ├── footer.html └── licenses.html ├── init.png ├── init.sh ├── init_env.py ├── javascript ├── aspectRatioOverlay.js ├── contextMenus.js ├── dragdrop.js ├── edit-attention.js ├── edit-order.js ├── extensions.js ├── extraNetworks.js ├── generationParams.js ├── hints.js ├── hires_fix.js ├── imageMaskFix.js ├── imageviewer.js ├── imageviewerGamepad.js ├── inputAccordion.js ├── localStorage.js ├── localization.js ├── notification.js ├── profilerVisualization.js ├── progressbar.js ├── resizeHandle.js ├── settings.js ├── textualInversion.js ├── token-counters.js ├── ui.js └── ui_settings_hints.js ├── launch.py ├── localizations └── Put localization files here.txt ├── models ├── Lora │ └── more_details.safetensors ├── Stable-diffusion │ └── Put Stable Diffusion checkpoints here.txt ├── VAE-approx │ └── model.pt ├── VAE │ └── Put VAE here.txt ├── deepbooru │ └── Put your deepbooru release project folder here.txt └── karlo │ └── ViT-L-14_stats.th ├── modified_controlnet.py ├── modules.zip ├── modules ├── Roboto-Regular.ttf ├── api │ ├── api.py │ └── models.py ├── cache.py ├── call_queue.py ├── cmd_args.py ├── codeformer │ ├── codeformer_arch.py │ └── vqgan_arch.py ├── codeformer_model.py ├── config_states.py ├── debugging │ └── debug_image.py ├── deepbooru.py ├── deepbooru_model.py ├── devices.py ├── errors.py ├── esrgan_model.py ├── esrgan_model_arch.py ├── extensions.py ├── extra_networks.py ├── extra_networks_hypernet.py ├── extras.py ├── face_restoration.py ├── fifo_lock.py ├── generation_parameters_copypaste.py ├── gfpgan_model.py ├── gitpython_hack.py ├── gradio_extensons.py ├── hashes.py ├── hypernetworks │ ├── hypernetwork.py │ └── ui.py ├── images.py ├── img2img.py ├── import_hook.py ├── initialize.py ├── initialize_util.py ├── interrogate.py ├── launch_utils.py ├── localization.py ├── logging_config.py ├── lowvram.py ├── mac_specific.py 
├── masking.py ├── memmon.py ├── modelloader.py ├── models │ └── diffusion │ │ ├── ddpm_edit.py │ │ └── uni_pc │ │ ├── __init__.py │ │ ├── sampler.py │ │ └── uni_pc.py ├── ngrok.py ├── options.py ├── patches.py ├── paths.py ├── paths_internal.py ├── postprocessing.py ├── processing.py ├── processing_scripts │ ├── refiner.py │ └── seed.py ├── progress.py ├── prompt_parser.py ├── realesrgan_model.py ├── restart.py ├── rng.py ├── rng_philox.py ├── safe.py ├── script_callbacks.py ├── script_loading.py ├── scripts.py ├── scripts_auto_postprocessing.py ├── scripts_postprocessing.py ├── sd_disable_initialization.py ├── sd_hijack.py ├── sd_hijack_checkpoint.py ├── sd_hijack_clip.py ├── sd_hijack_clip_old.py ├── sd_hijack_ip2p.py ├── sd_hijack_open_clip.py ├── sd_hijack_optimizations.py ├── sd_hijack_unet.py ├── sd_hijack_utils.py ├── sd_hijack_xlmr.py ├── sd_models.py ├── sd_models_config.py ├── sd_models_types.py ├── sd_models_xl.py ├── sd_samplers.py ├── sd_samplers_cfg_denoiser.py ├── sd_samplers_common.py ├── sd_samplers_compvis.py ├── sd_samplers_extra.py ├── sd_samplers_kdiffusion.py ├── sd_samplers_timesteps.py ├── sd_samplers_timesteps_impl.py ├── sd_unet.py ├── sd_vae.py ├── sd_vae_approx.py ├── sd_vae_taesd.py ├── shared.py ├── shared_cmd_options.py ├── shared_gradio_themes.py ├── shared_init.py ├── shared_items.py ├── shared_options.py ├── shared_state.py ├── shared_total_tqdm.py ├── styles.py ├── sub_quadratic_attention.py ├── sysinfo.py ├── textual_inversion │ ├── autocrop.py │ ├── dataset.py │ ├── image_embedding.py │ ├── learn_schedule.py │ ├── logging.py │ ├── test_embedding.png │ ├── textual_inversion.py │ └── ui.py ├── tiling │ ├── img_utils.py │ └── seamless_tiling.py ├── timer.py ├── txt2img.py ├── ui.py ├── ui_checkpoint_merger.py ├── ui_common.py ├── ui_components.py ├── ui_extensions.py ├── ui_extra_networks.py ├── ui_extra_networks_checkpoints.py ├── ui_extra_networks_checkpoints_user_metadata.py ├── ui_extra_networks_hypernets.py ├── ui_extra_networks_textual_inversion.py ├── ui_extra_networks_user_metadata.py ├── ui_gradio_extensions.py ├── ui_loadsave.py ├── ui_postprocessing.py ├── ui_prompt_styles.py ├── ui_settings.py ├── ui_tempdir.py ├── ui_toprow.py ├── upscaler.py ├── util.py ├── xlmr.py ├── xlmr_m18.py └── xpu_specific.py ├── package.json ├── params.txt ├── predict.py ├── pyproject.toml ├── request.json ├── requirements-test.txt ├── requirements.txt ├── requirements_versions.txt ├── script.js ├── scripts ├── custom_code.py ├── img2imgalt.py ├── loopback.py ├── outpainting_mk_2.py ├── poor_mans_outpainting.py ├── postprocessing_caption.py ├── postprocessing_codeformer.py ├── postprocessing_create_flipped_copies.py ├── postprocessing_focal_crop.py ├── postprocessing_gfpgan.py ├── postprocessing_split_oversized.py ├── postprocessing_upscale.py ├── processing_autosized_crop.py ├── prompt_matrix.py ├── prompts_from_file.py ├── sd_upscale.py └── xyz_grid.py ├── style.css ├── test ├── __init__.py ├── conftest.py ├── test_extras.py ├── test_files │ ├── empty.pt │ ├── img2img_basic.png │ └── mask_basic.png ├── test_img2img.py ├── test_txt2img.py └── test_utils.py ├── textual_inversion_templates ├── hypernetwork.txt ├── none.txt ├── style.txt ├── style_filewords.txt ├── subject.txt └── subject_filewords.txt ├── ui-config.json ├── webui-macos-env.sh ├── webui-user.bat ├── webui-user.sh ├── webui.bat ├── webui.py └── webui.sh /.dockerignore: -------------------------------------------------------------------------------- 1 | # The .dockerignore file excludes files from the 
container build process. 2 | # 3 | # https://docs.docker.com/engine/reference/builder/#dockerignore-file 4 | 5 | # Exclude Git files 6 | .git 7 | .github 8 | .gitignore 9 | 10 | # Exclude Python cache files 11 | __pycache__ 12 | .mypy_cache 13 | .pytest_cache 14 | .ruff_cache 15 | 16 | # Exclude Python virtual environment 17 | /venv 18 | 19 | example.gif -------------------------------------------------------------------------------- /.gitattributes: -------------------------------------------------------------------------------- 1 | # Auto detect text files and perform LF normalization 2 | * text=auto 3 | -------------------------------------------------------------------------------- /.gitignore: -------------------------------------------------------------------------------- 1 | __pycache__ 2 | .mypy_cache 3 | .pytest_cache 4 | .ruff_cache -------------------------------------------------------------------------------- /CITATION.cff: -------------------------------------------------------------------------------- 1 | cff-version: 1.2.0 2 | message: "If you use this software, please cite it as below." 3 | authors: 4 | - given-names: AUTOMATIC1111 5 | title: "Stable Diffusion Web UI" 6 | date-released: 2022-08-22 7 | url: "https://github.com/AUTOMATIC1111/stable-diffusion-webui" 8 | -------------------------------------------------------------------------------- /CODEOWNERS: -------------------------------------------------------------------------------- 1 | * @AUTOMATIC1111 2 | 3 | # if you were managing a localization and were removed from this file, this is because 4 | # the intended way to do localizations now is via extensions. See: 5 | # https://github.com/AUTOMATIC1111/stable-diffusion-webui/wiki/Developing-extensions 6 | # Make a repo with your localization and since you are still listed as a collaborator 7 | # you can add it to the wiki page yourself. This change is because some people complained 8 | # the git commit log is cluttered with things unrelated to almost everyone and 9 | # because I believe this is the best overall for the project to handle localizations almost 10 | # entirely without my oversight. 
11 | 12 | 13 | -------------------------------------------------------------------------------- /cache.json: -------------------------------------------------------------------------------- 1 | { 2 | "safetensors-metadata": { 3 | "checkpoint/v1-5-pruned-emaonly.safetensors": { 4 | "mtime": 1707282384.7290666, 5 | "value": { 6 | "format": "pt" 7 | } 8 | }, 9 | "lora/more_details": { 10 | "mtime": 1707282511.967678, 11 | "value": {} 12 | } 13 | }, 14 | "hashes": { 15 | "checkpoint/v1-5-pruned-emaonly.safetensors": { 16 | "mtime": 1707282384.7290666, 17 | "sha256": "6ce0161689b3853acaa03779ec93eafe75a02f4ced659bee03f50797806fa2fa" 18 | } 19 | }, 20 | "hashes-addnet": { 21 | "lora/more_details": { 22 | "mtime": 1707282511.967678, 23 | "sha256": "3b8aa1d351efcbf8152e6327671d1310f6052b261363521874bb57c2711f419d" 24 | } 25 | } 26 | } --------------------------------------------------------------------------------
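Each cache entry above is keyed by the source file's recorded mtime, so stale entries can be detected without rehashing. A minimal, hypothetical lookup helper in that spirit for the "safetensors-metadata" section (the repo's modules/cache.py has its own, richer API) might look like:

import json
import os

def cached_value(cache_path, section, key, src_path):
    # Return the stored value only while the source file's mtime still
    # matches the one recorded alongside it; None means "recompute".
    # (The "hashes" sections store a "sha256" field instead of "value".)
    with open(cache_path) as f:
        entry = json.load(f).get(section, {}).get(key)
    if entry and entry.get("mtime") == os.path.getmtime(src_path):
        return entry.get("value")
    return None

# e.g. cached_value("cache.json", "safetensors-metadata",
#                   "checkpoint/v1-5-pruned-emaonly.safetensors",
#                   "models/Stable-diffusion/v1-5-pruned-emaonly.safetensors")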
/cog.yaml: -------------------------------------------------------------------------------- 1 | build: 2 | gpu: true 3 | system_packages: 4 | - "libgl1-mesa-glx" 5 | - "libglib2.0-0" 6 | python_version: "3.10.4" 7 | python_packages: 8 | - "torch==2.0.1" 9 | - "torchvision==0.15.2" 10 | - "xformers==0.0.22" 11 | - "tensorboard==2.16.2" 12 | - "gfpgan==1.3.8" 13 | - "lpips==0.1.4" 14 | - "realesrgan==0.3.0" 15 | - "gdown==5.1.0" 16 | - "mediapipe==0.10.14" 17 | run: 18 | - curl -o /usr/local/bin/pget -L "https://github.com/replicate/pget/releases/latest/download/pget_$(uname -s)_$(uname -m)" 19 | - chmod +x /usr/local/bin/pget 20 | - git config --global --add safe.directory /src 21 | - git config --global --add safe.directory /src/extensions/sd-webui-controlnet 22 | - git config --global --add safe.directory /src/extensions/multidiffusion-upscaler-for-automatic1111 23 | - git clone https://github.com/philz1337x/stable-diffusion-webui-cog-init /stable-diffusion-webui 24 | - python /stable-diffusion-webui/init_env.py --skip-torch-cuda-test 25 | - sed -i 's/from pkg_resources import packaging/import packaging/g' /root/.pyenv/versions/3.10.4/lib/python3.10/site-packages/clip/clip.py 26 | predict: "predict.py:Predictor" 27 | -------------------------------------------------------------------------------- /configs/v1-inference.yaml: -------------------------------------------------------------------------------- 1 | model: 2 | base_learning_rate: 1.0e-04 3 | target: ldm.models.diffusion.ddpm.LatentDiffusion 4 | params: 5 | linear_start: 0.00085 6 | linear_end: 0.0120 7 | num_timesteps_cond: 1 8 | log_every_t: 200 9 | timesteps: 1000 10 | first_stage_key: "jpg" 11 | cond_stage_key: "txt" 12 | image_size: 64 13 | channels: 4 14 | cond_stage_trainable: false # Note: different from the one we trained before 15 | conditioning_key: crossattn 16 | monitor: val/loss_simple_ema 17 | scale_factor: 0.18215 18 | use_ema: False 19 | 20 | scheduler_config: # 10000 warmup steps 21 | target: ldm.lr_scheduler.LambdaLinearScheduler 22 | params: 23 | warm_up_steps: [ 10000 ] 24 | cycle_lengths: [ 10000000000000 ] # incredibly large number to prevent corner cases 25 | f_start: [ 1.e-6 ] 26 | f_max: [ 1. ] 27 | f_min: [ 1. ] 28 | 29 | unet_config: 30 | target: ldm.modules.diffusionmodules.openaimodel.UNetModel 31 | params: 32 | image_size: 32 # unused 33 | in_channels: 4 34 | out_channels: 4 35 | model_channels: 320 36 | attention_resolutions: [ 4, 2, 1 ] 37 | num_res_blocks: 2 38 | channel_mult: [ 1, 2, 4, 4 ] 39 | num_heads: 8 40 | use_spatial_transformer: True 41 | transformer_depth: 1 42 | context_dim: 768 43 | use_checkpoint: True 44 | legacy: False 45 | 46 | first_stage_config: 47 | target: ldm.models.autoencoder.AutoencoderKL 48 | params: 49 | embed_dim: 4 50 | monitor: val/rec_loss 51 | ddconfig: 52 | double_z: true 53 | z_channels: 4 54 | resolution: 256 55 | in_channels: 3 56 | out_ch: 3 57 | ch: 128 58 | ch_mult: 59 | - 1 60 | - 2 61 | - 4 62 | - 4 63 | num_res_blocks: 2 64 | attn_resolutions: [] 65 | dropout: 0.0 66 | lossconfig: 67 | target: torch.nn.Identity 68 | 69 | cond_stage_config: 70 | target: ldm.modules.encoders.modules.FrozenCLIPEmbedder 71 | --------------------------------------------------------------------------------
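Every target/params pair in this config names a Python class and its constructor arguments. The ldm codebase resolves such pairs with an instantiate_from_config helper; a stripped-down sketch of that mechanism (the real CompVis utility also handles a few special-case targets):

import importlib

def instantiate_from_config(config):
    # "ldm.models.autoencoder.AutoencoderKL" -> module path + class name
    module_path, class_name = config["target"].rsplit(".", 1)
    cls = getattr(importlib.import_module(module_path), class_name)
    # params become keyword arguments; a missing params key means no-arg init
    return cls(**config.get("params", {}))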
/download_weights.py: -------------------------------------------------------------------------------- 1 | import os 2 | import requests 3 | import shutil 4 | 5 | def download_file(url, folder_path, filename): 6 | if not os.path.exists(folder_path): 7 | os.makedirs(folder_path) 8 | file_path = os.path.join(folder_path, filename) 9 | 10 | if os.path.isfile(file_path): 11 | print(f"File already exists: {file_path}") 12 | else: 13 | response = requests.get(url, stream=True) 14 | if response.status_code == 200: 15 | with open(file_path, 'wb') as file: 16 | for chunk in response.iter_content(chunk_size=1024): 17 | file.write(chunk) 18 | print(f"File successfully downloaded and saved: {file_path}") 19 | else: 20 | print(f"Error downloading the file. Status code: {response.status_code}") 21 | 22 | # Prepare webui 23 | from modules.launch_utils import prepare_environment 24 | prepare_environment() 25 | 26 | print("Modifying controlnet.py") 27 | shutil.copyfile('modified_controlnet.py', 'extensions/sd-webui-controlnet/scripts/controlnet.py') 28 | print("Modifying controlnet.py - Done") 29 | 30 | # Checkpoints 31 | download_file( 32 | "https://huggingface.co/philz1337x/flat2DAnimerge_v45Sharp/resolve/main/flat2DAnimerge_v45Sharp.safetensors?download=true", 33 | "models/Stable-diffusion", 34 | "flat2DAnimerge_v45Sharp.safetensors" 35 | ) 36 | download_file( 37 | "https://huggingface.co/dantea1118/juggernaut_reborn/resolve/main/juggernaut_reborn.safetensors?download=true", 38 | "models/Stable-diffusion", 39 | "juggernaut_reborn.safetensors" 40 | ) 41 | download_file( 42 | "https://huggingface.co/philz1337x/epicrealism/resolve/main/epicrealism_naturalSinRC1VAE.safetensors?download=true", 43 | "models/Stable-diffusion", 44 | "epicrealism_naturalSinRC1VAE.safetensors" 45 | ) 46 | 47 | # Upscaler Model 48 | download_file( 49 | "https://huggingface.co/philz1337x/upscaler/resolve/main/4x-UltraSharp.pth?download=true", 50 | "models/ESRGAN", 51 | "4x-UltraSharp.pth" 52 | ) 53 | 54 | # Embeddings 55 | download_file( 56 | "https://huggingface.co/philz1337x/embeddings/resolve/main/verybadimagenegative_v1.3.pt?download=true", 57 | "embeddings", 58 | "verybadimagenegative_v1.3.pt" 59 | ) 60 | download_file( 61 | "https://huggingface.co/philz1337x/embeddings/resolve/main/JuggernautNegative-neg.pt?download=true", 62 | "embeddings", 63 | "JuggernautNegative-neg.pt" 64 | ) 65 | 66 | # Lora Models 67 | download_file( 68 | "https://huggingface.co/philz1337x/loras/resolve/main/SDXLrender_v2.0.safetensors?download=true", 69 | "models/Lora", 70 | "SDXLrender_v2.0.safetensors" 71 | ) 72 | download_file( 73 | "https://huggingface.co/philz1337x/loras/resolve/main/more_details.safetensors?download=true", 74 | "models/Lora", 75 | "more_details.safetensors" 76 | ) 77 | 78 | # Controlnet models 79 | download_file( 80 | "https://huggingface.co/lllyasviel/ControlNet-v1-1/resolve/main/control_v11f1e_sd15_tile.pth?download=true", 81 | "models/ControlNet", 82 | "control_v11f1e_sd15_tile.pth" 83 | ) 84 | 85 | # VAE 86 | download_file( 87 | "https://huggingface.co/stabilityai/sd-vae-ft-mse-original/resolve/main/vae-ft-mse-840000-ema-pruned.safetensors?download=true", 88 | "models/VAE", 89 | "vae-ft-mse-840000-ema-pruned.safetensors" 90 | ) 91 | -------------------------------------------------------------------------------- /environment-wsl2.yaml: -------------------------------------------------------------------------------- 1 | name: automatic 2 | channels: 3 | - pytorch 4 | - defaults 5 | dependencies: 6 | - python=3.10 7 | - pip=23.0 8 | - cudatoolkit=11.8 9 | - pytorch=2.0 10 | - torchvision=0.15 11 | - numpy=1.23 12 | -------------------------------------------------------------------------------- /example.gif: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/philz1337x/clarity-upscaler/cfbdb842c6c839c8e97741722b8cf9a4b7662d98/example.gif -------------------------------------------------------------------------------- /extensions-builtin/LDSR/preload.py: -------------------------------------------------------------------------------- 1 | import os 2 | from modules import paths 3 | 4 | 5 | def preload(parser): 6 | parser.add_argument("--ldsr-models-path", type=str, help="Path to directory with LDSR
model file(s).", default=os.path.join(paths.models_path, 'LDSR')) 7 | -------------------------------------------------------------------------------- /extensions-builtin/LDSR/scripts/ldsr_model.py: -------------------------------------------------------------------------------- 1 | import os 2 | 3 | from modules.modelloader import load_file_from_url 4 | from modules.upscaler import Upscaler, UpscalerData 5 | from ldsr_model_arch import LDSR 6 | from modules import shared, script_callbacks, errors 7 | import sd_hijack_autoencoder # noqa: F401 8 | import sd_hijack_ddpm_v1 # noqa: F401 9 | 10 | 11 | class UpscalerLDSR(Upscaler): 12 | def __init__(self, user_path): 13 | self.name = "LDSR" 14 | self.user_path = user_path 15 | self.model_url = "https://heibox.uni-heidelberg.de/f/578df07c8fc04ffbadf3/?dl=1" 16 | self.yaml_url = "https://heibox.uni-heidelberg.de/f/31a76b13ea27482981b4/?dl=1" 17 | super().__init__() 18 | scaler_data = UpscalerData("LDSR", None, self) 19 | self.scalers = [scaler_data] 20 | 21 | def load_model(self, path: str): 22 | # Remove incorrect project.yaml file if too big 23 | yaml_path = os.path.join(self.model_path, "project.yaml") 24 | old_model_path = os.path.join(self.model_path, "model.pth") 25 | new_model_path = os.path.join(self.model_path, "model.ckpt") 26 | 27 | local_model_paths = self.find_models(ext_filter=[".ckpt", ".safetensors"]) 28 | local_ckpt_path = next(iter([local_model for local_model in local_model_paths if local_model.endswith("model.ckpt")]), None) 29 | local_safetensors_path = next(iter([local_model for local_model in local_model_paths if local_model.endswith("model.safetensors")]), None) 30 | local_yaml_path = next(iter([local_model for local_model in local_model_paths if local_model.endswith("project.yaml")]), None) 31 | 32 | if os.path.exists(yaml_path): 33 | statinfo = os.stat(yaml_path) 34 | if statinfo.st_size >= 10485760: 35 | print("Removing invalid LDSR YAML file.") 36 | os.remove(yaml_path) 37 | 38 | if os.path.exists(old_model_path): 39 | print("Renaming model from model.pth to model.ckpt") 40 | os.rename(old_model_path, new_model_path) 41 | 42 | if local_safetensors_path is not None and os.path.exists(local_safetensors_path): 43 | model = local_safetensors_path 44 | else: 45 | model = local_ckpt_path or load_file_from_url(self.model_url, model_dir=self.model_download_path, file_name="model.ckpt") 46 | 47 | yaml = local_yaml_path or load_file_from_url(self.yaml_url, model_dir=self.model_download_path, file_name="project.yaml") 48 | 49 | return LDSR(model, yaml) 50 | 51 | def do_upscale(self, img, path): 52 | try: 53 | ldsr = self.load_model(path) 54 | except Exception: 55 | errors.report(f"Failed loading LDSR model {path}", exc_info=True) 56 | return img 57 | ddim_steps = shared.opts.ldsr_steps 58 | return ldsr.super_resolution(img, ddim_steps, self.scale) 59 | 60 | 61 | def on_ui_settings(): 62 | import gradio as gr 63 | 64 | shared.opts.add_option("ldsr_steps", shared.OptionInfo(100, "LDSR processing steps. 
Lower = faster", gr.Slider, {"minimum": 1, "maximum": 200, "step": 1}, section=('upscaling', "Upscaling"))) 65 | shared.opts.add_option("ldsr_cached", shared.OptionInfo(False, "Cache LDSR model in memory", gr.Checkbox, {"interactive": True}, section=('upscaling', "Upscaling"))) 66 | 67 | 68 | script_callbacks.on_ui_settings(on_ui_settings) 69 | -------------------------------------------------------------------------------- /extensions-builtin/Lora/extra_networks_lora.py: -------------------------------------------------------------------------------- 1 | from modules import extra_networks, shared 2 | import networks 3 | 4 | 5 | class ExtraNetworkLora(extra_networks.ExtraNetwork): 6 | def __init__(self): 7 | super().__init__('lora') 8 | 9 | self.errors = {} 10 | """mapping of network names to the number of errors the network had during operation""" 11 | 12 | def activate(self, p, params_list): 13 | additional = shared.opts.sd_lora 14 | 15 | self.errors.clear() 16 | 17 | if additional != "None" and additional in networks.available_networks and not any(x for x in params_list if x.items[0] == additional): 18 | p.all_prompts = [x + f"" for x in p.all_prompts] 19 | params_list.append(extra_networks.ExtraNetworkParams(items=[additional, shared.opts.extra_networks_default_multiplier])) 20 | 21 | names = [] 22 | te_multipliers = [] 23 | unet_multipliers = [] 24 | dyn_dims = [] 25 | for params in params_list: 26 | assert params.items 27 | 28 | names.append(params.positional[0]) 29 | 30 | te_multiplier = float(params.positional[1]) if len(params.positional) > 1 else 1.0 31 | te_multiplier = float(params.named.get("te", te_multiplier)) 32 | 33 | unet_multiplier = float(params.positional[2]) if len(params.positional) > 2 else te_multiplier 34 | unet_multiplier = float(params.named.get("unet", unet_multiplier)) 35 | 36 | dyn_dim = int(params.positional[3]) if len(params.positional) > 3 else None 37 | dyn_dim = int(params.named["dyn"]) if "dyn" in params.named else dyn_dim 38 | 39 | te_multipliers.append(te_multiplier) 40 | unet_multipliers.append(unet_multiplier) 41 | dyn_dims.append(dyn_dim) 42 | 43 | networks.load_networks(names, te_multipliers, unet_multipliers, dyn_dims) 44 | 45 | if shared.opts.lora_add_hashes_to_infotext: 46 | network_hashes = [] 47 | for item in networks.loaded_networks: 48 | shorthash = item.network_on_disk.shorthash 49 | if not shorthash: 50 | continue 51 | 52 | alias = item.mentioned_name 53 | if not alias: 54 | continue 55 | 56 | alias = alias.replace(":", "").replace(",", "") 57 | 58 | network_hashes.append(f"{alias}: {shorthash}") 59 | 60 | if network_hashes: 61 | p.extra_generation_params["Lora hashes"] = ", ".join(network_hashes) 62 | 63 | def deactivate(self, p): 64 | if self.errors: 65 | p.comment("Networks with errors: " + ", ".join(f"{k} ({v})" for k, v in self.errors.items())) 66 | 67 | self.errors.clear() 68 | -------------------------------------------------------------------------------- /extensions-builtin/Lora/lora.py: -------------------------------------------------------------------------------- 1 | import networks 2 | 3 | list_available_loras = networks.list_available_networks 4 | 5 | available_loras = networks.available_networks 6 | available_lora_aliases = networks.available_network_aliases 7 | available_lora_hash_lookup = networks.available_network_hash_lookup 8 | forbidden_lora_aliases = networks.forbidden_network_aliases 9 | loaded_loras = networks.loaded_networks 10 | -------------------------------------------------------------------------------- 
/extensions-builtin/Lora/lora_logger.py: -------------------------------------------------------------------------------- 1 | import sys 2 | import copy 3 | import logging 4 | 5 | 6 | class ColoredFormatter(logging.Formatter): 7 | COLORS = { 8 | "DEBUG": "\033[0;36m", # CYAN 9 | "INFO": "\033[0;32m", # GREEN 10 | "WARNING": "\033[0;33m", # YELLOW 11 | "ERROR": "\033[0;31m", # RED 12 | "CRITICAL": "\033[0;37;41m", # WHITE ON RED 13 | "RESET": "\033[0m", # RESET COLOR 14 | } 15 | 16 | def format(self, record): 17 | colored_record = copy.copy(record) 18 | levelname = colored_record.levelname 19 | seq = self.COLORS.get(levelname, self.COLORS["RESET"]) 20 | colored_record.levelname = f"{seq}{levelname}{self.COLORS['RESET']}" 21 | return super().format(colored_record) 22 | 23 | 24 | logger = logging.getLogger("lora") 25 | logger.propagate = False 26 | 27 | 28 | if not logger.handlers: 29 | handler = logging.StreamHandler(sys.stdout) 30 | handler.setFormatter( 31 | ColoredFormatter("[%(name)s]-%(levelname)s: %(message)s") 32 | ) 33 | logger.addHandler(handler) 34 | -------------------------------------------------------------------------------- /extensions-builtin/Lora/lora_patches.py: -------------------------------------------------------------------------------- 1 | import torch 2 | 3 | import networks 4 | from modules import patches 5 | 6 | 7 | class LoraPatches: 8 | def __init__(self): 9 | self.Linear_forward = patches.patch(__name__, torch.nn.Linear, 'forward', networks.network_Linear_forward) 10 | self.Linear_load_state_dict = patches.patch(__name__, torch.nn.Linear, '_load_from_state_dict', networks.network_Linear_load_state_dict) 11 | self.Conv2d_forward = patches.patch(__name__, torch.nn.Conv2d, 'forward', networks.network_Conv2d_forward) 12 | self.Conv2d_load_state_dict = patches.patch(__name__, torch.nn.Conv2d, '_load_from_state_dict', networks.network_Conv2d_load_state_dict) 13 | self.GroupNorm_forward = patches.patch(__name__, torch.nn.GroupNorm, 'forward', networks.network_GroupNorm_forward) 14 | self.GroupNorm_load_state_dict = patches.patch(__name__, torch.nn.GroupNorm, '_load_from_state_dict', networks.network_GroupNorm_load_state_dict) 15 | self.LayerNorm_forward = patches.patch(__name__, torch.nn.LayerNorm, 'forward', networks.network_LayerNorm_forward) 16 | self.LayerNorm_load_state_dict = patches.patch(__name__, torch.nn.LayerNorm, '_load_from_state_dict', networks.network_LayerNorm_load_state_dict) 17 | self.MultiheadAttention_forward = patches.patch(__name__, torch.nn.MultiheadAttention, 'forward', networks.network_MultiheadAttention_forward) 18 | self.MultiheadAttention_load_state_dict = patches.patch(__name__, torch.nn.MultiheadAttention, '_load_from_state_dict', networks.network_MultiheadAttention_load_state_dict) 19 | 20 | def undo(self): 21 | self.Linear_forward = patches.undo(__name__, torch.nn.Linear, 'forward') 22 | self.Linear_load_state_dict = patches.undo(__name__, torch.nn.Linear, '_load_from_state_dict') 23 | self.Conv2d_forward = patches.undo(__name__, torch.nn.Conv2d, 'forward') 24 | self.Conv2d_load_state_dict = patches.undo(__name__, torch.nn.Conv2d, '_load_from_state_dict') 25 | self.GroupNorm_forward = patches.undo(__name__, torch.nn.GroupNorm, 'forward') 26 | self.GroupNorm_load_state_dict = patches.undo(__name__, torch.nn.GroupNorm, '_load_from_state_dict') 27 | self.LayerNorm_forward = patches.undo(__name__, torch.nn.LayerNorm, 'forward') 28 | self.LayerNorm_load_state_dict = patches.undo(__name__, torch.nn.LayerNorm, '_load_from_state_dict') 29 | self.MultiheadAttention_forward = patches.undo(__name__, torch.nn.MultiheadAttention, 'forward') 30 | self.MultiheadAttention_load_state_dict = patches.undo(__name__, torch.nn.MultiheadAttention, '_load_from_state_dict') 31 | 32 | --------------------------------------------------------------------------------
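lora_patches.py above swaps the forward and _load_from_state_dict methods of torch's basic layers for network-aware versions through modules/patches.py. The patch/undo pair behaves roughly like this simplified sketch (the real helper also guards against patching the same attribute twice):

_originals = {}

def patch(key, obj, field, replacement):
    # Swap obj.field for replacement, remembering the original so the
    # wrapper can delegate to it and undo() can restore it later.
    _originals[(key, obj, field)] = original = getattr(obj, field)
    setattr(obj, field, replacement)
    return original

def undo(key, obj, field):
    # Restore the saved attribute; the implicit None return is why
    # LoraPatches.undo() can assign the result back onto its fields.
    setattr(obj, field, _originals.pop((key, obj, field)))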
/extensions-builtin/Lora/lyco_helpers.py: -------------------------------------------------------------------------------- 1 | import torch 2 | 3 | 4 | def make_weight_cp(t, wa, wb): 5 | temp = torch.einsum('i j k l, j r -> i r k l', t, wb) 6 | return torch.einsum('i j k l, i r -> r j k l', temp, wa) 7 | 8 | 9 | def rebuild_conventional(up, down, shape, dyn_dim=None): 10 | up = up.reshape(up.size(0), -1) 11 | down = down.reshape(down.size(0), -1) 12 | if dyn_dim is not None: 13 | up = up[:, :dyn_dim] 14 | down = down[:dyn_dim, :] 15 | return (up @ down).reshape(shape) 16 | 17 | 18 | def rebuild_cp_decomposition(up, down, mid): 19 | up = up.reshape(up.size(0), -1) 20 | down = down.reshape(down.size(0), -1) 21 | return torch.einsum('n m k l, i n, m j -> i j k l', mid, up, down) 22 | 23 | 24 | # copied from https://github.com/KohakuBlueleaf/LyCORIS/blob/dev/lycoris/modules/lokr.py 25 | def factorization(dimension: int, factor:int=-1) -> tuple[int, int]: 26 | ''' 27 | return a tuple of two values of the input dimension, decomposed by the number closest to factor; 28 | the second value is greater than or equal to the first value. 29 | 30 | In LoRA with Kronecker product, the first value is a value for the weight scale, 31 | the second value is a value for the weight. 32 | 33 | Because of the non-commutative property, A⊗B ≠ B⊗A, the meaning of the two matrices is slightly different. 34 | 35 | examples) 36 | factor 37 | -1 2 4 8 16 ... 38 | 127 -> 1, 127 127 -> 1, 127 127 -> 1, 127 127 -> 1, 127 127 -> 1, 127 39 | 128 -> 8, 16 128 -> 2, 64 128 -> 4, 32 128 -> 8, 16 128 -> 8, 16 40 | 250 -> 10, 25 250 -> 2, 125 250 -> 2, 125 250 -> 5, 50 250 -> 10, 25 41 | 360 -> 8, 45 360 -> 2, 180 360 -> 4, 90 360 -> 8, 45 360 -> 12, 30 42 | 512 -> 16, 32 512 -> 2, 256 512 -> 4, 128 512 -> 8, 64 512 -> 16, 32 43 | 1024 -> 32, 32 1024 -> 2, 512 1024 -> 4, 256 1024 -> 8, 128 1024 -> 16, 64 44 | ''' 45 | 46 | if factor > 0 and (dimension % factor) == 0: 47 | m = factor 48 | n = dimension // factor 49 | if m > n: 50 | n, m = m, n 51 | return m, n 52 | if factor < 0: 53 | factor = dimension 54 | m, n = 1, dimension 55 | length = m + n 56 | while m < n: 57 | new_m = m + 1 58 | while dimension % new_m != 0: 59 | new_m += 1 60 | new_n = dimension // new_m 61 | if new_m + new_n > length or new_m > factor: 62 | break 63 | else: 64 | m, n = new_m, new_n 65 | if m > n: 66 | n, m = m, n 67 | return m, n 68 | 69 | --------------------------------------------------------------------------------
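A quick sanity check of factorization() against the docstring's table, and of how LoKr (network_lokr.py below) consumes the pair as Kronecker factors — illustrative snippet, runnable from this directory:

import torch
from lyco_helpers import factorization

m, n = factorization(128)      # -> (8, 16): the factor pair closest to sqrt(128)
w1 = torch.randn(m, m)         # small "scale" factor
w2 = torch.randn(n, n)         # large "weight" factor
updown = torch.kron(w1, w2)    # the Kronecker product rebuilds the full update
assert (m, n) == (8, 16) and updown.shape == (128, 128)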
/extensions-builtin/Lora/network_full.py: -------------------------------------------------------------------------------- 1 | import network 2 | 3 | 4 | class ModuleTypeFull(network.ModuleType): 5 | def create_module(self, net: network.Network, weights: network.NetworkWeights): 6 | if all(x in weights.w for x in ["diff"]): 7 | return NetworkModuleFull(net, weights) 8 | 9 | return None 10 | 11 | 12 | class NetworkModuleFull(network.NetworkModule): 13 | def __init__(self, net: network.Network, weights: network.NetworkWeights): 14 | super().__init__(net, weights) 15 | 16 | self.weight = weights.w.get("diff") 17 | self.ex_bias = weights.w.get("diff_b") 18 | 19 | def calc_updown(self, orig_weight): 20 | output_shape = self.weight.shape 21 | updown = self.weight.to(orig_weight.device, dtype=orig_weight.dtype) 22 | if self.ex_bias is not None: 23 | ex_bias = self.ex_bias.to(orig_weight.device, dtype=orig_weight.dtype) 24 | else: 25 | ex_bias = None 26 | 27 | return self.finalize_updown(updown, orig_weight, output_shape, ex_bias) 28 | -------------------------------------------------------------------------------- /extensions-builtin/Lora/network_glora.py: -------------------------------------------------------------------------------- 1 | 2 | import network 3 | 4 | class ModuleTypeGLora(network.ModuleType): 5 | def create_module(self, net: network.Network, weights: network.NetworkWeights): 6 | if all(x in weights.w for x in ["a1.weight", "a2.weight", "alpha", "b1.weight", "b2.weight"]): 7 | return NetworkModuleGLora(net, weights) 8 | 9 | return None 10 | 11 | # adapted from https://github.com/KohakuBlueleaf/LyCORIS 12 | class NetworkModuleGLora(network.NetworkModule): 13 | def __init__(self, net: network.Network, weights: network.NetworkWeights): 14 | super().__init__(net, weights) 15 | 16 | if hasattr(self.sd_module, 'weight'): 17 | self.shape = self.sd_module.weight.shape 18 | 19 | self.w1a = weights.w["a1.weight"] 20 | self.w1b = weights.w["b1.weight"] 21 | self.w2a = weights.w["a2.weight"] 22 | self.w2b = weights.w["b2.weight"] 23 | 24 | def calc_updown(self, orig_weight): 25 | w1a = self.w1a.to(orig_weight.device, dtype=orig_weight.dtype) 26 | w1b = self.w1b.to(orig_weight.device, dtype=orig_weight.dtype) 27 | w2a = self.w2a.to(orig_weight.device, dtype=orig_weight.dtype) 28 | w2b = self.w2b.to(orig_weight.device, dtype=orig_weight.dtype) 29 | 30 | output_shape = [w1a.size(0), w1b.size(1)] 31 | updown = ((w2b @ w1b) + ((orig_weight @ w2a) @ w1a)) 32 | 33 | return self.finalize_updown(updown, orig_weight, output_shape) 34 | -------------------------------------------------------------------------------- /extensions-builtin/Lora/network_hada.py: -------------------------------------------------------------------------------- 1 | import lyco_helpers 2 | import network 3 | 4 | 5 | class ModuleTypeHada(network.ModuleType): 6 | def create_module(self, net: network.Network, weights: network.NetworkWeights): 7 | if all(x in weights.w for x in ["hada_w1_a", "hada_w1_b", "hada_w2_a", "hada_w2_b"]): 8 | return NetworkModuleHada(net, weights) 9 | 10 | return None 11 | 12 | 13 | class NetworkModuleHada(network.NetworkModule): 14 | def __init__(self, net: network.Network, weights: network.NetworkWeights): 15 | super().__init__(net, weights) 16 | 17 | if hasattr(self.sd_module, 'weight'): 18 | self.shape = self.sd_module.weight.shape 19 | 20 | self.w1a = weights.w["hada_w1_a"] 21 | self.w1b = weights.w["hada_w1_b"] 22 | self.dim = self.w1b.shape[0] 23 | self.w2a = weights.w["hada_w2_a"] 24 | self.w2b = weights.w["hada_w2_b"] 25 | 26 | self.t1 = weights.w.get("hada_t1") 27 | self.t2 = weights.w.get("hada_t2") 28 | 29 | def calc_updown(self, orig_weight): 30 | w1a = self.w1a.to(orig_weight.device, dtype=orig_weight.dtype) 31 | w1b = self.w1b.to(orig_weight.device, dtype=orig_weight.dtype) 32 | w2a = self.w2a.to(orig_weight.device, dtype=orig_weight.dtype) 33 | w2b = self.w2b.to(orig_weight.device, dtype=orig_weight.dtype) 34 | 35 | output_shape = [w1a.size(0), w1b.size(1)] 36 | 37 | if self.t1 is not None: 38 | output_shape = [w1a.size(1), w1b.size(1)] 39 | t1 = self.t1.to(orig_weight.device, dtype=orig_weight.dtype) 40 | updown1 = lyco_helpers.make_weight_cp(t1, w1a, w1b) 41 | output_shape += t1.shape[2:] 42 | else: 43 | if len(w1b.shape) == 4: 44 | output_shape += w1b.shape[2:] 45 | updown1 = lyco_helpers.rebuild_conventional(w1a, w1b, output_shape) 46 | 47 | if self.t2 is not None: 48 |
t2 = self.t2.to(orig_weight.device, dtype=orig_weight.dtype) 49 | updown2 = lyco_helpers.make_weight_cp(t2, w2a, w2b) 50 | else: 51 | updown2 = lyco_helpers.rebuild_conventional(w2a, w2b, output_shape) 52 | 53 | updown = updown1 * updown2 54 | 55 | return self.finalize_updown(updown, orig_weight, output_shape) 56 | -------------------------------------------------------------------------------- /extensions-builtin/Lora/network_ia3.py: -------------------------------------------------------------------------------- 1 | import network 2 | 3 | 4 | class ModuleTypeIa3(network.ModuleType): 5 | def create_module(self, net: network.Network, weights: network.NetworkWeights): 6 | if all(x in weights.w for x in ["weight"]): 7 | return NetworkModuleIa3(net, weights) 8 | 9 | return None 10 | 11 | 12 | class NetworkModuleIa3(network.NetworkModule): 13 | def __init__(self, net: network.Network, weights: network.NetworkWeights): 14 | super().__init__(net, weights) 15 | 16 | self.w = weights.w["weight"] 17 | self.on_input = weights.w["on_input"].item() 18 | 19 | def calc_updown(self, orig_weight): 20 | w = self.w.to(orig_weight.device, dtype=orig_weight.dtype) 21 | 22 | output_shape = [w.size(0), orig_weight.size(1)] 23 | if self.on_input: 24 | output_shape.reverse() 25 | else: 26 | w = w.reshape(-1, 1) 27 | 28 | updown = orig_weight * w 29 | 30 | return self.finalize_updown(updown, orig_weight, output_shape) 31 | -------------------------------------------------------------------------------- /extensions-builtin/Lora/network_lokr.py: -------------------------------------------------------------------------------- 1 | import torch 2 | 3 | import lyco_helpers 4 | import network 5 | 6 | 7 | class ModuleTypeLokr(network.ModuleType): 8 | def create_module(self, net: network.Network, weights: network.NetworkWeights): 9 | has_1 = "lokr_w1" in weights.w or ("lokr_w1_a" in weights.w and "lokr_w1_b" in weights.w) 10 | has_2 = "lokr_w2" in weights.w or ("lokr_w2_a" in weights.w and "lokr_w2_b" in weights.w) 11 | if has_1 and has_2: 12 | return NetworkModuleLokr(net, weights) 13 | 14 | return None 15 | 16 | 17 | def make_kron(orig_shape, w1, w2): 18 | if len(w2.shape) == 4: 19 | w1 = w1.unsqueeze(2).unsqueeze(2) 20 | w2 = w2.contiguous() 21 | return torch.kron(w1, w2).reshape(orig_shape) 22 | 23 | 24 | class NetworkModuleLokr(network.NetworkModule): 25 | def __init__(self, net: network.Network, weights: network.NetworkWeights): 26 | super().__init__(net, weights) 27 | 28 | self.w1 = weights.w.get("lokr_w1") 29 | self.w1a = weights.w.get("lokr_w1_a") 30 | self.w1b = weights.w.get("lokr_w1_b") 31 | self.dim = self.w1b.shape[0] if self.w1b is not None else self.dim 32 | self.w2 = weights.w.get("lokr_w2") 33 | self.w2a = weights.w.get("lokr_w2_a") 34 | self.w2b = weights.w.get("lokr_w2_b") 35 | self.dim = self.w2b.shape[0] if self.w2b is not None else self.dim 36 | self.t2 = weights.w.get("lokr_t2") 37 | 38 | def calc_updown(self, orig_weight): 39 | if self.w1 is not None: 40 | w1 = self.w1.to(orig_weight.device, dtype=orig_weight.dtype) 41 | else: 42 | w1a = self.w1a.to(orig_weight.device, dtype=orig_weight.dtype) 43 | w1b = self.w1b.to(orig_weight.device, dtype=orig_weight.dtype) 44 | w1 = w1a @ w1b 45 | 46 | if self.w2 is not None: 47 | w2 = self.w2.to(orig_weight.device, dtype=orig_weight.dtype) 48 | elif self.t2 is None: 49 | w2a = self.w2a.to(orig_weight.device, dtype=orig_weight.dtype) 50 | w2b = self.w2b.to(orig_weight.device, dtype=orig_weight.dtype) 51 | w2 = w2a @ w2b 52 | else: 53 | t2 = 
self.t2.to(orig_weight.device, dtype=orig_weight.dtype) 54 | w2a = self.w2a.to(orig_weight.device, dtype=orig_weight.dtype) 55 | w2b = self.w2b.to(orig_weight.device, dtype=orig_weight.dtype) 56 | w2 = lyco_helpers.make_weight_cp(t2, w2a, w2b) 57 | 58 | output_shape = [w1.size(0) * w2.size(0), w1.size(1) * w2.size(1)] 59 | if len(orig_weight.shape) == 4: 60 | output_shape = orig_weight.shape 61 | 62 | updown = make_kron(output_shape, w1, w2) 63 | 64 | return self.finalize_updown(updown, orig_weight, output_shape) 65 | -------------------------------------------------------------------------------- /extensions-builtin/Lora/network_norm.py: -------------------------------------------------------------------------------- 1 | import network 2 | 3 | 4 | class ModuleTypeNorm(network.ModuleType): 5 | def create_module(self, net: network.Network, weights: network.NetworkWeights): 6 | if all(x in weights.w for x in ["w_norm", "b_norm"]): 7 | return NetworkModuleNorm(net, weights) 8 | 9 | return None 10 | 11 | 12 | class NetworkModuleNorm(network.NetworkModule): 13 | def __init__(self, net: network.Network, weights: network.NetworkWeights): 14 | super().__init__(net, weights) 15 | 16 | self.w_norm = weights.w.get("w_norm") 17 | self.b_norm = weights.w.get("b_norm") 18 | 19 | def calc_updown(self, orig_weight): 20 | output_shape = self.w_norm.shape 21 | updown = self.w_norm.to(orig_weight.device, dtype=orig_weight.dtype) 22 | 23 | if self.b_norm is not None: 24 | ex_bias = self.b_norm.to(orig_weight.device, dtype=orig_weight.dtype) 25 | else: 26 | ex_bias = None 27 | 28 | return self.finalize_updown(updown, orig_weight, output_shape, ex_bias) 29 | -------------------------------------------------------------------------------- /extensions-builtin/Lora/preload.py: -------------------------------------------------------------------------------- 1 | import os 2 | from modules import paths 3 | 4 | 5 | def preload(parser): 6 | parser.add_argument("--lora-dir", type=str, help="Path to directory with Lora networks.", default=os.path.join(paths.models_path, 'Lora')) 7 | parser.add_argument("--lyco-dir-backcompat", type=str, help="Path to directory with LyCORIS networks (for backwards compatibility; can also use --lyco-dir).", default=os.path.join(paths.models_path, 'LyCORIS')) 8 | -------------------------------------------------------------------------------- /extensions-builtin/ScuNET/preload.py: -------------------------------------------------------------------------------- 1 | import os 2 | from modules import paths 3 | 4 | 5 | def preload(parser): 6 | parser.add_argument("--scunet-models-path", type=str, help="Path to directory with ScuNET model file(s).", default=os.path.join(paths.models_path, 'ScuNET')) 7 | -------------------------------------------------------------------------------- /extensions-builtin/SwinIR/preload.py: -------------------------------------------------------------------------------- 1 | import os 2 | from modules import paths 3 | 4 | 5 | def preload(parser): 6 | parser.add_argument("--swinir-models-path", type=str, help="Path to directory with SwinIR model file(s).", default=os.path.join(paths.models_path, 'SwinIR')) 7 | --------------------------------------------------------------------------------
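All of these preload.py files follow the same contract: before command-line arguments are parsed, the webui loads each extension's preload module and hands it the shared argparse parser so the extension can register its own flags. A simplified sketch of that driver loop (the real version lives in modules/script_loading.py and adds error handling and timing):

import os

from modules import script_loading

def preload_extensions(extensions_dir, parser):
    if not os.path.isdir(extensions_dir):
        return
    for dirname in sorted(os.listdir(extensions_dir)):
        preload_script = os.path.join(extensions_dir, dirname, "preload.py")
        if not os.path.isfile(preload_script):
            continue
        # load preload.py as a module and let it add its arguments
        module = script_loading.load_module(preload_script)
        if hasattr(module, 'preload'):
            module.preload(parser)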
/extensions-builtin/canvas-zoom-and-pan/scripts/hotkey_config.py: -------------------------------------------------------------------------------- 1 | import gradio as gr 2 | from modules import shared 3 | 4 | shared.options_templates.update(shared.options_section(('canvas_hotkey', "Canvas Hotkeys"), { 5 | "canvas_hotkey_zoom": shared.OptionInfo("Alt", "Zoom canvas", gr.Radio, {"choices": ["Shift","Ctrl", "Alt"]}).info("If you choose 'Shift', you cannot scroll horizontally; 'Alt' can cause a little trouble in Firefox"), 6 | "canvas_hotkey_adjust": shared.OptionInfo("Ctrl", "Adjust brush size", gr.Radio, {"choices": ["Shift","Ctrl", "Alt"]}).info("If you choose 'Shift', you cannot scroll horizontally; 'Alt' can cause a little trouble in Firefox"), 7 | "canvas_hotkey_move": shared.OptionInfo("F", "Moving the canvas").info("To work correctly in Firefox, turn off 'Automatically search the page text when typing' in the browser settings"), 8 | "canvas_hotkey_fullscreen": shared.OptionInfo("S", "Fullscreen mode: maximizes the picture so that it fits the screen and stretches it to full width"), 9 | "canvas_hotkey_reset": shared.OptionInfo("R", "Reset zoom and canvas position"), 10 | "canvas_hotkey_overlap": shared.OptionInfo("O", "Toggle overlap").info("Technical button, needed for testing"), 11 | "canvas_show_tooltip": shared.OptionInfo(True, "Enable tooltip on the canvas"), 12 | "canvas_auto_expand": shared.OptionInfo(True, "Automatically expands an image that does not fit completely in the canvas area, similar to manually pressing the S and R buttons"), 13 | "canvas_blur_prompt": shared.OptionInfo(False, "Take the focus off the prompt when working with a canvas"), 14 | "canvas_disabled_functions": shared.OptionInfo(["Overlap"], "Disable functions that you don't use", gr.CheckboxGroup, {"choices": ["Zoom","Adjust brush size", "Moving canvas","Fullscreen","Reset Zoom","Overlap"]}), 15 | })) 16 | -------------------------------------------------------------------------------- /extensions-builtin/canvas-zoom-and-pan/style.css: -------------------------------------------------------------------------------- 1 | .canvas-tooltip-info { 2 | position: absolute; 3 | top: 10px; 4 | left: 10px; 5 | cursor: help; 6 | background-color: rgba(0, 0, 0, 0.3); 7 | width: 20px; 8 | height: 20px; 9 | border-radius: 50%; 10 | display: flex; 11 | align-items: center; 12 | justify-content: center; 13 | flex-direction: column; 14 | 15 | z-index: 100; 16 | } 17 | 18 | .canvas-tooltip-info::after { 19 | content: ''; 20 | display: block; 21 | width: 2px; 22 | height: 7px; 23 | background-color: white; 24 | margin-top: 2px; 25 | } 26 | 27 | .canvas-tooltip-info::before { 28 | content: ''; 29 | display: block; 30 | width: 2px; 31 | height: 2px; 32 | background-color: white; 33 | } 34 | 35 | .canvas-tooltip-content { 36 | display: none; 37 | background-color: #f9f9f9; 38 | color: #333; 39 | border: 1px solid #ddd; 40 | padding: 15px; 41 | position: absolute; 42 | top: 40px; 43 | left: 10px; 44 | width: 250px; 45 | font-size: 16px; 46 | opacity: 0; 47 | border-radius: 8px; 48 | box-shadow: 0px 8px 16px 0px rgba(0,0,0,0.2); 49 | 50 | z-index: 100; 51 | } 52 | 53 | .canvas-tooltip:hover .canvas-tooltip-content { 54 | display: block; 55 | animation: fadeIn 0.5s; 56 | opacity: 1; 57 | } 58 | 59 | @keyframes fadeIn { 60 | from {opacity: 0;} 61 | to {opacity: 1;} 62 | } 63 | 64 | .styler { 65 | overflow:inherit !important; 66 | } -------------------------------------------------------------------------------- /extensions-builtin/hypertile/scripts/hypertile_xyz.py: -------------------------------------------------------------------------------- 1 | from modules import scripts 2 | from modules.shared import opts 3 | 4 | xyz_grid =
[x for x in scripts.scripts_data if x.script_class.__module__ == "xyz_grid.py"][0].module 5 | 6 | def int_applier(value_name:str, min_range:int = -1, max_range:int = -1): 7 | """ 8 | Returns a function that applies the given value to the given value_name in opts.data. 9 | """ 10 | def validate(value_name:str, value:str): 11 | value = int(value) 12 | # validate value 13 | if not min_range == -1: 14 | assert value >= min_range, f"Value {value} for {value_name} must be greater than or equal to {min_range}" 15 | if not max_range == -1: 16 | assert value <= max_range, f"Value {value} for {value_name} must be less than or equal to {max_range}" 17 | def apply_int(p, x, xs): 18 | validate(value_name, x) 19 | opts.data[value_name] = int(x) 20 | return apply_int 21 | 22 | def bool_applier(value_name:str): 23 | """ 24 | Returns a function that applies the given value to the given value_name in opts.data. 25 | """ 26 | def validate(value_name:str, value:str): 27 | assert value.lower() in ["true", "false"], f"Value {value} for {value_name} must be either true or false" 28 | def apply_bool(p, x, xs): 29 | validate(value_name, x) 30 | value_boolean = x.lower() == "true" 31 | opts.data[value_name] = value_boolean 32 | return apply_bool 33 | 34 | def add_axis_options(): 35 | extra_axis_options = [ 36 | xyz_grid.AxisOption("[Hypertile] Unet First pass Enabled", str, bool_applier("hypertile_enable_unet"), choices=xyz_grid.boolean_choice(reverse=True)), 37 | xyz_grid.AxisOption("[Hypertile] Unet Second pass Enabled", str, bool_applier("hypertile_enable_unet_secondpass"), choices=xyz_grid.boolean_choice(reverse=True)), 38 | xyz_grid.AxisOption("[Hypertile] Unet Max Depth", int, int_applier("hypertile_max_depth_unet", 0, 3), choices=lambda: [str(x) for x in range(4)]), 39 | xyz_grid.AxisOption("[Hypertile] Unet Max Tile Size", int, int_applier("hypertile_max_tile_unet", 0, 512)), 40 | xyz_grid.AxisOption("[Hypertile] Unet Swap Size", int, int_applier("hypertile_swap_size_unet", 0, 64)), 41 | xyz_grid.AxisOption("[Hypertile] VAE Enabled", str, bool_applier("hypertile_enable_vae"), choices=xyz_grid.boolean_choice(reverse=True)), 42 | xyz_grid.AxisOption("[Hypertile] VAE Max Depth", int, int_applier("hypertile_max_depth_vae", 0, 3), choices=lambda: [str(x) for x in range(4)]), 43 | xyz_grid.AxisOption("[Hypertile] VAE Max Tile Size", int, int_applier("hypertile_max_tile_vae", 0, 512)), 44 | xyz_grid.AxisOption("[Hypertile] VAE Swap Size", int, int_applier("hypertile_swap_size_vae", 0, 64)), 45 | ] 46 | set_a = {opt.label for opt in xyz_grid.axis_options} 47 | set_b = {opt.label for opt in extra_axis_options} 48 | if set_a.intersection(set_b): 49 | return 50 | 51 | xyz_grid.axis_options.extend(extra_axis_options) 52 | -------------------------------------------------------------------------------- /extensions-builtin/mobile/javascript/mobile.js: -------------------------------------------------------------------------------- 1 | var isSetupForMobile = false; 2 | 3 | function isMobile() { 4 | for (var tab of ["txt2img", "img2img"]) { 5 | var imageTab = gradioApp().getElementById(tab + '_results'); 6 | if (imageTab && imageTab.offsetParent && imageTab.offsetLeft == 0) { 7 | return true; 8 | } 9 | } 10 | 11 | return false; 12 | } 13 | 14 | function reportWindowSize() { 15 | if (gradioApp().querySelector('.toprow-compact-tools')) return; // not applicable for compact prompt layout 16 | 17 | var currentlyMobile = isMobile(); 18 | if (currentlyMobile == isSetupForMobile) return; 19 | isSetupForMobile = currentlyMobile; 
20 | 21 | for (var tab of ["txt2img", "img2img"]) { 22 | var button = gradioApp().getElementById(tab + '_generate_box'); 23 | var target = gradioApp().getElementById(currentlyMobile ? tab + '_results' : tab + '_actions_column'); 24 | target.insertBefore(button, target.firstElementChild); 25 | 26 | gradioApp().getElementById(tab + '_results').classList.toggle('mobile', currentlyMobile); 27 | } 28 | } 29 | 30 | window.addEventListener("resize", reportWindowSize); 31 | 32 | onUiLoaded(function() { 33 | reportWindowSize(); 34 | }); 35 | -------------------------------------------------------------------------------- /extensions-builtin/prompt-bracket-checker/javascript/prompt-bracket-checker.js: -------------------------------------------------------------------------------- 1 | // Stable Diffusion WebUI - Bracket checker 2 | // By Hingashi no Florin/Bwin4L & @akx 3 | // Counts open and closed brackets (round, square, curly) in the prompt and negative prompt text boxes in the txt2img and img2img tabs. 4 | // If there's a mismatch, the keyword counter turns red and if you hover on it, a tooltip tells you what's wrong. 5 | 6 | function checkBrackets(textArea, counterElt) { 7 | var counts = {}; 8 | (textArea.value.match(/[(){}[\]]/g) || []).forEach(bracket => { 9 | counts[bracket] = (counts[bracket] || 0) + 1; 10 | }); 11 | var errors = []; 12 | 13 | function checkPair(open, close, kind) { 14 | if (counts[open] !== counts[close]) { 15 | errors.push( 16 | `${open}...${close} - Detected ${counts[open] || 0} opening and ${counts[close] || 0} closing ${kind}.` 17 | ); 18 | } 19 | } 20 | 21 | checkPair('(', ')', 'round brackets'); 22 | checkPair('[', ']', 'square brackets'); 23 | checkPair('{', '}', 'curly brackets'); 24 | counterElt.title = errors.join('\n'); 25 | counterElt.classList.toggle('error', errors.length !== 0); 26 | } 27 | 28 | function setupBracketChecking(id_prompt, id_counter) { 29 | var textarea = gradioApp().querySelector("#" + id_prompt + " > label > textarea"); 30 | var counter = gradioApp().getElementById(id_counter); 31 | 32 | if (textarea && counter) { 33 | textarea.addEventListener("input", () => checkBrackets(textarea, counter)); 34 | } 35 | } 36 | 37 | onUiLoaded(function() { 38 | setupBracketChecking('txt2img_prompt', 'txt2img_token_counter'); 39 | setupBracketChecking('txt2img_neg_prompt', 'txt2img_negative_token_counter'); 40 | setupBracketChecking('img2img_prompt', 'img2img_token_counter'); 41 | setupBracketChecking('img2img_neg_prompt', 'img2img_negative_token_counter'); 42 | }); 43 | -------------------------------------------------------------------------------- /extensions/put extensions here.txt: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/philz1337x/clarity-upscaler/cfbdb842c6c839c8e97741722b8cf9a4b7662d98/extensions/put extensions here.txt -------------------------------------------------------------------------------- /html/card-no-preview.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/philz1337x/clarity-upscaler/cfbdb842c6c839c8e97741722b8cf9a4b7662d98/html/card-no-preview.png -------------------------------------------------------------------------------- /html/extra-networks-card.html: -------------------------------------------------------------------------------- 1 |
<div class="card" style="{style}" onclick="{card_clicked}" data-name="{name}" {sort_keys}> 2 | {background_image} 3 | <div class="button-row"> 4 | {metadata_button} 5 | {edit_button} 6 | </div> 7 | <div class="actions"> 8 | <div class="additional"> 9 | <span style="display:none" class='search_term{search_only}'>{search_term}</span> 10 | </div> 11 | <span class="name">{name}</span> 12 | <span class="description">{description}</span> 13 | </div> 14 | </div> 15 | -------------------------------------------------------------------------------- /html/extra-networks-no-cards.html: -------------------------------------------------------------------------------- 1 | <div class='nocards'> 2 | <h1>Nothing here. Add some content to the following directories:</h1> 3 | 4 | <ul> 5 | {dirs} 6 | </ul> 7 | </div> 8 | 9 | -------------------------------------------------------------------------------- /html/footer.html: -------------------------------------------------------------------------------- 1 | <div> 2 | <a href="{api_docs}">API</a> 3 |  •  4 | <a href="https://github.com/AUTOMATIC1111/stable-diffusion-webui">Github</a> 5 |  •  6 | <a href="https://gradio.app">Gradio</a> 7 |  •  8 | <a href="#" onclick="showProfile('./internal/profile-startup'); return false;">Startup profile</a> 9 |  •  10 | <a href="/" onclick="javascript:gradioApp().getElementById('settings_restart_gradio').click(); return false">Reload UI</a> 11 | </div> 12 | <br /> 13 | <div class="versions"> 14 | {versions} 15 | </div>
16 | -------------------------------------------------------------------------------- /init.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/philz1337x/clarity-upscaler/cfbdb842c6c839c8e97741722b8cf9a4b7662d98/init.png -------------------------------------------------------------------------------- /init.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | echo "Init Stable Diffusion Environment" 3 | sudo cog run python /src/init_env.py 4 | echo "Install Requirements" 5 | sudo cog run pip install --no-cache-dir -r requirements.txt 6 | echo "Download Models" 7 | command2 8 | -------------------------------------------------------------------------------- /init_env.py: -------------------------------------------------------------------------------- 1 | import sys 2 | #sys.path.extend(['/stable-diffusion-webui']) 3 | 4 | #from modules import timer 5 | from modules import launch_utils 6 | 7 | with launch_utils.startup_timer.subcategory("prepare environment"): 8 | launch_utils.prepare_environment() -------------------------------------------------------------------------------- /javascript/edit-order.js: -------------------------------------------------------------------------------- 1 | /* alt+left/right moves text in prompt */ 2 | 3 | function keyupEditOrder(event) { 4 | if (!opts.keyedit_move) return; 5 | 6 | let target = event.originalTarget || event.composedPath()[0]; 7 | if (!target.matches("*:is([id*='_toprow'] [id*='_prompt'], .prompt) textarea")) return; 8 | if (!event.altKey) return; 9 | 10 | let isLeft = event.key == "ArrowLeft"; 11 | let isRight = event.key == "ArrowRight"; 12 | if (!isLeft && !isRight) return; 13 | event.preventDefault(); 14 | 15 | let selectionStart = target.selectionStart; 16 | let selectionEnd = target.selectionEnd; 17 | let text = target.value; 18 | let items = text.split(","); 19 | let indexStart = (text.slice(0, selectionStart).match(/,/g) || []).length; 20 | let indexEnd = (text.slice(0, selectionEnd).match(/,/g) || []).length; 21 | let range = indexEnd - indexStart + 1; 22 | 23 | if (isLeft && indexStart > 0) { 24 | items.splice(indexStart - 1, 0, ...items.splice(indexStart, range)); 25 | target.value = items.join(); 26 | target.selectionStart = items.slice(0, indexStart - 1).join().length + (indexStart == 1 ? 
0 : 1); 27 | target.selectionEnd = items.slice(0, indexEnd).join().length; 28 | } else if (isRight && indexEnd < items.length - 1) { 29 | items.splice(indexStart + 1, 0, ...items.splice(indexStart, range)); 30 | target.value = items.join(); 31 | target.selectionStart = items.slice(0, indexStart + 1).join().length + 1; 32 | target.selectionEnd = items.slice(0, indexEnd + 2).join().length; 33 | } 34 | 35 | event.preventDefault(); 36 | updateInput(target); 37 | } 38 | 39 | addEventListener('keydown', (event) => { 40 | keyupEditOrder(event); 41 | }); 42 | -------------------------------------------------------------------------------- /javascript/extensions.js: -------------------------------------------------------------------------------- 1 | 2 | function extensions_apply(_disabled_list, _update_list, disable_all) { 3 | var disable = []; 4 | var update = []; 5 | 6 | gradioApp().querySelectorAll('#extensions input[type="checkbox"]').forEach(function(x) { 7 | if (x.name.startsWith("enable_") && !x.checked) { 8 | disable.push(x.name.substring(7)); 9 | } 10 | 11 | if (x.name.startsWith("update_") && x.checked) { 12 | update.push(x.name.substring(7)); 13 | } 14 | }); 15 | 16 | restart_reload(); 17 | 18 | return [JSON.stringify(disable), JSON.stringify(update), disable_all]; 19 | } 20 | 21 | function extensions_check() { 22 | var disable = []; 23 | 24 | gradioApp().querySelectorAll('#extensions input[type="checkbox"]').forEach(function(x) { 25 | if (x.name.startsWith("enable_") && !x.checked) { 26 | disable.push(x.name.substring(7)); 27 | } 28 | }); 29 | 30 | gradioApp().querySelectorAll('#extensions .extension_status').forEach(function(x) { 31 | x.innerHTML = "Loading..."; 32 | }); 33 | 34 | 35 | var id = randomId(); 36 | requestProgress(id, gradioApp().getElementById('extensions_installed_html'), null, function() { 37 | 38 | }); 39 | 40 | return [id, JSON.stringify(disable)]; 41 | } 42 | 43 | function install_extension_from_index(button, url) { 44 | button.disabled = "disabled"; 45 | button.value = "Installing..."; 46 | 47 | var textarea = gradioApp().querySelector('#extension_to_install textarea'); 48 | textarea.value = url; 49 | updateInput(textarea); 50 | 51 | gradioApp().querySelector('#install_extension_button').click(); 52 | } 53 | 54 | function config_state_confirm_restore(_, config_state_name, config_restore_type) { 55 | if (config_state_name == "Current") { 56 | return [false, config_state_name, config_restore_type]; 57 | } 58 | let restored = ""; 59 | if (config_restore_type == "extensions") { 60 | restored = "all saved extension versions"; 61 | } else if (config_restore_type == "webui") { 62 | restored = "the webui version"; 63 | } else { 64 | restored = "the webui version and all saved extension versions"; 65 | } 66 | let confirmed = confirm("Are you sure you want to restore from this state?\nThis will reset " + restored + "."); 67 | if (confirmed) { 68 | restart_reload(); 69 | gradioApp().querySelectorAll('#extensions .extension_status').forEach(function(x) { 70 | x.innerHTML = "Loading..."; 71 | }); 72 | } 73 | return [confirmed, config_state_name, config_restore_type]; 74 | } 75 | 76 | function toggle_all_extensions(event) { 77 | gradioApp().querySelectorAll('#extensions .extension_toggle').forEach(function(checkbox_el) { 78 | checkbox_el.checked = event.target.checked; 79 | }); 80 | } 81 | 82 | function toggle_extension() { 83 | let all_extensions_toggled = true; 84 | for (const checkbox_el of gradioApp().querySelectorAll('#extensions .extension_toggle')) { 85 | if 
(!checkbox_el.checked) { 86 | all_extensions_toggled = false; 87 | break; 88 | } 89 | } 90 | 91 | gradioApp().querySelector('#extensions .all_extensions_toggle').checked = all_extensions_toggled; 92 | } 93 | -------------------------------------------------------------------------------- /javascript/generationParams.js: -------------------------------------------------------------------------------- 1 | // attaches listeners to the txt2img and img2img galleries to update displayed generation param text when the image changes 2 | 3 | let txt2img_gallery, img2img_gallery, modal = undefined; 4 | onAfterUiUpdate(function() { 5 | if (!txt2img_gallery) { 6 | txt2img_gallery = attachGalleryListeners("txt2img"); 7 | } 8 | if (!img2img_gallery) { 9 | img2img_gallery = attachGalleryListeners("img2img"); 10 | } 11 | if (!modal) { 12 | modal = gradioApp().getElementById('lightboxModal'); 13 | modalObserver.observe(modal, {attributes: true, attributeFilter: ['style']}); 14 | } 15 | }); 16 | 17 | let modalObserver = new MutationObserver(function(mutations) { 18 | mutations.forEach(function(mutationRecord) { 19 | let selectedTab = gradioApp().querySelector('#tabs div button.selected')?.innerText; 20 | if (mutationRecord.target.style.display === 'none' && (selectedTab === 'txt2img' || selectedTab === 'img2img')) { 21 | gradioApp().getElementById(selectedTab + "_generation_info_button")?.click(); 22 | } 23 | }); 24 | }); 25 | 26 | function attachGalleryListeners(tab_name) { 27 | var gallery = gradioApp().querySelector('#' + tab_name + '_gallery'); 28 | gallery?.addEventListener('click', () => gradioApp().getElementById(tab_name + "_generation_info_button").click()); 29 | gallery?.addEventListener('keydown', (e) => { 30 | if (e.keyCode == 37 || e.keyCode == 39) { // left or right arrow 31 | gradioApp().getElementById(tab_name + "_generation_info_button").click(); 32 | } 33 | }); 34 | return gallery; 35 | } 36 | -------------------------------------------------------------------------------- /javascript/hires_fix.js: -------------------------------------------------------------------------------- 1 | 2 | function onCalcResolutionHires(enable, width, height, hr_scale, hr_resize_x, hr_resize_y) { 3 | function setInactive(elem, inactive) { 4 | elem.classList.toggle('inactive', !!inactive); 5 | } 6 | 7 | var hrUpscaleBy = gradioApp().getElementById('txt2img_hr_scale'); 8 | var hrResizeX = gradioApp().getElementById('txt2img_hr_resize_x'); 9 | var hrResizeY = gradioApp().getElementById('txt2img_hr_resize_y'); 10 | 11 | gradioApp().getElementById('txt2img_hires_fix_row2').style.display = opts.use_old_hires_fix_width_height ? 
"none" : ""; 12 | 13 | setInactive(hrUpscaleBy, opts.use_old_hires_fix_width_height || hr_resize_x > 0 || hr_resize_y > 0); 14 | setInactive(hrResizeX, opts.use_old_hires_fix_width_height || hr_resize_x == 0); 15 | setInactive(hrResizeY, opts.use_old_hires_fix_width_height || hr_resize_y == 0); 16 | 17 | return [enable, width, height, hr_scale, hr_resize_x, hr_resize_y]; 18 | } 19 | -------------------------------------------------------------------------------- /javascript/imageMaskFix.js: -------------------------------------------------------------------------------- 1 | /** 2 | * temporary fix for https://github.com/AUTOMATIC1111/stable-diffusion-webui/issues/668 3 | * @see https://github.com/gradio-app/gradio/issues/1721 4 | */ 5 | function imageMaskResize() { 6 | const canvases = gradioApp().querySelectorAll('#img2maskimg .touch-none canvas'); 7 | if (!canvases.length) { 8 | window.removeEventListener('resize', imageMaskResize); 9 | return; 10 | } 11 | 12 | const wrapper = canvases[0].closest('.touch-none'); 13 | const previewImage = wrapper.previousElementSibling; 14 | 15 | if (!previewImage.complete) { 16 | previewImage.addEventListener('load', imageMaskResize); 17 | return; 18 | } 19 | 20 | const w = previewImage.width; 21 | const h = previewImage.height; 22 | const nw = previewImage.naturalWidth; 23 | const nh = previewImage.naturalHeight; 24 | const portrait = nh > nw; 25 | 26 | const wW = Math.min(w, portrait ? h / nh * nw : w / nw * nw); 27 | const wH = Math.min(h, portrait ? h / nh * nh : w / nw * nh); 28 | 29 | wrapper.style.width = `${wW}px`; 30 | wrapper.style.height = `${wH}px`; 31 | wrapper.style.left = `0px`; 32 | wrapper.style.top = `0px`; 33 | 34 | canvases.forEach(c => { 35 | c.style.width = c.style.height = ''; 36 | c.style.maxWidth = '100%'; 37 | c.style.maxHeight = '100%'; 38 | c.style.objectFit = 'contain'; 39 | }); 40 | } 41 | 42 | onAfterUiUpdate(imageMaskResize); 43 | window.addEventListener('resize', imageMaskResize); 44 | -------------------------------------------------------------------------------- /javascript/imageviewerGamepad.js: -------------------------------------------------------------------------------- 1 | let gamepads = []; 2 | 3 | window.addEventListener('gamepadconnected', (e) => { 4 | const index = e.gamepad.index; 5 | let isWaiting = false; 6 | gamepads[index] = setInterval(async() => { 7 | if (!opts.js_modal_lightbox_gamepad || isWaiting) return; 8 | const gamepad = navigator.getGamepads()[index]; 9 | const xValue = gamepad.axes[0]; 10 | if (xValue <= -0.3) { 11 | modalPrevImage(e); 12 | isWaiting = true; 13 | } else if (xValue >= 0.3) { 14 | modalNextImage(e); 15 | isWaiting = true; 16 | } 17 | if (isWaiting) { 18 | await sleepUntil(() => { 19 | const xValue = navigator.getGamepads()[index].axes[0]; 20 | if (xValue < 0.3 && xValue > -0.3) { 21 | return true; 22 | } 23 | }, opts.js_modal_lightbox_gamepad_repeat); 24 | isWaiting = false; 25 | } 26 | }, 10); 27 | }); 28 | 29 | window.addEventListener('gamepaddisconnected', (e) => { 30 | clearInterval(gamepads[e.gamepad.index]); 31 | }); 32 | 33 | /* 34 | Primarily for vr controller type pointer devices. 35 | I use the wheel event because there's currently no way to do it properly with web xr. 
36 | */ 37 | let isScrolling = false; 38 | window.addEventListener('wheel', (e) => { 39 | if (!opts.js_modal_lightbox_gamepad || isScrolling) return; 40 | isScrolling = true; 41 | 42 | if (e.deltaX <= -0.6) { 43 | modalPrevImage(e); 44 | } else if (e.deltaX >= 0.6) { 45 | modalNextImage(e); 46 | } 47 | 48 | setTimeout(() => { 49 | isScrolling = false; 50 | }, opts.js_modal_lightbox_gamepad_repeat); 51 | }); 52 | 53 | function sleepUntil(f, timeout) { 54 | return new Promise((resolve) => { 55 | const timeStart = new Date(); 56 | const wait = setInterval(function() { 57 | if (f() || new Date() - timeStart > timeout) { 58 | clearInterval(wait); 59 | resolve(); 60 | } 61 | }, 20); 62 | }); 63 | } 64 | -------------------------------------------------------------------------------- /javascript/inputAccordion.js: -------------------------------------------------------------------------------- 1 | function inputAccordionChecked(id, checked) { 2 | var accordion = gradioApp().getElementById(id); 3 | accordion.visibleCheckbox.checked = checked; 4 | accordion.onVisibleCheckboxChange(); 5 | } 6 | 7 | function setupAccordion(accordion) { 8 | var labelWrap = accordion.querySelector('.label-wrap'); 9 | var gradioCheckbox = gradioApp().querySelector('#' + accordion.id + "-checkbox input"); 10 | var extra = gradioApp().querySelector('#' + accordion.id + "-extra"); 11 | var span = labelWrap.querySelector('span'); 12 | var linked = true; 13 | 14 | var isOpen = function() { 15 | return labelWrap.classList.contains('open'); 16 | }; 17 | 18 | var observerAccordionOpen = new MutationObserver(function(mutations) { 19 | mutations.forEach(function(mutationRecord) { 20 | accordion.classList.toggle('input-accordion-open', isOpen()); 21 | 22 | if (linked) { 23 | accordion.visibleCheckbox.checked = isOpen(); 24 | accordion.onVisibleCheckboxChange(); 25 | } 26 | }); 27 | }); 28 | observerAccordionOpen.observe(labelWrap, {attributes: true, attributeFilter: ['class']}); 29 | 30 | if (extra) { 31 | labelWrap.insertBefore(extra, labelWrap.lastElementChild); 32 | } 33 | 34 | accordion.onChecked = function(checked) { 35 | if (isOpen() != checked) { 36 | labelWrap.click(); 37 | } 38 | }; 39 | 40 | var visibleCheckbox = document.createElement('INPUT'); 41 | visibleCheckbox.type = 'checkbox'; 42 | visibleCheckbox.checked = isOpen(); 43 | visibleCheckbox.id = accordion.id + "-visible-checkbox"; 44 | visibleCheckbox.className = gradioCheckbox.className + " input-accordion-checkbox"; 45 | span.insertBefore(visibleCheckbox, span.firstChild); 46 | 47 | accordion.visibleCheckbox = visibleCheckbox; 48 | accordion.onVisibleCheckboxChange = function() { 49 | if (linked && isOpen() != visibleCheckbox.checked) { 50 | labelWrap.click(); 51 | } 52 | 53 | gradioCheckbox.checked = visibleCheckbox.checked; 54 | updateInput(gradioCheckbox); 55 | }; 56 | 57 | visibleCheckbox.addEventListener('click', function(event) { 58 | linked = false; 59 | event.stopPropagation(); 60 | }); 61 | visibleCheckbox.addEventListener('input', accordion.onVisibleCheckboxChange); 62 | } 63 | 64 | onUiLoaded(function() { 65 | for (var accordion of gradioApp().querySelectorAll('.input-accordion')) { 66 | setupAccordion(accordion); 67 | } 68 | }); 69 | -------------------------------------------------------------------------------- /javascript/localStorage.js: -------------------------------------------------------------------------------- 1 | 2 | function localSet(k, v) { 3 | try { 4 | localStorage.setItem(k, v); 5 | } catch (e) { 6 | console.warn(`Failed to save ${k} 
to localStorage: ${e}`); 7 | } 8 | } 9 | 10 | function localGet(k, def) { 11 | try { 12 | return localStorage.getItem(k); 13 | } catch (e) { 14 | console.warn(`Failed to load ${k} from localStorage: ${e}`); 15 | } 16 | 17 | return def; 18 | } 19 | 20 | function localRemove(k) { 21 | try { 22 | return localStorage.removeItem(k); 23 | } catch (e) { 24 | console.warn(`Failed to remove ${k} from localStorage: ${e}`); 25 | } 26 | } 27 | -------------------------------------------------------------------------------- /javascript/notification.js: -------------------------------------------------------------------------------- 1 | // Monitors the gallery and sends a browser notification when the leading image is new. 2 | 3 | let lastHeadImg = null; 4 | 5 | let notificationButton = null; 6 | 7 | onAfterUiUpdate(function() { 8 | if (notificationButton == null) { 9 | notificationButton = gradioApp().getElementById('request_notifications'); 10 | 11 | if (notificationButton != null) { 12 | notificationButton.addEventListener('click', () => { 13 | void Notification.requestPermission(); 14 | }, true); 15 | } 16 | } 17 | 18 | const galleryPreviews = gradioApp().querySelectorAll('div[id^="tab_"] div[id$="_results"] .thumbnail-item > img'); 19 | 20 | if (galleryPreviews == null) return; 21 | 22 | const headImg = galleryPreviews[0]?.src; 23 | 24 | if (headImg == null || headImg == lastHeadImg) return; 25 | 26 | lastHeadImg = headImg; 27 | 28 | // play notification sound if available 29 | const notificationAudio = gradioApp().querySelector('#audio_notification audio'); 30 | if (notificationAudio) { 31 | notificationAudio.volume = opts.notification_volume / 100.0 || 1.0; 32 | notificationAudio.play(); 33 | } 34 | 35 | if (document.hasFocus()) return; 36 | 37 | // Multiple copies of the images are in the DOM when one is selected. Dedup with a Set to get the real number generated. 38 | const imgs = new Set(Array.from(galleryPreviews).map(img => img.src)); 39 | 40 | const notification = new Notification( 41 | 'Stable Diffusion', 42 | { 43 | body: `Generated ${imgs.size > 1 ? imgs.size - opts.return_grid : 1} image${imgs.size > 1 ? 
's' : ''}`, 44 | icon: headImg, 45 | image: headImg, 46 | } 47 | ); 48 | 49 | notification.onclick = function(_) { 50 | parent.focus(); 51 | this.close(); 52 | }; 53 | }); 54 | -------------------------------------------------------------------------------- /javascript/settings.js: -------------------------------------------------------------------------------- 1 | let settingsExcludeTabsFromShowAll = { 2 | settings_tab_defaults: 1, 3 | settings_tab_sysinfo: 1, 4 | settings_tab_actions: 1, 5 | settings_tab_licenses: 1, 6 | }; 7 | 8 | function settingsShowAllTabs() { 9 | gradioApp().querySelectorAll('#settings > div').forEach(function(elem) { 10 | if (settingsExcludeTabsFromShowAll[elem.id]) return; 11 | 12 | elem.style.display = "block"; 13 | }); 14 | } 15 | 16 | function settingsShowOneTab() { 17 | gradioApp().querySelector('#settings_show_one_page').click(); 18 | } 19 | 20 | onUiLoaded(function() { 21 | var edit = gradioApp().querySelector('#settings_search'); 22 | var editTextarea = gradioApp().querySelector('#settings_search > label > input'); 23 | var buttonShowAllPages = gradioApp().getElementById('settings_show_all_pages'); 24 | var settings_tabs = gradioApp().querySelector('#settings div'); 25 | 26 | onEdit('settingsSearch', editTextarea, 250, function() { 27 | var searchText = (editTextarea.value || "").trim().toLowerCase(); 28 | 29 | gradioApp().querySelectorAll('#settings > div[id^=settings_] div[id^=column_settings_] > *').forEach(function(elem) { 30 | var visible = elem.textContent.trim().toLowerCase().indexOf(searchText) != -1; 31 | elem.style.display = visible ? "" : "none"; 32 | }); 33 | 34 | if (searchText != "") { 35 | settingsShowAllTabs(); 36 | } else { 37 | settingsShowOneTab(); 38 | } 39 | }); 40 | 41 | settings_tabs.insertBefore(edit, settings_tabs.firstChild); 42 | settings_tabs.appendChild(buttonShowAllPages); 43 | 44 | 45 | buttonShowAllPages.addEventListener("click", settingsShowAllTabs); 46 | }); 47 | 48 | 49 | onOptionsChanged(function() { 50 | if (gradioApp().querySelector('#settings .settings-category')) return; 51 | 52 | var sectionMap = {}; 53 | gradioApp().querySelectorAll('#settings > div > button').forEach(function(x) { 54 | sectionMap[x.textContent.trim()] = x; 55 | }); 56 | 57 | opts._categories.forEach(function(x) { 58 | var section = x[0]; 59 | var category = x[1]; 60 | 61 | var span = document.createElement('SPAN'); 62 | span.textContent = category; 63 | span.className = 'settings-category'; 64 | 65 | var sectionElem = sectionMap[section]; 66 | if (!sectionElem) return; 67 | 68 | sectionElem.parentElement.insertBefore(span, sectionElem); 69 | }); 70 | }); 71 | 72 | -------------------------------------------------------------------------------- /javascript/textualInversion.js: -------------------------------------------------------------------------------- 1 | 2 | 3 | 4 | function start_training_textual_inversion() { 5 | gradioApp().querySelector('#ti_error').innerHTML = ''; 6 | 7 | var id = randomId(); 8 | requestProgress(id, gradioApp().getElementById('ti_output'), gradioApp().getElementById('ti_gallery'), function() {}, function(progress) { 9 | gradioApp().getElementById('ti_progress').innerHTML = progress.textinfo; 10 | }); 11 | 12 | var res = Array.from(arguments); 13 | 14 | res[0] = id; 15 | 16 | return res; 17 | } 18 | -------------------------------------------------------------------------------- /javascript/token-counters.js: -------------------------------------------------------------------------------- 1 | let 
promptTokenCountUpdateFunctions = {}; 2 | 3 | function update_txt2img_tokens(...args) { 4 | // Called from Gradio 5 | update_token_counter("txt2img_token_button"); 6 | update_token_counter("txt2img_negative_token_button"); 7 | if (args.length == 2) { 8 | return args[0]; 9 | } 10 | return args; 11 | } 12 | 13 | function update_img2img_tokens(...args) { 14 | // Called from Gradio 15 | update_token_counter("img2img_token_button"); 16 | update_token_counter("img2img_negative_token_button"); 17 | if (args.length == 2) { 18 | return args[0]; 19 | } 20 | return args; 21 | } 22 | 23 | function update_token_counter(button_id) { 24 | promptTokenCountUpdateFunctions[button_id]?.(); 25 | } 26 | 27 | 28 | function recalculatePromptTokens(name) { 29 | promptTokenCountUpdateFunctions[name]?.(); 30 | } 31 | 32 | function recalculate_prompts_txt2img() { 33 | // Called from Gradio 34 | recalculatePromptTokens('txt2img_prompt'); 35 | recalculatePromptTokens('txt2img_neg_prompt'); 36 | return Array.from(arguments); 37 | } 38 | 39 | function recalculate_prompts_img2img() { 40 | // Called from Gradio 41 | recalculatePromptTokens('img2img_prompt'); 42 | recalculatePromptTokens('img2img_neg_prompt'); 43 | return Array.from(arguments); 44 | } 45 | 46 | function setupTokenCounting(id, id_counter, id_button) { 47 | var prompt = gradioApp().getElementById(id); 48 | var counter = gradioApp().getElementById(id_counter); 49 | var textarea = gradioApp().querySelector(`#${id} > label > textarea`); 50 | 51 | if (opts.disable_token_counters) { 52 | counter.style.display = "none"; 53 | return; 54 | } 55 | 56 | if (counter.parentElement == prompt.parentElement) { 57 | return; 58 | } 59 | 60 | prompt.parentElement.insertBefore(counter, prompt); 61 | prompt.parentElement.style.position = "relative"; 62 | 63 | var func = onEdit(id, textarea, 800, function() { 64 | gradioApp().getElementById(id_button)?.click(); 65 | }); 66 | promptTokenCountUpdateFunctions[id] = func; 67 | promptTokenCountUpdateFunctions[id_button] = func; 68 | } 69 | 70 | function setupTokenCounters() { 71 | setupTokenCounting('txt2img_prompt', 'txt2img_token_counter', 'txt2img_token_button'); 72 | setupTokenCounting('txt2img_neg_prompt', 'txt2img_negative_token_counter', 'txt2img_negative_token_button'); 73 | setupTokenCounting('img2img_prompt', 'img2img_token_counter', 'img2img_token_button'); 74 | setupTokenCounting('img2img_neg_prompt', 'img2img_negative_token_counter', 'img2img_negative_token_button'); 75 | } 76 | -------------------------------------------------------------------------------- /javascript/ui_settings_hints.js: -------------------------------------------------------------------------------- 1 | // various hints and extra info for the settings tab 2 | 3 | var settingsHintsSetup = false; 4 | 5 | onOptionsChanged(function() { 6 | if (settingsHintsSetup) return; 7 | settingsHintsSetup = true; 8 | 9 | gradioApp().querySelectorAll('#settings [id^=setting_]').forEach(function(div) { 10 | var name = div.id.substr(8); 11 | var commentBefore = opts._comments_before[name]; 12 | var commentAfter = opts._comments_after[name]; 13 | 14 | if (!commentBefore && !commentAfter) return; 15 | 16 | var span = null; 17 | if (div.classList.contains('gradio-checkbox')) span = div.querySelector('label span'); 18 | else if (div.classList.contains('gradio-checkboxgroup')) span = div.querySelector('span').firstChild; 19 | else if (div.classList.contains('gradio-radio')) span = div.querySelector('span').firstChild; 20 | else span = div.querySelector('label 
span').firstChild; 21 | 22 | if (!span) return; 23 | 24 | if (commentBefore) { 25 | var comment = document.createElement('DIV'); 26 | comment.className = 'settings-comment'; 27 | comment.innerHTML = commentBefore; 28 | span.parentElement.insertBefore(document.createTextNode('\xa0'), span); 29 | span.parentElement.insertBefore(comment, span); 30 | span.parentElement.insertBefore(document.createTextNode('\xa0'), span); 31 | } 32 | if (commentAfter) { 33 | comment = document.createElement('DIV'); 34 | comment.className = 'settings-comment'; 35 | comment.innerHTML = commentAfter; 36 | span.parentElement.insertBefore(comment, span.nextSibling); 37 | span.parentElement.insertBefore(document.createTextNode('\xa0'), span.nextSibling); 38 | } 39 | }); 40 | }); 41 | 42 | function settingsHintsShowQuicksettings() { 43 | requestGet("./internal/quicksettings-hint", {}, function(data) { 44 | var table = document.createElement('table'); 45 | table.className = 'popup-table'; 46 | 47 | data.forEach(function(obj) { 48 | var tr = document.createElement('tr'); 49 | var td = document.createElement('td'); 50 | td.textContent = obj.name; 51 | tr.appendChild(td); 52 | 53 | td = document.createElement('td'); 54 | td.textContent = obj.label; 55 | tr.appendChild(td); 56 | 57 | table.appendChild(tr); 58 | }); 59 | 60 | popup(table); 61 | }); 62 | } 63 | -------------------------------------------------------------------------------- /launch.py: -------------------------------------------------------------------------------- 1 | from modules import launch_utils 2 | 3 | args = launch_utils.args 4 | python = launch_utils.python 5 | git = launch_utils.git 6 | index_url = launch_utils.index_url 7 | dir_repos = launch_utils.dir_repos 8 | 9 | commit_hash = launch_utils.commit_hash 10 | git_tag = launch_utils.git_tag 11 | 12 | run = launch_utils.run 13 | is_installed = launch_utils.is_installed 14 | repo_dir = launch_utils.repo_dir 15 | 16 | run_pip = launch_utils.run_pip 17 | check_run_python = launch_utils.check_run_python 18 | git_clone = launch_utils.git_clone 19 | git_pull_recursive = launch_utils.git_pull_recursive 20 | list_extensions = launch_utils.list_extensions 21 | run_extension_installer = launch_utils.run_extension_installer 22 | prepare_environment = launch_utils.prepare_environment 23 | configure_for_tests = launch_utils.configure_for_tests 24 | start = launch_utils.start 25 | 26 | 27 | def main(): 28 | if args.dump_sysinfo: 29 | filename = launch_utils.dump_sysinfo() 30 | 31 | print(f"Sysinfo saved as {filename}. 
Exiting...") 32 | 33 | exit(0) 34 | 35 | launch_utils.startup_timer.record("initial startup") 36 | 37 | with launch_utils.startup_timer.subcategory("prepare environment"): 38 | if not args.skip_prepare_environment: 39 | prepare_environment() 40 | 41 | if args.test_server: 42 | configure_for_tests() 43 | 44 | start() 45 | 46 | 47 | if __name__ == "__main__": 48 | main() 49 | -------------------------------------------------------------------------------- /localizations/Put localization files here.txt: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/philz1337x/clarity-upscaler/cfbdb842c6c839c8e97741722b8cf9a4b7662d98/localizations/Put localization files here.txt -------------------------------------------------------------------------------- /models/Lora/more_details.safetensors: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/philz1337x/clarity-upscaler/cfbdb842c6c839c8e97741722b8cf9a4b7662d98/models/Lora/more_details.safetensors -------------------------------------------------------------------------------- /models/Stable-diffusion/Put Stable Diffusion checkpoints here.txt: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/philz1337x/clarity-upscaler/cfbdb842c6c839c8e97741722b8cf9a4b7662d98/models/Stable-diffusion/Put Stable Diffusion checkpoints here.txt -------------------------------------------------------------------------------- /models/VAE-approx/model.pt: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/philz1337x/clarity-upscaler/cfbdb842c6c839c8e97741722b8cf9a4b7662d98/models/VAE-approx/model.pt -------------------------------------------------------------------------------- /models/VAE/Put VAE here.txt: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/philz1337x/clarity-upscaler/cfbdb842c6c839c8e97741722b8cf9a4b7662d98/models/VAE/Put VAE here.txt -------------------------------------------------------------------------------- /models/deepbooru/Put your deepbooru release project folder here.txt: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/philz1337x/clarity-upscaler/cfbdb842c6c839c8e97741722b8cf9a4b7662d98/models/deepbooru/Put your deepbooru release project folder here.txt -------------------------------------------------------------------------------- /models/karlo/ViT-L-14_stats.th: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/philz1337x/clarity-upscaler/cfbdb842c6c839c8e97741722b8cf9a4b7662d98/models/karlo/ViT-L-14_stats.th -------------------------------------------------------------------------------- /modules.zip: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/philz1337x/clarity-upscaler/cfbdb842c6c839c8e97741722b8cf9a4b7662d98/modules.zip -------------------------------------------------------------------------------- /modules/Roboto-Regular.ttf: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/philz1337x/clarity-upscaler/cfbdb842c6c839c8e97741722b8cf9a4b7662d98/modules/Roboto-Regular.ttf -------------------------------------------------------------------------------- 
/modules/deepbooru.py: -------------------------------------------------------------------------------- 1 | import os 2 | import re 3 | 4 | import torch 5 | import numpy as np 6 | 7 | from modules import modelloader, paths, deepbooru_model, devices, images, shared 8 | 9 | re_special = re.compile(r'([\\()])') 10 | 11 | 12 | class DeepDanbooru: 13 | def __init__(self): 14 | self.model = None 15 | 16 | def load(self): 17 | if self.model is not None: 18 | return 19 | 20 | files = modelloader.load_models( 21 | model_path=os.path.join(paths.models_path, "torch_deepdanbooru"), 22 | model_url='https://github.com/AUTOMATIC1111/TorchDeepDanbooru/releases/download/v1/model-resnet_custom_v3.pt', 23 | ext_filter=[".pt"], 24 | download_name='model-resnet_custom_v3.pt', 25 | ) 26 | 27 | self.model = deepbooru_model.DeepDanbooruModel() 28 | self.model.load_state_dict(torch.load(files[0], map_location="cpu")) 29 | 30 | self.model.eval() 31 | self.model.to(devices.cpu, devices.dtype) 32 | 33 | def start(self): 34 | self.load() 35 | self.model.to(devices.device) 36 | 37 | def stop(self): 38 | if not shared.opts.interrogate_keep_models_in_memory: 39 | self.model.to(devices.cpu) 40 | devices.torch_gc() 41 | 42 | def tag(self, pil_image): 43 | self.start() 44 | res = self.tag_multi(pil_image) 45 | self.stop() 46 | 47 | return res 48 | 49 | def tag_multi(self, pil_image, force_disable_ranks=False): 50 | threshold = shared.opts.interrogate_deepbooru_score_threshold 51 | use_spaces = shared.opts.deepbooru_use_spaces 52 | use_escape = shared.opts.deepbooru_escape 53 | alpha_sort = shared.opts.deepbooru_sort_alpha 54 | include_ranks = shared.opts.interrogate_return_ranks and not force_disable_ranks 55 | 56 | pic = images.resize_image(2, pil_image.convert("RGB"), 512, 512) 57 | a = np.expand_dims(np.array(pic, dtype=np.float32), 0) / 255 58 | 59 | with torch.no_grad(), devices.autocast(): 60 | x = torch.from_numpy(a).to(devices.device) 61 | y = self.model(x)[0].detach().cpu().numpy() 62 | 63 | probability_dict = {} 64 | 65 | for tag, probability in zip(self.model.tags, y): 66 | if probability < threshold: 67 | continue 68 | 69 | if tag.startswith("rating:"): 70 | continue 71 | 72 | probability_dict[tag] = probability 73 | 74 | if alpha_sort: 75 | tags = sorted(probability_dict) 76 | else: 77 | tags = [tag for tag, _ in sorted(probability_dict.items(), key=lambda x: -x[1])] 78 | 79 | res = [] 80 | 81 | filtertags = {x.strip().replace(' ', '_') for x in shared.opts.deepbooru_filter_tags.split(",")} 82 | 83 | for tag in [x for x in tags if x not in filtertags]: 84 | probability = probability_dict[tag] 85 | tag_outformat = tag 86 | if use_spaces: 87 | tag_outformat = tag_outformat.replace('_', ' ') 88 | if use_escape: 89 | tag_outformat = re.sub(re_special, r'\\\1', tag_outformat) 90 | if include_ranks: 91 | tag_outformat = f"({tag_outformat}:{probability:.3f})" 92 | 93 | res.append(tag_outformat) 94 | 95 | return ", ".join(res) 96 | 97 | 98 | model = DeepDanbooru() 99 | -------------------------------------------------------------------------------- /modules/extra_networks_hypernet.py: -------------------------------------------------------------------------------- 1 | from modules import extra_networks, shared 2 | from modules.hypernetworks import hypernetwork 3 | 4 | 5 | class ExtraNetworkHypernet(extra_networks.ExtraNetwork): 6 | def __init__(self): 7 | super().__init__('hypernet') 8 | 9 | def activate(self, p, params_list): 10 | additional = shared.opts.sd_hypernetwork 11 | 12 | if additional != "None" and 
additional in shared.hypernetworks and not any(x for x in params_list if x.items[0] == additional): 13 | hypernet_prompt_text = f"<hypernet:{additional}:{shared.opts.extra_networks_default_multiplier}>" 14 | p.all_prompts = [f"{prompt}{hypernet_prompt_text}" for prompt in p.all_prompts] 15 | params_list.append(extra_networks.ExtraNetworkParams(items=[additional, shared.opts.extra_networks_default_multiplier])) 16 | 17 | names = [] 18 | multipliers = [] 19 | for params in params_list: 20 | assert params.items 21 | 22 | names.append(params.items[0]) 23 | multipliers.append(float(params.items[1]) if len(params.items) > 1 else 1.0) 24 | 25 | hypernetwork.load_hypernetworks(names, multipliers) 26 | 27 | def deactivate(self, p): 28 | pass 29 | -------------------------------------------------------------------------------- /modules/face_restoration.py: -------------------------------------------------------------------------------- 1 | from modules import shared 2 | 3 | 4 | class FaceRestoration: 5 | def name(self): 6 | return "None" 7 | 8 | def restore(self, np_image): 9 | return np_image 10 | 11 | 12 | def restore_faces(np_image): 13 | face_restorers = [x for x in shared.face_restorers if x.name() == shared.opts.face_restoration_model or shared.opts.face_restoration_model is None] 14 | if len(face_restorers) == 0: 15 | return np_image 16 | 17 | face_restorer = face_restorers[0] 18 | 19 | return face_restorer.restore(np_image) 20 | -------------------------------------------------------------------------------- /modules/fifo_lock.py: -------------------------------------------------------------------------------- 1 | import threading 2 | import collections 3 | 4 | 5 | # reference: https://gist.github.com/vitaliyp/6d54dd76ca2c3cdfc1149d33007dc34a 6 | class FIFOLock(object): 7 | def __init__(self): 8 | self._lock = threading.Lock() 9 | self._inner_lock = threading.Lock() 10 | self._pending_threads = collections.deque() 11 | 12 | def acquire(self, blocking=True): 13 | with self._inner_lock: 14 | lock_acquired = self._lock.acquire(False) 15 | if lock_acquired: 16 | return True 17 | elif not blocking: 18 | return False 19 | 20 | release_event = threading.Event() 21 | self._pending_threads.append(release_event) 22 | 23 | release_event.wait() 24 | return self._lock.acquire() 25 | 26 | def release(self): 27 | with self._inner_lock: 28 | if self._pending_threads: 29 | release_event = self._pending_threads.popleft() 30 | release_event.set() 31 | 32 | self._lock.release() 33 | 34 | __enter__ = acquire 35 | 36 | def __exit__(self, t, v, tb): 37 | self.release() 38 | -------------------------------------------------------------------------------- /modules/gitpython_hack.py: -------------------------------------------------------------------------------- 1 | from __future__ import annotations 2 | 3 | import io 4 | import subprocess 5 | 6 | import git 7 | 8 | 9 | class Git(git.Git): 10 | """ 11 | Git subclassed to never use persistent processes.
12 | """ 13 | 14 | def _get_persistent_cmd(self, attr_name, cmd_name, *args, **kwargs): 15 | raise NotImplementedError(f"Refusing to use persistent process: {attr_name} ({cmd_name} {args} {kwargs})") 16 | 17 | def get_object_header(self, ref: str | bytes) -> tuple[str, str, int]: 18 | ret = subprocess.check_output( 19 | [self.GIT_PYTHON_GIT_EXECUTABLE, "cat-file", "--batch-check"], 20 | input=self._prepare_ref(ref), 21 | cwd=self._working_dir, 22 | timeout=2, 23 | ) 24 | return self._parse_object_header(ret) 25 | 26 | def stream_object_data(self, ref: str) -> tuple[str, str, int, Git.CatFileContentStream]: 27 | # Not really streaming, per se; this buffers the entire object in memory. 28 | # Shouldn't be a problem for our use case, since we're only using this for 29 | # object headers (commit objects). 30 | ret = subprocess.check_output( 31 | [self.GIT_PYTHON_GIT_EXECUTABLE, "cat-file", "--batch"], 32 | input=self._prepare_ref(ref), 33 | cwd=self._working_dir, 34 | timeout=30, 35 | ) 36 | bio = io.BytesIO(ret) 37 | hexsha, typename, size = self._parse_object_header(bio.readline()) 38 | return (hexsha, typename, size, self.CatFileContentStream(size, bio)) 39 | 40 | 41 | class Repo(git.Repo): 42 | GitCommandWrapperType = Git 43 | -------------------------------------------------------------------------------- /modules/gradio_extensons.py: -------------------------------------------------------------------------------- 1 | import gradio as gr 2 | 3 | from modules import scripts, ui_tempdir, patches 4 | 5 | 6 | def add_classes_to_gradio_component(comp): 7 | """ 8 | this adds gradio-* to the component for css styling (ie gradio-button to gr.Button), as well as some others 9 | """ 10 | 11 | comp.elem_classes = [f"gradio-{comp.get_block_name()}", *(comp.elem_classes or [])] 12 | 13 | if getattr(comp, 'multiselect', False): 14 | comp.elem_classes.append('multiselect') 15 | 16 | 17 | def IOComponent_init(self, *args, **kwargs): 18 | self.webui_tooltip = kwargs.pop('tooltip', None) 19 | 20 | if scripts.scripts_current is not None: 21 | scripts.scripts_current.before_component(self, **kwargs) 22 | 23 | scripts.script_callbacks.before_component_callback(self, **kwargs) 24 | 25 | res = original_IOComponent_init(self, *args, **kwargs) 26 | 27 | add_classes_to_gradio_component(self) 28 | 29 | scripts.script_callbacks.after_component_callback(self, **kwargs) 30 | 31 | if scripts.scripts_current is not None: 32 | scripts.scripts_current.after_component(self, **kwargs) 33 | 34 | return res 35 | 36 | 37 | def Block_get_config(self): 38 | config = original_Block_get_config(self) 39 | 40 | webui_tooltip = getattr(self, 'webui_tooltip', None) 41 | if webui_tooltip: 42 | config["webui_tooltip"] = webui_tooltip 43 | 44 | config.pop('example_inputs', None) 45 | 46 | return config 47 | 48 | 49 | def BlockContext_init(self, *args, **kwargs): 50 | if scripts.scripts_current is not None: 51 | scripts.scripts_current.before_component(self, **kwargs) 52 | 53 | scripts.script_callbacks.before_component_callback(self, **kwargs) 54 | 55 | res = original_BlockContext_init(self, *args, **kwargs) 56 | 57 | add_classes_to_gradio_component(self) 58 | 59 | scripts.script_callbacks.after_component_callback(self, **kwargs) 60 | 61 | if scripts.scripts_current is not None: 62 | scripts.scripts_current.after_component(self, **kwargs) 63 | 64 | return res 65 | 66 | 67 | def Blocks_get_config_file(self, *args, **kwargs): 68 | config = original_Blocks_get_config_file(self, *args, **kwargs) 69 | 70 | for comp_config in 
config["components"]: 71 | if "example_inputs" in comp_config: 72 | comp_config["example_inputs"] = {"serialized": []} 73 | 74 | return config 75 | 76 | 77 | original_IOComponent_init = patches.patch(__name__, obj=gr.components.IOComponent, field="__init__", replacement=IOComponent_init) 78 | original_Block_get_config = patches.patch(__name__, obj=gr.blocks.Block, field="get_config", replacement=Block_get_config) 79 | original_BlockContext_init = patches.patch(__name__, obj=gr.blocks.BlockContext, field="__init__", replacement=BlockContext_init) 80 | original_Blocks_get_config_file = patches.patch(__name__, obj=gr.blocks.Blocks, field="get_config_file", replacement=Blocks_get_config_file) 81 | 82 | 83 | ui_tempdir.install_ui_tempdir_override() 84 | -------------------------------------------------------------------------------- /modules/hashes.py: -------------------------------------------------------------------------------- 1 | import hashlib 2 | import os.path 3 | 4 | from modules import shared 5 | import modules.cache 6 | 7 | dump_cache = modules.cache.dump_cache 8 | cache = modules.cache.cache 9 | 10 | 11 | def calculate_sha256(filename): 12 | hash_sha256 = hashlib.sha256() 13 | blksize = 1024 * 1024 14 | 15 | with open(filename, "rb") as f: 16 | for chunk in iter(lambda: f.read(blksize), b""): 17 | hash_sha256.update(chunk) 18 | 19 | return hash_sha256.hexdigest() 20 | 21 | 22 | def sha256_from_cache(filename, title, use_addnet_hash=False): 23 | hashes = cache("hashes-addnet") if use_addnet_hash else cache("hashes") 24 | ondisk_mtime = os.path.getmtime(filename) 25 | 26 | if title not in hashes: 27 | return None 28 | 29 | cached_sha256 = hashes[title].get("sha256", None) 30 | cached_mtime = hashes[title].get("mtime", 0) 31 | 32 | if ondisk_mtime > cached_mtime or cached_sha256 is None: 33 | return None 34 | 35 | return cached_sha256 36 | 37 | 38 | def sha256(filename, title, use_addnet_hash=False): 39 | hashes = cache("hashes-addnet") if use_addnet_hash else cache("hashes") 40 | 41 | sha256_value = sha256_from_cache(filename, title, use_addnet_hash) 42 | if sha256_value is not None: 43 | return sha256_value 44 | 45 | if shared.cmd_opts.no_hashing: 46 | return None 47 | 48 | print(f"Calculating sha256 for {filename}: ", end='') 49 | if use_addnet_hash: 50 | with open(filename, "rb") as file: 51 | sha256_value = addnet_hash_safetensors(file) 52 | else: 53 | sha256_value = calculate_sha256(filename) 54 | print(f"{sha256_value}") 55 | 56 | hashes[title] = { 57 | "mtime": os.path.getmtime(filename), 58 | "sha256": sha256_value, 59 | } 60 | 61 | dump_cache() 62 | 63 | return sha256_value 64 | 65 | 66 | def addnet_hash_safetensors(b): 67 | """kohya-ss hash for safetensors from https://github.com/kohya-ss/sd-scripts/blob/main/library/train_util.py""" 68 | hash_sha256 = hashlib.sha256() 69 | blksize = 1024 * 1024 70 | 71 | b.seek(0) 72 | header = b.read(8) 73 | n = int.from_bytes(header, "little") 74 | 75 | offset = n + 8 76 | b.seek(offset) 77 | for chunk in iter(lambda: b.read(blksize), b""): 78 | hash_sha256.update(chunk) 79 | 80 | return hash_sha256.hexdigest() 81 | 82 | -------------------------------------------------------------------------------- /modules/hypernetworks/ui.py: -------------------------------------------------------------------------------- 1 | import html 2 | 3 | import gradio as gr 4 | import modules.hypernetworks.hypernetwork 5 | from modules import devices, sd_hijack, shared 6 | 7 | not_available = ["hardswish", "multiheadattention"] 8 | keys = [x for x in 
modules.hypernetworks.hypernetwork.HypernetworkModule.activation_dict if x not in not_available] 9 | 10 | 11 | def create_hypernetwork(name, enable_sizes, overwrite_old, layer_structure=None, activation_func=None, weight_init=None, add_layer_norm=False, use_dropout=False, dropout_structure=None): 12 | filename = modules.hypernetworks.hypernetwork.create_hypernetwork(name, enable_sizes, overwrite_old, layer_structure, activation_func, weight_init, add_layer_norm, use_dropout, dropout_structure) 13 | 14 | return gr.Dropdown.update(choices=sorted(shared.hypernetworks)), f"Created: {filename}", "" 15 | 16 | 17 | def train_hypernetwork(*args): 18 | shared.loaded_hypernetworks = [] 19 | 20 | assert not shared.cmd_opts.lowvram, 'Training models with lowvram is not possible' 21 | 22 | try: 23 | sd_hijack.undo_optimizations() 24 | 25 | hypernetwork, filename = modules.hypernetworks.hypernetwork.train_hypernetwork(*args) 26 | 27 | res = f""" 28 | Training {'interrupted' if shared.state.interrupted else 'finished'} at {hypernetwork.step} steps. 29 | Hypernetwork saved to <b>{html.escape(filename)}</b> 30 | """ 31 | return res, "" 32 | except Exception: 33 | raise 34 | finally: 35 | shared.sd_model.cond_stage_model.to(devices.device) 36 | shared.sd_model.first_stage_model.to(devices.device) 37 | sd_hijack.apply_optimizations() 38 | 39 | -------------------------------------------------------------------------------- /modules/import_hook.py: -------------------------------------------------------------------------------- 1 | import sys 2 | 3 | # this will break any attempt to import xformers, which will prevent the stable diffusion repo from trying to use it 4 | if "--xformers" not in "".join(sys.argv): 5 | print("import_hook.py tried to disable xformers, but it was not requested. Ignoring") 6 | #sys.modules["xformers"] = None 7 | 8 | # Hack to fix a changed import in torchvision 0.17+, which otherwise breaks 9 | # basicsr; see https://github.com/AUTOMATIC1111/stable-diffusion-webui/issues/13985 10 | try: 11 | import torchvision.transforms.functional_tensor # noqa: F401 12 | except ImportError: 13 | try: 14 | import torchvision.transforms.functional as functional 15 | sys.modules["torchvision.transforms.functional_tensor"] = functional 16 | except ImportError: 17 | pass # shrug...
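# (With this alias in place, legacy imports such as "from torchvision.transforms.functional_tensor import rgb_to_grayscale" -- which basicsr still performs -- resolve against torchvision.transforms.functional, where the function lives in torchvision 0.17+.)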
18 | -------------------------------------------------------------------------------- /modules/localization.py: -------------------------------------------------------------------------------- 1 | import json 2 | import os 3 | 4 | from modules import errors, scripts 5 | 6 | localizations = {} 7 | 8 | 9 | def list_localizations(dirname): 10 | localizations.clear() 11 | 12 | for file in os.listdir(dirname): 13 | fn, ext = os.path.splitext(file) 14 | if ext.lower() != ".json": 15 | continue 16 | 17 | localizations[fn] = [os.path.join(dirname, file)] 18 | 19 | for file in scripts.list_scripts("localizations", ".json"): 20 | fn, ext = os.path.splitext(file.filename) 21 | if fn not in localizations: 22 | localizations[fn] = [] 23 | localizations[fn].append(file.path) 24 | 25 | 26 | def localization_js(current_localization_name: str) -> str: 27 | fns = localizations.get(current_localization_name, None) 28 | data = {} 29 | if fns is not None: 30 | for fn in fns: 31 | try: 32 | with open(fn, "r", encoding="utf8") as file: 33 | data.update(json.load(file)) 34 | except Exception: 35 | errors.report(f"Error loading localization from {fn}", exc_info=True) 36 | 37 | return f"window.localization = {json.dumps(data)}" 38 | -------------------------------------------------------------------------------- /modules/logging_config.py: -------------------------------------------------------------------------------- 1 | import os 2 | import logging 3 | 4 | try: 5 | from tqdm.auto import tqdm 6 | 7 | class TqdmLoggingHandler(logging.Handler): 8 | def __init__(self, level=logging.INFO): 9 | super().__init__(level) 10 | 11 | def emit(self, record): 12 | try: 13 | msg = self.format(record) 14 | tqdm.write(msg) 15 | self.flush() 16 | except Exception: 17 | self.handleError(record) 18 | 19 | TQDM_IMPORTED = True 20 | except ImportError: 21 | # tqdm does not exist before first launch 22 | # I will import it once the UI finishes setting up the environment and reloads.
23 | TQDM_IMPORTED = False 24 | 25 | def setup_logging(loglevel): 26 | if loglevel is None: 27 | loglevel = os.environ.get("SD_WEBUI_LOG_LEVEL") 28 | 29 | loghandlers = [] 30 | 31 | if TQDM_IMPORTED: 32 | loghandlers.append(TqdmLoggingHandler()) 33 | 34 | if loglevel: 35 | log_level = getattr(logging, loglevel.upper(), None) or logging.INFO 36 | logging.basicConfig( 37 | level=log_level, 38 | format='%(asctime)s %(levelname)s [%(name)s] %(message)s', 39 | datefmt='%Y-%m-%d %H:%M:%S', 40 | handlers=loghandlers 41 | ) 42 | -------------------------------------------------------------------------------- /modules/memmon.py: -------------------------------------------------------------------------------- 1 | import threading 2 | import time 3 | from collections import defaultdict 4 | 5 | import torch 6 | 7 | 8 | class MemUsageMonitor(threading.Thread): 9 | run_flag = None 10 | device = None 11 | disabled = False 12 | opts = None 13 | data = None 14 | 15 | def __init__(self, name, device, opts): 16 | threading.Thread.__init__(self) 17 | self.name = name 18 | self.device = device 19 | self.opts = opts 20 | 21 | self.daemon = True 22 | self.run_flag = threading.Event() 23 | self.data = defaultdict(int) 24 | 25 | try: 26 | self.cuda_mem_get_info() 27 | torch.cuda.memory_stats(self.device) 28 | except Exception as e: # AMD or whatever 29 | print(f"Warning: caught exception '{e}', memory monitor disabled") 30 | self.disabled = True 31 | 32 | def cuda_mem_get_info(self): 33 | index = self.device.index if self.device.index is not None else torch.cuda.current_device() 34 | return torch.cuda.mem_get_info(index) 35 | 36 | def run(self): 37 | if self.disabled: 38 | return 39 | 40 | while True: 41 | self.run_flag.wait() 42 | 43 | torch.cuda.reset_peak_memory_stats() 44 | self.data.clear() 45 | 46 | if self.opts.memmon_poll_rate <= 0: 47 | self.run_flag.clear() 48 | continue 49 | 50 | self.data["min_free"] = self.cuda_mem_get_info()[0] 51 | 52 | while self.run_flag.is_set(): 53 | free, total = self.cuda_mem_get_info() 54 | self.data["min_free"] = min(self.data["min_free"], free) 55 | 56 | time.sleep(1 / self.opts.memmon_poll_rate) 57 | 58 | def dump_debug(self): 59 | print(self, 'recorded data:') 60 | for k, v in self.read().items(): 61 | print(k, -(v // -(1024 ** 2))) 62 | 63 | print(self, 'raw torch memory stats:') 64 | tm = torch.cuda.memory_stats(self.device) 65 | for k, v in tm.items(): 66 | if 'bytes' not in k: 67 | continue 68 | print('\t' if 'peak' in k else '', k, -(v // -(1024 ** 2))) 69 | 70 | print(torch.cuda.memory_summary()) 71 | 72 | def monitor(self): 73 | self.run_flag.set() 74 | 75 | def read(self): 76 | if not self.disabled: 77 | free, total = self.cuda_mem_get_info() 78 | self.data["free"] = free 79 | self.data["total"] = total 80 | 81 | torch_stats = torch.cuda.memory_stats(self.device) 82 | self.data["active"] = torch_stats["active.all.current"] 83 | self.data["active_peak"] = torch_stats["active_bytes.all.peak"] 84 | self.data["reserved"] = torch_stats["reserved_bytes.all.current"] 85 | self.data["reserved_peak"] = torch_stats["reserved_bytes.all.peak"] 86 | self.data["system_peak"] = total - self.data["min_free"] 87 | 88 | return self.data 89 | 90 | def stop(self): 91 | self.run_flag.clear() 92 | return self.read() 93 | -------------------------------------------------------------------------------- /modules/models/diffusion/uni_pc/__init__.py: -------------------------------------------------------------------------------- 1 | from .sampler import UniPCSampler # noqa: F401 2 | 
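# Usage sketch (illustrative only; model, cond and uc are assumed to be supplied by the surrounding webui code, which wraps a CompVis-style latent diffusion model). UniPCSampler mirrors the DDIMSampler interface:
#     sampler = UniPCSampler(model)
#     samples, _ = sampler.sample(S=20, batch_size=1, shape=(4, 64, 64), conditioning=cond, unconditional_conditioning=uc, unconditional_guidance_scale=7.0)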
-------------------------------------------------------------------------------- /modules/ngrok.py: -------------------------------------------------------------------------------- 1 | import ngrok 2 | 3 | # Connect to ngrok for ingress 4 | def connect(token, port, options): 5 | account = None 6 | if token is None: 7 | token = 'None' 8 | else: 9 | if ':' in token: 10 | # token = authtoken:username:password 11 | token, username, password = token.split(':', 2) 12 | account = f"{username}:{password}" 13 | 14 | # For all options see: https://github.com/ngrok/ngrok-py/blob/main/examples/ngrok-connect-full.py 15 | if not options.get('authtoken_from_env'): 16 | options['authtoken'] = token 17 | if account: 18 | options['basic_auth'] = account 19 | if not options.get('session_metadata'): 20 | options['session_metadata'] = 'stable-diffusion-webui' 21 | 22 | 23 | try: 24 | public_url = ngrok.connect(f"127.0.0.1:{port}", **options).url() 25 | except Exception as e: 26 | print(f'Invalid ngrok authtoken? ngrok connection aborted due to: {e}\n' 27 | f'Your token: {token}, get the right one on https://dashboard.ngrok.com/get-started/your-authtoken') 28 | else: 29 | print(f'ngrok connected to localhost:{port}! URL: {public_url}\n' 30 | 'You can use this link after the launch is complete.') 31 | -------------------------------------------------------------------------------- /modules/patches.py: -------------------------------------------------------------------------------- 1 | from collections import defaultdict 2 | 3 | 4 | def patch(key, obj, field, replacement): 5 | """Replaces a function in a module or a class. 6 | 7 | Also stores the original function in this module, so it can later be retrieved via original(key, obj, field). 8 | If the function is already replaced by this caller (key), an exception is raised -- use undo() before that. 9 | 10 | Arguments: 11 | key: identifying information for who is doing the replacement. You can use __name__. 12 | obj: the module or the class 13 | field: name of the function as a string 14 | replacement: the new function 15 | 16 | Returns: 17 | the original function 18 | """ 19 | 20 | patch_key = (obj, field) 21 | if patch_key in originals[key]: 22 | raise RuntimeError(f"patch for {field} is already applied") 23 | 24 | original_func = getattr(obj, field) 25 | originals[key][patch_key] = original_func 26 | 27 | setattr(obj, field, replacement) 28 | 29 | return original_func 30 | 31 | 32 | def undo(key, obj, field): 33 | """Undoes the replacement made by patch(). 34 | 35 | If the function is not replaced, raises an exception. 36 | 37 | Arguments: 38 | key: identifying information for who is doing the replacement. You can use __name__.
39 | obj: the module or the class 40 | field: name of the function as a string 41 | 42 | Returns: 43 | Always None 44 | """ 45 | 46 | patch_key = (obj, field) 47 | 48 | if patch_key not in originals[key]: 49 | raise RuntimeError(f"there is no patch for {field} to undo") 50 | 51 | original_func = originals[key].pop(patch_key) 52 | setattr(obj, field, original_func) 53 | 54 | return None 55 | 56 | 57 | def original(key, obj, field): 58 | """Returns the original function for the patch created by the patch() function""" 59 | patch_key = (obj, field) 60 | 61 | return originals[key].get(patch_key, None) 62 | 63 | 64 | originals = defaultdict(dict) 65 | -------------------------------------------------------------------------------- /modules/paths.py: -------------------------------------------------------------------------------- 1 | import os 2 | import sys 3 | from modules.paths_internal import models_path, script_path, data_path, extensions_dir, extensions_builtin_dir, cwd # noqa: F401 4 | 5 | import modules.safe # noqa: F401 6 | 7 | 8 | def mute_sdxl_imports(): 9 | """create fake modules that SDXL wants to import but doesn't actually use for our purposes""" 10 | 11 | class Dummy: 12 | pass 13 | 14 | module = Dummy() 15 | module.LPIPS = None 16 | sys.modules['taming.modules.losses.lpips'] = module 17 | 18 | module = Dummy() 19 | module.StableDataModuleFromConfig = None 20 | sys.modules['sgm.data'] = module 21 | 22 | 23 | # data_path = cmd_opts_pre.data 24 | sys.path.insert(0, script_path) 25 | 26 | # search for the stable diffusion directory in the following places 27 | sd_path = None 28 | possible_sd_paths = [os.path.join(script_path, 'repositories/stable-diffusion-stability-ai'), '.', os.path.dirname(script_path)] 29 | for possible_sd_path in possible_sd_paths: 30 | if os.path.exists(os.path.join(possible_sd_path, 'ldm/models/diffusion/ddpm.py')): 31 | sd_path = os.path.abspath(possible_sd_path) 32 | break 33 | 34 | assert sd_path is not None, f"Couldn't find Stable Diffusion in any of: {possible_sd_paths}" 35 | 36 | mute_sdxl_imports() 37 | 38 | path_dirs = [ 39 | (sd_path, 'ldm', 'Stable Diffusion', []), 40 | (os.path.join(sd_path, '../generative-models'), 'sgm', 'Stable Diffusion XL', ["sgm"]), 41 | (os.path.join(sd_path, '../CodeFormer'), 'inference_codeformer.py', 'CodeFormer', []), 42 | (os.path.join(sd_path, '../BLIP'), 'models/blip.py', 'BLIP', []), 43 | (os.path.join(sd_path, '../k-diffusion'), 'k_diffusion/sampling.py', 'k_diffusion', ["atstart"]), 44 | ] 45 | 46 | paths = {} 47 | 48 | for d, must_exist, what, options in path_dirs: 49 | must_exist_path = os.path.abspath(os.path.join(script_path, d, must_exist)) 50 | if not os.path.exists(must_exist_path): 51 | print(f"Warning: {what} not found at path {must_exist_path}", file=sys.stderr) 52 | else: 53 | d = os.path.abspath(d) 54 | if "atstart" in options: 55 | sys.path.insert(0, d) 56 | elif "sgm" in options: 57 | # Stable Diffusion XL repo has scripts dir with __init__.py in it which ruins every extension's scripts dir, so we 58 | # import sgm and remove it from sys.path so that when a script imports scripts.something, it doesn't use sgm's scripts dir.
59 | 60 | sys.path.insert(0, d) 61 | import sgm # noqa: F401 62 | sys.path.pop(0) 63 | else: 64 | sys.path.append(d) 65 | paths[what] = d 66 | -------------------------------------------------------------------------------- /modules/paths_internal.py: -------------------------------------------------------------------------------- 1 | """this module defines internal paths used by program and is safe to import before dependencies are installed in launch.py""" 2 | 3 | import argparse 4 | import os 5 | import sys 6 | import shlex 7 | 8 | commandline_args = os.environ.get('COMMANDLINE_ARGS', "") 9 | sys.argv += shlex.split(commandline_args) 10 | 11 | cwd = os.getcwd() 12 | modules_path = os.path.dirname(os.path.realpath(__file__)) 13 | script_path = os.path.dirname(modules_path) 14 | 15 | sd_configs_path = os.path.join(script_path, "configs") 16 | sd_default_config = os.path.join(sd_configs_path, "v1-inference.yaml") 17 | sd_model_file = os.path.join(script_path, 'model.ckpt') 18 | default_sd_model_file = sd_model_file 19 | 20 | # Parse the --data-dir flag first so we can use it as a base for our other argument default values 21 | parser_pre = argparse.ArgumentParser(add_help=False) 22 | parser_pre.add_argument("--data-dir", type=str, default=os.path.dirname(modules_path), help="base path where all user data is stored", ) 23 | cmd_opts_pre = parser_pre.parse_known_args()[0] 24 | 25 | data_path = cmd_opts_pre.data_dir 26 | 27 | models_path = os.path.join(data_path, "models") 28 | extensions_dir = os.path.join(data_path, "extensions") 29 | extensions_builtin_dir = os.path.join(script_path, "extensions-builtin") 30 | config_states_dir = os.path.join(script_path, "config_states") 31 | 32 | roboto_ttf_file = os.path.join(modules_path, 'Roboto-Regular.ttf') 33 | -------------------------------------------------------------------------------- /modules/processing_scripts/refiner.py: -------------------------------------------------------------------------------- 1 | import gradio as gr 2 | 3 | from modules import scripts, sd_models 4 | from modules.ui_common import create_refresh_button 5 | from modules.ui_components import InputAccordion 6 | 7 | 8 | class ScriptRefiner(scripts.ScriptBuiltinUI): 9 | section = "accordions" 10 | create_group = False 11 | 12 | def __init__(self): 13 | pass 14 | 15 | def title(self): 16 | return "Refiner" 17 | 18 | def show(self, is_img2img): 19 | return scripts.AlwaysVisible 20 | 21 | def ui(self, is_img2img): 22 | with InputAccordion(False, label="Refiner", elem_id=self.elem_id("enable")) as enable_refiner: 23 | with gr.Row(): 24 | refiner_checkpoint = gr.Dropdown(label='Checkpoint', elem_id=self.elem_id("checkpoint"), choices=sd_models.checkpoint_tiles(), value='', tooltip="switch to another model in the middle of generation") 25 | create_refresh_button(refiner_checkpoint, sd_models.list_models, lambda: {"choices": sd_models.checkpoint_tiles()}, self.elem_id("checkpoint_refresh")) 26 | 27 | refiner_switch_at = gr.Slider(value=0.8, label="Switch at", minimum=0.01, maximum=1.0, step=0.01, elem_id=self.elem_id("switch_at"), tooltip="fraction of sampling steps when the switch to refiner model should happen; 1=never, 0.5=switch in the middle of generation") 28 | 29 | def lookup_checkpoint(title): 30 | info = sd_models.get_closet_checkpoint_match(title) 31 | return None if info is None else info.title 32 | 33 | self.infotext_fields = [ 34 | (enable_refiner, lambda d: 'Refiner' in d), 35 | (refiner_checkpoint, lambda d: lookup_checkpoint(d.get('Refiner'))), 36 | 
(refiner_switch_at, 'Refiner switch at'), 37 | ] 38 | 39 | return enable_refiner, refiner_checkpoint, refiner_switch_at 40 | 41 | def setup(self, p, enable_refiner, refiner_checkpoint, refiner_switch_at): 42 | # the actual implementation is in sd_samplers_common.py, apply_refiner 43 | 44 | if not enable_refiner or refiner_checkpoint in (None, "", "None"): 45 | p.refiner_checkpoint = None 46 | p.refiner_switch_at = None 47 | else: 48 | p.refiner_checkpoint = refiner_checkpoint 49 | p.refiner_switch_at = refiner_switch_at 50 | -------------------------------------------------------------------------------- /modules/restart.py: -------------------------------------------------------------------------------- 1 | import os 2 | from pathlib import Path 3 | 4 | from modules.paths_internal import script_path 5 | 6 | 7 | def is_restartable() -> bool: 8 | """ 9 | Return True if the webui is restartable (i.e. there is something watching to restart it with) 10 | """ 11 | return bool(os.environ.get('SD_WEBUI_RESTART')) 12 | 13 | 14 | def restart_program() -> None: 15 | """creates file tmp/restart and immediately stops the process, which webui.bat/webui.sh interpret as a command to start webui again""" 16 | 17 | tmpdir = Path(script_path) / "tmp" 18 | tmpdir.mkdir(parents=True, exist_ok=True) 19 | (tmpdir / "restart").touch() 20 | 21 | stop_program() 22 | 23 | 24 | def stop_program() -> None: 25 | os._exit(0) 26 | -------------------------------------------------------------------------------- /modules/script_loading.py: -------------------------------------------------------------------------------- 1 | import os 2 | import importlib.util 3 | 4 | from modules import errors 5 | 6 | 7 | def load_module(path): 8 | module_spec = importlib.util.spec_from_file_location(os.path.basename(path), path) 9 | module = importlib.util.module_from_spec(module_spec) 10 | module_spec.loader.exec_module(module) 11 | 12 | return module 13 | 14 | 15 | def preload_extensions(extensions_dir, parser, extension_list=None): 16 | if not os.path.isdir(extensions_dir): 17 | return 18 | 19 | extensions = extension_list if extension_list is not None else os.listdir(extensions_dir) 20 | for dirname in sorted(extensions): 21 | preload_script = os.path.join(extensions_dir, dirname, "preload.py") 22 | if not os.path.isfile(preload_script): 23 | continue 24 | 25 | try: 26 | module = load_module(preload_script) 27 | if hasattr(module, 'preload'): 28 | module.preload(parser) 29 | 30 | except Exception: 31 | errors.report(f"Error running preload() for {preload_script}", exc_info=True) 32 | -------------------------------------------------------------------------------- /modules/scripts_auto_postprocessing.py: -------------------------------------------------------------------------------- 1 | from modules import scripts, scripts_postprocessing, shared 2 | 3 | 4 | class ScriptPostprocessingForMainUI(scripts.Script): 5 | def __init__(self, script_postproc): 6 | self.script: scripts_postprocessing.ScriptPostprocessing = script_postproc 7 | self.postprocessing_controls = None 8 | 9 | def title(self): 10 | return self.script.name 11 | 12 | def show(self, is_img2img): 13 | return scripts.AlwaysVisible 14 | 15 | def ui(self, is_img2img): 16 | self.postprocessing_controls = self.script.ui() 17 | return self.postprocessing_controls.values() 18 | 19 | def postprocess_image(self, p, script_pp, *args): 20 | args_dict = dict(zip(self.postprocessing_controls, args)) 21 | 22 | pp = scripts_postprocessing.PostprocessedImage(script_pp.image) 23 | pp.info = 
{} 24 | self.script.process(pp, **args_dict) 25 | p.extra_generation_params.update(pp.info) 26 | script_pp.image = pp.image 27 | 28 | 29 | def create_auto_preprocessing_script_data(): 30 | from modules import scripts 31 | 32 | res = [] 33 | 34 | for name in shared.opts.postprocessing_enable_in_main_ui: 35 | script = next(iter([x for x in scripts.postprocessing_scripts_data if x.script_class.name == name]), None) 36 | if script is None: 37 | continue 38 | 39 | constructor = lambda s=script: ScriptPostprocessingForMainUI(s.script_class()) 40 | res.append(scripts.ScriptClassData(script_class=constructor, path=script.path, basedir=script.basedir, module=script.module)) 41 | 42 | return res 43 | -------------------------------------------------------------------------------- /modules/sd_hijack_checkpoint.py: -------------------------------------------------------------------------------- 1 | from torch.utils.checkpoint import checkpoint 2 | 3 | import ldm.modules.attention 4 | import ldm.modules.diffusionmodules.openaimodel 5 | 6 | 7 | def BasicTransformerBlock_forward(self, x, context=None): 8 | return checkpoint(self._forward, x, context) 9 | 10 | 11 | def AttentionBlock_forward(self, x): 12 | return checkpoint(self._forward, x) 13 | 14 | 15 | def ResBlock_forward(self, x, emb): 16 | return checkpoint(self._forward, x, emb) 17 | 18 | 19 | stored = []  # original forward methods, saved so remove() can restore them 20 | 21 | 22 | def add(): 23 | if len(stored) != 0: 24 | return 25 | 26 | stored.extend([ 27 | ldm.modules.attention.BasicTransformerBlock.forward, 28 | ldm.modules.diffusionmodules.openaimodel.ResBlock.forward, 29 | ldm.modules.diffusionmodules.openaimodel.AttentionBlock.forward 30 | ]) 31 | 32 | ldm.modules.attention.BasicTransformerBlock.forward = BasicTransformerBlock_forward 33 | ldm.modules.diffusionmodules.openaimodel.ResBlock.forward = ResBlock_forward 34 | ldm.modules.diffusionmodules.openaimodel.AttentionBlock.forward = AttentionBlock_forward 35 | 36 | 37 | def remove(): 38 | if len(stored) == 0: 39 | return 40 | 41 | ldm.modules.attention.BasicTransformerBlock.forward = stored[0] 42 | ldm.modules.diffusionmodules.openaimodel.ResBlock.forward = stored[1] 43 | ldm.modules.diffusionmodules.openaimodel.AttentionBlock.forward = stored[2] 44 | 45 | stored.clear() 46 | -------------------------------------------------------------------------------- /modules/sd_hijack_ip2p.py: -------------------------------------------------------------------------------- 1 | import os.path 2 | 3 | 4 | def should_hijack_ip2p(checkpoint_info): 5 | from modules import sd_models_config 6 | 7 | ckpt_basename = os.path.basename(checkpoint_info.filename).lower() 8 | cfg_basename = os.path.basename(sd_models_config.find_checkpoint_config_near_filename(checkpoint_info)).lower() 9 | 10 | return "pix2pix" in ckpt_basename and "pix2pix" not in cfg_basename 11 | -------------------------------------------------------------------------------- /modules/sd_hijack_open_clip.py: -------------------------------------------------------------------------------- 1 | import open_clip.tokenizer 2 | import torch 3 | 4 | from modules import sd_hijack_clip, devices 5 | from modules.shared import opts 6 | 7 | tokenizer = open_clip.tokenizer._tokenizer 8 | 9 | 10 | class FrozenOpenCLIPEmbedderWithCustomWords(sd_hijack_clip.FrozenCLIPEmbedderWithCustomWordsBase): 11 | def __init__(self, wrapped, hijack): 12 | super().__init__(wrapped, hijack) 13 | 14 | self.comma_token = [v for k, v in tokenizer.encoder.items() if k == ','][0] 15 | self.id_start = tokenizer.encoder["<start_of_text>"] 16 | 
self.id_end = tokenizer.encoder["<end_of_text>"] 17 | self.id_pad = 0 18 | 19 | def tokenize(self, texts): 20 | assert not opts.use_old_emphasis_implementation, 'Old emphasis implementation not supported for Open Clip' 21 | 22 | tokenized = [tokenizer.encode(text) for text in texts] 23 | 24 | return tokenized 25 | 26 | def encode_with_transformers(self, tokens): 27 | # set self.wrapped.layer_idx here according to opts.CLIP_stop_at_last_layers 28 | z = self.wrapped.encode_with_transformer(tokens) 29 | 30 | return z 31 | 32 | def encode_embedding_init_text(self, init_text, nvpt): 33 | ids = tokenizer.encode(init_text) 34 | ids = torch.asarray([ids], device=devices.device, dtype=torch.int) 35 | embedded = self.wrapped.model.token_embedding.wrapped(ids).squeeze(0) 36 | 37 | return embedded 38 | 39 | 40 | class FrozenOpenCLIPEmbedder2WithCustomWords(sd_hijack_clip.FrozenCLIPEmbedderWithCustomWordsBase): 41 | def __init__(self, wrapped, hijack): 42 | super().__init__(wrapped, hijack) 43 | 44 | self.comma_token = [v for k, v in tokenizer.encoder.items() if k == ','][0] 45 | self.id_start = tokenizer.encoder["<start_of_text>"] 46 | self.id_end = tokenizer.encoder["<end_of_text>"] 47 | self.id_pad = 0 48 | 49 | def tokenize(self, texts): 50 | assert not opts.use_old_emphasis_implementation, 'Old emphasis implementation not supported for Open Clip' 51 | 52 | tokenized = [tokenizer.encode(text) for text in texts] 53 | 54 | return tokenized 55 | 56 | def encode_with_transformers(self, tokens): 57 | d = self.wrapped.encode_with_transformer(tokens) 58 | z = d[self.wrapped.layer] 59 | 60 | pooled = d.get("pooled") 61 | if pooled is not None: 62 | z.pooled = pooled 63 | 64 | return z 65 | 66 | def encode_embedding_init_text(self, init_text, nvpt): 67 | ids = tokenizer.encode(init_text) 68 | ids = torch.asarray([ids], device=devices.device, dtype=torch.int) 69 | embedded = self.wrapped.model.token_embedding.wrapped(ids.to(self.wrapped.model.token_embedding.wrapped.weight.device)).squeeze(0) 70 | 71 | return embedded 72 | -------------------------------------------------------------------------------- /modules/sd_hijack_utils.py: -------------------------------------------------------------------------------- 1 | import importlib 2 | 3 | class CondFunc:  # replaces a target function (given as a callable or dotted import path) with sub_func whenever cond_func(orig_func, ...) is truthy 4 | def __new__(cls, orig_func, sub_func, cond_func): 5 | self = super(CondFunc, cls).__new__(cls) 6 | if isinstance(orig_func, str): 7 | func_path = orig_func.split('.') 8 | for i in range(len(func_path)-1, -1, -1): 9 | try: 10 | resolved_obj = importlib.import_module('.'.join(func_path[:i])) 11 | break 12 | except ImportError: 13 | pass 14 | for attr_name in func_path[i:-1]: 15 | resolved_obj = getattr(resolved_obj, attr_name) 16 | orig_func = getattr(resolved_obj, func_path[-1]) 17 | setattr(resolved_obj, func_path[-1], lambda *args, **kwargs: self(*args, **kwargs)) 18 | self.__init__(orig_func, sub_func, cond_func) 19 | return lambda *args, **kwargs: self(*args, **kwargs) 20 | def __init__(self, orig_func, sub_func, cond_func): 21 | self.__orig_func = orig_func 22 | self.__sub_func = sub_func 23 | self.__cond_func = cond_func 24 | def __call__(self, *args, **kwargs): 25 | if not self.__cond_func or self.__cond_func(self.__orig_func, *args, **kwargs): 26 | return self.__sub_func(self.__orig_func, *args, **kwargs) 27 | else: 28 | return self.__orig_func(*args, **kwargs) 29 | -------------------------------------------------------------------------------- /modules/sd_hijack_xlmr.py: -------------------------------------------------------------------------------- 1 | import torch 2 | 3 | from 
modules import sd_hijack_clip, devices 4 | 5 | 6 | class FrozenXLMREmbedderWithCustomWords(sd_hijack_clip.FrozenCLIPEmbedderWithCustomWords): 7 | def __init__(self, wrapped, hijack): 8 | super().__init__(wrapped, hijack) 9 | 10 | self.id_start = wrapped.config.bos_token_id 11 | self.id_end = wrapped.config.eos_token_id 12 | self.id_pad = wrapped.config.pad_token_id 13 | 14 | self.comma_token = self.tokenizer.get_vocab().get(',', None) # alt diffusion doesn't have bits for comma 15 | 16 | def encode_with_transformers(self, tokens): 17 | # there's no CLIP Skip here because all hidden layers have size of 1024 and the last one uses a 18 | # trained layer to transform those 1024 into 768 for unet; so you can't choose which transformer 19 | # layer to work with - you have to use the last 20 | 21 | attention_mask = (tokens != self.id_pad).to(device=tokens.device, dtype=torch.int64) 22 | features = self.wrapped(input_ids=tokens, attention_mask=attention_mask) 23 | z = features['projection_state'] 24 | 25 | return z 26 | 27 | def encode_embedding_init_text(self, init_text, nvpt): 28 | embedding_layer = self.wrapped.roberta.embeddings 29 | ids = self.wrapped.tokenizer(init_text, max_length=nvpt, return_tensors="pt", add_special_tokens=False)["input_ids"] 30 | embedded = embedding_layer.token_embedding.wrapped(ids.to(devices.device)).squeeze(0) 31 | 32 | return embedded 33 | -------------------------------------------------------------------------------- /modules/sd_models_types.py: -------------------------------------------------------------------------------- 1 | from ldm.models.diffusion.ddpm import LatentDiffusion 2 | from typing import TYPE_CHECKING 3 | 4 | 5 | if TYPE_CHECKING: 6 | from modules.sd_models import CheckpointInfo 7 | 8 | 9 | class WebuiSdModel(LatentDiffusion): 10 | """This class is not actually instantiated, but its fields are created and filled by webui""" 11 | 12 | lowvram: bool 13 | """True if lowvram/medvram optimizations are enabled -- see modules.lowvram for more info""" 14 | 15 | sd_model_hash: str 16 | """short hash, the first 10 characters of the SHA1 hash of the model file; may be None if --no-hashing flag is used""" 17 | 18 | sd_model_checkpoint: str 19 | """path to the file on disk that model weights were obtained from""" 20 | 21 | sd_checkpoint_info: 'CheckpointInfo' 22 | """structure with additional information about the file with model's weights""" 23 | 24 | is_sdxl: bool 25 | """True if the model's architecture is SDXL or SSD""" 26 | 27 | is_ssd: bool 28 | """True if the model is SSD""" 29 | 30 | is_sd2: bool 31 | """True if the model's architecture is SD 2.x""" 32 | 33 | is_sd1: bool 34 | """True if the model's architecture is SD 1.x""" 35 | -------------------------------------------------------------------------------- /modules/sd_samplers.py: -------------------------------------------------------------------------------- 1 | from modules import sd_samplers_kdiffusion, sd_samplers_timesteps, shared 2 | 3 | # imports for functions that previously were here and are used by other modules 4 | from modules.sd_samplers_common import samples_to_image_grid, sample_to_image # noqa: F401 5 | 6 | all_samplers = [ 7 | *sd_samplers_kdiffusion.samplers_data_k_diffusion, 8 | *sd_samplers_timesteps.samplers_data_timesteps, 9 | ] 10 | all_samplers_map = {x.name: x for x in all_samplers} 11 | 12 | samplers = [] 13 | samplers_for_img2img = [] 14 | samplers_map = {} 15 | samplers_hidden = {} 16 | 17 | 18 | def find_sampler_config(name): 19 | if name is not None: 20 | config = 
all_samplers_map.get(name, None) 21 | else: 22 | config = all_samplers[0] 23 | 24 | return config 25 | 26 | 27 | def create_sampler(name, model): 28 | config = find_sampler_config(name) 29 | 30 | assert config is not None, f'bad sampler name: {name}' 31 | 32 | if model.is_sdxl and config.options.get("no_sdxl", False): 33 | raise Exception(f"Sampler {config.name} is not supported for SDXL") 34 | 35 | sampler = config.constructor(model) 36 | sampler.config = config 37 | 38 | return sampler 39 | 40 | 41 | def set_samplers(): 42 | global samplers, samplers_for_img2img, samplers_hidden 43 | 44 | samplers_hidden = set(shared.opts.hide_samplers) 45 | samplers = all_samplers 46 | samplers_for_img2img = all_samplers 47 | 48 | samplers_map.clear() 49 | for sampler in all_samplers: 50 | samplers_map[sampler.name.lower()] = sampler.name 51 | for alias in sampler.aliases: 52 | samplers_map[alias.lower()] = sampler.name 53 | 54 | 55 | def visible_sampler_names(): 56 | return [x.name for x in samplers if x.name not in samplers_hidden] 57 | 58 | 59 | set_samplers() 60 | -------------------------------------------------------------------------------- /modules/sd_samplers_compvis.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/philz1337x/clarity-upscaler/cfbdb842c6c839c8e97741722b8cf9a4b7662d98/modules/sd_samplers_compvis.py -------------------------------------------------------------------------------- /modules/sd_samplers_extra.py: -------------------------------------------------------------------------------- 1 | import torch 2 | import tqdm 3 | import k_diffusion.sampling 4 | 5 | 6 | @torch.no_grad() 7 | def restart_sampler(model, x, sigmas, extra_args=None, callback=None, disable=None, s_noise=1., restart_list=None): 8 | """Implements restart sampling in Restart Sampling for Improving Generative Processes (2023) 9 | Restart_list format: {min_sigma: [ restart_steps, restart_times, max_sigma]} 10 | If restart_list is None: will choose restart_list automatically, otherwise will use the given restart_list 11 | """ 12 | extra_args = {} if extra_args is None else extra_args 13 | s_in = x.new_ones([x.shape[0]]) 14 | step_id = 0 15 | from k_diffusion.sampling import to_d, get_sigmas_karras 16 | 17 | def heun_step(x, old_sigma, new_sigma, second_order=True): 18 | nonlocal step_id 19 | denoised = model(x, old_sigma * s_in, **extra_args) 20 | d = to_d(x, old_sigma, denoised) 21 | if callback is not None: 22 | callback({'x': x, 'i': step_id, 'sigma': new_sigma, 'sigma_hat': old_sigma, 'denoised': denoised}) 23 | dt = new_sigma - old_sigma 24 | if new_sigma == 0 or not second_order: 25 | # Euler method 26 | x = x + d * dt 27 | else: 28 | # Heun's method 29 | x_2 = x + d * dt 30 | denoised_2 = model(x_2, new_sigma * s_in, **extra_args) 31 | d_2 = to_d(x_2, new_sigma, denoised_2) 32 | d_prime = (d + d_2) / 2 33 | x = x + d_prime * dt 34 | step_id += 1 35 | return x 36 | 37 | steps = sigmas.shape[0] - 1 38 | if restart_list is None: 39 | if steps >= 20: 40 | restart_steps = 9 41 | restart_times = 1 42 | if steps >= 36: 43 | restart_steps = steps // 4 44 | restart_times = 2 45 | sigmas = get_sigmas_karras(steps - restart_steps * restart_times, sigmas[-2].item(), sigmas[0].item(), device=sigmas.device) 46 | restart_list = {0.1: [restart_steps + 1, restart_times, 2]} 47 | else: 48 | restart_list = {} 49 | 50 | restart_list = {int(torch.argmin(abs(sigmas - key), dim=0)): value for key, value in restart_list.items()} 51 | 52 | step_list = [] 53 | for 
i in range(len(sigmas) - 1): 54 | step_list.append((sigmas[i], sigmas[i + 1])) 55 | if i + 1 in restart_list: 56 | restart_steps, restart_times, restart_max = restart_list[i + 1] 57 | min_idx = i + 1 58 | max_idx = int(torch.argmin(abs(sigmas - restart_max), dim=0)) 59 | if max_idx < min_idx: 60 | sigma_restart = get_sigmas_karras(restart_steps, sigmas[min_idx].item(), sigmas[max_idx].item(), device=sigmas.device)[:-1] 61 | while restart_times > 0: 62 | restart_times -= 1 63 | step_list.extend(zip(sigma_restart[:-1], sigma_restart[1:])) 64 | 65 | last_sigma = None 66 | for old_sigma, new_sigma in tqdm.tqdm(step_list, disable=disable): 67 | if last_sigma is None: 68 | last_sigma = old_sigma 69 | elif last_sigma < old_sigma: 70 | x = x + k_diffusion.sampling.torch.randn_like(x) * s_noise * (old_sigma ** 2 - last_sigma ** 2) ** 0.5 71 | x = heun_step(x, old_sigma, new_sigma) 72 | last_sigma = new_sigma 73 | 74 | return x 75 | -------------------------------------------------------------------------------- /modules/sd_unet.py: -------------------------------------------------------------------------------- 1 | import torch.nn 2 | 3 | from modules import script_callbacks, shared, devices 4 | 5 | unet_options = [] 6 | current_unet_option = None 7 | current_unet = None 8 | original_forward = None # not used, only left temporarily for compatibility 9 | 10 | def list_unets(): 11 | new_unets = script_callbacks.list_unets_callback() 12 | 13 | unet_options.clear() 14 | unet_options.extend(new_unets) 15 | 16 | 17 | def get_unet_option(option=None): 18 | option = option or shared.opts.sd_unet 19 | 20 | if option == "None": 21 | return None 22 | 23 | if option == "Automatic": 24 | name = shared.sd_model.sd_checkpoint_info.model_name 25 | 26 | options = [x for x in unet_options if x.model_name == name] 27 | 28 | option = options[0].label if options else "None" 29 | 30 | return next(iter([x for x in unet_options if x.label == option]), None) 31 | 32 | 33 | def apply_unet(option=None): 34 | global current_unet_option 35 | global current_unet 36 | 37 | new_option = get_unet_option(option) 38 | if new_option == current_unet_option: 39 | return 40 | 41 | if current_unet is not None: 42 | print(f"Deactivating unet: {current_unet.option.label}") 43 | current_unet.deactivate() 44 | 45 | current_unet_option = new_option 46 | if current_unet_option is None: 47 | current_unet = None 48 | 49 | if not shared.sd_model.lowvram: 50 | shared.sd_model.model.diffusion_model.to(devices.device) 51 | 52 | return 53 | 54 | shared.sd_model.model.diffusion_model.to(devices.cpu) 55 | devices.torch_gc() 56 | 57 | current_unet = current_unet_option.create_unet() 58 | current_unet.option = current_unet_option 59 | print(f"Activating unet: {current_unet.option.label}") 60 | current_unet.activate() 61 | 62 | 63 | class SdUnetOption: 64 | model_name = None 65 | """name of related checkpoint - this option will be selected automatically for unet if the name of checkpoint matches this""" 66 | 67 | label = None 68 | """name of the unet in UI""" 69 | 70 | def create_unet(self): 71 | """returns SdUnet object to be used as a Unet instead of built-in unet when making pictures""" 72 | raise NotImplementedError() 73 | 74 | 75 | class SdUnet(torch.nn.Module): 76 | def forward(self, x, timesteps, context, *args, **kwargs): 77 | raise NotImplementedError() 78 | 79 | def activate(self): 80 | pass 81 | 82 | def deactivate(self): 83 | pass 84 | 85 | 86 | def create_unet_forward(original_forward): 87 | def UNetModel_forward(self, x, timesteps=None, 
context=None, *args, **kwargs): 88 | if current_unet is not None: 89 | return current_unet.forward(x, timesteps, context, *args, **kwargs) 90 | 91 | return original_forward(self, x, timesteps, context, *args, **kwargs) 92 | 93 | return UNetModel_forward 94 | 95 | -------------------------------------------------------------------------------- /modules/sd_vae_approx.py: -------------------------------------------------------------------------------- 1 | import os 2 | 3 | import torch 4 | from torch import nn 5 | from modules import devices, paths, shared 6 | 7 | sd_vae_approx_models = {} 8 | 9 | 10 | class VAEApprox(nn.Module): 11 | def __init__(self): 12 | super(VAEApprox, self).__init__() 13 | self.conv1 = nn.Conv2d(4, 8, (7, 7)) 14 | self.conv2 = nn.Conv2d(8, 16, (5, 5)) 15 | self.conv3 = nn.Conv2d(16, 32, (3, 3)) 16 | self.conv4 = nn.Conv2d(32, 64, (3, 3)) 17 | self.conv5 = nn.Conv2d(64, 32, (3, 3)) 18 | self.conv6 = nn.Conv2d(32, 16, (3, 3)) 19 | self.conv7 = nn.Conv2d(16, 8, (3, 3)) 20 | self.conv8 = nn.Conv2d(8, 3, (3, 3)) 21 | 22 | def forward(self, x): 23 | extra = 11 24 | x = nn.functional.interpolate(x, (x.shape[2] * 2, x.shape[3] * 2)) 25 | x = nn.functional.pad(x, (extra, extra, extra, extra)) 26 | 27 | for layer in [self.conv1, self.conv2, self.conv3, self.conv4, self.conv5, self.conv6, self.conv7, self.conv8, ]: 28 | x = layer(x) 29 | x = nn.functional.leaky_relu(x, 0.1) 30 | 31 | return x 32 | 33 | 34 | def download_model(model_path, model_url): 35 | if not os.path.exists(model_path): 36 | os.makedirs(os.path.dirname(model_path), exist_ok=True) 37 | 38 | print(f'Downloading VAEApprox model to: {model_path}') 39 | torch.hub.download_url_to_file(model_url, model_path) 40 | 41 | 42 | def model(): 43 | model_name = "vaeapprox-sdxl.pt" if getattr(shared.sd_model, 'is_sdxl', False) else "model.pt" 44 | loaded_model = sd_vae_approx_models.get(model_name) 45 | 46 | if loaded_model is None: 47 | model_path = os.path.join(paths.models_path, "VAE-approx", model_name) 48 | if not os.path.exists(model_path): 49 | model_path = os.path.join(paths.script_path, "models", "VAE-approx", model_name) 50 | 51 | if not os.path.exists(model_path): 52 | model_path = os.path.join(paths.models_path, "VAE-approx", model_name) 53 | download_model(model_path, 'https://github.com/AUTOMATIC1111/stable-diffusion-webui/releases/download/v1.0.0-pre/' + model_name) 54 | 55 | loaded_model = VAEApprox() 56 | loaded_model.load_state_dict(torch.load(model_path, map_location='cpu' if devices.device.type != 'cuda' else None)) 57 | loaded_model.eval() 58 | loaded_model.to(devices.device, devices.dtype) 59 | sd_vae_approx_models[model_name] = loaded_model 60 | 61 | return loaded_model 62 | 63 | 64 | def cheap_approximation(sample): 65 | # https://discuss.huggingface.co/t/decoding-latents-to-rgb-without-upscaling/23204/2 66 | 67 | if shared.sd_model.is_sdxl: 68 | coeffs = [ 69 | [ 0.3448, 0.4168, 0.4395], 70 | [-0.1953, -0.0290, 0.0250], 71 | [ 0.1074, 0.0886, -0.0163], 72 | [-0.3730, -0.2499, -0.2088], 73 | ] 74 | else: 75 | coeffs = [ 76 | [ 0.298, 0.207, 0.208], 77 | [ 0.187, 0.286, 0.173], 78 | [-0.158, 0.189, 0.264], 79 | [-0.184, -0.271, -0.473], 80 | ] 81 | 82 | coefs = torch.tensor(coeffs).to(sample.device) 83 | 84 | x_sample = torch.einsum("...lxy,lr -> ...rxy", sample, coefs) 85 | 86 | return x_sample 87 | -------------------------------------------------------------------------------- /modules/shared.py: -------------------------------------------------------------------------------- 1 | import sys 2 | 3 | 
import gradio as gr 4 | 5 | from modules import shared_cmd_options, shared_gradio_themes, options, shared_items, sd_models_types 6 | from modules.paths_internal import models_path, script_path, data_path, sd_configs_path, sd_default_config, sd_model_file, default_sd_model_file, extensions_dir, extensions_builtin_dir # noqa: F401 7 | from modules import util 8 | 9 | cmd_opts = shared_cmd_options.cmd_opts 10 | parser = shared_cmd_options.parser 11 | 12 | batch_cond_uncond = True # old field, unused now in favor of shared.opts.batch_cond_uncond 13 | parallel_processing_allowed = True 14 | styles_filename = cmd_opts.styles_file 15 | config_filename = cmd_opts.ui_settings_file 16 | hide_dirs = {"visible": not cmd_opts.hide_ui_dir_config} 17 | 18 | demo = None 19 | 20 | device = None 21 | 22 | weight_load_location = None 23 | 24 | xformers_available = False 25 | 26 | hypernetworks = {} 27 | 28 | loaded_hypernetworks = [] 29 | 30 | state = None 31 | 32 | prompt_styles = None 33 | 34 | interrogator = None 35 | 36 | face_restorers = [] 37 | 38 | options_templates = None 39 | opts = None 40 | restricted_opts = None 41 | 42 | sd_model: sd_models_types.WebuiSdModel = None 43 | 44 | settings_components = None 45 | """assigned from ui.py, a mapping of setting names to gradio components responsible for those settings""" 46 | 47 | tab_names = [] 48 | 49 | latent_upscale_default_mode = "Latent" 50 | latent_upscale_modes = { 51 | "Latent": {"mode": "bilinear", "antialias": False}, 52 | "Latent (antialiased)": {"mode": "bilinear", "antialias": True}, 53 | "Latent (bicubic)": {"mode": "bicubic", "antialias": False}, 54 | "Latent (bicubic antialiased)": {"mode": "bicubic", "antialias": True}, 55 | "Latent (nearest)": {"mode": "nearest", "antialias": False}, 56 | "Latent (nearest-exact)": {"mode": "nearest-exact", "antialias": False}, 57 | } 58 | 59 | sd_upscalers = [] 60 | 61 | clip_model = None 62 | 63 | progress_print_out = sys.stdout 64 | 65 | gradio_theme = gr.themes.Base() 66 | 67 | total_tqdm = None 68 | 69 | mem_mon = None 70 | 71 | options_section = options.options_section 72 | OptionInfo = options.OptionInfo 73 | OptionHTML = options.OptionHTML 74 | 75 | natural_sort_key = util.natural_sort_key 76 | listfiles = util.listfiles 77 | html_path = util.html_path 78 | html = util.html 79 | walk_files = util.walk_files 80 | ldm_print = util.ldm_print 81 | 82 | reload_gradio_theme = shared_gradio_themes.reload_gradio_theme 83 | 84 | list_checkpoint_tiles = shared_items.list_checkpoint_tiles 85 | refresh_checkpoints = shared_items.refresh_checkpoints 86 | list_samplers = shared_items.list_samplers 87 | reload_hypernetworks = shared_items.reload_hypernetworks 88 | -------------------------------------------------------------------------------- /modules/shared_cmd_options.py: -------------------------------------------------------------------------------- 1 | import os 2 | 3 | import launch 4 | from modules import cmd_args, script_loading 5 | from modules.paths_internal import models_path, script_path, data_path, sd_configs_path, sd_default_config, sd_model_file, default_sd_model_file, extensions_dir, extensions_builtin_dir # noqa: F401 6 | 7 | parser = cmd_args.parser 8 | 9 | script_loading.preload_extensions(extensions_dir, parser, extension_list=launch.list_extensions(launch.args.ui_settings_file)) 10 | script_loading.preload_extensions(extensions_builtin_dir, parser) 11 | 12 | if os.environ.get('IGNORE_CMD_ARGS_ERRORS', None) is None: 13 | cmd_opts = parser.parse_args() 14 | else: 15 | cmd_opts, _ = 
parser.parse_known_args() 16 | 17 | cmd_opts.webui_is_non_local = any([cmd_opts.share, cmd_opts.listen, cmd_opts.ngrok, cmd_opts.server_name]) 18 | cmd_opts.disable_extension_access = cmd_opts.webui_is_non_local and not cmd_opts.enable_insecure_extension_access 19 | -------------------------------------------------------------------------------- /modules/shared_gradio_themes.py: -------------------------------------------------------------------------------- 1 | import os 2 | 3 | import gradio as gr 4 | 5 | from modules import errors, shared 6 | from modules.paths_internal import script_path 7 | 8 | 9 | # https://huggingface.co/datasets/freddyaboulton/gradio-theme-subdomains/resolve/main/subdomains.json 10 | gradio_hf_hub_themes = [ 11 | "gradio/base", 12 | "gradio/glass", 13 | "gradio/monochrome", 14 | "gradio/seafoam", 15 | "gradio/soft", 16 | "gradio/dracula_test", 17 | "abidlabs/dracula_test", 18 | "abidlabs/Lime", 19 | "abidlabs/pakistan", 20 | "Ama434/neutral-barlow", 21 | "dawood/microsoft_windows", 22 | "finlaymacklon/smooth_slate", 23 | "Franklisi/darkmode", 24 | "freddyaboulton/dracula_revamped", 25 | "freddyaboulton/test-blue", 26 | "gstaff/xkcd", 27 | "Insuz/Mocha", 28 | "Insuz/SimpleIndigo", 29 | "JohnSmith9982/small_and_pretty", 30 | "nota-ai/theme", 31 | "nuttea/Softblue", 32 | "ParityError/Anime", 33 | "reilnuud/polite", 34 | "remilia/Ghostly", 35 | "rottenlittlecreature/Moon_Goblin", 36 | "step-3-profit/Midnight-Deep", 37 | "Taithrah/Minimal", 38 | "ysharma/huggingface", 39 | "ysharma/steampunk", 40 | "NoCrypt/miku" 41 | ] 42 | 43 | 44 | def reload_gradio_theme(theme_name=None): 45 | if not theme_name: 46 | theme_name = shared.opts.gradio_theme 47 | 48 | default_theme_args = dict( 49 | font=["Source Sans Pro", 'ui-sans-serif', 'system-ui', 'sans-serif'], 50 | font_mono=['IBM Plex Mono', 'ui-monospace', 'Consolas', 'monospace'], 51 | ) 52 | 53 | if theme_name == "Default": 54 | shared.gradio_theme = gr.themes.Default(**default_theme_args) 55 | else: 56 | try: 57 | theme_cache_dir = os.path.join(script_path, 'tmp', 'gradio_themes') 58 | theme_cache_path = os.path.join(theme_cache_dir, f'{theme_name.replace("/", "_")}.json') 59 | if shared.opts.gradio_themes_cache and os.path.exists(theme_cache_path): 60 | shared.gradio_theme = gr.themes.ThemeClass.load(theme_cache_path) 61 | else: 62 | os.makedirs(theme_cache_dir, exist_ok=True) 63 | shared.gradio_theme = gr.themes.ThemeClass.from_hub(theme_name) 64 | shared.gradio_theme.dump(theme_cache_path) 65 | except Exception as e: 66 | errors.display(e, "changing gradio theme") 67 | shared.gradio_theme = gr.themes.Default(**default_theme_args) 68 | -------------------------------------------------------------------------------- /modules/shared_init.py: -------------------------------------------------------------------------------- 1 | import os 2 | 3 | import torch 4 | 5 | from modules import shared 6 | from modules.shared import cmd_opts 7 | 8 | 9 | def initialize(): 10 | """Initializes fields inside the shared module in a controlled manner. 11 | 12 | Should be called early because some other modules you can import might need these fields to be already set. 
13 | """ 14 | 15 | os.makedirs(cmd_opts.hypernetwork_dir, exist_ok=True) 16 | 17 | from modules import options, shared_options 18 | shared.options_templates = shared_options.options_templates 19 | shared.opts = options.Options(shared_options.options_templates, shared_options.restricted_opts) 20 | shared.restricted_opts = shared_options.restricted_opts 21 | if os.path.exists(shared.config_filename): 22 | shared.opts.load(shared.config_filename) 23 | 24 | from modules import devices 25 | devices.device, devices.device_interrogate, devices.device_gfpgan, devices.device_esrgan, devices.device_codeformer = \ 26 | (devices.cpu if any(y in cmd_opts.use_cpu for y in [x, 'all']) else devices.get_optimal_device() for x in ['sd', 'interrogate', 'gfpgan', 'esrgan', 'codeformer']) 27 | 28 | devices.dtype = torch.float32 if cmd_opts.no_half else torch.float16 29 | devices.dtype_vae = torch.float32 if cmd_opts.no_half or cmd_opts.no_half_vae else torch.float16 30 | 31 | shared.device = devices.device 32 | shared.weight_load_location = None if cmd_opts.lowram else "cpu" 33 | 34 | from modules import shared_state 35 | shared.state = shared_state.State() 36 | 37 | from modules import styles 38 | shared.prompt_styles = styles.StyleDatabase(shared.styles_filename) 39 | 40 | from modules import interrogate 41 | shared.interrogator = interrogate.InterrogateModels("interrogate") 42 | 43 | from modules import shared_total_tqdm 44 | shared.total_tqdm = shared_total_tqdm.TotalTQDM() 45 | 46 | from modules import memmon, devices 47 | shared.mem_mon = memmon.MemUsageMonitor("MemMon", devices.device, shared.opts) 48 | shared.mem_mon.start() 49 | 50 | -------------------------------------------------------------------------------- /modules/shared_total_tqdm.py: -------------------------------------------------------------------------------- 1 | import tqdm 2 | 3 | from modules import shared 4 | 5 | 6 | class TotalTQDM: 7 | def __init__(self): 8 | self._tqdm = None 9 | 10 | def reset(self): 11 | self._tqdm = tqdm.tqdm( 12 | desc="Total progress", 13 | total=shared.state.job_count * shared.state.sampling_steps, 14 | position=1, 15 | file=shared.progress_print_out 16 | ) 17 | 18 | def update(self): 19 | if not shared.opts.multiple_tqdm or shared.cmd_opts.disable_console_progressbars: 20 | return 21 | if self._tqdm is None: 22 | self.reset() 23 | self._tqdm.update() 24 | 25 | def updateTotal(self, new_total): 26 | if not shared.opts.multiple_tqdm or shared.cmd_opts.disable_console_progressbars: 27 | return 28 | if self._tqdm is None: 29 | self.reset() 30 | self._tqdm.total = new_total 31 | 32 | def clear(self): 33 | if self._tqdm is not None: 34 | self._tqdm.refresh() 35 | self._tqdm.close() 36 | self._tqdm = None 37 | 38 | -------------------------------------------------------------------------------- /modules/textual_inversion/learn_schedule.py: -------------------------------------------------------------------------------- 1 | import tqdm 2 | 3 | 4 | class LearnScheduleIterator: 5 | def __init__(self, learn_rate, max_steps, cur_step=0): 6 | """ 7 | specify learn_rate as "0.001:100, 0.00001:1000, 1e-5:10000" to have lr of 0.001 until step 100, 0.00001 until 1000, and 1e-5 until 10000 8 | """ 9 | 10 | pairs = learn_rate.split(',') 11 | self.rates = [] 12 | self.it = 0 13 | self.maxit = 0 14 | try: 15 | for pair in pairs: 16 | if not pair.strip(): 17 | continue 18 | tmp = pair.split(':') 19 | if len(tmp) == 2: 20 | step = int(tmp[1]) 21 | if step > cur_step: 22 | self.rates.append((float(tmp[0]), min(step, max_steps))) 
23 | self.maxit += 1 24 | if step > max_steps: 25 | return 26 | elif step == -1: 27 | self.rates.append((float(tmp[0]), max_steps)) 28 | self.maxit += 1 29 | return 30 | else: 31 | self.rates.append((float(tmp[0]), max_steps)) 32 | self.maxit += 1 33 | return 34 | assert self.rates 35 | except (ValueError, AssertionError) as e: 36 | raise Exception('Invalid learning rate schedule. It should be a number or, for example, like "0.001:100, 0.00001:1000, 1e-5:10000" to have lr of 0.001 until step 100, 0.00001 until 1000, and 1e-5 until 10000.') from e 37 | 38 | 39 | def __iter__(self): 40 | return self 41 | 42 | def __next__(self): 43 | if self.it < self.maxit: 44 | self.it += 1 45 | return self.rates[self.it - 1] 46 | else: 47 | raise StopIteration 48 | 49 | 50 | class LearnRateScheduler: 51 | def __init__(self, learn_rate, max_steps, cur_step=0, verbose=True): 52 | self.schedules = LearnScheduleIterator(learn_rate, max_steps, cur_step) 53 | (self.learn_rate, self.end_step) = next(self.schedules) 54 | self.verbose = verbose 55 | 56 | if self.verbose: 57 | print(f'Training at rate of {self.learn_rate} until step {self.end_step}') 58 | 59 | self.finished = False 60 | 61 | def step(self, step_number): 62 | if step_number < self.end_step: 63 | return False 64 | 65 | try: 66 | (self.learn_rate, self.end_step) = next(self.schedules) 67 | except StopIteration: 68 | self.finished = True 69 | return False 70 | return True 71 | 72 | def apply(self, optimizer, step_number): 73 | if not self.step(step_number): 74 | return 75 | 76 | if self.verbose: 77 | tqdm.tqdm.write(f'Training at rate of {self.learn_rate} until step {self.end_step}') 78 | 79 | for pg in optimizer.param_groups: 80 | pg['lr'] = self.learn_rate 81 | 82 | -------------------------------------------------------------------------------- /modules/textual_inversion/logging.py: -------------------------------------------------------------------------------- 1 | import datetime 2 | import json 3 | import os 4 | 5 | saved_params_shared = { 6 | "batch_size", 7 | "clip_grad_mode", 8 | "clip_grad_value", 9 | "create_image_every", 10 | "data_root", 11 | "gradient_step", 12 | "initial_step", 13 | "latent_sampling_method", 14 | "learn_rate", 15 | "log_directory", 16 | "model_hash", 17 | "model_name", 18 | "num_of_dataset_images", 19 | "steps", 20 | "template_file", 21 | "training_height", 22 | "training_width", 23 | } 24 | saved_params_ti = { 25 | "embedding_name", 26 | "num_vectors_per_token", 27 | "save_embedding_every", 28 | "save_image_with_stored_embedding", 29 | } 30 | saved_params_hypernet = { 31 | "activation_func", 32 | "add_layer_norm", 33 | "hypernetwork_name", 34 | "layer_structure", 35 | "save_hypernetwork_every", 36 | "use_dropout", 37 | "weight_init", 38 | } 39 | saved_params_all = saved_params_shared | saved_params_ti | saved_params_hypernet 40 | saved_params_previews = { 41 | "preview_cfg_scale", 42 | "preview_height", 43 | "preview_negative_prompt", 44 | "preview_prompt", 45 | "preview_sampler_index", 46 | "preview_seed", 47 | "preview_steps", 48 | "preview_width", 49 | } 50 | 51 | 52 | def save_settings_to_file(log_directory, all_params): 53 | now = datetime.datetime.now() 54 | params = {"datetime": now.strftime("%Y-%m-%d %H:%M:%S")} 55 | 56 | keys = saved_params_all 57 | if all_params.get('preview_from_txt2img'): 58 | keys = keys | saved_params_previews 59 | 60 | params.update({k: v for k, v in all_params.items() if k in keys}) 61 | 62 | filename = f'settings-{now.strftime("%Y-%m-%d-%H-%M-%S")}.json' 63 | with 
open(os.path.join(log_directory, filename), "w") as file: 64 | json.dump(params, file, indent=4) 65 | -------------------------------------------------------------------------------- /modules/textual_inversion/test_embedding.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/philz1337x/clarity-upscaler/cfbdb842c6c839c8e97741722b8cf9a4b7662d98/modules/textual_inversion/test_embedding.png -------------------------------------------------------------------------------- /modules/textual_inversion/ui.py: -------------------------------------------------------------------------------- 1 | import html 2 | 3 | import gradio as gr 4 | 5 | import modules.textual_inversion.textual_inversion 6 | from modules import sd_hijack, shared 7 | 8 | 9 | def create_embedding(name, initialization_text, nvpt, overwrite_old): 10 | filename = modules.textual_inversion.textual_inversion.create_embedding(name, nvpt, overwrite_old, init_text=initialization_text) 11 | 12 | sd_hijack.model_hijack.embedding_db.load_textual_inversion_embeddings() 13 | 14 | return gr.Dropdown.update(choices=sorted(sd_hijack.model_hijack.embedding_db.word_embeddings.keys())), f"Created: {filename}", "" 15 | 16 | 17 | def train_embedding(*args): 18 | 19 | assert not shared.cmd_opts.lowvram, 'Training models with lowvram not possible' 20 | 21 | apply_optimizations = shared.opts.training_xattention_optimizations 22 | try: 23 | if not apply_optimizations: 24 | sd_hijack.undo_optimizations() 25 | 26 | embedding, filename = modules.textual_inversion.textual_inversion.train_embedding(*args) 27 | 28 | res = f""" 29 | Training {'interrupted' if shared.state.interrupted else 'finished'} at {embedding.step} steps. 30 | Embedding saved to {html.escape(filename)} 31 | """ 32 | return res, "" 33 | except Exception: 34 | raise 35 | finally: 36 | if not apply_optimizations: 37 | sd_hijack.apply_optimizations() 38 | 39 | -------------------------------------------------------------------------------- /modules/tiling/seamless_tiling.py: -------------------------------------------------------------------------------- 1 | from PIL import ImageOps, ImageEnhance, Image 2 | from modules.tiling.img_utils import convert_binary_img_to_pil, \ 3 | convert_pil_img_to_binary, \ 4 | expand_canvas_tiling 5 | 6 | def preprocess_expand_canvas_for_tile_image(img, border_size=128, darken=False, force_original_res=True): 7 | ## first, let's check the data type of the image; it could be binary or a PIL.Image 8 | if isinstance(img, bytes): 9 | input_type = 'binary' 10 | img = convert_binary_img_to_pil(img) 11 | elif isinstance(img, Image.Image): 12 | ### then it's a PIL.Image. 
13 | input_type = 'pil' 14 | 15 | # remember the original size so the expanded canvas can be scaled back to it 16 | width, height = img.size 17 | org_size = width 18 | 19 | expanded_img = expand_canvas_tiling(img, border_size=border_size, darken=darken) 20 | 21 | if force_original_res: 22 | expanded_img = expanded_img.resize((width, height), Image.Resampling.LANCZOS) 23 | 24 | 25 | ## finally, if the input_type was binary, we need to convert the pil img back to binary 26 | if input_type == 'binary': 27 | expanded_img = convert_pil_img_to_binary(expanded_img) 28 | 29 | return expanded_img 30 | 31 | def postprocess_crop_canvas_back(img, border_size=128): 32 | ## check if the incoming image is binary or pil 33 | if isinstance(img, bytes): 34 | input_type = 'binary' 35 | img = convert_binary_img_to_pil(img) 36 | elif isinstance(img, Image.Image): 37 | input_type = 'pil' 38 | ## get the image size 39 | width, height = img.size 40 | ## now crop back the image 41 | cropped_img = img.crop((border_size, border_size, width - border_size, height - border_size)) 42 | 43 | ## if the input was binary, make sure to convert it back to binary 44 | if input_type == 'binary': 45 | cropped_img = convert_pil_img_to_binary(cropped_img) 46 | 47 | return cropped_img 48 | 49 | 50 | 51 | -------------------------------------------------------------------------------- /modules/timer.py: -------------------------------------------------------------------------------- 1 | import time 2 | import argparse 3 | 4 | 5 | class TimerSubcategory: 6 | def __init__(self, timer, category): 7 | self.timer = timer 8 | self.category = category 9 | self.start = None 10 | self.original_base_category = timer.base_category 11 | 12 | def __enter__(self): 13 | self.start = time.time() 14 | self.timer.base_category = self.original_base_category + self.category + "/" 15 | self.timer.subcategory_level += 1 16 | 17 | if self.timer.print_log: 18 | print(f"{' ' * self.timer.subcategory_level}{self.category}:") 19 | 20 | def __exit__(self, exc_type, exc_val, exc_tb): 21 | elapsed_for_subcategory = time.time() - self.start 22 | self.timer.base_category = self.original_base_category 23 | self.timer.add_time_to_record(self.original_base_category + self.category, elapsed_for_subcategory) 24 | self.timer.subcategory_level -= 1 25 | self.timer.record(self.category, disable_log=True) 26 | 27 | 28 | class Timer: 29 | def __init__(self, print_log=False): 30 | self.start = time.time() 31 | self.records = {} 32 | self.total = 0 33 | self.base_category = '' 34 | self.print_log = print_log 35 | self.subcategory_level = 0 36 | 37 | def elapsed(self): 38 | end = time.time() 39 | res = end - self.start 40 | self.start = end 41 | return res 42 | 43 | def add_time_to_record(self, category, amount): 44 | if category not in self.records: 45 | self.records[category] = 0 46 | 47 | self.records[category] += amount 48 | 49 | def record(self, category, extra_time=0, disable_log=False): 50 | e = self.elapsed() 51 | 52 | self.add_time_to_record(self.base_category + category, e + extra_time) 53 | 54 | self.total += e + extra_time 55 | 56 | if self.print_log and not disable_log: 57 | print(f"{' ' * self.subcategory_level}{category}: done in {e + extra_time:.3f}s") 58 | 59 | def subcategory(self, name): 60 | self.elapsed() 61 | 62 | subcat = TimerSubcategory(self, name) 63 | return subcat 64 | 65 | def summary(self): 66 | res = f"{self.total:.1f}s" 67 | 68 | additions = [(category, time_taken) for category, time_taken in self.records.items() if time_taken >= 0.1 and '/' not in category] 
69 | if not additions: 70 | return res 71 | 72 | res += " (" 73 | res += ", ".join([f"{category}: {time_taken:.1f}s" for category, time_taken in additions]) 74 | res += ")" 75 | 76 | return res 77 | 78 | def dump(self): 79 | return {'total': self.total, 'records': self.records} 80 | 81 | def reset(self): 82 | self.__init__() 83 | 84 | 85 | parser = argparse.ArgumentParser(add_help=False) 86 | parser.add_argument("--log-startup", action='store_true', help="print a detailed log of what's happening at startup") 87 | args = parser.parse_known_args()[0] 88 | 89 | startup_timer = Timer(print_log=args.log_startup) 90 | 91 | startup_record = None 92 | -------------------------------------------------------------------------------- /modules/txt2img.py: -------------------------------------------------------------------------------- 1 | from contextlib import closing 2 | 3 | import modules.scripts 4 | from modules import processing 5 | from modules.generation_parameters_copypaste import create_override_settings_dict 6 | from modules.shared import opts 7 | import modules.shared as shared 8 | from modules.ui import plaintext_to_html 9 | import gradio as gr 10 | 11 | 12 | def txt2img(id_task: str, prompt: str, negative_prompt: str, prompt_styles, steps: int, sampler_name: str, n_iter: int, batch_size: int, cfg_scale: float, height: int, width: int, enable_hr: bool, denoising_strength: float, hr_scale: float, hr_upscaler: str, hr_second_pass_steps: int, hr_resize_x: int, hr_resize_y: int, hr_checkpoint_name: str, hr_sampler_name: str, hr_prompt: str, hr_negative_prompt, override_settings_texts, request: gr.Request, *args): 13 | override_settings = create_override_settings_dict(override_settings_texts) 14 | 15 | p = processing.StableDiffusionProcessingTxt2Img( 16 | sd_model=shared.sd_model, 17 | outpath_samples=opts.outdir_samples or opts.outdir_txt2img_samples, 18 | outpath_grids=opts.outdir_grids or opts.outdir_txt2img_grids, 19 | prompt=prompt, 20 | styles=prompt_styles, 21 | negative_prompt=negative_prompt, 22 | sampler_name=sampler_name, 23 | batch_size=batch_size, 24 | n_iter=n_iter, 25 | steps=steps, 26 | cfg_scale=cfg_scale, 27 | width=width, 28 | height=height, 29 | enable_hr=enable_hr, 30 | denoising_strength=denoising_strength if enable_hr else None, 31 | hr_scale=hr_scale, 32 | hr_upscaler=hr_upscaler, 33 | hr_second_pass_steps=hr_second_pass_steps, 34 | hr_resize_x=hr_resize_x, 35 | hr_resize_y=hr_resize_y, 36 | hr_checkpoint_name=None if hr_checkpoint_name == 'Use same checkpoint' else hr_checkpoint_name, 37 | hr_sampler_name=None if hr_sampler_name == 'Use same sampler' else hr_sampler_name, 38 | hr_prompt=hr_prompt, 39 | hr_negative_prompt=hr_negative_prompt, 40 | override_settings=override_settings, 41 | ) 42 | 43 | p.scripts = modules.scripts.scripts_txt2img 44 | p.script_args = args 45 | 46 | p.user = request.username 47 | 48 | if shared.opts.enable_console_prompts: 49 | print(f"\ntxt2img: {prompt}", file=shared.progress_print_out) 50 | 51 | with closing(p): 52 | processed = modules.scripts.scripts_txt2img.run(p, *args) 53 | 54 | if processed is None: 55 | processed = processing.process_images(p) 56 | 57 | shared.total_tqdm.clear() 58 | 59 | generation_info_js = processed.js() 60 | if opts.samples_log_stdout: 61 | print(generation_info_js) 62 | 63 | if opts.do_not_show_images: 64 | processed.images = [] 65 | 66 | return processed.images, generation_info_js, plaintext_to_html(processed.info), plaintext_to_html(processed.comments, classname="comments") 67 | 
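# --- usage sketch (illustrative example, not a file from this repository) ---
# txt2img() above assembles a StableDiffusionProcessingTxt2Img and hands it to
# processing.process_images(). Driving the same pipeline from Python directly
# might look roughly like the sketch below; it assumes the webui has been
# initialized and a checkpoint is already loaded, and every literal value is
# an illustrative assumption rather than a project default.
#
#     from modules import processing, shared
#
#     p = processing.StableDiffusionProcessingTxt2Img(
#         sd_model=shared.sd_model,
#         prompt="a lighthouse at dawn, photo",   # assumed example prompt
#         negative_prompt="blurry",               # assumed example value
#         sampler_name="Euler a",
#         steps=20,
#         cfg_scale=7.0,
#         width=512,
#         height=512,
#     )
#     processed = processing.process_images(p)    # returns a Processed object
#     processed.images[0].save("txt2img_example.png")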
-------------------------------------------------------------------------------- /modules/ui_extra_networks_checkpoints.py: -------------------------------------------------------------------------------- 1 | import html 2 | import os 3 | 4 | from modules import shared, ui_extra_networks, sd_models 5 | from modules.ui_extra_networks import quote_js 6 | from modules.ui_extra_networks_checkpoints_user_metadata import CheckpointUserMetadataEditor 7 | 8 | 9 | class ExtraNetworksPageCheckpoints(ui_extra_networks.ExtraNetworksPage): 10 | def __init__(self): 11 | super().__init__('Checkpoints') 12 | 13 | self.allow_prompt = False 14 | 15 | def refresh(self): 16 | shared.refresh_checkpoints() 17 | 18 | def create_item(self, name, index=None, enable_filter=True): 19 | checkpoint: sd_models.CheckpointInfo = sd_models.checkpoint_aliases.get(name) 20 | if checkpoint is None: 21 | return 22 | 23 | path, ext = os.path.splitext(checkpoint.filename) 24 | return { 25 | "name": checkpoint.name_for_extra, 26 | "filename": checkpoint.filename, 27 | "shorthash": checkpoint.shorthash, 28 | "preview": self.find_preview(path), 29 | "description": self.find_description(path), 30 | "search_term": self.search_terms_from_path(checkpoint.filename) + " " + (checkpoint.sha256 or ""), 31 | "onclick": '"' + html.escape(f"""return selectCheckpoint({quote_js(name)})""") + '"', 32 | "local_preview": f"{path}.{shared.opts.samples_format}", 33 | "metadata": checkpoint.metadata, 34 | "sort_keys": {'default': index, **self.get_sort_keys(checkpoint.filename)}, 35 | } 36 | 37 | def list_items(self): 38 | # instantiate a list to protect against concurrent modification 39 | names = list(sd_models.checkpoints_list) 40 | for index, name in enumerate(names): 41 | item = self.create_item(name, index) 42 | if item is not None: 43 | yield item 44 | 45 | def allowed_directories_for_previews(self): 46 | return [v for v in [shared.cmd_opts.ckpt_dir, sd_models.model_path] if v is not None] 47 | 48 | def create_user_metadata_editor(self, ui, tabname): 49 | return CheckpointUserMetadataEditor(ui, tabname, self) 50 | -------------------------------------------------------------------------------- /modules/ui_extra_networks_checkpoints_user_metadata.py: -------------------------------------------------------------------------------- 1 | import gradio as gr 2 | 3 | from modules import ui_extra_networks_user_metadata, sd_vae, shared 4 | from modules.ui_common import create_refresh_button 5 | 6 | 7 | class CheckpointUserMetadataEditor(ui_extra_networks_user_metadata.UserMetadataEditor): 8 | def __init__(self, ui, tabname, page): 9 | super().__init__(ui, tabname, page) 10 | 11 | self.select_vae = None 12 | 13 | def save_user_metadata(self, name, desc, notes, vae): 14 | user_metadata = self.get_user_metadata(name) 15 | user_metadata["description"] = desc 16 | user_metadata["notes"] = notes 17 | user_metadata["vae"] = vae 18 | 19 | self.write_user_metadata(name, user_metadata) 20 | 21 | def update_vae(self, name): 22 | if name == shared.sd_model.sd_checkpoint_info.name_for_extra: 23 | sd_vae.reload_vae_weights() 24 | 25 | def put_values_into_components(self, name): 26 | user_metadata = self.get_user_metadata(name) 27 | values = super().put_values_into_components(name) 28 | 29 | return [ 30 | *values[0:5], 31 | user_metadata.get('vae', ''), 32 | ] 33 | 34 | def create_editor(self): 35 | self.create_default_editor_elems() 36 | 37 | with gr.Row(): 38 | self.select_vae = gr.Dropdown(choices=["Automatic", "None"] + list(sd_vae.vae_dict), value="None", 
label="Preferred VAE", elem_id="checpoint_edit_user_metadata_preferred_vae") 39 | create_refresh_button(self.select_vae, sd_vae.refresh_vae_list, lambda: {"choices": ["Automatic", "None"] + list(sd_vae.vae_dict)}, "checpoint_edit_user_metadata_refresh_preferred_vae") 40 | 41 | self.edit_notes = gr.TextArea(label='Notes', lines=4) 42 | 43 | self.create_default_buttons() 44 | 45 | viewed_components = [ 46 | self.edit_name, 47 | self.edit_description, 48 | self.html_filedata, 49 | self.html_preview, 50 | self.edit_notes, 51 | self.select_vae, 52 | ] 53 | 54 | self.button_edit\ 55 | .click(fn=self.put_values_into_components, inputs=[self.edit_name_input], outputs=viewed_components)\ 56 | .then(fn=lambda: gr.update(visible=True), inputs=[], outputs=[self.box]) 57 | 58 | edited_components = [ 59 | self.edit_description, 60 | self.edit_notes, 61 | self.select_vae, 62 | ] 63 | 64 | self.setup_save_handler(self.button_save, self.save_user_metadata, edited_components) 65 | self.button_save.click(fn=self.update_vae, inputs=[self.edit_name_input]) 66 | 67 | -------------------------------------------------------------------------------- /modules/ui_extra_networks_hypernets.py: -------------------------------------------------------------------------------- 1 | import os 2 | 3 | from modules import shared, ui_extra_networks 4 | from modules.ui_extra_networks import quote_js 5 | from modules.hashes import sha256_from_cache 6 | 7 | 8 | class ExtraNetworksPageHypernetworks(ui_extra_networks.ExtraNetworksPage): 9 | def __init__(self): 10 | super().__init__('Hypernetworks') 11 | 12 | def refresh(self): 13 | shared.reload_hypernetworks() 14 | 15 | def create_item(self, name, index=None, enable_filter=True): 16 | full_path = shared.hypernetworks.get(name) 17 | if full_path is None: 18 | return 19 | 20 | path, ext = os.path.splitext(full_path) 21 | sha256 = sha256_from_cache(full_path, f'hypernet/{name}') 22 | shorthash = sha256[0:10] if sha256 else None 23 | 24 | return { 25 | "name": name, 26 | "filename": full_path, 27 | "shorthash": shorthash, 28 | "preview": self.find_preview(path), 29 | "description": self.find_description(path), 30 | "search_term": self.search_terms_from_path(path) + " " + (sha256 or ""), 31 | "prompt": quote_js(f""), 32 | "local_preview": f"{path}.preview.{shared.opts.samples_format}", 33 | "sort_keys": {'default': index, **self.get_sort_keys(path + ext)}, 34 | } 35 | 36 | def list_items(self): 37 | # instantiate a list to protect against concurrent modification 38 | names = list(shared.hypernetworks) 39 | for index, name in enumerate(names): 40 | item = self.create_item(name, index) 41 | if item is not None: 42 | yield item 43 | 44 | def allowed_directories_for_previews(self): 45 | return [shared.cmd_opts.hypernetwork_dir] 46 | 47 | -------------------------------------------------------------------------------- /modules/ui_extra_networks_textual_inversion.py: -------------------------------------------------------------------------------- 1 | import os 2 | 3 | from modules import ui_extra_networks, sd_hijack, shared 4 | from modules.ui_extra_networks import quote_js 5 | 6 | 7 | class ExtraNetworksPageTextualInversion(ui_extra_networks.ExtraNetworksPage): 8 | def __init__(self): 9 | super().__init__('Textual Inversion') 10 | self.allow_negative_prompt = True 11 | 12 | def refresh(self): 13 | sd_hijack.model_hijack.embedding_db.load_textual_inversion_embeddings(force_reload=True) 14 | 15 | def create_item(self, name, index=None, enable_filter=True): 16 | embedding = 
sd_hijack.model_hijack.embedding_db.word_embeddings.get(name) 17 | if embedding is None: 18 | return 19 | 20 | path, ext = os.path.splitext(embedding.filename) 21 | return { 22 | "name": name, 23 | "filename": embedding.filename, 24 | "shorthash": embedding.shorthash, 25 | "preview": self.find_preview(path), 26 | "description": self.find_description(path), 27 | "search_term": self.search_terms_from_path(embedding.filename) + " " + (embedding.hash or ""), 28 | "prompt": quote_js(embedding.name), 29 | "local_preview": f"{path}.preview.{shared.opts.samples_format}", 30 | "sort_keys": {'default': index, **self.get_sort_keys(embedding.filename)}, 31 | } 32 | 33 | def list_items(self): 34 | # instantiate a list to protect against concurrent modification 35 | names = list(sd_hijack.model_hijack.embedding_db.word_embeddings) 36 | for index, name in enumerate(names): 37 | item = self.create_item(name, index) 38 | if item is not None: 39 | yield item 40 | 41 | def allowed_directories_for_previews(self): 42 | return list(sd_hijack.model_hijack.embedding_db.embedding_dirs) 43 | -------------------------------------------------------------------------------- /modules/ui_gradio_extensions.py: -------------------------------------------------------------------------------- 1 | import os 2 | import gradio as gr 3 | 4 | from modules import localization, shared, scripts 5 | from modules.paths import script_path, data_path, cwd 6 | 7 | 8 | def webpath(fn): 9 | if fn.startswith(cwd): 10 | web_path = os.path.relpath(fn, cwd) 11 | else: 12 | web_path = os.path.abspath(fn) 13 | 14 | return f'file={web_path}?{os.path.getmtime(fn)}' 15 | 16 | 17 | def javascript_html(): 18 | # Ensure localization is in `window` before scripts 19 | head = f'<script type="text/javascript">{localization.localization_js(shared.opts.localization)}</script>\n' 20 | 21 | script_js = os.path.join(script_path, "script.js") 22 | head += f'<script type="text/javascript" src="{webpath(script_js)}"></script>\n' 23 | 24 | for script in scripts.list_scripts("javascript", ".js"): 25 | head += f'<script type="text/javascript" src="{webpath(script.path)}"></script>\n' 26 | 27 | for script in scripts.list_scripts("javascript", ".mjs"): 28 | head += f'<script type="module" src="{webpath(script.path)}"></script>\n' 29 | 30 | if shared.cmd_opts.theme: 31 | head += f'<script type="text/javascript">set_theme(\"{shared.cmd_opts.theme}\");</script>\n' 32 | 33 | return head 34 | 35 | 36 | def css_html(): 37 | head = "" 38 | 39 | def stylesheet(fn): 40 | return f'<link rel="stylesheet" property="stylesheet" href="{webpath(fn)}">' 41 | 42 | for cssfile in scripts.list_files_with_name("style.css"): 43 | if not os.path.isfile(cssfile): 44 | continue 45 | 46 | head += stylesheet(cssfile) 47 | 48 | if os.path.exists(os.path.join(data_path, "user.css")): 49 | head += stylesheet(os.path.join(data_path, "user.css")) 50 | 51 | return head 52 | 53 | 54 | def reload_javascript(): 55 | js = javascript_html() 56 | css = css_html() 57 | 58 | def template_response(*args, **kwargs): 59 | res = shared.GradioTemplateResponseOriginal(*args, **kwargs) 60 | res.body = res.body.replace(b'</head>', f'{js}</head>'.encode("utf8")) 61 | res.body = res.body.replace(b'</body>', f'{css}</body>'.encode("utf8")) 62 | res.init_headers() 63 | return res 64 | 65 | gr.routes.templates.TemplateResponse = template_response 66 | 67 | 68 | if not hasattr(shared, 'GradioTemplateResponseOriginal'): 69 | shared.GradioTemplateResponseOriginal = gr.routes.templates.TemplateResponse 70 | -------------------------------------------------------------------------------- /modules/ui_postprocessing.py: -------------------------------------------------------------------------------- 1 | import gradio as gr 2 | from modules import scripts, shared, ui_common, postprocessing, call_queue, ui_toprow 3 | import modules.generation_parameters_copypaste as parameters_copypaste 4 | 5 | 6 | def create_ui(): 7 | dummy_component = gr.Label(visible=False) 8 | tab_index = 
gr.State(value=0) 9 | 10 | with gr.Row(equal_height=False, variant='compact'): 11 | with gr.Column(variant='compact'): 12 | with gr.Tabs(elem_id="mode_extras"): 13 | with gr.TabItem('Single Image', id="single_image", elem_id="extras_single_tab") as tab_single: 14 | extras_image = gr.Image(label="Source", source="upload", interactive=True, type="pil", elem_id="extras_image") 15 | 16 | with gr.TabItem('Batch Process', id="batch_process", elem_id="extras_batch_process_tab") as tab_batch: 17 | image_batch = gr.Files(label="Batch Process", interactive=True, elem_id="extras_image_batch") 18 | 19 | with gr.TabItem('Batch from Directory', id="batch_from_directory", elem_id="extras_batch_directory_tab") as tab_batch_dir: 20 | extras_batch_input_dir = gr.Textbox(label="Input directory", **shared.hide_dirs, placeholder="A directory on the same machine where the server is running.", elem_id="extras_batch_input_dir") 21 | extras_batch_output_dir = gr.Textbox(label="Output directory", **shared.hide_dirs, placeholder="Leave blank to save images to the default path.", elem_id="extras_batch_output_dir") 22 | show_extras_results = gr.Checkbox(label='Show result images', value=True, elem_id="extras_show_extras_results") 23 | 24 | script_inputs = scripts.scripts_postproc.setup_ui() 25 | 26 | with gr.Column(): 27 | toprow = ui_toprow.Toprow(is_compact=True, is_img2img=False, id_part="extras") 28 | toprow.create_inline_toprow_image() 29 | submit = toprow.submit 30 | 31 | result_images, html_info_x, html_info, html_log = ui_common.create_output_panel("extras", shared.opts.outdir_extras_samples) 32 | 33 | tab_single.select(fn=lambda: 0, inputs=[], outputs=[tab_index]) 34 | tab_batch.select(fn=lambda: 1, inputs=[], outputs=[tab_index]) 35 | tab_batch_dir.select(fn=lambda: 2, inputs=[], outputs=[tab_index]) 36 | 37 | submit.click( 38 | fn=call_queue.wrap_gradio_gpu_call(postprocessing.run_postprocessing_webui, extra_outputs=[None, '']), 39 | _js="submit_extras", 40 | inputs=[ 41 | dummy_component, 42 | tab_index, 43 | extras_image, 44 | image_batch, 45 | extras_batch_input_dir, 46 | extras_batch_output_dir, 47 | show_extras_results, 48 | *script_inputs 49 | ], 50 | outputs=[ 51 | result_images, 52 | html_info_x, 53 | html_log, 54 | ], 55 | show_progress=False, 56 | ) 57 | 58 | parameters_copypaste.add_paste_fields("extras", extras_image, None) 59 | 60 | extras_image.change( 61 | fn=scripts.scripts_postproc.image_changed, 62 | inputs=[], outputs=[] 63 | ) 64 | -------------------------------------------------------------------------------- /modules/ui_tempdir.py: -------------------------------------------------------------------------------- 1 | import os 2 | import tempfile 3 | from collections import namedtuple 4 | from pathlib import Path 5 | 6 | import gradio.components 7 | 8 | from PIL import PngImagePlugin 9 | 10 | from modules import shared 11 | 12 | 13 | Savedfile = namedtuple("Savedfile", ["name"]) 14 | 15 | 16 | def register_tmp_file(gradio, filename): 17 | if hasattr(gradio, 'temp_file_sets'): # gradio 3.15 18 | gradio.temp_file_sets[0] = gradio.temp_file_sets[0] | {os.path.abspath(filename)} 19 | 20 | if hasattr(gradio, 'temp_dirs'): # gradio 3.9 21 | gradio.temp_dirs = gradio.temp_dirs | {os.path.abspath(os.path.dirname(filename))} 22 | 23 | 24 | def check_tmp_file(gradio, filename): 25 | if hasattr(gradio, 'temp_file_sets'): 26 | return any(filename in fileset for fileset in gradio.temp_file_sets) 27 | 28 | if hasattr(gradio, 'temp_dirs'): 29 | return any(Path(temp_dir).resolve() in 
Path(filename).resolve().parents for temp_dir in gradio.temp_dirs) 30 | 31 | return False 32 | 33 | 34 | def save_pil_to_file(self, pil_image, dir=None, format="png"): 35 | already_saved_as = getattr(pil_image, 'already_saved_as', None) 36 | if already_saved_as and os.path.isfile(already_saved_as): 37 | register_tmp_file(shared.demo, already_saved_as) 38 | filename = already_saved_as 39 | 40 | if not shared.opts.save_images_add_number: 41 | filename += f'?{os.path.getmtime(already_saved_as)}' 42 | 43 | return filename 44 | 45 | if shared.opts.temp_dir != "": 46 | dir = shared.opts.temp_dir 47 | else: 48 | os.makedirs(dir, exist_ok=True) 49 | 50 | use_metadata = False 51 | metadata = PngImagePlugin.PngInfo() 52 | for key, value in pil_image.info.items(): 53 | if isinstance(key, str) and isinstance(value, str): 54 | metadata.add_text(key, value) 55 | use_metadata = True 56 | 57 | file_obj = tempfile.NamedTemporaryFile(delete=False, suffix=".png", dir=dir) 58 | pil_image.save(file_obj, pnginfo=(metadata if use_metadata else None)) 59 | return file_obj.name 60 | 61 | 62 | def install_ui_tempdir_override(): 63 | """override save to file function so that it also writes PNG info""" 64 | gradio.components.IOComponent.pil_to_temp_file = save_pil_to_file 65 | 66 | 67 | def on_tmpdir_changed(): 68 | if shared.opts.temp_dir == "" or shared.demo is None: 69 | return 70 | 71 | os.makedirs(shared.opts.temp_dir, exist_ok=True) 72 | 73 | register_tmp_file(shared.demo, os.path.join(shared.opts.temp_dir, "x")) 74 | 75 | 76 | def cleanup_tmpdr(): 77 | temp_dir = shared.opts.temp_dir 78 | if temp_dir == "" or not os.path.isdir(temp_dir): 79 | return 80 | 81 | for root, _, files in os.walk(temp_dir, topdown=False): 82 | for name in files: 83 | _, extension = os.path.splitext(name) 84 | if extension != ".png": 85 | continue 86 | 87 | filename = os.path.join(root, name) 88 | os.remove(filename) 89 | -------------------------------------------------------------------------------- /modules/util.py: -------------------------------------------------------------------------------- 1 | import os 2 | import re 3 | 4 | from modules import shared 5 | from modules.paths_internal import script_path 6 | 7 | 8 | def natural_sort_key(s, regex=re.compile('([0-9]+)')): 9 | return [int(text) if text.isdigit() else text.lower() for text in regex.split(s)] 10 | 11 | 12 | def listfiles(dirname): 13 | filenames = [os.path.join(dirname, x) for x in sorted(os.listdir(dirname), key=natural_sort_key) if not x.startswith(".")] 14 | return [file for file in filenames if os.path.isfile(file)] 15 | 16 | 17 | def html_path(filename): 18 | return os.path.join(script_path, "html", filename) 19 | 20 | 21 | def html(filename): 22 | path = html_path(filename) 23 | 24 | if os.path.exists(path): 25 | with open(path, encoding="utf8") as file: 26 | return file.read() 27 | 28 | return "" 29 | 30 | 31 | def walk_files(path, allowed_extensions=None): 32 | if not os.path.exists(path): 33 | return 34 | 35 | if allowed_extensions is not None: 36 | allowed_extensions = set(allowed_extensions) 37 | 38 | items = list(os.walk(path, followlinks=True)) 39 | items = sorted(items, key=lambda x: natural_sort_key(x[0])) 40 | 41 | for root, _, files in items: 42 | for filename in sorted(files, key=natural_sort_key): 43 | if allowed_extensions is not None: 44 | _, ext = os.path.splitext(filename) 45 | if ext not in allowed_extensions: 46 | continue 47 | 48 | if not shared.opts.list_hidden_files and ("/." in root or "\\." 
in root): 49 | continue 50 | 51 | yield os.path.join(root, filename) 52 | 53 | 54 | def ldm_print(*args, **kwargs): 55 | if shared.opts.hide_ldm_prints: 56 | return 57 | 58 | print(*args, **kwargs) 59 | -------------------------------------------------------------------------------- /modules/xpu_specific.py: -------------------------------------------------------------------------------- 1 | from modules import shared 2 | from modules.sd_hijack_utils import CondFunc 3 | 4 | has_ipex = False 5 | try: 6 | import torch 7 | import intel_extension_for_pytorch as ipex # noqa: F401 8 | has_ipex = True 9 | except Exception: 10 | pass 11 | 12 | 13 | def check_for_xpu(): 14 | return has_ipex and hasattr(torch, 'xpu') and torch.xpu.is_available() 15 | 16 | 17 | def get_xpu_device_string(): 18 | if shared.cmd_opts.device_id is not None: 19 | return f"xpu:{shared.cmd_opts.device_id}" 20 | return "xpu" 21 | 22 | 23 | def torch_xpu_gc(): 24 | with torch.xpu.device(get_xpu_device_string()): 25 | torch.xpu.empty_cache() 26 | 27 | 28 | has_xpu = check_for_xpu() 29 | 30 | if has_xpu: 31 | # W/A for https://github.com/intel/intel-extension-for-pytorch/issues/452: torch.Generator API doesn't support XPU device 32 | CondFunc('torch.Generator', 33 | lambda orig_func, device=None: torch.xpu.Generator(device), 34 | lambda orig_func, device=None: device is not None and device.type == "xpu") 35 | 36 | # W/A for some OPs that could not handle different input dtypes 37 | CondFunc('torch.nn.functional.layer_norm', 38 | lambda orig_func, input, normalized_shape=None, weight=None, *args, **kwargs: 39 | orig_func(input.to(weight.data.dtype), normalized_shape, weight, *args, **kwargs), 40 | lambda orig_func, input, normalized_shape=None, weight=None, *args, **kwargs: 41 | weight is not None and input.dtype != weight.data.dtype) 42 | CondFunc('torch.nn.modules.GroupNorm.forward', 43 | lambda orig_func, self, input: orig_func(self, input.to(self.weight.data.dtype)), 44 | lambda orig_func, self, input: input.dtype != self.weight.data.dtype) 45 | CondFunc('torch.nn.modules.linear.Linear.forward', 46 | lambda orig_func, self, input: orig_func(self, input.to(self.weight.data.dtype)), 47 | lambda orig_func, self, input: input.dtype != self.weight.data.dtype) 48 | CondFunc('torch.nn.modules.conv.Conv2d.forward', 49 | lambda orig_func, self, input: orig_func(self, input.to(self.weight.data.dtype)), 50 | lambda orig_func, self, input: input.dtype != self.weight.data.dtype) 51 | CondFunc('torch.bmm', 52 | lambda orig_func, input, mat2, out=None: orig_func(input.to(mat2.dtype), mat2, out=out), 53 | lambda orig_func, input, mat2, out=None: input.dtype != mat2.dtype) 54 | CondFunc('torch.cat', 55 | lambda orig_func, tensors, dim=0, out=None: orig_func([t.to(tensors[0].dtype) for t in tensors], dim=dim, out=out), 56 | lambda orig_func, tensors, dim=0, out=None: not all(t.dtype == tensors[0].dtype for t in tensors)) 57 | CondFunc('torch.nn.functional.scaled_dot_product_attention', 58 | lambda orig_func, query, key, value, attn_mask=None, dropout_p=0.0, is_causal=False: orig_func(query, key.to(query.dtype), value.to(query.dtype), attn_mask, dropout_p, is_causal), 59 | lambda orig_func, query, key, value, attn_mask=None, dropout_p=0.0, is_causal=False: query.dtype != key.dtype or query.dtype != value.dtype) 60 | -------------------------------------------------------------------------------- /package.json: -------------------------------------------------------------------------------- 1 | { 2 | "name": "stable-diffusion-webui", 3 | 
"version": "0.0.0", 4 | "devDependencies": { 5 | "eslint": "^8.40.0" 6 | }, 7 | "scripts": { 8 | "lint": "eslint .", 9 | "fix": "eslint --fix ." 10 | } 11 | } 12 | -------------------------------------------------------------------------------- /params.txt: -------------------------------------------------------------------------------- 1 | cat 2 | Steps: 50, Sampler: Euler, CFG scale: 7.0, Seed: 803715625, Size: 512x512, Model hash: 6ce0161689, Model: v1-5-pruned-emaonly, Lora hashes: "more_details: 3b8aa1d351ef", Version: v1.7.0 -------------------------------------------------------------------------------- /pyproject.toml: -------------------------------------------------------------------------------- 1 | [tool.ruff] 2 | 3 | target-version = "py39" 4 | 5 | extend-select = [ 6 | "B", 7 | "C", 8 | "I", 9 | "W", 10 | ] 11 | 12 | exclude = [ 13 | "extensions", 14 | "extensions-disabled", 15 | ] 16 | 17 | ignore = [ 18 | "E501", # Line too long 19 | "E721", # Do not compare types, use `isinstance` 20 | "E731", # Do not assign a `lambda` expression, use a `def` 21 | 22 | "I001", # Import block is un-sorted or un-formatted 23 | "C901", # Function is too complex 24 | "C408", # Rewrite as a literal 25 | "W605", # invalid escape sequence, messes with some docstrings 26 | ] 27 | 28 | [tool.ruff.per-file-ignores] 29 | "webui.py" = ["E402"] # Module level import not at top of file 30 | 31 | [tool.ruff.flake8-bugbear] 32 | # Allow default arguments like, e.g., `data: List[str] = fastapi.Query(None)`. 33 | extend-immutable-calls = ["fastapi.Depends", "fastapi.security.HTTPBasic"] 34 | 35 | [tool.pytest.ini_options] 36 | base_url = "http://127.0.0.1:7860" 37 | -------------------------------------------------------------------------------- /requirements-test.txt: -------------------------------------------------------------------------------- 1 | pytest-base-url~=2.0 2 | pytest-cov~=4.0 3 | pytest~=7.3 4 | -------------------------------------------------------------------------------- /requirements.txt: -------------------------------------------------------------------------------- 1 | GitPython 2 | Pillow 3 | accelerate 4 | 5 | basicsr 6 | blendmodes 7 | clean-fid 8 | einops 9 | fastapi>=0.90.1 10 | gfpgan 11 | gradio==3.41.2 12 | inflection 13 | jsonmerge 14 | kornia 15 | lark 16 | numpy 17 | omegaconf 18 | open-clip-torch 19 | 20 | piexif 21 | psutil 22 | pytorch_lightning 23 | realesrgan 24 | requests 25 | resize-right 26 | 27 | safetensors 28 | scikit-image>=0.19 29 | timm 30 | tomesd 31 | torch 32 | torchdiffeq 33 | torchsde 34 | transformers==4.30.2 35 | -------------------------------------------------------------------------------- /requirements_versions.txt: -------------------------------------------------------------------------------- 1 | GitPython==3.1.32 2 | Pillow==9.5.0 3 | accelerate==0.21.0 4 | basicsr==1.4.2 5 | blendmodes==2022 6 | clean-fid==0.1.35 7 | einops==0.4.1 8 | fastapi==0.94.0 9 | gfpgan==1.3.8 10 | gradio==3.41.2 11 | httpcore==0.15 12 | inflection==0.5.1 13 | jsonmerge==1.8.0 14 | kornia==0.6.7 15 | lark==1.1.2 16 | numpy==1.23.5 17 | omegaconf==2.2.3 18 | open-clip-torch==2.20.0 19 | piexif==1.1.3 20 | psutil==5.9.5 21 | pytorch_lightning==1.9.4 22 | realesrgan==0.3.0 23 | resize-right==0.0.2 24 | safetensors==0.3.1 25 | scikit-image==0.21.0 26 | timm==0.9.2 27 | tomesd==0.1.3 28 | torch 29 | torchdiffeq==0.2.3 30 | torchsde==0.2.6 31 | transformers==4.30.2 32 | httpx==0.24.1 33 | -------------------------------------------------------------------------------- 
/scripts/custom_code.py: -------------------------------------------------------------------------------- 1 | import modules.scripts as scripts 2 | import gradio as gr 3 | import ast 4 | import copy 5 | 6 | from modules.processing import Processed 7 | from modules.shared import cmd_opts 8 | 9 | 10 | def convertExpr2Expression(expr): 11 | expr.lineno = 0 12 | expr.col_offset = 0 13 | result = ast.Expression(expr.value, lineno=0, col_offset=0) 14 | 15 | return result 16 | 17 | 18 | def exec_with_return(code, module): 19 | """ 20 | like exec() but can return values 21 | https://stackoverflow.com/a/52361938/5862977 22 | """ 23 | code_ast = ast.parse(code) 24 | 25 | init_ast = copy.deepcopy(code_ast) 26 | init_ast.body = code_ast.body[:-1] 27 | 28 | last_ast = copy.deepcopy(code_ast) 29 | last_ast.body = code_ast.body[-1:] 30 | 31 | exec(compile(init_ast, "", "exec"), module.__dict__) 32 | if type(last_ast.body[0]) == ast.Expr: 33 | return eval(compile(convertExpr2Expression(last_ast.body[0]), "", "eval"), module.__dict__) 34 | else: 35 | exec(compile(last_ast, "", "exec"), module.__dict__) 36 | 37 | 38 | class Script(scripts.Script): 39 | 40 | def title(self): 41 | return "Custom code" 42 | 43 | def show(self, is_img2img): 44 | return cmd_opts.allow_code 45 | 46 | def ui(self, is_img2img): 47 | example = """from modules.processing import process_images 48 | 49 | p.width = 768 50 | p.height = 768 51 | p.batch_size = 2 52 | p.steps = 10 53 | 54 | return process_images(p) 55 | """ 56 | 57 | 58 | code = gr.Code(value=example, language="python", label="Python code", elem_id=self.elem_id("code")) 59 | indent_level = gr.Number(label='Indent level', value=2, precision=0, elem_id=self.elem_id("indent_level")) 60 | 61 | return [code, indent_level] 62 | 63 | def run(self, p, code, indent_level): 64 | assert cmd_opts.allow_code, '--allow-code option must be enabled' 65 | 66 | display_result_data = [[], -1, ""] 67 | 68 | def display(imgs, s=display_result_data[1], i=display_result_data[2]): 69 | display_result_data[0] = imgs 70 | display_result_data[1] = s 71 | display_result_data[2] = i 72 | 73 | from types import ModuleType 74 | module = ModuleType("testmodule") 75 | module.__dict__.update(globals()) 76 | module.p = p 77 | module.display = display 78 | 79 | indent = " " * indent_level 80 | indented = code.replace('\n', f"\n{indent}") 81 | body = f"""def __webuitemp__(): 82 | {indent}{indented} 83 | __webuitemp__()""" 84 | 85 | result = exec_with_return(body, module) 86 | 87 | if isinstance(result, Processed): 88 | return result 89 | 90 | return Processed(p, *display_result_data) 91 | -------------------------------------------------------------------------------- /scripts/postprocessing_caption.py: -------------------------------------------------------------------------------- 1 | from modules import scripts_postprocessing, ui_components, deepbooru, shared 2 | import gradio as gr 3 | 4 | 5 | class ScriptPostprocessingCaption(scripts_postprocessing.ScriptPostprocessing): 6 | name = "Caption" 7 | order = 4000 8 | 9 | def ui(self): 10 | with ui_components.InputAccordion(False, label="Caption") as enable: 11 | option = gr.CheckboxGroup(value=["Deepbooru"], choices=["Deepbooru", "BLIP"], show_label=False) 12 | 13 | return { 14 | "enable": enable, 15 | "option": option, 16 | } 17 | 18 | def process(self, pp: scripts_postprocessing.PostprocessedImage, enable, option): 19 | if not enable: 20 | return 21 | 22 | captions = [pp.caption] 23 | 24 | if "Deepbooru" in option: 25 |
captions.append(deepbooru.model.tag(pp.image)) 26 | 27 | if "BLIP" in option: 28 | captions.append(shared.interrogator.generate_caption(pp.image)) 29 | 30 | pp.caption = ", ".join([x for x in captions if x]) 31 | -------------------------------------------------------------------------------- /scripts/postprocessing_codeformer.py: -------------------------------------------------------------------------------- 1 | from PIL import Image 2 | import numpy as np 3 | 4 | from modules import scripts_postprocessing, codeformer_model, ui_components 5 | import gradio as gr 6 | 7 | 8 | class ScriptPostprocessingCodeFormer(scripts_postprocessing.ScriptPostprocessing): 9 | name = "CodeFormer" 10 | order = 3000 11 | 12 | def ui(self): 13 | with ui_components.InputAccordion(False, label="CodeFormer") as enable: 14 | with gr.Row(): 15 | codeformer_visibility = gr.Slider(minimum=0.0, maximum=1.0, step=0.001, label="Visibility", value=1.0, elem_id="extras_codeformer_visibility") 16 | codeformer_weight = gr.Slider(minimum=0.0, maximum=1.0, step=0.001, label="Weight (0 = maximum effect, 1 = minimum effect)", value=0, elem_id="extras_codeformer_weight") 17 | 18 | return { 19 | "enable": enable, 20 | "codeformer_visibility": codeformer_visibility, 21 | "codeformer_weight": codeformer_weight, 22 | } 23 | 24 | def process(self, pp: scripts_postprocessing.PostprocessedImage, enable, codeformer_visibility, codeformer_weight): 25 | if codeformer_visibility == 0 or not enable: 26 | return 27 | 28 | restored_img = codeformer_model.codeformer.restore(np.array(pp.image, dtype=np.uint8), w=codeformer_weight) 29 | res = Image.fromarray(restored_img) 30 | 31 | if codeformer_visibility < 1.0: 32 | res = Image.blend(pp.image, res, codeformer_visibility) 33 | 34 | pp.image = res 35 | pp.info["CodeFormer visibility"] = round(codeformer_visibility, 3) 36 | pp.info["CodeFormer weight"] = round(codeformer_weight, 3) 37 | -------------------------------------------------------------------------------- /scripts/postprocessing_create_flipped_copies.py: -------------------------------------------------------------------------------- 1 | from PIL import ImageOps, Image 2 | 3 | from modules import scripts_postprocessing, ui_components 4 | import gradio as gr 5 | 6 | 7 | class ScriptPostprocessingCreateFlippedCopies(scripts_postprocessing.ScriptPostprocessing): 8 | name = "Create flipped copies" 9 | order = 4000 10 | 11 | def ui(self): 12 | with ui_components.InputAccordion(False, label="Create flipped copies") as enable: 13 | with gr.Row(): 14 | option = gr.CheckboxGroup(value=["Horizontal"], choices=["Horizontal", "Vertical", "Both"], show_label=False) 15 | 16 | return { 17 | "enable": enable, 18 | "option": option, 19 | } 20 | 21 | def process(self, pp: scripts_postprocessing.PostprocessedImage, enable, option): 22 | if not enable: 23 | return 24 | 25 | if "Horizontal" in option: 26 | pp.extra_images.append(ImageOps.mirror(pp.image)) 27 | 28 | if "Vertical" in option: 29 | pp.extra_images.append(pp.image.transpose(Image.Transpose.FLIP_TOP_BOTTOM)) 30 | 31 | if "Both" in option: 32 | pp.extra_images.append(pp.image.transpose(Image.Transpose.FLIP_TOP_BOTTOM).transpose(Image.Transpose.FLIP_LEFT_RIGHT)) 33 | -------------------------------------------------------------------------------- /scripts/postprocessing_focal_crop.py: -------------------------------------------------------------------------------- 1 | 2 | from modules import scripts_postprocessing, ui_components, errors 3 | import gradio as gr 4 | 5 | from 
modules.textual_inversion import autocrop 6 | 7 | 8 | class ScriptPostprocessingFocalCrop(scripts_postprocessing.ScriptPostprocessing): 9 | name = "Auto focal point crop" 10 | order = 4000 11 | 12 | def ui(self): 13 | with ui_components.InputAccordion(False, label="Auto focal point crop") as enable: 14 | face_weight = gr.Slider(label='Focal point face weight', value=0.9, minimum=0.0, maximum=1.0, step=0.05, elem_id="postprocess_focal_crop_face_weight") 15 | entropy_weight = gr.Slider(label='Focal point entropy weight', value=0.15, minimum=0.0, maximum=1.0, step=0.05, elem_id="postprocess_focal_crop_entropy_weight") 16 | edges_weight = gr.Slider(label='Focal point edges weight', value=0.5, minimum=0.0, maximum=1.0, step=0.05, elem_id="postprocess_focal_crop_edges_weight") 17 | debug = gr.Checkbox(label='Create debug image', elem_id="train_process_focal_crop_debug") 18 | 19 | return { 20 | "enable": enable, 21 | "face_weight": face_weight, 22 | "entropy_weight": entropy_weight, 23 | "edges_weight": edges_weight, 24 | "debug": debug, 25 | } 26 | 27 | def process(self, pp: scripts_postprocessing.PostprocessedImage, enable, face_weight, entropy_weight, edges_weight, debug): 28 | if not enable: 29 | return 30 | 31 | if not pp.shared.target_width or not pp.shared.target_height: 32 | return 33 | 34 | dnn_model_path = None 35 | try: 36 | dnn_model_path = autocrop.download_and_cache_models() 37 | except Exception: 38 | errors.report("Unable to load face detection model for auto crop selection. Falling back to lower quality haar method.", exc_info=True) 39 | 40 | autocrop_settings = autocrop.Settings( 41 | crop_width=pp.shared.target_width, 42 | crop_height=pp.shared.target_height, 43 | face_points_weight=face_weight, 44 | entropy_points_weight=entropy_weight, 45 | corner_points_weight=edges_weight, 46 | annotate_image=debug, 47 | dnn_model_path=dnn_model_path, 48 | ) 49 | 50 | result, *others = autocrop.crop_image(pp.image, autocrop_settings) 51 | 52 | pp.image = result 53 | pp.extra_images = [pp.create_copy(x, nametags=["focal-crop-debug"], disable_processing=True) for x in others] 54 | 55 | -------------------------------------------------------------------------------- /scripts/postprocessing_gfpgan.py: -------------------------------------------------------------------------------- 1 | from PIL import Image 2 | import numpy as np 3 | 4 | from modules import scripts_postprocessing, gfpgan_model, ui_components 5 | import gradio as gr 6 | 7 | 8 | class ScriptPostprocessingGfpGan(scripts_postprocessing.ScriptPostprocessing): 9 | name = "GFPGAN" 10 | order = 2000 11 | 12 | def ui(self): 13 | with ui_components.InputAccordion(False, label="GFPGAN") as enable: 14 | gfpgan_visibility = gr.Slider(minimum=0.0, maximum=1.0, step=0.001, label="Visibility", value=1.0, elem_id="extras_gfpgan_visibility") 15 | 16 | return { 17 | "enable": enable, 18 | "gfpgan_visibility": gfpgan_visibility, 19 | } 20 | 21 | def process(self, pp: scripts_postprocessing.PostprocessedImage, enable, gfpgan_visibility): 22 | if gfpgan_visibility == 0 or not enable: 23 | return 24 | 25 | restored_img = gfpgan_model.gfpgan_fix_faces(np.array(pp.image, dtype=np.uint8)) 26 | res = Image.fromarray(restored_img) 27 | 28 | if gfpgan_visibility < 1.0: 29 | res = Image.blend(pp.image, res, gfpgan_visibility) 30 | 31 | pp.image = res 32 | pp.info["GFPGAN visibility"] = round(gfpgan_visibility, 3) 33 | -------------------------------------------------------------------------------- /scripts/postprocessing_split_oversized.py: 
-------------------------------------------------------------------------------- 1 | import math 2 | 3 | from modules import scripts_postprocessing, ui_components 4 | import gradio as gr 5 | 6 | 7 | def split_pic(image, inverse_xy, width, height, overlap_ratio): 8 | if inverse_xy: 9 | from_w, from_h = image.height, image.width 10 | to_w, to_h = height, width 11 | else: 12 | from_w, from_h = image.width, image.height 13 | to_w, to_h = width, height 14 | h = from_h * to_w // from_w 15 | if inverse_xy: 16 | image = image.resize((h, to_w)) 17 | else: 18 | image = image.resize((to_w, h)) 19 | 20 | split_count = math.ceil((h - to_h * overlap_ratio) / (to_h * (1.0 - overlap_ratio))) 21 | y_step = (h - to_h) / (split_count - 1) 22 | for i in range(split_count): 23 | y = int(y_step * i) 24 | if inverse_xy: 25 | splitted = image.crop((y, 0, y + to_h, to_w)) 26 | else: 27 | splitted = image.crop((0, y, to_w, y + to_h)) 28 | yield splitted 29 | 30 | 31 | class ScriptPostprocessingSplitOversized(scripts_postprocessing.ScriptPostprocessing): 32 | name = "Split oversized images" 33 | order = 4000 34 | 35 | def ui(self): 36 | with ui_components.InputAccordion(False, label="Split oversized images") as enable: 37 | with gr.Row(): 38 | split_threshold = gr.Slider(label='Threshold', value=0.5, minimum=0.0, maximum=1.0, step=0.05, elem_id="postprocess_split_threshold") 39 | overlap_ratio = gr.Slider(label='Overlap ratio', value=0.2, minimum=0.0, maximum=0.9, step=0.05, elem_id="postprocess_overlap_ratio") 40 | 41 | return { 42 | "enable": enable, 43 | "split_threshold": split_threshold, 44 | "overlap_ratio": overlap_ratio, 45 | } 46 | 47 | def process(self, pp: scripts_postprocessing.PostprocessedImage, enable, split_threshold, overlap_ratio): 48 | if not enable: 49 | return 50 | 51 | width = pp.shared.target_width 52 | height = pp.shared.target_height 53 | 54 | if not width or not height: 55 | return 56 | 57 | if pp.image.height > pp.image.width: 58 | ratio = (pp.image.width * height) / (pp.image.height * width) 59 | inverse_xy = False 60 | else: 61 | ratio = (pp.image.height * width) / (pp.image.width * height) 62 | inverse_xy = True 63 | 64 | if ratio >= 1.0 and ratio > split_threshold: 65 | return 66 | 67 | result, *others = split_pic(pp.image, inverse_xy, width, height, overlap_ratio) 68 | 69 | pp.image = result 70 | pp.extra_images = [pp.create_copy(x) for x in others] 71 | 72 | -------------------------------------------------------------------------------- /scripts/processing_autosized_crop.py: -------------------------------------------------------------------------------- 1 | from PIL import Image 2 | 3 | from modules import scripts_postprocessing, ui_components 4 | import gradio as gr 5 | 6 | 7 | def center_crop(image: Image, w: int, h: int): 8 | iw, ih = image.size 9 | if ih / h < iw / w: 10 | sw = w * ih / h 11 | box = (iw - sw) / 2, 0, iw - (iw - sw) / 2, ih 12 | else: 13 | sh = h * iw / w 14 | box = 0, (ih - sh) / 2, iw, ih - (ih - sh) / 2 15 | return image.resize((w, h), Image.Resampling.LANCZOS, box) 16 | 17 | 18 | def multicrop_pic(image: Image, mindim, maxdim, minarea, maxarea, objective, threshold): 19 | iw, ih = image.size 20 | err = lambda w, h: 1 - (lambda x: x if x < 1 else 1 / x)(iw / ih / (w / h)) 21 | wh = max(((w, h) for w in range(mindim, maxdim + 1, 64) for h in range(mindim, maxdim + 1, 64) 22 | if minarea <= w * h <= maxarea and err(w, h) <= threshold), 23 | key=lambda wh: (wh[0] * wh[1], -err(*wh))[::1 if objective == 'Maximize area' else -1], 24 | default=None 25 | ) 26 | 
return wh and center_crop(image, *wh) 27 | 28 | 29 | class ScriptPostprocessingAutosizedCrop(scripts_postprocessing.ScriptPostprocessing): 30 | name = "Auto-sized crop" 31 | order = 4000 32 | 33 | def ui(self): 34 | with ui_components.InputAccordion(False, label="Auto-sized crop") as enable: 35 | gr.Markdown('Each image is center-cropped with an automatically chosen width and height.') 36 | with gr.Row(): 37 | mindim = gr.Slider(minimum=64, maximum=2048, step=8, label="Dimension lower bound", value=384, elem_id="postprocess_multicrop_mindim") 38 | maxdim = gr.Slider(minimum=64, maximum=2048, step=8, label="Dimension upper bound", value=768, elem_id="postprocess_multicrop_maxdim") 39 | with gr.Row(): 40 | minarea = gr.Slider(minimum=64 * 64, maximum=2048 * 2048, step=1, label="Area lower bound", value=64 * 64, elem_id="postprocess_multicrop_minarea") 41 | maxarea = gr.Slider(minimum=64 * 64, maximum=2048 * 2048, step=1, label="Area upper bound", value=640 * 640, elem_id="postprocess_multicrop_maxarea") 42 | with gr.Row(): 43 | objective = gr.Radio(["Maximize area", "Minimize error"], value="Maximize area", label="Resizing objective", elem_id="postprocess_multicrop_objective") 44 | threshold = gr.Slider(minimum=0, maximum=1, step=0.01, label="Error threshold", value=0.1, elem_id="postprocess_multicrop_threshold") 45 | 46 | return { 47 | "enable": enable, 48 | "mindim": mindim, 49 | "maxdim": maxdim, 50 | "minarea": minarea, 51 | "maxarea": maxarea, 52 | "objective": objective, 53 | "threshold": threshold, 54 | } 55 | 56 | def process(self, pp: scripts_postprocessing.PostprocessedImage, enable, mindim, maxdim, minarea, maxarea, objective, threshold): 57 | if not enable: 58 | return 59 | 60 | cropped = multicrop_pic(pp.image, mindim, maxdim, minarea, maxarea, objective, threshold) 61 | if cropped is not None: 62 | pp.image = cropped 63 | else: 64 | print(f"skipped {pp.image.width}x{pp.image.height} image (can't find suitable size within error threshold)") 65 | -------------------------------------------------------------------------------- /test/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/philz1337x/clarity-upscaler/cfbdb842c6c839c8e97741722b8cf9a4b7662d98/test/__init__.py -------------------------------------------------------------------------------- /test/conftest.py: -------------------------------------------------------------------------------- 1 | import os 2 | 3 | import pytest 4 | import base64 5 | 6 | 7 | test_files_path = os.path.dirname(__file__) + "/test_files" 8 | 9 | 10 | def file_to_base64(filename): 11 | with open(filename, "rb") as file: 12 | data = file.read() 13 | 14 | base64_str = str(base64.b64encode(data), "utf-8") 15 | return "data:image/png;base64," + base64_str 16 | 17 | 18 | @pytest.fixture(scope="session") # session so we don't read this over and over 19 | def img2img_basic_image_base64() -> str: 20 | return file_to_base64(os.path.join(test_files_path, "img2img_basic.png")) 21 | 22 | 23 | @pytest.fixture(scope="session") # session so we don't read this over and over 24 | def mask_basic_image_base64() -> str: 25 | return file_to_base64(os.path.join(test_files_path, "mask_basic.png")) 26 | -------------------------------------------------------------------------------- /test/test_extras.py: -------------------------------------------------------------------------------- 1 | import requests 2 | 3 | 4 | def test_simple_upscaling_performed(base_url, img2img_basic_image_base64): 5 | 
payload = { 6 | "resize_mode": 0, 7 | "show_extras_results": True, 8 | "gfpgan_visibility": 0, 9 | "codeformer_visibility": 0, 10 | "codeformer_weight": 0, 11 | "upscaling_resize": 2, 12 | "upscaling_resize_w": 128, 13 | "upscaling_resize_h": 128, 14 | "upscaling_crop": True, 15 | "upscaler_1": "Lanczos", 16 | "upscaler_2": "None", 17 | "extras_upscaler_2_visibility": 0, 18 | "image": img2img_basic_image_base64, 19 | } 20 | assert requests.post(f"{base_url}/sdapi/v1/extra-single-image", json=payload).status_code == 200 21 | 22 | 23 | def test_png_info_performed(base_url, img2img_basic_image_base64): 24 | payload = { 25 | "image": img2img_basic_image_base64, 26 | } 27 | assert requests.post(f"{base_url}/sdapi/v1/png-info", json=payload).status_code == 200 28 | 29 | 30 | def test_interrogate_performed(base_url, img2img_basic_image_base64): 31 | payload = { 32 | "image": img2img_basic_image_base64, 33 | "model": "clip", 34 | } 35 | assert requests.post(f"{base_url}/sdapi/v1/interrogate", json=payload).status_code == 200 36 | -------------------------------------------------------------------------------- /test/test_files/empty.pt: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/philz1337x/clarity-upscaler/cfbdb842c6c839c8e97741722b8cf9a4b7662d98/test/test_files/empty.pt -------------------------------------------------------------------------------- /test/test_files/img2img_basic.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/philz1337x/clarity-upscaler/cfbdb842c6c839c8e97741722b8cf9a4b7662d98/test/test_files/img2img_basic.png -------------------------------------------------------------------------------- /test/test_files/mask_basic.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/philz1337x/clarity-upscaler/cfbdb842c6c839c8e97741722b8cf9a4b7662d98/test/test_files/mask_basic.png -------------------------------------------------------------------------------- /test/test_img2img.py: -------------------------------------------------------------------------------- 1 | 2 | import pytest 3 | import requests 4 | 5 | 6 | @pytest.fixture() 7 | def url_img2img(base_url): 8 | return f"{base_url}/sdapi/v1/img2img" 9 | 10 | 11 | @pytest.fixture() 12 | def simple_img2img_request(img2img_basic_image_base64): 13 | return { 14 | "batch_size": 1, 15 | "cfg_scale": 7, 16 | "denoising_strength": 0.75, 17 | "eta": 0, 18 | "height": 64, 19 | "include_init_images": False, 20 | "init_images": [img2img_basic_image_base64], 21 | "inpaint_full_res": False, 22 | "inpaint_full_res_padding": 0, 23 | "inpainting_fill": 0, 24 | "inpainting_mask_invert": False, 25 | "mask": None, 26 | "mask_blur": 4, 27 | "n_iter": 1, 28 | "negative_prompt": "", 29 | "override_settings": {}, 30 | "prompt": "example prompt", 31 | "resize_mode": 0, 32 | "restore_faces": False, 33 | "s_churn": 0, 34 | "s_noise": 1, 35 | "s_tmax": 0, 36 | "s_tmin": 0, 37 | "sampler_index": "Euler a", 38 | "seed": -1, 39 | "seed_resize_from_h": -1, 40 | "seed_resize_from_w": -1, 41 | "steps": 3, 42 | "styles": [], 43 | "subseed": -1, 44 | "subseed_strength": 0, 45 | "tiling": False, 46 | "width": 64, 47 | } 48 | 49 | 50 | def test_img2img_simple_performed(url_img2img, simple_img2img_request): 51 | assert requests.post(url_img2img, json=simple_img2img_request).status_code == 200 52 | 53 | 54 | def
test_inpainting_masked_performed(url_img2img, simple_img2img_request, mask_basic_image_base64): 55 | simple_img2img_request["mask"] = mask_basic_image_base64 56 | assert requests.post(url_img2img, json=simple_img2img_request).status_code == 200 57 | 58 | 59 | def test_inpainting_with_inverted_masked_performed(url_img2img, simple_img2img_request, mask_basic_image_base64): 60 | simple_img2img_request["mask"] = mask_basic_image_base64 61 | simple_img2img_request["inpainting_mask_invert"] = True 62 | assert requests.post(url_img2img, json=simple_img2img_request).status_code == 200 63 | 64 | 65 | def test_img2img_sd_upscale_performed(url_img2img, simple_img2img_request): 66 | simple_img2img_request["script_name"] = "sd upscale" 67 | simple_img2img_request["script_args"] = ["", 8, "Lanczos", 2.0] 68 | assert requests.post(url_img2img, json=simple_img2img_request).status_code == 200 69 | -------------------------------------------------------------------------------- /test/test_txt2img.py: -------------------------------------------------------------------------------- 1 | 2 | import pytest 3 | import requests 4 | 5 | 6 | @pytest.fixture() 7 | def url_txt2img(base_url): 8 | return f"{base_url}/sdapi/v1/txt2img" 9 | 10 | 11 | @pytest.fixture() 12 | def simple_txt2img_request(): 13 | return { 14 | "batch_size": 1, 15 | "cfg_scale": 7, 16 | "denoising_strength": 0, 17 | "enable_hr": False, 18 | "eta": 0, 19 | "firstphase_height": 0, 20 | "firstphase_width": 0, 21 | "height": 64, 22 | "n_iter": 1, 23 | "negative_prompt": "", 24 | "prompt": "example prompt", 25 | "restore_faces": False, 26 | "s_churn": 0, 27 | "s_noise": 1, 28 | "s_tmax": 0, 29 | "s_tmin": 0, 30 | "sampler_index": "Euler a", 31 | "seed": -1, 32 | "seed_resize_from_h": -1, 33 | "seed_resize_from_w": -1, 34 | "steps": 3, 35 | "styles": [], 36 | "subseed": -1, 37 | "subseed_strength": 0, 38 | "tiling": False, 39 | "width": 64, 40 | } 41 | 42 | 43 | def test_txt2img_simple_performed(url_txt2img, simple_txt2img_request): 44 | assert requests.post(url_txt2img, json=simple_txt2img_request).status_code == 200 45 | 46 | 47 | def test_txt2img_with_negative_prompt_performed(url_txt2img, simple_txt2img_request): 48 | simple_txt2img_request["negative_prompt"] = "example negative prompt" 49 | assert requests.post(url_txt2img, json=simple_txt2img_request).status_code == 200 50 | 51 | 52 | def test_txt2img_with_complex_prompt_performed(url_txt2img, simple_txt2img_request): 53 | simple_txt2img_request["prompt"] = "((emphasis)), (emphasis1:1.1), [to:1], [from::2], [from:to:0.3], [alt|alt1]" 54 | assert requests.post(url_txt2img, json=simple_txt2img_request).status_code == 200 55 | 56 | 57 | def test_txt2img_not_square_image_performed(url_txt2img, simple_txt2img_request): 58 | simple_txt2img_request["height"] = 128 59 | assert requests.post(url_txt2img, json=simple_txt2img_request).status_code == 200 60 | 61 | 62 | def test_txt2img_with_hrfix_performed(url_txt2img, simple_txt2img_request): 63 | simple_txt2img_request["enable_hr"] = True 64 | assert requests.post(url_txt2img, json=simple_txt2img_request).status_code == 200 65 | 66 | 67 | def test_txt2img_with_tiling_performed(url_txt2img, simple_txt2img_request): 68 | simple_txt2img_request["tiling"] = True 69 | assert requests.post(url_txt2img, json=simple_txt2img_request).status_code == 200 70 | 71 | 72 | def test_txt2img_with_restore_faces_performed(url_txt2img, simple_txt2img_request): 73 | simple_txt2img_request["restore_faces"] = True 74 | assert requests.post(url_txt2img, 
json=simple_txt2img_request).status_code == 200 75 | 76 | 77 | @pytest.mark.parametrize("sampler", ["PLMS", "DDIM", "UniPC"]) 78 | def test_txt2img_with_vanilla_sampler_performed(url_txt2img, simple_txt2img_request, sampler): 79 | simple_txt2img_request["sampler_index"] = sampler 80 | assert requests.post(url_txt2img, json=simple_txt2img_request).status_code == 200 81 | 82 | 83 | def test_txt2img_multiple_batches_performed(url_txt2img, simple_txt2img_request): 84 | simple_txt2img_request["n_iter"] = 2 85 | assert requests.post(url_txt2img, json=simple_txt2img_request).status_code == 200 86 | 87 | 88 | def test_txt2img_batch_performed(url_txt2img, simple_txt2img_request): 89 | simple_txt2img_request["batch_size"] = 2 90 | assert requests.post(url_txt2img, json=simple_txt2img_request).status_code == 200 91 | -------------------------------------------------------------------------------- /test/test_utils.py: -------------------------------------------------------------------------------- 1 | import pytest 2 | import requests 3 | 4 | 5 | def test_options_write(base_url): 6 | url_options = f"{base_url}/sdapi/v1/options" 7 | response = requests.get(url_options) 8 | assert response.status_code == 200 9 | 10 | pre_value = response.json()["send_seed"] 11 | 12 | assert requests.post(url_options, json={'send_seed': (not pre_value)}).status_code == 200 13 | 14 | response = requests.get(url_options) 15 | assert response.status_code == 200 16 | assert response.json()['send_seed'] == (not pre_value) 17 | 18 | requests.post(url_options, json={"send_seed": pre_value}) 19 | 20 | 21 | @pytest.mark.parametrize("url", [ 22 | "sdapi/v1/cmd-flags", 23 | "sdapi/v1/samplers", 24 | "sdapi/v1/upscalers", 25 | "sdapi/v1/sd-models", 26 | "sdapi/v1/hypernetworks", 27 | "sdapi/v1/face-restorers", 28 | "sdapi/v1/realesrgan-models", 29 | "sdapi/v1/prompt-styles", 30 | "sdapi/v1/embeddings", 31 | ]) 32 | def test_get_api_url(base_url, url): 33 | assert requests.get(f"{base_url}/{url}").status_code == 200 34 | -------------------------------------------------------------------------------- /textual_inversion_templates/hypernetwork.txt: -------------------------------------------------------------------------------- 1 | a photo of a [filewords] 2 | a rendering of a [filewords] 3 | a cropped photo of the [filewords] 4 | the photo of a [filewords] 5 | a photo of a clean [filewords] 6 | a photo of a dirty [filewords] 7 | a dark photo of the [filewords] 8 | a photo of my [filewords] 9 | a photo of the cool [filewords] 10 | a close-up photo of a [filewords] 11 | a bright photo of the [filewords] 12 | a cropped photo of a [filewords] 13 | a photo of the [filewords] 14 | a good photo of the [filewords] 15 | a photo of one [filewords] 16 | a close-up photo of the [filewords] 17 | a rendition of the [filewords] 18 | a photo of the clean [filewords] 19 | a rendition of a [filewords] 20 | a photo of a nice [filewords] 21 | a good photo of a [filewords] 22 | a photo of the nice [filewords] 23 | a photo of the small [filewords] 24 | a photo of the weird [filewords] 25 | a photo of the large [filewords] 26 | a photo of a cool [filewords] 27 | a photo of a small [filewords] 28 | -------------------------------------------------------------------------------- /textual_inversion_templates/none.txt: -------------------------------------------------------------------------------- 1 | picture 2 | -------------------------------------------------------------------------------- /textual_inversion_templates/style.txt: 
-------------------------------------------------------------------------------- 1 | a painting, art by [name] 2 | a rendering, art by [name] 3 | a cropped painting, art by [name] 4 | the painting, art by [name] 5 | a clean painting, art by [name] 6 | a dirty painting, art by [name] 7 | a dark painting, art by [name] 8 | a picture, art by [name] 9 | a cool painting, art by [name] 10 | a close-up painting, art by [name] 11 | a bright painting, art by [name] 12 | a cropped painting, art by [name] 13 | a good painting, art by [name] 14 | a close-up painting, art by [name] 15 | a rendition, art by [name] 16 | a nice painting, art by [name] 17 | a small painting, art by [name] 18 | a weird painting, art by [name] 19 | a large painting, art by [name] 20 | -------------------------------------------------------------------------------- /textual_inversion_templates/style_filewords.txt: -------------------------------------------------------------------------------- 1 | a painting of [filewords], art by [name] 2 | a rendering of [filewords], art by [name] 3 | a cropped painting of [filewords], art by [name] 4 | the painting of [filewords], art by [name] 5 | a clean painting of [filewords], art by [name] 6 | a dirty painting of [filewords], art by [name] 7 | a dark painting of [filewords], art by [name] 8 | a picture of [filewords], art by [name] 9 | a cool painting of [filewords], art by [name] 10 | a close-up painting of [filewords], art by [name] 11 | a bright painting of [filewords], art by [name] 12 | a cropped painting of [filewords], art by [name] 13 | a good painting of [filewords], art by [name] 14 | a close-up painting of [filewords], art by [name] 15 | a rendition of [filewords], art by [name] 16 | a nice painting of [filewords], art by [name] 17 | a small painting of [filewords], art by [name] 18 | a weird painting of [filewords], art by [name] 19 | a large painting of [filewords], art by [name] 20 | -------------------------------------------------------------------------------- /textual_inversion_templates/subject.txt: -------------------------------------------------------------------------------- 1 | a photo of a [name] 2 | a rendering of a [name] 3 | a cropped photo of the [name] 4 | the photo of a [name] 5 | a photo of a clean [name] 6 | a photo of a dirty [name] 7 | a dark photo of the [name] 8 | a photo of my [name] 9 | a photo of the cool [name] 10 | a close-up photo of a [name] 11 | a bright photo of the [name] 12 | a cropped photo of a [name] 13 | a photo of the [name] 14 | a good photo of the [name] 15 | a photo of one [name] 16 | a close-up photo of the [name] 17 | a rendition of the [name] 18 | a photo of the clean [name] 19 | a rendition of a [name] 20 | a photo of a nice [name] 21 | a good photo of a [name] 22 | a photo of the nice [name] 23 | a photo of the small [name] 24 | a photo of the weird [name] 25 | a photo of the large [name] 26 | a photo of a cool [name] 27 | a photo of a small [name] 28 | -------------------------------------------------------------------------------- /textual_inversion_templates/subject_filewords.txt: -------------------------------------------------------------------------------- 1 | a photo of a [name], [filewords] 2 | a rendering of a [name], [filewords] 3 | a cropped photo of the [name], [filewords] 4 | the photo of a [name], [filewords] 5 | a photo of a clean [name], [filewords] 6 | a photo of a dirty [name], [filewords] 7 | a dark photo of the [name], [filewords] 8 | a photo of my [name], [filewords] 9 | a photo of the cool [name], 
[filewords] 10 | a close-up photo of a [name], [filewords] 11 | a bright photo of the [name], [filewords] 12 | a cropped photo of a [name], [filewords] 13 | a photo of the [name], [filewords] 14 | a good photo of the [name], [filewords] 15 | a photo of one [name], [filewords] 16 | a close-up photo of the [name], [filewords] 17 | a rendition of the [name], [filewords] 18 | a photo of the clean [name], [filewords] 19 | a rendition of a [name], [filewords] 20 | a photo of a nice [name], [filewords] 21 | a good photo of a [name], [filewords] 22 | a photo of the nice [name], [filewords] 23 | a photo of the small [name], [filewords] 24 | a photo of the weird [name], [filewords] 25 | a photo of the large [name], [filewords] 26 | a photo of a cool [name], [filewords] 27 | a photo of a small [name], [filewords] 28 | -------------------------------------------------------------------------------- /webui-macos-env.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | #################################################################### 3 | # macOS defaults # 4 | # Please modify webui-user.sh to change these instead of this file # 5 | #################################################################### 6 | 7 | if [[ -x "$(command -v python3.10)" ]] 8 | then 9 | python_cmd="python3.10" 10 | fi 11 | 12 | export install_dir="$HOME" 13 | #export COMMANDLINE_ARGS="--skip-torch-cuda-test --upcast-sampling --no-half-vae --use-cpu interrogate" 14 | export TORCH_COMMAND="pip install torch==2.0.1 torchvision==0.15.2" 15 | export PYTORCH_ENABLE_MPS_FALLBACK=1 16 | 17 | #################################################################### 18 | -------------------------------------------------------------------------------- /webui-user.bat: -------------------------------------------------------------------------------- 1 | @echo off 2 | 3 | set PYTHON= 4 | set GIT= 5 | set VENV_DIR= 6 | set COMMANDLINE_ARGS= 7 | 8 | call webui.bat 9 | -------------------------------------------------------------------------------- /webui-user.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | ######################################################### 3 | # Uncomment and change the variables below to your need:# 4 | ######################################################### 5 | 6 | # Install directory without trailing slash 7 | #install_dir="/home/$(whoami)" 8 | 9 | # Name of the subdirectory 10 | #clone_dir="stable-diffusion-webui" 11 | 12 | # Commandline arguments for webui.py, for example: export COMMANDLINE_ARGS="--medvram --opt-split-attention" 13 | #export COMMANDLINE_ARGS= 14 | 15 | # python3 executable 16 | #python_cmd="python3" 17 | 18 | # git executable 19 | #export GIT="git" 20 | 21 | # python3 venv without trailing slash (defaults to ${install_dir}/${clone_dir}/venv) 22 | #venv_dir="venv" 23 | 24 | # script to launch to start the app 25 | #export LAUNCH_SCRIPT="launch.py" 26 | 27 | # install command for torch 28 | #export TORCH_COMMAND="pip install torch==1.12.1+cu113 --extra-index-url https://download.pytorch.org/whl/cu113" 29 | 30 | # Requirements file to use for stable-diffusion-webui 31 | #export REQS_FILE="requirements_versions.txt" 32 | 33 | # Fixed git repos 34 | #export K_DIFFUSION_PACKAGE="" 35 | #export GFPGAN_PACKAGE="" 36 | 37 | # Fixed git commits 38 | #export STABLE_DIFFUSION_COMMIT_HASH="" 39 | #export CODEFORMER_COMMIT_HASH="" 40 | #export BLIP_COMMIT_HASH="" 41 | 42 | # Uncomment to enable accelerated 
launch 43 | #export ACCELERATE="True" 44 | 45 | # Uncomment to disable TCMalloc 46 | #export NO_TCMALLOC="True" 47 | 48 | ########################################### 49 | -------------------------------------------------------------------------------- /webui.bat: -------------------------------------------------------------------------------- 1 | @echo off 2 | 3 | if exist webui.settings.bat ( 4 | call webui.settings.bat 5 | ) 6 | 7 | if not defined PYTHON (set PYTHON=python) 8 | if defined GIT (set "GIT_PYTHON_GIT_EXECUTABLE=%GIT%") 9 | if not defined VENV_DIR (set "VENV_DIR=%~dp0%venv") 10 | 11 | set SD_WEBUI_RESTART=tmp/restart 12 | set ERROR_REPORTING=FALSE 13 | 14 | mkdir tmp 2>NUL 15 | 16 | %PYTHON% -c "" >tmp/stdout.txt 2>tmp/stderr.txt 17 | if %ERRORLEVEL% == 0 goto :check_pip 18 | echo Couldn't launch python 19 | goto :show_stdout_stderr 20 | 21 | :check_pip 22 | %PYTHON% -mpip --help >tmp/stdout.txt 2>tmp/stderr.txt 23 | if %ERRORLEVEL% == 0 goto :start_venv 24 | if "%PIP_INSTALLER_LOCATION%" == "" goto :show_stdout_stderr 25 | %PYTHON% "%PIP_INSTALLER_LOCATION%" >tmp/stdout.txt 2>tmp/stderr.txt 26 | if %ERRORLEVEL% == 0 goto :start_venv 27 | echo Couldn't install pip 28 | goto :show_stdout_stderr 29 | 30 | :start_venv 31 | if ["%VENV_DIR%"] == ["-"] goto :skip_venv 32 | if ["%SKIP_VENV%"] == ["1"] goto :skip_venv 33 | 34 | dir "%VENV_DIR%\Scripts\Python.exe" >tmp/stdout.txt 2>tmp/stderr.txt 35 | if %ERRORLEVEL% == 0 goto :activate_venv 36 | 37 | for /f "delims=" %%i in ('CALL %PYTHON% -c "import sys; print(sys.executable)"') do set PYTHON_FULLNAME="%%i" 38 | echo Creating venv in directory %VENV_DIR% using python %PYTHON_FULLNAME% 39 | %PYTHON_FULLNAME% -m venv "%VENV_DIR%" >tmp/stdout.txt 2>tmp/stderr.txt 40 | if %ERRORLEVEL% == 0 goto :activate_venv 41 | echo Unable to create venv in directory "%VENV_DIR%" 42 | goto :show_stdout_stderr 43 | 44 | :activate_venv 45 | set PYTHON="%VENV_DIR%\Scripts\Python.exe" 46 | echo venv %PYTHON% 47 | 48 | :skip_venv 49 | if [%ACCELERATE%] == ["True"] goto :accelerate 50 | goto :launch 51 | 52 | :accelerate 53 | echo Checking for accelerate 54 | set ACCELERATE="%VENV_DIR%\Scripts\accelerate.exe" 55 | if EXIST %ACCELERATE% goto :accelerate_launch 56 | 57 | :launch 58 | %PYTHON% launch.py %* 59 | if EXIST tmp/restart goto :skip_venv 60 | pause 61 | exit /b 62 | 63 | :accelerate_launch 64 | echo Accelerating 65 | %ACCELERATE% launch --num_cpu_threads_per_process=6 launch.py 66 | if EXIST tmp/restart goto :skip_venv 67 | pause 68 | exit /b 69 | 70 | :show_stdout_stderr 71 | 72 | echo. 73 | echo exit code: %errorlevel% 74 | 75 | for /f %%i in ("tmp\stdout.txt") do set size=%%~zi 76 | if %size% equ 0 goto :show_stderr 77 | echo. 78 | echo stdout: 79 | type tmp\stdout.txt 80 | 81 | :show_stderr 82 | for /f %%i in ("tmp\stderr.txt") do set size=%%~zi 83 | if %size% equ 0 goto :endofscript 84 | echo. 85 | echo stderr: 86 | type tmp\stderr.txt 87 | 88 | :endofscript 89 | 90 | echo. 91 | echo Launch unsuccessful. Exiting. 92 | pause 93 | --------------------------------------------------------------------------------
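[Editorial note] The HTTP tests under /test/ above assume a webui instance already listening on the pytest base_url configured in pyproject.toml (http://127.0.0.1:7860) and started with API access enabled (the --api commandline flag). A minimal standalone client sketch along the same lines follows; the payload keys mirror the simple_txt2img_request fixture in test_txt2img.py, while the save-to-disk handling (and the assumption that results arrive in a base64-encoded "images" field) is illustrative rather than exhaustive.

import base64

import requests

BASE_URL = "http://127.0.0.1:7860"  # pytest base_url from pyproject.toml

payload = {
    "prompt": "example prompt",
    "steps": 3,
    "width": 64,
    "height": 64,
    "sampler_index": "Euler a",
}

# Same endpoint the txt2img tests post to.
response = requests.post(f"{BASE_URL}/sdapi/v1/txt2img", json=payload, timeout=120)
response.raise_for_status()

# Decode each base64-encoded PNG in the response and write it to disk.
for i, image_b64 in enumerate(response.json()["images"]):
    with open(f"txt2img_{i}.png", "wb") as f:
        f.write(base64.b64decode(image_b64))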