├── .gitignore ├── LICENSE ├── README.md ├── images ├── webui-install.png └── webui.png ├── inference.py ├── inference_refiner.py ├── install.py ├── javascript ├── addAgreementLinks.js └── generationParams.js ├── requirements_webui.txt ├── scripts └── sd_webui_xldemo_txt2img.py ├── style.css ├── xldemo_txt2img.py ├── xldemo_txt2img_ui.py └── xldemo_txt2img_ui_common.py /.gitignore: -------------------------------------------------------------------------------- 1 | # Byte-compiled / optimized / DLL files 2 | __pycache__/ 3 | *.py[cod] 4 | *$py.class 5 | 6 | # C extensions 7 | *.so 8 | 9 | # Distribution / packaging 10 | .Python 11 | build/ 12 | develop-eggs/ 13 | dist/ 14 | downloads/ 15 | eggs/ 16 | .eggs/ 17 | lib/ 18 | lib64/ 19 | parts/ 20 | sdist/ 21 | var/ 22 | wheels/ 23 | share/python-wheels/ 24 | *.egg-info/ 25 | .installed.cfg 26 | *.egg 27 | MANIFEST 28 | 29 | # PyInstaller 30 | # Usually these files are written by a python script from a template 31 | # before PyInstaller builds the exe, so as to inject date/other infos into it. 
32 | *.manifest 33 | *.spec 34 | 35 | # Installer logs 36 | pip-log.txt 37 | pip-delete-this-directory.txt 38 | 39 | # Unit test / coverage reports 40 | htmlcov/ 41 | .tox/ 42 | .nox/ 43 | .coverage 44 | .coverage.* 45 | .cache 46 | nosetests.xml 47 | coverage.xml 48 | *.cover 49 | *.py,cover 50 | .hypothesis/ 51 | .pytest_cache/ 52 | cover/ 53 | 54 | # Translations 55 | *.mo 56 | *.pot 57 | 58 | # Django stuff: 59 | *.log 60 | local_settings.py 61 | db.sqlite3 62 | db.sqlite3-journal 63 | 64 | # Flask stuff: 65 | instance/ 66 | .webassets-cache 67 | 68 | # Scrapy stuff: 69 | .scrapy 70 | 71 | # Sphinx documentation 72 | docs/_build/ 73 | 74 | # PyBuilder 75 | .pybuilder/ 76 | target/ 77 | 78 | # Jupyter Notebook 79 | .ipynb_checkpoints 80 | 81 | # IPython 82 | profile_default/ 83 | ipython_config.py 84 | 85 | # pyenv 86 | # For a library or package, you might want to ignore these files since the code is 87 | # intended to run in multiple environments; otherwise, check them in: 88 | # .python-version 89 | 90 | # pipenv 91 | # According to pypa/pipenv#598, it is recommended to include Pipfile.lock in version control. 92 | # However, in case of collaboration, if having platform-specific dependencies or dependencies 93 | # having no cross-platform support, pipenv may install dependencies that don't work, or not 94 | # install all needed dependencies. 95 | #Pipfile.lock 96 | 97 | # poetry 98 | # Similar to Pipfile.lock, it is generally recommended to include poetry.lock in version control. 99 | # This is especially recommended for binary packages to ensure reproducibility, and is more 100 | # commonly ignored for libraries. 101 | # https://python-poetry.org/docs/basic-usage/#commit-your-poetrylock-file-to-version-control 102 | #poetry.lock 103 | 104 | # pdm 105 | # Similar to Pipfile.lock, it is generally recommended to include pdm.lock in version control. 
106 | #pdm.lock 107 | # pdm stores project-wide configurations in .pdm.toml, but it is recommended to not include it 108 | # in version control. 109 | # https://pdm.fming.dev/#use-with-ide 110 | .pdm.toml 111 | 112 | # PEP 582; used by e.g. github.com/David-OConnor/pyflow and github.com/pdm-project/pdm 113 | __pypackages__/ 114 | 115 | # Celery stuff 116 | celerybeat-schedule 117 | celerybeat.pid 118 | 119 | # SageMath parsed files 120 | *.sage.py 121 | 122 | # Environments 123 | .env 124 | .venv 125 | env/ 126 | venv/ 127 | ENV/ 128 | env.bak/ 129 | venv.bak/ 130 | 131 | # Spyder project settings 132 | .spyderproject 133 | .spyproject 134 | 135 | # Rope project settings 136 | .ropeproject 137 | 138 | # mkdocs documentation 139 | /site 140 | 141 | # mypy 142 | .mypy_cache/ 143 | .dmypy.json 144 | dmypy.json 145 | 146 | # Pyre type checker 147 | .pyre/ 148 | 149 | # pytype static type analyzer 150 | .pytype/ 151 | 152 | # Cython debug symbols 153 | cython_debug/ 154 | 155 | # PyCharm 156 | # JetBrains specific template is maintained in a separate JetBrains.gitignore that can 157 | # be found at https://github.com/github/gitignore/blob/main/Global/JetBrains.gitignore 158 | # and can be added to the global gitignore or merged into this file. For a more nuclear 159 | # option (not recommended) you can uncomment the following to ignore the entire idea folder. 
160 | #.idea/ 161 | -------------------------------------------------------------------------------- /LICENSE: -------------------------------------------------------------------------------- 1 | MIT License 2 | 3 | Copyright (c) 2023 Life is boring, so programming 4 | 5 | Permission is hereby granted, free of charge, to any person obtaining a copy 6 | of this software and associated documentation files (the "Software"), to deal 7 | in the Software without restriction, including without limitation the rights 8 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell 9 | copies of the Software, and to permit persons to whom the Software is 10 | furnished to do so, subject to the following conditions: 11 | 12 | The above copyright notice and this permission notice shall be included in all 13 | copies or substantial portions of the Software. 14 | 15 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 16 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 17 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE 18 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 19 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, 20 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE 21 | SOFTWARE. 
22 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | # Stable Diffusion XL 0.9 txt2img webui extension 2 | 3 | 4 | A custom extension for [AUTOMATIC1111/stable-diffusion-webui](https://github.com/AUTOMATIC1111/stable-diffusion-webui) that demos the SDXL 0.9 txt2img features 5 | 6 | # Tested environment 7 | * GPU: RTX 3060 12G VRAM 8 | * OS: Ubuntu 20.04.4 LTS, Windows 10 9 | * RAM: 64G 10 | * Automatic1111 WebUI version: v1.4.0 11 | * python: 3.10.9 12 | * torch: 2.0.1+cu118 13 | * xformers: 0.0.20 14 | * gradio: 3.32.0 15 | * checkpoint: 20af92d769 16 | 17 | # Overview 18 | * This project allows users to do txt2img using the SDXL 0.9 base checkpoint 19 | * Refine images using the SDXL 0.9 refiner checkpoint 20 | * Setting samplers 21 | * Setting sampling steps 22 | * Setting image width and height 23 | * Setting batch size 24 | * Setting CFG Scale 25 | * Setting seed 26 | * Reuse seed 27 | * Use refiner 28 | * Setting refiner strength 29 | * Send to img2img 30 | * Send to inpaint 31 | * Send to extras 32 | 33 | # Tutorial 34 | There is a video that shows how to use the extension 35 | 36 | [![Introducing Stable Diffusion XL 0.9 txt2img AUTOMATIC1111 webui extension](https://img.youtube.com/vi/iF4w7gFDaYM/sddefault.jpg)](https://www.youtube.com/watch?v=iF4w7gFDaYM) 37 | 38 | # Stable Diffusion extension 39 | This project can be run as a Stable Diffusion extension inside the Stable Diffusion WebUI.
40 | 41 | ## Installation as a Stable Diffusion extension 42 | * Copy and paste `https://github.com/lifeisboringsoprogramming/sd-webui-xldemo-txt2img.git` to the URL for extension's git repository 43 | * Press the Install button 44 | * Apply and restart UI when finished installing 45 | 46 | 47 | 48 | # Samplers mapping 49 | 50 | |Sampler name|Diffusers schedulers class| 51 | |---|---| 52 | |dpmsolver_multistep|diffusers.schedulers.scheduling_dpmsolver_multistep.DPMSolverMultistepScheduler| 53 | |deis_multistep|diffusers.schedulers.scheduling_deis_multistep.DEISMultistepScheduler| 54 | |unipc_multistep|diffusers.schedulers.scheduling_unipc_multistep.UniPCMultistepScheduler| 55 | |k_dpm_2_ancestral_discrete|diffusers.schedulers.scheduling_k_dpm_2_ancestral_discrete.KDPM2AncestralDiscreteScheduler| 56 | |ddim|diffusers.schedulers.scheduling_ddim.DDIMScheduler| 57 | |dpmsolver_singlestep|diffusers.schedulers.scheduling_dpmsolver_singlestep.DPMSolverSinglestepScheduler| 58 | |euler_ancestral_discrete|diffusers.schedulers.scheduling_euler_ancestral_discrete.EulerAncestralDiscreteScheduler| 59 | |ddpm|diffusers.schedulers.scheduling_ddpm.DDPMScheduler| 60 | |euler_discrete|diffusers.schedulers.scheduling_euler_discrete.EulerDiscreteScheduler| 61 | |k_dpm_2_discrete|diffusers.schedulers.scheduling_k_dpm_2_discrete.KDPM2DiscreteScheduler| 62 | |pndm|diffusers.schedulers.scheduling_pndm.PNDMScheduler| 63 | |dpmsolver_sde|diffusers.schedulers.scheduling_dpmsolver_sde.DPMSolverSDEScheduler| 64 | |lms_discrete|diffusers.schedulers.scheduling_lms_discrete.LMSDiscreteScheduler| 65 | |heun_discrete|diffusers.schedulers.scheduling_heun_discrete.HeunDiscreteScheduler| 66 | 67 | 68 | # Limitations 69 | * this extension does not work with other extensions such as ControlNet 70 | * this extension does not work with LoRA, textual inversion embeddings, etc 71 | 72 | # YouTube Channel 73 | Please subscribe to my YouTube channel, thank you very much.
74 | 75 | [https://bit.ly/3odzTKX](https://bit.ly/3odzTKX) 76 | 77 | # Patreon 78 | ☕️ Please consider to support me in Patreon 🍻 79 | 80 | [https://www.patreon.com/lifeisboringsoprogramming](https://www.patreon.com/lifeisboringsoprogramming) 81 | -------------------------------------------------------------------------------- /images/webui-install.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/lifeisboringsoprogramming/sd-webui-xldemo-txt2img/70a2b7091768ba7cf8b5ca4f24739a23b4220c08/images/webui-install.png -------------------------------------------------------------------------------- /images/webui.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/lifeisboringsoprogramming/sd-webui-xldemo-txt2img/70a2b7091768ba7cf8b5ca4f24739a23b4220c08/images/webui.png -------------------------------------------------------------------------------- /inference.py: -------------------------------------------------------------------------------- 1 | from diffusers import DiffusionPipeline 2 | import torch 3 | import gc 4 | import time as time_ 5 | import random 6 | 7 | 8 | def inference(seed=-1): 9 | access_token = '' 10 | 11 | model_key_base = "stabilityai/stable-diffusion-xl-base-0.9" 12 | model_key_refiner = "stabilityai/stable-diffusion-xl-refiner-0.9" 13 | 14 | print("Loading model", model_key_base) 15 | pipe = DiffusionPipeline.from_pretrained( 16 | model_key_base, torch_dtype=torch.float16, resume_download=True, use_auth_token=access_token) 17 | pipe.enable_model_cpu_offload() 18 | 19 | if seed == -1: 20 | seed = int(random.randrange(4294967294)) 21 | 22 | device = 'cuda' 23 | generator = torch.Generator(device=device) 24 | 25 | generator = generator.manual_seed(seed) 26 | 27 | latents = torch.randn( 28 | (1, pipe.unet.in_channels, 1024 // 8, 1024 // 8), 29 | generator=generator, 30 | device=device, 31 | dtype=torch.float16 32 | ) 33 
| 34 | prompt = '✨aesthetic✨ aliens walk among us in Las Vegas, scratchy found film photograph' 35 | negative_prompt = 'low quality' 36 | guidance_scale = 7 37 | num_inference_steps = 20 38 | 39 | start_time = time_.time() 40 | 41 | images = pipe(prompt=[prompt], negative_prompt=[negative_prompt], 42 | guidance_scale=guidance_scale, num_inference_steps=num_inference_steps, 43 | latents=latents).images 44 | 45 | end_time = time_.time() 46 | elapsed_time = end_time - start_time 47 | 48 | # Print the elapsed time 49 | print(f"Elapsed time: {elapsed_time:.4f} seconds") 50 | 51 | gc.collect() 52 | torch.cuda.empty_cache() 53 | 54 | images[0].save(f'inference-{int(time_.time())}-{seed}.png') 55 | 56 | 57 | if __name__ == "__main__": 58 | 59 | # Run your code 60 | inference(-1) 61 | 62 | # Ubuntu 20.04.4 LTS 63 | # +---------------------------------------------------------------------------------------+ 64 | # | NVIDIA-SMI 530.30.02 Driver Version: 530.30.02 CUDA Version: 12.1 | 65 | # |-----------------------------------------+----------------------+----------------------+ 66 | # | GPU Name Persistence-M| Bus-Id Disp.A | Volatile Uncorr. ECC | 67 | # | Fan Temp Perf Pwr:Usage/Cap| Memory-Usage | GPU-Util Compute M. | 68 | # | | | MIG M. | 69 | # |=========================================+======================+======================| 70 | # | 0 NVIDIA GeForce RTX 3060 On | 00000000:05:00.0 Off | N/A | 71 | # | 0% 42C P8 14W / 170W| 448MiB / 12288MiB | 41% Default | 72 | # | | | N/A | 73 | # +-----------------------------------------+----------------------+----------------------+ 74 | # 75 | # Python 3.10.9 76 | # torch 2.0.1+cu118 77 | # transformers 4.25.1 78 | # diffusers 0.18.1 79 | # 80 | # ~/.cache/huggingface/hub/models--stabilityai--stable-diffusion-xl-base-0.9/snapshots/025709258a55cc924dc47efd88959f18ae79830e$ tree 81 | # . 
82 | # ├── model_index.json -> ../../blobs/4b76d56998e4e2c7bfe973ccb4d0f1c361e1287b 83 | # ├── scheduler 84 | # │   └── scheduler_config.json -> ../../../blobs/5bdb7b6e0eeda414c9c37ec916da0fc4ef294c7e 85 | # ├── text_encoder 86 | # │   ├── config.json -> ../../../blobs/15cf93d7088b7f349e6522a8692c457d8ae6fde9 87 | # │   └── model.safetensors -> ../../../blobs/22928c6a6a99759e4a19648ba56e044d1df47b650f7879470501b71ec996a3ef 88 | # ├── text_encoder_2 89 | # │   ├── config.json -> ../../../blobs/c4ad7f842f557f4371e748443299a3c70a5dcbe1 90 | # │   └── model.safetensors -> ../../../blobs/d65d20651dd313f3b699b03885da0032d8f852b8b5dbbbdf5b56ce9b10ca5e3d 91 | # ├── tokenizer 92 | # │   ├── merges.txt -> ../../../blobs/76e821f1b6f0a9709293c3b6b51ed90980b3166b 93 | # │   ├── special_tokens_map.json -> ../../../blobs/2c2130b544c0c5a72d5d00da071ba130a9800fb2 94 | # │   ├── tokenizer_config.json -> ../../../blobs/2e8612a429492973fe60635b3f44a28b065cfac0 95 | # │   └── vocab.json -> ../../../blobs/469be27c5c010538f845f518c4f5e8574c78f7c8 96 | # ├── tokenizer_2 97 | # │   ├── merges.txt -> ../../../blobs/76e821f1b6f0a9709293c3b6b51ed90980b3166b 98 | # │   ├── special_tokens_map.json -> ../../../blobs/ae0c5be6f35217e51c4c000fd325d8de0294e99c 99 | # │   ├── tokenizer_config.json -> ../../../blobs/a8438e020c4497a429240d6b89e0bf9a6e2ffa92 100 | # │   └── vocab.json -> ../../../blobs/469be27c5c010538f845f518c4f5e8574c78f7c8 101 | # ├── unet 102 | # │   ├── config.json -> ../../../blobs/e53796e5812b975c00aefbeb475cce337c88fde9 103 | # │   └── diffusion_pytorch_model.safetensors -> ../../../blobs/7a516d65c0f41e82e7f3c16cad90d2362a01533beec7309e3606d59cd682797f 104 | # └── vae 105 | # ├── config.json -> ../../../blobs/6e9694046afd2a944dd17a2390b98773cacf2f7c 106 | # └── diffusion_pytorch_model.safetensors -> ../../../blobs/1598f3d24932bcfe6634e8b618ea1e30ab1d57f5aad13a6d2de446d2199f2341 107 | # 7 directories, 18 files 108 | # 109 | # Elapsed time: 29.2194 seconds 110 | 111 | # Windows 10 
112 | # +---------------------------------------------------------------------------------------+ 113 | # | NVIDIA-SMI 536.40 Driver Version: 536.40 CUDA Version: 12.2 | 114 | # |-----------------------------------------+----------------------+----------------------+ 115 | # | GPU Name TCC/WDDM | Bus-Id Disp.A | Volatile Uncorr. ECC | 116 | # | Fan Temp Perf Pwr:Usage/Cap | Memory-Usage | GPU-Util Compute M. | 117 | # | | | MIG M. | 118 | # |=========================================+======================+======================| 119 | # | 0 NVIDIA GeForce RTX 3060 WDDM | 00000000:05:00.0 On | N/A | 120 | # | 0% 46C P8 14W / 170W | 557MiB / 12288MiB | 8% Default | 121 | # | | | N/A | 122 | # +-----------------------------------------+----------------------+----------------------+ 123 | # 124 | # Python 3.10.6 125 | # torch 2.0.1+cu118 126 | # transformers 4.25.1 127 | # diffusers 0.18.1 128 | # 129 | # C:\Users\libsp\.cache\huggingface\hub\models--stabilityai--stable-diffusion-xl-base-0.9\snapshots\025709258a55cc924dc47efd88959f18ae79830e>tree /F 130 | # Folder PATH listing 131 | # Volume serial number is 82DA-B681 132 | # C:. 
133 | # │ model_index.json 134 | # │ 135 | # ├───scheduler 136 | # │ scheduler_config.json 137 | # │ 138 | # ├───text_encoder 139 | # │ config.json 140 | # │ pytorch_model.bin 141 | # │ 142 | # ├───text_encoder_2 143 | # │ config.json 144 | # │ pytorch_model.bin 145 | # │ 146 | # ├───tokenizer 147 | # │ merges.txt 148 | # │ special_tokens_map.json 149 | # │ tokenizer_config.json 150 | # │ vocab.json 151 | # │ 152 | # ├───tokenizer_2 153 | # │ merges.txt 154 | # │ special_tokens_map.json 155 | # │ tokenizer_config.json 156 | # │ vocab.json 157 | # │ 158 | # ├───unet 159 | # │ config.json 160 | # │ diffusion_pytorch_model.bin 161 | # │ 162 | # └───vae 163 | # config.json 164 | # diffusion_pytorch_model.bin 165 | # 166 | # Elapsed time: 69.5944 seconds 167 | -------------------------------------------------------------------------------- /inference_refiner.py: -------------------------------------------------------------------------------- 1 | from diffusers import DiffusionPipeline 2 | import torch 3 | import gc 4 | import time as time_ 5 | import random 6 | 7 | 8 | def inference(seed=-1): 9 | access_token = '' 10 | 11 | model_key_base = "stabilityai/stable-diffusion-xl-base-0.9" 12 | model_key_refiner = "stabilityai/stable-diffusion-xl-refiner-0.9" 13 | 14 | print("Loading model", model_key_base) 15 | pipe = DiffusionPipeline.from_pretrained( 16 | model_key_base, torch_dtype=torch.float16, resume_download=True, use_auth_token=access_token) 17 | pipe.enable_model_cpu_offload() 18 | 19 | pipe_refiner = DiffusionPipeline.from_pretrained( 20 | model_key_refiner, torch_dtype=torch.float16, resume_download=True, use_auth_token=access_token) 21 | pipe_refiner.enable_model_cpu_offload() 22 | 23 | if seed == -1: 24 | seed = int(random.randrange(4294967294)) 25 | 26 | device = 'cuda' 27 | generator = torch.Generator(device=device) 28 | 29 | generator = generator.manual_seed(seed) 30 | 31 | latents = torch.randn( 32 | (1, pipe.unet.in_channels, 1024 // 8, 1024 // 8), 33 | 
generator=generator, 34 | device=device, 35 | dtype=torch.float16 36 | ) 37 | 38 | prompt = '✨aesthetic✨ aliens walk among us in Las Vegas, scratchy found film photograph' 39 | negative_prompt = 'low quality' 40 | guidance_scale = 7 41 | num_inference_steps = 20 42 | refiner_strength = 0.3 43 | 44 | images = pipe(prompt=[prompt], negative_prompt=[negative_prompt], 45 | guidance_scale=guidance_scale, num_inference_steps=num_inference_steps, 46 | latents=latents).images 47 | 48 | gc.collect() 49 | torch.cuda.empty_cache() 50 | 51 | images[0].save(f'inference-{int(time_.time())}-{seed}.png') 52 | 53 | images = pipe_refiner(prompt=[prompt], negative_prompt=[negative_prompt], 54 | image=images, num_inference_steps=num_inference_steps, strength=refiner_strength).images 55 | 56 | gc.collect() 57 | torch.cuda.empty_cache() 58 | 59 | images[0].save(f'inference-{int(time_.time())}-{seed}-refiner.png') 60 | 61 | 62 | if __name__ == "__main__": 63 | start_time = time_.time() 64 | 65 | # Run your code 66 | inference(-1) 67 | 68 | end_time = time_.time() 69 | elapsed_time = end_time - start_time 70 | 71 | # Print the elapsed time 72 | print(f"Elapsed time: {elapsed_time:.4f} seconds") 73 | -------------------------------------------------------------------------------- /install.py: -------------------------------------------------------------------------------- 1 | import logging 2 | from pathlib import Path 3 | 4 | logger = logging.getLogger(__name__) 5 | 6 | 7 | def is_empty_line(line): 8 | return line is None or line.strip() == "" or line.strip().startswith("#") 9 | 10 | 11 | def check_versions() -> None: 12 | requirements = [ 13 | line 14 | for line in (Path(__file__).parent / "requirements_webui.txt") 15 | .read_text() 16 | .splitlines() 17 | if not is_empty_line(line) 18 | ] 19 | pip_command = f"install {' '.join(requirements)}" 20 | try: 21 | from launch import run_pip # from AUTOMATIC1111 22 | 23 | run_pip(pip_command, desc="sd-webui-xl-demo requirements_webui.txt") 24 | 
except Exception as e: 25 | logger.exception(e) 26 | 27 | 28 | check_versions() 29 | -------------------------------------------------------------------------------- /javascript/addAgreementLinks.js: -------------------------------------------------------------------------------- 1 | 2 | onAfterUiUpdate(function () { 3 | const divId = "setting_xldemo_txt2img_agreement_links"; 4 | let xlDemoTxt2ImgAgreementLinksDiv = document.getElementById(divId); 5 | if (xlDemoTxt2ImgAgreementLinksDiv == null) { 6 | 7 | xlDemoTxt2ImgAgreementLinksDiv = document.createElement("div"); 8 | xlDemoTxt2ImgAgreementLinksDiv.id = divId; 9 | const lines = [ 10 | "

Accept the SDXL 0.9 Research License Agreement here

", 11 | "

Accept the SDXL 1.0 Research License Agreement here

" 12 | ]; 13 | xlDemoTxt2ImgAgreementLinksDiv.innerHTML = lines.join(""); 14 | 15 | const settingUiDiv = document.getElementById("setting_xldemo_txt2img_model"); 16 | 17 | if (settingUiDiv && settingUiDiv.parentNode) { 18 | settingUiDiv.parentNode.insertBefore(xlDemoTxt2ImgAgreementLinksDiv, settingUiDiv); 19 | } 20 | } 21 | 22 | }); 23 | -------------------------------------------------------------------------------- /javascript/generationParams.js: -------------------------------------------------------------------------------- 1 | // attaches listeners to the xldemo_txt2img and img2img galleries to update displayed generation param text when the image changes 2 | 3 | let xldemo_txt2img_gallery, xldemo_txt2img_modal = undefined; 4 | onAfterUiUpdate(function() { 5 | if (!xldemo_txt2img_gallery) { 6 | xldemo_txt2img_gallery = attachGalleryListeners("xldemo_txt2img"); 7 | } 8 | if (!xldemo_txt2img_modal) { 9 | xldemo_txt2img_modal = gradioApp().getElementById('lightboxModal'); 10 | xldemo_txt2img_modalObserver.observe(xldemo_txt2img_modal, {attributes: true, attributeFilter: ['style']}); 11 | } 12 | }); 13 | 14 | let xldemo_txt2img_modalObserver = new MutationObserver(function(mutations) { 15 | mutations.forEach(function(mutationRecord) { 16 | let selectedTab = gradioApp().querySelector('#tabs div button.selected')?.innerText; 17 | if (mutationRecord.target.style.display === 'none' && (selectedTab === 'xldemo_txt2img')) { 18 | gradioApp().getElementById(selectedTab + "_generation_info_button")?.click(); 19 | } 20 | }); 21 | }); 22 | 23 | function attachGalleryListeners(tab_name) { 24 | var gallery = gradioApp().querySelector('#' + tab_name + '_gallery'); 25 | gallery?.addEventListener('click', () => gradioApp().getElementById(tab_name + "_generation_info_button").click()); 26 | gallery?.addEventListener('keydown', (e) => { 27 | if (e.keyCode == 37 || e.keyCode == 39) { // left or right arrow 28 | gradioApp().getElementById(tab_name + 
"_generation_info_button").click(); 29 | } 30 | }); 31 | return gallery; 32 | } 33 | -------------------------------------------------------------------------------- /requirements_webui.txt: -------------------------------------------------------------------------------- 1 | diffusers==0.18.1 2 | invisible-watermark==0.2.0 3 | transformers==4.27.4 4 | -------------------------------------------------------------------------------- /scripts/sd_webui_xldemo_txt2img.py: -------------------------------------------------------------------------------- 1 | import gradio as gr 2 | from modules.shared import opts, OptionInfo 3 | from modules import script_callbacks 4 | from xldemo_txt2img_ui import make_ui 5 | from xldemo_txt2img import XLDEMO_MODEL_CHOICES, XLDEMO_GENERATOR_DEVICE_CHOICES 6 | 7 | 8 | def on_ui_tabs(): 9 | return [(make_ui(), "SDXL Demo", "xldemo_txt2img")] 10 | 11 | 12 | def on_ui_settings(): 13 | section = ("xldemo_txt2img", "SDXL Demo") 14 | 15 | opts.add_option( 16 | "xldemo_txt2img_huggingface_access_token", OptionInfo( 17 | "", "Huggingface access token (Restart WebUI to take effect)", section=section) 18 | ) 19 | 20 | opts.add_option( 21 | "xldemo_txt2img_model", OptionInfo(XLDEMO_MODEL_CHOICES[0], "Model (Restart WebUI to take effect)", gr.Dropdown, lambda: { 22 | "choices": XLDEMO_MODEL_CHOICES}, section=section) 23 | ) 24 | 25 | opts.add_option( 26 | "xldemo_txt2img_generator_device", OptionInfo(XLDEMO_GENERATOR_DEVICE_CHOICES[0], "Generator device (Restart WebUI to take effect)", gr.Radio, lambda: { 27 | "choices": XLDEMO_GENERATOR_DEVICE_CHOICES}, section=section) 28 | ) 29 | 30 | opts.add_option( 31 | "xldemo_txt2img_load_refiner_on_startup", OptionInfo( 32 | True, "Enable refiner (Restart WebUI to take effect)", section=section) 33 | ) 34 | 35 | 36 | script_callbacks.on_ui_tabs(on_ui_tabs) 37 | script_callbacks.on_ui_settings(on_ui_settings) 38 | -------------------------------------------------------------------------------- /style.css: 
-------------------------------------------------------------------------------- 1 | #xldemo_txt2img_generate { 2 | min-height: 4.5em; 3 | } 4 | 5 | #xldemo_txt2img_refine { 6 | min-height: 4.5em; 7 | } 8 | 9 | #xldemo_txt2img_memory_column { 10 | min-width: min(60px, 100%) !important; 11 | } 12 | 13 | #xldemo_txt2img_unload_sd_model { 14 | width: 6em; 15 | height: 10em; 16 | } 17 | 18 | #setting_xldemo_txt2img_huggingface_access_token textarea { 19 | -webkit-text-security: disc; /* Safari, Chrome, and newer versions of Opera */ 20 | -moz-text-security: disc; /* Firefox */ 21 | -ms-text-security: disc; /* Internet Explorer */ 22 | } -------------------------------------------------------------------------------- /xldemo_txt2img.py: -------------------------------------------------------------------------------- 1 | import gradio as gr 2 | import torch 3 | import gc 4 | import json 5 | import random 6 | 7 | from diffusers import DiffusionPipeline 8 | from diffusers.schedulers.scheduling_dpmsolver_multistep import DPMSolverMultistepScheduler as dpmsolver_multistep 9 | from diffusers.schedulers.scheduling_deis_multistep import DEISMultistepScheduler as deis_multistep 10 | from diffusers.schedulers.scheduling_unipc_multistep import UniPCMultistepScheduler as unipc_multistep 11 | from diffusers.schedulers.scheduling_k_dpm_2_ancestral_discrete import KDPM2AncestralDiscreteScheduler as k_dpm_2_ancestral_discrete 12 | from diffusers.schedulers.scheduling_ddim import DDIMScheduler as ddim 13 | from diffusers.schedulers.scheduling_dpmsolver_singlestep import DPMSolverSinglestepScheduler as dpmsolver_singlestep 14 | from diffusers.schedulers.scheduling_euler_ancestral_discrete import EulerAncestralDiscreteScheduler as euler_ancestral_discrete 15 | from diffusers.schedulers.scheduling_ddpm import DDPMScheduler as ddpm 16 | from diffusers.schedulers.scheduling_euler_discrete import EulerDiscreteScheduler as euler_discrete 17 | from 
diffusers.schedulers.scheduling_k_dpm_2_discrete import KDPM2DiscreteScheduler as k_dpm_2_discrete 18 | from diffusers.schedulers.scheduling_pndm import PNDMScheduler as pndm 19 | from diffusers.schedulers.scheduling_dpmsolver_sde import DPMSolverSDEScheduler as dpmsolver_sde 20 | from diffusers.schedulers.scheduling_lms_discrete import LMSDiscreteScheduler as lms_discrete 21 | from diffusers.schedulers.scheduling_heun_discrete import HeunDiscreteScheduler as heun_discrete 22 | 23 | from modules.shared import opts 24 | import modules.images as sd_images 25 | from modules import generation_parameters_copypaste 26 | from modules.devices import torch_gc 27 | 28 | XLDEMO_MODEL_CHOICES = ["SDXL 0.9", 29 | "SDXL 0.9 (fp16)", "SDXL 1.0", "SDXL 1.0 (fp16)"] 30 | 31 | XLDEMO_GENERATOR_DEVICE_CHOICES = ['cpu', 'cuda', "I don't know", "I don't care"] 32 | 33 | XLDEMO_HUGGINGFACE_ACCESS_TOKEN = opts.data.get( 34 | "xldemo_txt2img_huggingface_access_token", "") 35 | 36 | XLDEMO_LOAD_REFINER_ON_STARTUP = opts.data.get( 37 | "xldemo_txt2img_load_refiner_on_startup", True) 38 | 39 | XLDEMO_MODEL = opts.data.get( 40 | "xldemo_txt2img_model", XLDEMO_MODEL_CHOICES[0]) 41 | 42 | XLDEMO_GENERATOR_DEVICE = opts.data.get( 43 | "xldemo_txt2img_generator_device", XLDEMO_GENERATOR_DEVICE_CHOICES[0]) 44 | 45 | XLDEMO_SCHEDULER_CHOICES = [ 46 | 'euler_discrete', 47 | 'ddim', 48 | 'ddpm', 49 | 'deis_multistep', 50 | 'dpmsolver_multistep', 51 | 'dpmsolver_sde', 52 | 'dpmsolver_singlestep', 53 | 'euler_ancestral_discrete', 54 | 'heun_discrete', 55 | 'k_dpm_2_ancestral_discrete', 56 | 'k_dpm_2_discrete', 57 | 'lms_discrete', 58 | 'pndm', 59 | 'unipc_multistep', 60 | ] 61 | 62 | 63 | def create_infotext(prompt, negative_prompt, seeds, sampler, steps, width, height, cfg_scale, index): 64 | 65 | generation_params = { 66 | "Sampler": sampler, 67 | "Steps": steps, 68 | "CFG scale": cfg_scale, 69 | "Seed": seeds[index], 70 | "Size": f"{width}x{height}", 71 | } 72 | 73 | generation_params['Model'] = 
def create_infotext_for_refiner(prompt, negative_prompt, seeds, sampler, steps, width, height, index, refiner_strength):
    """Build the infotext line recorded with a refiner pass.

    Mirrors create_infotext() but records refiner-specific fields
    (refiner steps / strength) instead of CFG scale.

    prompt / negative_prompt are per-batch lists; `index` selects the entry.
    Returns a single "prompt\\nNegative prompt: ...\\nkey: value, ..." string.
    """
    generation_params = {
        "Sampler": sampler,
        "Seed": seeds[index],
        "Size": f"{width}x{height}",
        "Refiner Steps": steps,
        "Refiner Strength": refiner_strength,
    }

    generation_params['Model'] = XLDEMO_MODEL
    generation_params['Comment'] = "https://bit.ly/3pJKuhx"

    generation_params_text = ", ".join(
        [k if k == v else f'{k}: {generation_parameters_copypaste.quote(v)}'
         for k, v in generation_params.items() if v is not None])

    negative_prompt_text = f"\nNegative prompt: {negative_prompt[index]}" if negative_prompt else ""

    return f"{prompt[index]}{negative_prompt_text}\n{generation_params_text}".strip()


class XLDemo:
    """Loads the SDXL base (and optionally refiner) pipelines and runs txt2img / refine."""

    def __init__(self):
        self.model_name = XLDEMO_MODEL
        print(f"Using {self.model_name}")

        self.model_key_base = "stabilityai/stable-diffusion-xl-base-0.9"
        self.model_key_refiner = "stabilityai/stable-diffusion-xl-refiner-0.9"

        if self.model_name in ("SDXL 1.0", "SDXL 1.0 (fp16)"):
            self.model_key_base = "stabilityai/stable-diffusion-xl-base-1.0"
            self.model_key_refiner = "stabilityai/stable-diffusion-xl-refiner-1.0"

        # Use refiner (enabled by default)
        self.load_refiner_on_startup = XLDEMO_LOAD_REFINER_ON_STARTUP

        # FIX: resolve the token once up front. Previously `access_token` was
        # only assigned when the token was non-blank, so a blank token raised
        # NameError inside the try blocks instead of a meaningful message.
        access_token = None
        if XLDEMO_HUGGINGFACE_ACCESS_TOKEN is not None and XLDEMO_HUGGINGFACE_ACCESS_TOKEN.strip() != '':
            access_token = XLDEMO_HUGGINGFACE_ACCESS_TOKEN

        # FIX: always define both pipeline attributes. Previously
        # `self.pipe_refiner` was never assigned when load_refiner_on_startup
        # was False, so can_refine() raised AttributeError.
        self.pipe = None
        self.pipe_refiner = None

        try:
            print("Loading model", self.model_key_base)
            if self.model_name in ('SDXL 0.9 (fp16)', 'SDXL 1.0 (fp16)'):
                self.pipe = DiffusionPipeline.from_pretrained(
                    self.model_key_base, torch_dtype=torch.float16,
                    resume_download=True, variant='fp16', use_auth_token=access_token)
            else:
                self.pipe = DiffusionPipeline.from_pretrained(
                    self.model_key_base, torch_dtype=torch.float16,
                    resume_download=True, use_auth_token=access_token)
            # Keep VRAM usage down by offloading idle submodules to CPU.
            self.pipe.enable_model_cpu_offload()
        except Exception as ex:
            self.pipe = None
            print(str(ex))
            print(f'Problem loading {self.model_key_base} weight')

        try:
            if self.load_refiner_on_startup:
                print("Loading model", self.model_key_refiner)
                if self.model_name in ('SDXL 0.9 (fp16)', 'SDXL 1.0 (fp16)'):
                    self.pipe_refiner = DiffusionPipeline.from_pretrained(
                        self.model_key_refiner, torch_dtype=torch.float16,
                        resume_download=True, variant='fp16', use_auth_token=access_token)
                else:
                    self.pipe_refiner = DiffusionPipeline.from_pretrained(
                        self.model_key_refiner, torch_dtype=torch.float16,
                        resume_download=True, use_auth_token=access_token)
                self.pipe_refiner.enable_model_cpu_offload()
        except Exception as ex:
            self.pipe_refiner = None
            print(str(ex))
            print(f'Problem loading {self.model_key_refiner} weight')

    def get_scheduler_by_name(self, name, pipe, seeds):
        """Return a scheduler built from `pipe`'s current scheduler config.

        Unknown names fall back to euler_discrete (same as before).
        """
        config = pipe.scheduler.config

        # dpmsolver_sde additionally needs the per-image seeds for its
        # noise sampler, so it cannot share the plain dispatch below.
        if name == 'dpmsolver_sde':
            return dpmsolver_sde.from_config({**config, 'noise_sampler_seed': seeds})

        schedulers = {
            'dpmsolver_multistep': dpmsolver_multistep,
            'deis_multistep': deis_multistep,
            'unipc_multistep': unipc_multistep,
            'k_dpm_2_ancestral_discrete': k_dpm_2_ancestral_discrete,
            'ddim': ddim,
            'dpmsolver_singlestep': dpmsolver_singlestep,
            'euler_ancestral_discrete': euler_ancestral_discrete,
            'ddpm': ddpm,
            'euler_discrete': euler_discrete,
            'k_dpm_2_discrete': k_dpm_2_discrete,
            'pndm': pndm,
            'lms_discrete': lms_discrete,
            'heun_discrete': heun_discrete,
        }
        return schedulers.get(name, euler_discrete).from_config(config)

    def get_generator(self, seed):
        """Create a torch.Generator seeded with `seed` on the configured device."""
        device = 'cuda' if XLDEMO_GENERATOR_DEVICE == 'cuda' else 'cpu'
        generator = torch.Generator(device=device)
        generator.manual_seed(seed)
        return generator

    def get_fixed_seed(self, seed):
        """Return `seed` unchanged, or a fresh random 32-bit seed when it is None/''/-1."""
        if seed is None or seed == '' or seed == -1:
            return int(random.randrange(4294967294))
        return seed

    def infer(self, prompt, negative, width, height, cfg_scale, seed, batch_count, batch_size, sampler, steps):
        """Run txt2img with the base pipeline.

        Returns (images, generation_info_json, first_infotext, html_log).
        Raises if the base pipeline failed to load.
        """
        # FIX: fail with a clear message instead of an opaque IndexError on
        # info_texts[0] when the base pipeline is not loaded.
        if not self.pipe:
            raise Exception(f'{self.model_key_base} is not loaded')

        prompt, negative = [prompt] * batch_size, [negative] * batch_size

        gen_info_seeds = []
        images_b64_list = []
        info_texts = []

        seed_base = self.get_fixed_seed(int(seed))

        for bi in range(batch_count):
            # Deterministic, distinct seed per image across the whole run.
            seeds = [seed_base + bi * batch_size + i for i in range(batch_size)]
            generators = [self.get_generator(seeds[i]) for i in range(batch_size)]

            self.pipe.scheduler = self.get_scheduler_by_name(sampler, self.pipe, seeds)
            self.pipe.scheduler.set_timesteps(steps)

            images = self.pipe(prompt=prompt, width=width, height=height,
                               negative_prompt=negative, guidance_scale=cfg_scale,
                               num_inference_steps=steps, generator=generators).images

            # Free as much memory as possible between batches.
            gc.collect()
            torch_gc()
            torch.cuda.empty_cache()

            for i, image in enumerate(images):
                info = create_infotext(
                    prompt, negative, seeds, sampler, steps, width, height, cfg_scale, i)
                info_texts.append(info)
                sd_images.save_image(image, opts.outdir_txt2img_samples, '', seeds[i],
                                     prompt, opts.samples_format, info=info)
                images_b64_list.append(image)
                gen_info_seeds.append(seeds[i])

        return images_b64_list, json.dumps({'all_prompts': prompt, 'index_of_first_image': 0,
                                            'all_seeds': gen_info_seeds, "infotexts": info_texts}), info_texts[0], ''

    def refine(self, prompt, negative, seed, sampler, steps, enable_refiner, image_to_refine, refiner_strength):
        """Run the refiner pipeline over a single source image.

        Returns (images, generation_info_json, first_infotext, html_log).
        Raises if the refiner is disabled or failed to load.
        """
        # FIX: fail with a clear message instead of an opaque IndexError on
        # info_texts[0] when refining is not possible.
        if not (self.load_refiner_on_startup and self.pipe_refiner and enable_refiner):
            raise Exception('Refiner is not loaded or not enabled')

        prompt, negative = [prompt] * 1, [negative] * 1

        gen_info_seeds = []
        images_b64_list = []
        info_texts = []

        # Infotext records the source image's dimensions.
        width, height = image_to_refine.size

        images = [image_to_refine]

        seed_base = self.get_fixed_seed(int(seed))
        seeds = [seed_base + i for i in range(len(images))]
        generators = [self.get_generator(seeds[i]) for i in range(len(images))]

        self.pipe_refiner.scheduler = self.get_scheduler_by_name(sampler, self.pipe_refiner, seeds)
        self.pipe_refiner.scheduler.set_timesteps(steps)

        images = self.pipe_refiner(prompt=prompt, negative_prompt=negative, image=images,
                                   num_inference_steps=steps, strength=refiner_strength,
                                   generator=generators).images

        gc.collect()
        torch_gc()
        torch.cuda.empty_cache()

        for i, image in enumerate(images):
            info = create_infotext_for_refiner(
                prompt, negative, seeds, sampler, steps, width, height, i, refiner_strength)
            info_texts.append(info)
            sd_images.save_image(image, opts.outdir_txt2img_samples, '', seeds[i],
                                 prompt, opts.samples_format, info=info, suffix="-refiner")
            images_b64_list.append(image)
            gen_info_seeds.append(seeds[i])

        return images_b64_list, json.dumps({'all_prompts': prompt, 'index_of_first_image': 0,
                                            'all_seeds': gen_info_seeds, "infotexts": info_texts}), info_texts[0], ''


# Module-level singleton; pipelines are loaded (or fail gracefully) at import.
xldemo_txt2img = XLDemo()


def can_infer():
    """True when the base pipeline loaded successfully."""
    return xldemo_txt2img.pipe is not None


def can_refine():
    """True when the refiner pipeline loaded successfully."""
    return xldemo_txt2img.pipe_refiner is not None


def do_xldemo_txt2img_infer(prompt, negative, width, height, scale, seed, batch_count, batch_size, sampler, steps):
    """UI entry point for txt2img; surfaces any failure as a gradio error popup."""
    try:
        return xldemo_txt2img.infer(prompt, negative, width, height, scale, seed,
                                    batch_count, batch_size, sampler, steps)
    except Exception as ex:
        raise gr.Error(f"Error: {str(ex)}")


def do_xldemo_txt2img_refine(prompt, negative, seed, sampler, steps, enable_refiner, image_to_refine, refiner_strength):
    """UI entry point for refining; surfaces any failure as a gradio error popup."""
    if image_to_refine is None:
        # FIX: was an f-string with no placeholders.
        raise gr.Error("Error: Please set the image for refiner")

    try:
        return xldemo_txt2img.refine(prompt, negative, seed, sampler, steps,
                                     enable_refiner, image_to_refine, refiner_strength)
    except Exception as ex:
        raise gr.Error(f"Error: {str(ex)}")
# ---------------------------------------------------------------- xldemo_txt2img_ui.py

import gradio as gr

from modules.shared import opts
from modules.ui_components import ToolButton
from modules import sd_models

from xldemo_txt2img import XLDEMO_HUGGINGFACE_ACCESS_TOKEN, XLDEMO_LOAD_REFINER_ON_STARTUP
from xldemo_txt2img import XLDEMO_SCHEDULER_CHOICES
from xldemo_txt2img import do_xldemo_txt2img_infer, do_xldemo_txt2img_refine, can_infer, can_refine
from xldemo_txt2img_ui_common import create_seed_inputs, create_output_panel, connect_reuse_seed, gr_show


switch_values_symbol = '\U000021C5'  # ⇅


def make_ui():
    """Build the extension's txt2img tab.

    When no Hugging Face access token is configured, renders only an
    instruction panel; otherwise builds the full generation UI and wires
    up its events.
    """
    id_part = 'xldemo_txt2img'

    if XLDEMO_HUGGINGFACE_ACCESS_TOKEN is None or XLDEMO_HUGGINGFACE_ACCESS_TOKEN.strip() == '':
        with gr.Blocks(analytics_enabled=False) as ui_component:
            # NOTE(review): the original HTML body was stripped from this copy
            # of the file — reconstructed instruction text; verify upstream.
            gr.HTML(value="""<div>Please configure your Hugging Face access token in the extension settings to use this tab.</div>""")

        return ui_component

    else:
        with gr.Blocks(analytics_enabled=False) as ui_component:

            # --- top row: prompts + action buttons -------------------------
            with gr.Row(elem_id=f"{id_part}_toprow", variant="compact"):
                with gr.Column(scale=14, elem_id=f"{id_part}_prompt_container"):
                    with gr.Row():
                        with gr.Column(scale=80):
                            with gr.Row():
                                xldemo_txt2img_prompt = gr.Textbox(
                                    label="Prompt", elem_id=f"{id_part}_prompt", show_label=False, lines=3,
                                    placeholder="Prompt (press Ctrl+Enter or Alt+Enter to generate)",
                                    elem_classes=["prompt"])
                                # Hidden component used as a JS round-trip slot.
                                xldemo_txt2img_dummy_component = gr.Label(visible=False)

                    with gr.Row():
                        with gr.Column(scale=80):
                            with gr.Row():
                                xldemo_txt2img_negative_prompt = gr.Textbox(
                                    label="Negative prompt", elem_id=f"{id_part}_neg_prompt", show_label=False,
                                    lines=3,
                                    placeholder="Negative prompt (press Ctrl+Enter or Alt+Enter to generate)",
                                    elem_classes=["prompt"])

                with gr.Column(scale=1, elem_id=f"{id_part}_memory_column"):
                    xldemo_txt2img_unload_sd_model = gr.Button(
                        'Unload SD checkpoint to free VRAM', elem_id=f"{id_part}_unload_sd_model")

                with gr.Column(scale=4, elem_id=f"{id_part}_actions_column"):
                    with gr.Row(elem_id=f"{id_part}_generate_box", elem_classes="generate-box"):
                        # Disabled when the base pipeline failed to load.
                        xldemo_txt2img_submit = gr.Button(
                            'Generate', elem_id=f"{id_part}_generate", variant='primary',
                            interactive=can_infer())

                    with gr.Row(elem_id=f"{id_part}_refine_box", elem_classes="refine-box"):
                        # Enabled later by the refiner checkbox handler.
                        xldemo_txt2img_refine = gr.Button(
                            'Refine', interactive=False, elem_id=f"{id_part}_refine", variant='primary')

            # --- generation settings ---------------------------------------
            with gr.Row():
                with gr.Column():

                    with gr.Row():
                        xldemo_txt2img_sampler = gr.Dropdown(
                            label='Sampling method', elem_id=f"{id_part}_sampling",
                            choices=XLDEMO_SCHEDULER_CHOICES, value=XLDEMO_SCHEDULER_CHOICES[0])
                        xldemo_txt2img_steps = gr.Slider(
                            minimum=1, maximum=150, step=1,
                            elem_id=f"{id_part}_steps", label="Sampling steps", value=20)

                    with gr.Row():
                        with gr.Column(elem_id="xldemo_txt2img_column_size", scale=4):
                            xldemo_txt2img_width = gr.Slider(
                                minimum=64, maximum=2048, step=8, label="Width", value=1024,
                                elem_id="xldemo_txt2img_width")
                            xldemo_txt2img_height = gr.Slider(
                                minimum=64, maximum=2048, step=8, label="Height", value=1024,
                                elem_id="xldemo_txt2img_height")

                        with gr.Column(elem_id="xldemo_txt2img_dimensions_row", scale=1,
                                       elem_classes="dimensions-tools"):
                            xldemo_txt2img_res_switch_btn = ToolButton(
                                value=switch_values_symbol, elem_id="xldemo_txt2img_res_switch_btn",
                                label="Switch dims")

                        with gr.Column(elem_id="txt2img_column_batch"):
                            xldemo_txt2img_batch_count = gr.Slider(
                                minimum=1, step=1, label='Batch count', value=1,
                                elem_id=f"{id_part}_batch_count")
                            xldemo_txt2img_batch_size = gr.Slider(
                                minimum=1, maximum=8, step=1, label='Batch size', value=1,
                                elem_id=f"{id_part}_batch_size")

                    xldemo_txt2img_cfg_scale = gr.Slider(
                        minimum=1.0, maximum=30.0, step=0.5,
                        label='CFG Scale', value=7.0, elem_id=f"{id_part}_cfg_scale")

                    xldemo_txt2img_seed, xldemo_txt2img_reuse_seed = create_seed_inputs(id_part)

                    xldemo_txt2img_enable_refiner = gr.Checkbox(
                        visible=XLDEMO_LOAD_REFINER_ON_STARTUP,
                        label='Refiner', value=False, elem_id=f"{id_part}_enable_refiner")

                    with gr.Row(visible=False, elem_id="xldemo_txt2img_refiner_group",
                                variant="compact") as xldemo_txt2img_refiner_group:
                        with gr.Column():
                            xldemo_txt2img_image_to_refine = gr.Image(label="Image", type='pil')

                        with gr.Column():
                            xldemo_txt2img_refiner_steps = gr.Slider(
                                minimum=1, maximum=150, step=1,
                                elem_id=f"{id_part}_refiner_steps", label="Refiner steps", value=20)
                            xldemo_txt2img_refiner_strength = gr.Slider(
                                interactive=XLDEMO_LOAD_REFINER_ON_STARTUP,
                                label="Refiner Strength", minimum=0, maximum=1.0, value=0.3, step=0.1,
                                elem_id=f"{id_part}_refiner_strength")

                with gr.Column():
                    xldemo_txt2img_gallery, xldemo_txt2img_generation_info, xldemo_txt2img_html_info, xldemo_txt2img_html_log = create_output_panel(
                        id_part, opts.outdir_txt2img_samples)

            # --- event wiring ----------------------------------------------
            connect_reuse_seed(xldemo_txt2img_seed, xldemo_txt2img_reuse_seed,
                               xldemo_txt2img_generation_info, xldemo_txt2img_dummy_component,
                               is_subseed=False)

            xldemo_txt2img_res_switch_btn.click(
                fn=None, _js="function(){switchWidthHeight('xldemo_txt2img')}",
                inputs=None, outputs=None, show_progress=False)

            # Show/hide the refiner group and toggle the Refine button together.
            xldemo_txt2img_enable_refiner.change(
                fn=lambda x: gr_show(x),
                inputs=[xldemo_txt2img_enable_refiner],
                outputs=[xldemo_txt2img_refiner_group],
                show_progress=False,
            )

            xldemo_txt2img_enable_refiner.change(
                fn=lambda x: gr.update(interactive=x and can_refine()),
                inputs=[xldemo_txt2img_enable_refiner],
                outputs=[xldemo_txt2img_refine],
                show_progress=False,
            )

            with gr.Row():
                # NOTE(review): the original footer markup was stripped from this
                # copy — only its text survived; verify links against upstream.
                gr.HTML(value="<div>Watch 📺 video for detailed explanation 🔍 ☕️ "
                              "Please consider supporting me in Patreon here 🍻</div>")

            xldemo_txt2img_submit.click(fn=do_xldemo_txt2img_infer, inputs=[
                xldemo_txt2img_prompt,
                xldemo_txt2img_negative_prompt,
                xldemo_txt2img_width,
                xldemo_txt2img_height,
                xldemo_txt2img_cfg_scale,
                xldemo_txt2img_seed,
                xldemo_txt2img_batch_count,
                xldemo_txt2img_batch_size,
                xldemo_txt2img_sampler,
                xldemo_txt2img_steps
            ], outputs=[
                xldemo_txt2img_gallery,
                xldemo_txt2img_generation_info,
                xldemo_txt2img_html_info,
                xldemo_txt2img_html_log,
            ], api_name="do_xldemo_txt2img_infer")

            xldemo_txt2img_refine.click(fn=do_xldemo_txt2img_refine, inputs=[
                xldemo_txt2img_prompt,
                xldemo_txt2img_negative_prompt,
                xldemo_txt2img_seed,
                xldemo_txt2img_sampler,
                xldemo_txt2img_refiner_steps,
                xldemo_txt2img_enable_refiner,
                xldemo_txt2img_image_to_refine,
                xldemo_txt2img_refiner_strength
            ], outputs=[
                xldemo_txt2img_gallery,
                xldemo_txt2img_generation_info,
                xldemo_txt2img_html_info,
                xldemo_txt2img_html_log,
            ], api_name="do_xldemo_txt2img_refine")

            xldemo_txt2img_unload_sd_model.click(
                fn=sd_models.unload_model_weights,
                inputs=[],
                outputs=[]
            )

        return ui_component


# ---------------------------------------------------------------- xldemo_txt2img_ui_common.py

import json
import html
import os
import platform
import sys

import gradio as gr
import subprocess as sp

from modules import errors
import modules.images


folder_symbol = '\U0001f4c2'  # 📂
refresh_symbol = '\U0001f504'  # 🔄
random_symbol = '\U0001f3b2\ufe0f'  # 🎲️
reuse_symbol = '\u267b\ufe0f'  # ♻️


def gr_show(visible=True):
    """Gradio update payload toggling a component's visibility."""
    return {"visible": visible, "__type__": "update"}
create_seed_inputs(target_interface): 25 | with gr.Row(elem_id=f"{target_interface}_seed_row", variant="compact"): 26 | seed = gr.Number(label='Seed', value=-1, 27 | elem_id=f"{target_interface}_seed") 28 | seed.style(container=False) 29 | random_seed = gr.Button( 30 | random_symbol, elem_id=f"{target_interface}_random_seed", label='Random seed', elem_classes=["tool"]) 31 | reuse_seed = gr.Button( 32 | reuse_symbol, elem_id=f"{target_interface}_reuse_seed", label='Reuse seed', elem_classes=["tool"]) 33 | 34 | random_seed.click(fn=None, _js="function(){setRandomSeed('" + 35 | target_interface + "_seed')}", show_progress=False, inputs=[], outputs=[]) 36 | 37 | return seed, reuse_seed 38 | 39 | 40 | def connect_reuse_seed(seed: gr.Number, reuse_seed: gr.Button, generation_info: gr.Textbox, dummy_component, is_subseed): 41 | """ Connects a 'reuse (sub)seed' button's click event so that it copies last used 42 | (sub)seed value from generation info the to the seed field. If copying subseed and subseed strength 43 | was 0, i.e. 
no variation seed was used, it copies the normal seed value instead.""" 44 | def copy_seed(gen_info_string: str, index): 45 | res = -1 46 | 47 | try: 48 | gen_info = json.loads(gen_info_string) 49 | index -= gen_info.get('index_of_first_image', 0) 50 | 51 | if is_subseed and gen_info.get('subseed_strength', 0) > 0: 52 | all_subseeds = gen_info.get('all_subseeds', [-1]) 53 | res = all_subseeds[index if 0 <= 54 | index < len(all_subseeds) else 0] 55 | else: 56 | all_seeds = gen_info.get('all_seeds', [-1]) 57 | res = all_seeds[index if 0 <= index < len(all_seeds) else 0] 58 | 59 | except json.decoder.JSONDecodeError: 60 | if gen_info_string: 61 | errors.report( 62 | f"Error parsing JSON generation info: {gen_info_string}") 63 | 64 | return [res, gr_show(False)] 65 | 66 | reuse_seed.click( 67 | fn=copy_seed, 68 | _js="(x, y) => [x, selected_gallery_index()]", 69 | show_progress=False, 70 | inputs=[generation_info, dummy_component], 71 | outputs=[seed, dummy_component] 72 | ) 73 | 74 | 75 | def update_generation_info(generation_info, html_info, img_index): 76 | 77 | try: 78 | generation_info = json.loads(generation_info) 79 | if img_index < 0 or img_index >= len(generation_info["infotexts"]): 80 | return html_info, gr.update() 81 | return plaintext_to_html(generation_info["infotexts"][img_index]), gr.update() 82 | except Exception: 83 | pass 84 | # if the json parse or anything else fails, just return the old html_info 85 | return html_info, gr.update() 86 | 87 | 88 | def plaintext_to_html(text): 89 | text = "

" + \ 90 | "
\n".join([f"{html.escape(x)}" for x in text.split('\n')]) + "

" 91 | return text 92 | 93 | 94 | def create_output_panel(tabname, outdir): 95 | from modules import shared 96 | import modules.generation_parameters_copypaste as parameters_copypaste 97 | 98 | def open_folder(f): 99 | if not os.path.exists(f): 100 | print( 101 | f'Folder "{f}" does not exist. After you create an image, the folder will be created.') 102 | return 103 | elif not os.path.isdir(f): 104 | print(f""" 105 | WARNING 106 | An open_folder request was made with an argument that is not a folder. 107 | This could be an error or a malicious attempt to run code on your computer. 108 | Requested path was: {f} 109 | """, file=sys.stderr) 110 | return 111 | 112 | if not shared.cmd_opts.hide_ui_dir_config: 113 | path = os.path.normpath(f) 114 | if platform.system() == "Windows": 115 | os.startfile(path) 116 | elif platform.system() == "Darwin": 117 | sp.Popen(["open", path]) 118 | elif "microsoft-standard-WSL2" in platform.uname().release: 119 | sp.Popen(["wsl-open", path]) 120 | else: 121 | sp.Popen(["xdg-open", path]) 122 | 123 | with gr.Column(variant='panel', elem_id=f"{tabname}_results"): 124 | with gr.Group(elem_id=f"{tabname}_gallery_container"): 125 | result_gallery = gr.Gallery( 126 | label='Output', show_label=False, elem_id=f"{tabname}_gallery").style(columns=4) 127 | 128 | generation_info = None 129 | with gr.Column(): 130 | with gr.Row(elem_id=f"image_buttons_{tabname}", elem_classes="image-buttons"): 131 | open_folder_button = gr.Button( 132 | folder_symbol, visible=not shared.cmd_opts.hide_ui_dir_config) 133 | 134 | if tabname != "extras": 135 | save = gr.Button( 136 | 'Save', elem_id=f'save_{tabname}', interactive=False) 137 | save_zip = gr.Button( 138 | 'Zip', elem_id=f'save_zip_{tabname}', interactive=False) 139 | 140 | buttons = parameters_copypaste.create_buttons( 141 | ["img2img", "inpaint", "extras"]) 142 | 143 | open_folder_button.click( 144 | fn=lambda: open_folder(shared.opts.outdir_samples or outdir), 145 | inputs=[], 146 | outputs=[], 147 | 
) 148 | 149 | if tabname != "extras": 150 | download_files = gr.File(None, file_count="multiple", interactive=False, 151 | show_label=False, visible=False, elem_id=f'download_files_{tabname}') 152 | 153 | with gr.Group(): 154 | html_info = gr.HTML( 155 | elem_id=f'html_info_{tabname}', elem_classes="infotext") 156 | html_log = gr.HTML(elem_id=f'html_log_{tabname}') 157 | 158 | generation_info = gr.Textbox( 159 | visible=False, elem_id=f'generation_info_{tabname}') 160 | if tabname == 'txt2img' or tabname == 'img2img' or tabname == 'xldemo_txt2img': 161 | generation_info_button = gr.Button( 162 | visible=False, elem_id=f"{tabname}_generation_info_button") 163 | generation_info_button.click( 164 | fn=update_generation_info, 165 | _js="function(x, y, z){ return [x, y, selected_gallery_index()] }", 166 | inputs=[generation_info, html_info, html_info], 167 | outputs=[html_info, html_info], 168 | show_progress=False, 169 | ) 170 | 171 | else: 172 | html_info_x = gr.HTML(elem_id=f'html_info_x_{tabname}') 173 | html_info = gr.HTML( 174 | elem_id=f'html_info_{tabname}', elem_classes="infotext") 175 | html_log = gr.HTML(elem_id=f'html_log_{tabname}') 176 | 177 | paste_field_names = [] 178 | if tabname == "txt2img": 179 | paste_field_names = modules.scripts.scripts_txt2img.paste_field_names 180 | elif tabname == "img2img": 181 | paste_field_names = modules.scripts.scripts_img2img.paste_field_names 182 | 183 | for paste_tabname, paste_button in buttons.items(): 184 | parameters_copypaste.register_paste_params_button(parameters_copypaste.ParamBinding( 185 | paste_button=paste_button, tabname=paste_tabname, source_tabname="txt2img" if tabname == "txt2img" else None, source_image_component=result_gallery, 186 | paste_field_names=paste_field_names 187 | )) 188 | 189 | return result_gallery, generation_info if tabname != "extras" else html_info_x, html_info, html_log 190 | --------------------------------------------------------------------------------