├── test
│   ├── __init__.py
│   ├── test_files
│   │   ├── empty.pt
│   │   ├── mask_basic.png
│   │   └── img2img_basic.png
│   ├── conftest.py
│   ├── test_utils.py
│   ├── test_extras.py
│   ├── test_img2img.py
│   └── test_txt2img.py
├── models
│   ├── VAE
│   │   └── Put VAE here.txt
│   ├── Stable-diffusion
│   │   └── Put Stable Diffusion checkpoints here.txt
│   ├── deepbooru
│   │   └── Put your deepbooru release project folder here.txt
│   ├── VAE-approx
│   │   └── model.pt
│   └── karlo
│       └── ViT-L-14_stats.th
├── modules
│   ├── sd_samplers_compvis.py
│   ├── models
│   │   └── diffusion
│   │       └── uni_pc
│   │           └── __init__.py
│   ├── Roboto-Regular.ttf
│   ├── textual_inversion
│   │   ├── test_embedding.png
│   │   ├── ui.py
│   │   ├── logging.py
│   │   └── learn_schedule.py
│   ├── import_hook.py
│   ├── sd_hijack_ip2p.py
│   ├── logging_config.py
│   ├── face_restoration.py
│   ├── restart.py
│   ├── shared_cmd_options.py
│   ├── localization.py
│   ├── script_loading.py
│   ├── sd_models_types.py
│   ├── shared_total_tqdm.py
│   ├── fifo_lock.py
│   ├── ngrok.py
│   ├── extra_networks_hypernet.py
│   ├── sd_hijack_utils.py
│   ├── paths_internal.py
│   ├── sd_hijack_checkpoint.py
│   ├── sd_hijack_xlmr.py
│   ├── gitpython_hack.py
│   ├── ui_extra_networks_hypernets.py
│   ├── hypernetworks
│   │   └── ui.py
│   ├── scripts_auto_postprocessing.py
│   ├── ui_extra_networks_textual_inversion.py
│   ├── util.py
│   ├── sd_samplers.py
│   ├── ui_extra_networks_checkpoints.py
│   ├── patches.py
│   ├── shared_init.py
│   ├── processing_scripts
│   │   └── refiner.py
│   ├── hashes.py
│   ├── shared_gradio_themes.py
│   ├── ui_gradio_extensions.py
│   ├── gradio_extensons.py
│   ├── paths.py
│   ├── ui_extra_networks_checkpoints_user_metadata.py
│   ├── shared.py
│   ├── sd_hijack_open_clip.py
│   ├── sd_unet.py
│   ├── ui_postprocessing.py
│   ├── txt2img.py
│   ├── memmon.py
│   ├── ui_tempdir.py
│   ├── timer.py
│   ├── shared_items.py
│   ├── sd_vae_approx.py
│   ├── deepbooru.py
│   ├── sd_samplers_extra.py
│   └── rng_philox.py
├── extensions
│   └── put extensions here.txt
├── localizations
│   └── Put localization files here.txt
├── textual_inversion_templates
│   ├── none.txt
│   ├── style.txt
│   ├── subject.txt
│   ├── hypernetwork.txt
│   ├── style_filewords.txt
│   └── subject_filewords.txt
├── embeddings
│   └── Place Textual Inversion embeddings here.txt
├── .eslintignore
├── .git-blame-ignore-revs
├── requirements-test.txt
├── screenshot.png
├── html
│   ├── card-no-preview.png
│   ├── extra-networks-no-cards.html
│   ├── extra-networks-card.html
│   └── footer.html
├── webui-user.bat
├── .pylintrc
├── environment-wsl2.yaml
├── package.json
├── .github
│   ├── ISSUE_TEMPLATE
│   │   ├── config.yml
│   │   ├── feature_request.yml
│   │   └── bug_report.yml
│   ├── workflows
│   │   ├── warns_merge_master.yml
│   │   ├── on_pull_request.yaml
│   │   └── run_tests.yaml
│   └── pull_request_template.md
├── CITATION.cff
├── extensions-builtin
│   ├── LDSR
│   │   ├── preload.py
│   │   └── scripts
│   │       └── ldsr_model.py
│   ├── ScuNET
│   │   └── preload.py
│   ├── SwinIR
│   │   └── preload.py
│   ├── Lora
│   │   ├── lora.py
│   │   ├── preload.py
│   │   ├── lyco_helpers.py
│   │   ├── network_ia3.py
│   │   ├── network_norm.py
│   │   ├── network_full.py
│   │   ├── network_hada.py
│   │   ├── lora_patches.py
│   │   ├── network_lokr.py
│   │   ├── extra_networks_lora.py
│   │   └── ui_extra_networks_lora.py
│   ├── mobile
│   │   └── javascript
│   │       └── mobile.js
│   ├── canvas-zoom-and-pan
│   │   ├── style.css
│   │   └── scripts
│   │       └── hotkey_config.py
│   ├── prompt-bracket-checker
│   │   └── javascript
│   │       └── prompt-bracket-checker.js
│   └── extra-options-section
│       └── scripts
│           └── extra_options_section.py
├── requirements.txt
├── javascript
│   ├── textualInversion.js
│   ├── localStorage.js
│   ├── hires_fix.js
│   ├── inputAccordion.js
│   ├── imageMaskFix.js
│   ├── generationParams.js
│   ├── notification.js
│   ├── edit-order.js
│   ├── imageviewerGamepad.js
│   ├── ui_settings_hints.js
│   ├── token-counters.js
│   └── extensions.js
├── .gitignore
├── requirements_versions.txt
├── CODEOWNERS
├── webui-macos-env.sh
├── pyproject.toml
├── scripts
│   ├── postprocessing_gfpgan.py
│   ├── postprocessing_codeformer.py
│   └── custom_code.py
├── launch.py
├── webui-user.sh
├── configs
│   ├── v1-inference.yaml
│   ├── alt-diffusion-inference.yaml
│   ├── v1-inpainting-inference.yaml
│   └── instruct-pix2pix.yaml
├── webui.bat
└── .eslintrc.js
/test/__init__.py:
--------------------------------------------------------------------------------
1 |
--------------------------------------------------------------------------------
/models/VAE/Put VAE here.txt:
--------------------------------------------------------------------------------
1 |
--------------------------------------------------------------------------------
/modules/sd_samplers_compvis.py:
--------------------------------------------------------------------------------
1 |
--------------------------------------------------------------------------------
/extensions/put extensions here.txt:
--------------------------------------------------------------------------------
1 |
--------------------------------------------------------------------------------
/localizations/Put localization files here.txt:
--------------------------------------------------------------------------------
1 |
--------------------------------------------------------------------------------
/textual_inversion_templates/none.txt:
--------------------------------------------------------------------------------
1 | picture
2 |
--------------------------------------------------------------------------------
/embeddings/Place Textual Inversion embeddings here.txt:
--------------------------------------------------------------------------------
1 |
--------------------------------------------------------------------------------
/models/Stable-diffusion/Put Stable Diffusion checkpoints here.txt:
--------------------------------------------------------------------------------
1 |
--------------------------------------------------------------------------------
/models/deepbooru/Put your deepbooru release project folder here.txt:
--------------------------------------------------------------------------------
1 |
--------------------------------------------------------------------------------
/.eslintignore:
--------------------------------------------------------------------------------
1 | extensions
2 | extensions-disabled
3 | repositories
4 | venv
--------------------------------------------------------------------------------
/.git-blame-ignore-revs:
--------------------------------------------------------------------------------
1 | # Apply ESlint
2 | 9c54b78d9dde5601e916f308d9a9d6953ec39430
--------------------------------------------------------------------------------
/requirements-test.txt:
--------------------------------------------------------------------------------
1 | pytest-base-url~=2.0
2 | pytest-cov~=4.0
3 | pytest~=7.3
4 |
--------------------------------------------------------------------------------
/screenshot.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/antfu/stable-diffusion-webui/HEAD/screenshot.png
--------------------------------------------------------------------------------
/modules/models/diffusion/uni_pc/__init__.py:
--------------------------------------------------------------------------------
1 | from .sampler import UniPCSampler # noqa: F401
2 |
--------------------------------------------------------------------------------
/html/card-no-preview.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/antfu/stable-diffusion-webui/HEAD/html/card-no-preview.png
--------------------------------------------------------------------------------
/test/test_files/empty.pt:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/antfu/stable-diffusion-webui/HEAD/test/test_files/empty.pt
--------------------------------------------------------------------------------
/models/VAE-approx/model.pt:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/antfu/stable-diffusion-webui/HEAD/models/VAE-approx/model.pt
--------------------------------------------------------------------------------
/modules/Roboto-Regular.ttf:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/antfu/stable-diffusion-webui/HEAD/modules/Roboto-Regular.ttf
--------------------------------------------------------------------------------
/models/karlo/ViT-L-14_stats.th:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/antfu/stable-diffusion-webui/HEAD/models/karlo/ViT-L-14_stats.th
--------------------------------------------------------------------------------
/test/test_files/mask_basic.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/antfu/stable-diffusion-webui/HEAD/test/test_files/mask_basic.png
--------------------------------------------------------------------------------
/test/test_files/img2img_basic.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/antfu/stable-diffusion-webui/HEAD/test/test_files/img2img_basic.png
--------------------------------------------------------------------------------
/webui-user.bat:
--------------------------------------------------------------------------------
1 | @echo off
2 |
3 | set PYTHON=
4 | set GIT=
5 | set VENV_DIR=
6 | set COMMANDLINE_ARGS=
7 |
8 | call webui.bat
9 |
--------------------------------------------------------------------------------
/.pylintrc:
--------------------------------------------------------------------------------
1 | # See https://pylint.pycqa.org/en/latest/user_guide/messages/message_control.html
2 | [MESSAGES CONTROL]
3 | disable=C,R,W,E,I
4 |
--------------------------------------------------------------------------------
/modules/textual_inversion/test_embedding.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/antfu/stable-diffusion-webui/HEAD/modules/textual_inversion/test_embedding.png
--------------------------------------------------------------------------------
/html/extra-networks-no-cards.html:
--------------------------------------------------------------------------------
1 | <div class='nocards'>
2 |     <h1>Nothing here. Add some content to the following directories:</h1>
3 |
4 |     <ul>
5 |         {dirs}
6 |     </ul>
7 |
8 | </div>
9 |
--------------------------------------------------------------------------------
/environment-wsl2.yaml:
--------------------------------------------------------------------------------
1 | name: automatic
2 | channels:
3 |   - pytorch
4 |   - defaults
5 | dependencies:
6 |   - python=3.10
7 |   - pip=23.0
8 |   - cudatoolkit=11.8
9 |   - pytorch=2.0
10 |   - torchvision=0.15
11 |   - numpy=1.23
12 |
--------------------------------------------------------------------------------
/modules/import_hook.py:
--------------------------------------------------------------------------------
1 | import sys
2 |
3 | # this will break any attempt to import xformers, which will prevent the stable diffusion repo from trying to use it
4 | if "--xformers" not in "".join(sys.argv):
5 |     sys.modules["xformers"] = None
6 |
--------------------------------------------------------------------------------
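A minimal sketch (not part of the repository) of why the hook above works: Python treats a sys.modules entry of None as a blocked import, so any later `import xformers` raises ImportError.

    import sys

    sys.modules["xformers"] = None  # poison the import cache, as import_hook.py does

    try:
        import xformers  # raises ImportError: the cached entry is None
    except ImportError as err:
        print(f"xformers import blocked: {err}")
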
/package.json:
--------------------------------------------------------------------------------
1 | {
2 | "name": "stable-diffusion-webui",
3 | "version": "0.0.0",
4 | "devDependencies": {
5 | "eslint": "^8.40.0"
6 | },
7 | "scripts": {
8 | "lint": "eslint .",
9 | "fix": "eslint --fix ."
10 | }
11 | }
12 |
--------------------------------------------------------------------------------
/.github/ISSUE_TEMPLATE/config.yml:
--------------------------------------------------------------------------------
1 | blank_issues_enabled: false
2 | contact_links:
3 |   - name: WebUI Community Support
4 |     url: https://github.com/AUTOMATIC1111/stable-diffusion-webui/discussions
5 |     about: Please ask and answer questions here.
6 |
--------------------------------------------------------------------------------
/CITATION.cff:
--------------------------------------------------------------------------------
1 | cff-version: 1.2.0
2 | message: "If you use this software, please cite it as below."
3 | authors:
4 | - given-names: AUTOMATIC1111
5 | title: "Stable Diffusion Web UI"
6 | date-released: 2022-08-22
7 | url: "https://github.com/AUTOMATIC1111/stable-diffusion-webui"
8 |
--------------------------------------------------------------------------------
/extensions-builtin/LDSR/preload.py:
--------------------------------------------------------------------------------
1 | import os
2 | from modules import paths
3 |
4 |
5 | def preload(parser):
6 | parser.add_argument("--ldsr-models-path", type=str, help="Path to directory with LDSR model file(s).", default=os.path.join(paths.models_path, 'LDSR'))
7 |
--------------------------------------------------------------------------------
/extensions-builtin/ScuNET/preload.py:
--------------------------------------------------------------------------------
1 | import os
2 | from modules import paths
3 |
4 |
5 | def preload(parser):
6 | parser.add_argument("--scunet-models-path", type=str, help="Path to directory with ScuNET model file(s).", default=os.path.join(paths.models_path, 'ScuNET'))
7 |
--------------------------------------------------------------------------------
/extensions-builtin/SwinIR/preload.py:
--------------------------------------------------------------------------------
1 | import os
2 | from modules import paths
3 |
4 |
5 | def preload(parser):
6 | parser.add_argument("--swinir-models-path", type=str, help="Path to directory with SwinIR model file(s).", default=os.path.join(paths.models_path, 'SwinIR'))
7 |
--------------------------------------------------------------------------------
/extensions-builtin/Lora/lora.py:
--------------------------------------------------------------------------------
1 | import networks
2 |
3 | list_available_loras = networks.list_available_networks
4 |
5 | available_loras = networks.available_networks
6 | available_lora_aliases = networks.available_network_aliases
7 | available_lora_hash_lookup = networks.available_network_hash_lookup
8 | forbidden_lora_aliases = networks.forbidden_network_aliases
9 | loaded_loras = networks.loaded_networks
10 |
--------------------------------------------------------------------------------
/modules/sd_hijack_ip2p.py:
--------------------------------------------------------------------------------
1 | import os.path
2 |
3 |
4 | def should_hijack_ip2p(checkpoint_info):
5 |     from modules import sd_models_config
6 |
7 |     ckpt_basename = os.path.basename(checkpoint_info.filename).lower()
8 |     cfg_basename = os.path.basename(sd_models_config.find_checkpoint_config_near_filename(checkpoint_info)).lower()
9 |
10 |     return "pix2pix" in ckpt_basename and "pix2pix" not in cfg_basename
11 |
--------------------------------------------------------------------------------
/extensions-builtin/Lora/preload.py:
--------------------------------------------------------------------------------
1 | import os
2 | from modules import paths
3 |
4 |
5 | def preload(parser):
6 |     parser.add_argument("--lora-dir", type=str, help="Path to directory with Lora networks.", default=os.path.join(paths.models_path, 'Lora'))
7 |     parser.add_argument("--lyco-dir-backcompat", type=str, help="Path to directory with LyCORIS networks (for backwards compatibility; can also use --lyco-dir).", default=os.path.join(paths.models_path, 'LyCORIS'))
8 |
--------------------------------------------------------------------------------
/html/extra-networks-card.html:
--------------------------------------------------------------------------------
1 | <div class='card' style={style} onclick={card_clicked} data-name="{name}" {sort_keys}>
2 |     {background_image}
3 |     <div class="button-row">
4 |         {metadata_button}
5 |         {edit_button}
6 |     </div>
7 |     <div class='actions'>
8 |         <div class='additional'>
9 |             <span style="display:none" class='search_term{search_only}'>{search_term}</span>
10 |         </div>
11 |         <span class='name'>{name}</span>
12 |         <span class='description'>{description}</span>
13 |     </div>
14 | </div>
15 |
--------------------------------------------------------------------------------
/modules/logging_config.py:
--------------------------------------------------------------------------------
1 | import os
2 | import logging
3 |
4 |
5 | def setup_logging(loglevel):
6 |     if loglevel is None:
7 |         loglevel = os.environ.get("SD_WEBUI_LOG_LEVEL")
8 |
9 |     if loglevel:
10 |         log_level = getattr(logging, loglevel.upper(), None) or logging.INFO
11 |         logging.basicConfig(
12 |             level=log_level,
13 |             format='%(asctime)s %(levelname)s [%(name)s] %(message)s',
14 |             datefmt='%Y-%m-%d %H:%M:%S',
15 |         )
16 |
17 |
--------------------------------------------------------------------------------
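A usage sketch (not part of the repository), assuming the module is imported from this repo layout: pass an explicit level name, or None to fall back to the SD_WEBUI_LOG_LEVEL environment variable; if neither is set, basicConfig is never called.

    import logging

    from modules.logging_config import setup_logging

    setup_logging("DEBUG")  # or setup_logging(None) to read SD_WEBUI_LOG_LEVEL
    logging.getLogger("demo").debug("visible now that basicConfig has run")
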
/requirements.txt:
--------------------------------------------------------------------------------
1 | GitPython
2 | Pillow
3 | accelerate
4 |
5 | basicsr
6 | blendmodes
7 | clean-fid
8 | einops
9 | fastapi>=0.90.1
10 | gfpgan
11 | gradio==3.41.2
12 | inflection
13 | jsonmerge
14 | kornia
15 | lark
16 | numpy
17 | omegaconf
18 | open-clip-torch
19 |
20 | piexif
21 | psutil
22 | pytorch_lightning
23 | realesrgan
24 | requests
25 | resize-right
26 |
27 | safetensors
28 | scikit-image>=0.19
29 | timm
30 | tomesd
31 | torch
32 | torchdiffeq
33 | torchsde
34 | transformers==4.30.2
35 |
--------------------------------------------------------------------------------
/javascript/textualInversion.js:
--------------------------------------------------------------------------------
1 |
2 |
3 |
4 | function start_training_textual_inversion() {
5 |     gradioApp().querySelector('#ti_error').innerHTML = '';
6 |
7 |     var id = randomId();
8 |     requestProgress(id, gradioApp().getElementById('ti_output'), gradioApp().getElementById('ti_gallery'), function() {}, function(progress) {
9 |         gradioApp().getElementById('ti_progress').innerHTML = progress.textinfo;
10 |     });
11 |
12 |     var res = Array.from(arguments);
13 |
14 |     res[0] = id;
15 |
16 |     return res;
17 | }
18 |
--------------------------------------------------------------------------------
/.github/workflows/warns_merge_master.yml:
--------------------------------------------------------------------------------
1 | name: Pull requests can't target master branch
2 |
3 | "on":
4 |   pull_request:
5 |     types:
6 |       - opened
7 |       - synchronize
8 |       - reopened
9 |     branches:
10 |       - master
11 |
12 | jobs:
13 |   check:
14 |     runs-on: ubuntu-latest
15 |     steps:
16 |       - name: Warn about merge into master
17 |         run: |
18 |           echo -e "::warning::This pull request merges directly into the \"master\" branch; normally, development happens on the \"dev\" branch."
19 |           exit 1
20 |
--------------------------------------------------------------------------------
/modules/face_restoration.py:
--------------------------------------------------------------------------------
1 | from modules import shared
2 |
3 |
4 | class FaceRestoration:
5 |     def name(self):
6 |         return "None"
7 |
8 |     def restore(self, np_image):
9 |         return np_image
10 |
11 |
12 | def restore_faces(np_image):
13 |     face_restorers = [x for x in shared.face_restorers if x.name() == shared.opts.face_restoration_model or shared.opts.face_restoration_model is None]
14 |     if len(face_restorers) == 0:
15 |         return np_image
16 |
17 |     face_restorer = face_restorers[0]
18 |
19 |     return face_restorer.restore(np_image)
20 |
--------------------------------------------------------------------------------
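A sketch (not part of the repository) of how a concrete restorer plugs into restore_faces() above: subclass FaceRestoration and register it in shared.face_restorers, after which it is selected via opts.face_restoration_model. The class name here is hypothetical.

    import numpy as np

    from modules import face_restoration, shared


    class EchoRestorer(face_restoration.FaceRestoration):  # hypothetical example
        def name(self):
            return "EchoRestorer"

        def restore(self, np_image: np.ndarray) -> np.ndarray:
            # a real implementation would detect and fix faces here
            return np_image


    shared.face_restorers.append(EchoRestorer())
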
/html/footer.html:
--------------------------------------------------------------------------------
1 | <div>
2 |     <a href="{api_docs}">API</a>
3 |      • 
4 |     <a href="https://github.com/AUTOMATIC1111/stable-diffusion-webui">Github</a>
5 |      • 
6 |     <a href="https://gradio.app">Gradio</a>
7 |      • 
8 |     <a href="#" onclick="showProfile('./internal/profile-startup'); return false;">Startup profile</a>
9 |      • 
10 |     <a href="/" onclick="javascript:gradioApp().getElementById('settings_restart_gradio').click(); return false">Reload UI</a>
11 | </div>
12 | <br />
13 | <div class="versions">
14 | {versions}
15 | </div>
16 |
--------------------------------------------------------------------------------
/.github/pull_request_template.md:
--------------------------------------------------------------------------------
1 | ## Description
2 |
3 | * a simple description of what you're trying to accomplish
4 | * a summary of changes in code
5 | * which issues it fixes, if any
6 |
7 | ## Screenshots/videos:
8 |
9 |
10 | ## Checklist:
11 |
12 | - [ ] I have read [contributing wiki page](https://github.com/AUTOMATIC1111/stable-diffusion-webui/wiki/Contributing)
13 | - [ ] I have performed a self-review of my own code
14 | - [ ] My code follows the [style guidelines](https://github.com/AUTOMATIC1111/stable-diffusion-webui/wiki/Contributing#code-style)
15 | - [ ] My code passes [tests](https://github.com/AUTOMATIC1111/stable-diffusion-webui/wiki/Tests)
16 |
--------------------------------------------------------------------------------
/javascript/localStorage.js:
--------------------------------------------------------------------------------
1 |
2 | function localSet(k, v) {
3 |     try {
4 |         localStorage.setItem(k, v);
5 |     } catch (e) {
6 |         console.warn(`Failed to save ${k} to localStorage: ${e}`);
7 |     }
8 | }
9 |
10 | function localGet(k, def) {
11 |     try {
12 |         return localStorage.getItem(k);
13 |     } catch (e) {
14 |         console.warn(`Failed to load ${k} from localStorage: ${e}`);
15 |     }
16 |
17 |     return def;
18 | }
19 |
20 | function localRemove(k) {
21 |     try {
22 |         return localStorage.removeItem(k);
23 |     } catch (e) {
24 |         console.warn(`Failed to remove ${k} from localStorage: ${e}`);
25 |     }
26 | }
27 |
--------------------------------------------------------------------------------
/.gitignore:
--------------------------------------------------------------------------------
1 | __pycache__
2 | *.ckpt
3 | *.safetensors
4 | *.pth
5 | /ESRGAN/*
6 | /SwinIR/*
7 | /repositories
8 | /venv
9 | /tmp
10 | /model.ckpt
11 | /models/**/*
12 | /GFPGANv1.3.pth
13 | /gfpgan/weights/*.pth
14 | /ui-config.json
15 | /outputs
16 | /config.json
17 | /log
18 | /webui.settings.bat
19 | /embeddings
20 | /styles.csv
21 | /params.txt
22 | /styles.csv.bak
23 | /webui-user.bat
24 | /webui-user.sh
25 | /interrogate
26 | /user.css
27 | /.idea
28 | notification.mp3
29 | /SwinIR
30 | /textual_inversion
31 | .vscode
32 | /extensions
33 | /test/stdout.txt
34 | /test/stderr.txt
35 | /cache.json*
36 | /config_states/
37 | /node_modules
38 | /package-lock.json
39 | /.coverage*
40 |
--------------------------------------------------------------------------------
/modules/restart.py:
--------------------------------------------------------------------------------
1 | import os
2 | from pathlib import Path
3 |
4 | from modules.paths_internal import script_path
5 |
6 |
7 | def is_restartable() -> bool:
8 | """
9 | Return True if the webui is restartable (i.e. there is something watching to restart it with)
10 | """
11 | return bool(os.environ.get('SD_WEBUI_RESTART'))
12 |
13 |
14 | def restart_program() -> None:
15 | """creates file tmp/restart and immediately stops the process, which webui.bat/webui.sh interpret as a command to start webui again"""
16 |
17 | (Path(script_path) / "tmp" / "restart").touch()
18 |
19 | stop_program()
20 |
21 |
22 | def stop_program() -> None:
23 | os._exit(0)
24 |
--------------------------------------------------------------------------------
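An illustrative Python rendering of the relaunch loop that webui.sh/webui.bat implement around the process (assumption: the real wrappers are shell/batch scripts; this only mirrors the protocol described in restart_program() above):

    import os
    import subprocess

    env = {**os.environ, "SD_WEBUI_RESTART": "1"}  # makes is_restartable() return True
    while True:
        subprocess.run(["python", "launch.py"], env=env)
        if not os.path.exists("tmp/restart"):
            break                     # normal exit: stop the loop
        os.remove("tmp/restart")      # restart_program() touched the marker: relaunch
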
/requirements_versions.txt:
--------------------------------------------------------------------------------
1 | GitPython==3.1.32
2 | Pillow==9.5.0
3 | accelerate==0.21.0
4 | basicsr==1.4.2
5 | blendmodes==2022
6 | clean-fid==0.1.35
7 | einops==0.4.1
8 | fastapi==0.94.0
9 | gfpgan==1.3.8
10 | gradio==3.41.2
11 | httpcore==0.15
12 | inflection==0.5.1
13 | jsonmerge==1.8.0
14 | kornia==0.6.7
15 | lark==1.1.2
16 | numpy==1.23.5
17 | omegaconf==2.2.3
18 | open-clip-torch==2.20.0
19 | piexif==1.1.3
20 | psutil==5.9.5
21 | pytorch_lightning==1.9.4
22 | realesrgan==0.3.0
23 | resize-right==0.0.2
24 | safetensors==0.3.1
25 | scikit-image==0.21.0
26 | timm==0.9.2
27 | tomesd==0.1.3
28 | torch
29 | torchdiffeq==0.2.3
30 | torchsde==0.2.5
31 | transformers==4.30.2
32 |
--------------------------------------------------------------------------------
/textual_inversion_templates/style.txt:
--------------------------------------------------------------------------------
1 | a painting, art by [name]
2 | a rendering, art by [name]
3 | a cropped painting, art by [name]
4 | the painting, art by [name]
5 | a clean painting, art by [name]
6 | a dirty painting, art by [name]
7 | a dark painting, art by [name]
8 | a picture, art by [name]
9 | a cool painting, art by [name]
10 | a close-up painting, art by [name]
11 | a bright painting, art by [name]
12 | a cropped painting, art by [name]
13 | a good painting, art by [name]
14 | a close-up painting, art by [name]
15 | a rendition, art by [name]
16 | a nice painting, art by [name]
17 | a small painting, art by [name]
18 | a weird painting, art by [name]
19 | a large painting, art by [name]
20 |
--------------------------------------------------------------------------------
/CODEOWNERS:
--------------------------------------------------------------------------------
1 | * @AUTOMATIC1111
2 |
3 | # if you were managing a localization and were removed from this file, this is because
4 | # the intended way to do localizations now is via extensions. See:
5 | # https://github.com/AUTOMATIC1111/stable-diffusion-webui/wiki/Developing-extensions
6 | # Make a repo with your localization; since you are still listed as a collaborator,
7 | # you can add it to the wiki page yourself. This change is because some people complained
8 | # that the git commit log is cluttered with things unrelated to almost everyone, and
9 | # because I believe this is best overall for the project: localizations are handled almost
10 | # entirely without my oversight.
11 |
12 |
13 |
--------------------------------------------------------------------------------
/webui-macos-env.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 | ####################################################################
3 | # macOS defaults #
4 | # Please modify webui-user.sh to change these instead of this file #
5 | ####################################################################
6 |
7 | if [[ -x "$(command -v python3.10)" ]]
8 | then
9 |     python_cmd="python3.10"
10 | fi
11 |
12 | export install_dir="$HOME"
13 | export COMMANDLINE_ARGS="--skip-torch-cuda-test --upcast-sampling --no-half-vae --use-cpu interrogate"
14 | export TORCH_COMMAND="pip install torch==2.0.1 torchvision==0.15.2"
15 | export PYTORCH_ENABLE_MPS_FALLBACK=1
16 |
17 | ####################################################################
18 |
--------------------------------------------------------------------------------
/extensions-builtin/Lora/lyco_helpers.py:
--------------------------------------------------------------------------------
1 | import torch
2 |
3 |
4 | def make_weight_cp(t, wa, wb):
5 |     temp = torch.einsum('i j k l, j r -> i r k l', t, wb)
6 |     return torch.einsum('i j k l, i r -> r j k l', temp, wa)
7 |
8 |
9 | def rebuild_conventional(up, down, shape, dyn_dim=None):
10 |     up = up.reshape(up.size(0), -1)
11 |     down = down.reshape(down.size(0), -1)
12 |     if dyn_dim is not None:
13 |         up = up[:, :dyn_dim]
14 |         down = down[:dyn_dim, :]
15 |     return (up @ down).reshape(shape)
16 |
17 |
18 | def rebuild_cp_decomposition(up, down, mid):
19 |     up = up.reshape(up.size(0), -1)
20 |     down = down.reshape(down.size(0), -1)
21 |     return torch.einsum('n m k l, i n, m j -> i j k l', mid, up, down)
22 |
--------------------------------------------------------------------------------
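A shape-checking sketch (not part of the repository; import path assumed) of rebuild_conventional(): the low-rank factors multiply out to a full weight delta, and dyn_dim truncates the rank before multiplying.

    import torch
    from lyco_helpers import rebuild_conventional  # import path assumed

    up = torch.randn(64, 8)    # (out_features, rank)
    down = torch.randn(8, 32)  # (rank, in_features)

    delta = rebuild_conventional(up, down, (64, 32))
    assert delta.shape == (64, 32)  # up @ down, reshaped to the target weight shape

    delta4 = rebuild_conventional(up, down, (64, 32), dyn_dim=4)  # keep only 4 of 8 rank components
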
/test/conftest.py:
--------------------------------------------------------------------------------
1 | import os
2 |
3 | import pytest
4 | import base64
5 |
6 |
7 | test_files_path = os.path.dirname(__file__) + "/test_files"
8 |
9 |
10 | def file_to_base64(filename):
11 |     with open(filename, "rb") as file:
12 |         data = file.read()
13 |
14 |     base64_str = str(base64.b64encode(data), "utf-8")
15 |     return "data:image/png;base64," + base64_str
16 |
17 |
18 | @pytest.fixture(scope="session") # session so we don't read this over and over
19 | def img2img_basic_image_base64() -> str:
20 |     return file_to_base64(os.path.join(test_files_path, "img2img_basic.png"))
21 |
22 |
23 | @pytest.fixture(scope="session") # session so we don't read this over and over
24 | def mask_basic_image_base64() -> str:
25 |     return file_to_base64(os.path.join(test_files_path, "mask_basic.png"))
26 |
--------------------------------------------------------------------------------
/textual_inversion_templates/subject.txt:
--------------------------------------------------------------------------------
1 | a photo of a [name]
2 | a rendering of a [name]
3 | a cropped photo of the [name]
4 | the photo of a [name]
5 | a photo of a clean [name]
6 | a photo of a dirty [name]
7 | a dark photo of the [name]
8 | a photo of my [name]
9 | a photo of the cool [name]
10 | a close-up photo of a [name]
11 | a bright photo of the [name]
12 | a cropped photo of a [name]
13 | a photo of the [name]
14 | a good photo of the [name]
15 | a photo of one [name]
16 | a close-up photo of the [name]
17 | a rendition of the [name]
18 | a photo of the clean [name]
19 | a rendition of a [name]
20 | a photo of a nice [name]
21 | a good photo of a [name]
22 | a photo of the nice [name]
23 | a photo of the small [name]
24 | a photo of the weird [name]
25 | a photo of the large [name]
26 | a photo of a cool [name]
27 | a photo of a small [name]
28 |
--------------------------------------------------------------------------------
/modules/shared_cmd_options.py:
--------------------------------------------------------------------------------
1 | import os
2 |
3 | import launch
4 | from modules import cmd_args, script_loading
5 | from modules.paths_internal import models_path, script_path, data_path, sd_configs_path, sd_default_config, sd_model_file, default_sd_model_file, extensions_dir, extensions_builtin_dir # noqa: F401
6 |
7 | parser = cmd_args.parser
8 |
9 | script_loading.preload_extensions(extensions_dir, parser, extension_list=launch.list_extensions(launch.args.ui_settings_file))
10 | script_loading.preload_extensions(extensions_builtin_dir, parser)
11 |
12 | if os.environ.get('IGNORE_CMD_ARGS_ERRORS', None) is None:
13 |     cmd_opts = parser.parse_args()
14 | else:
15 |     cmd_opts, _ = parser.parse_known_args()
16 |
17 |
18 | cmd_opts.disable_extension_access = any([cmd_opts.share, cmd_opts.listen, cmd_opts.ngrok, cmd_opts.server_name]) and not cmd_opts.enable_insecure_extension_access
19 |
--------------------------------------------------------------------------------
/pyproject.toml:
--------------------------------------------------------------------------------
1 | [tool.ruff]
2 |
3 | target-version = "py39"
4 |
5 | extend-select = [
6 |     "B",
7 |     "C",
8 |     "I",
9 |     "W",
10 | ]
11 |
12 | exclude = [
13 |     "extensions",
14 |     "extensions-disabled",
15 | ]
16 |
17 | ignore = [
18 |     "E501", # Line too long
19 |     "E731", # Do not assign a `lambda` expression, use a `def`
20 |
21 |     "I001", # Import block is un-sorted or un-formatted
22 |     "C901", # Function is too complex
23 |     "C408", # Rewrite as a literal
24 |     "W605", # invalid escape sequence, messes with some docstrings
25 | ]
26 |
27 | [tool.ruff.per-file-ignores]
28 | "webui.py" = ["E402"] # Module level import not at top of file
29 |
30 | [tool.ruff.flake8-bugbear]
31 | # Allow default arguments like, e.g., `data: List[str] = fastapi.Query(None)`.
32 | extend-immutable-calls = ["fastapi.Depends", "fastapi.security.HTTPBasic"]
33 |
34 | [tool.pytest.ini_options]
35 | base_url = "http://127.0.0.1:7860"
36 |
--------------------------------------------------------------------------------
/javascript/hires_fix.js:
--------------------------------------------------------------------------------
1 |
2 | function onCalcResolutionHires(enable, width, height, hr_scale, hr_resize_x, hr_resize_y) {
3 |     function setInactive(elem, inactive) {
4 |         elem.classList.toggle('inactive', !!inactive);
5 |     }
6 |
7 |     var hrUpscaleBy = gradioApp().getElementById('txt2img_hr_scale');
8 |     var hrResizeX = gradioApp().getElementById('txt2img_hr_resize_x');
9 |     var hrResizeY = gradioApp().getElementById('txt2img_hr_resize_y');
10 |
11 |     gradioApp().getElementById('txt2img_hires_fix_row2').style.display = opts.use_old_hires_fix_width_height ? "none" : "";
12 |
13 |     setInactive(hrUpscaleBy, opts.use_old_hires_fix_width_height || hr_resize_x > 0 || hr_resize_y > 0);
14 |     setInactive(hrResizeX, opts.use_old_hires_fix_width_height || hr_resize_x == 0);
15 |     setInactive(hrResizeY, opts.use_old_hires_fix_width_height || hr_resize_y == 0);
16 |
17 |     return [enable, width, height, hr_scale, hr_resize_x, hr_resize_y];
18 | }
19 |
--------------------------------------------------------------------------------
/textual_inversion_templates/hypernetwork.txt:
--------------------------------------------------------------------------------
1 | a photo of a [filewords]
2 | a rendering of a [filewords]
3 | a cropped photo of the [filewords]
4 | the photo of a [filewords]
5 | a photo of a clean [filewords]
6 | a photo of a dirty [filewords]
7 | a dark photo of the [filewords]
8 | a photo of my [filewords]
9 | a photo of the cool [filewords]
10 | a close-up photo of a [filewords]
11 | a bright photo of the [filewords]
12 | a cropped photo of a [filewords]
13 | a photo of the [filewords]
14 | a good photo of the [filewords]
15 | a photo of one [filewords]
16 | a close-up photo of the [filewords]
17 | a rendition of the [filewords]
18 | a photo of the clean [filewords]
19 | a rendition of a [filewords]
20 | a photo of a nice [filewords]
21 | a good photo of a [filewords]
22 | a photo of the nice [filewords]
23 | a photo of the small [filewords]
24 | a photo of the weird [filewords]
25 | a photo of the large [filewords]
26 | a photo of a cool [filewords]
27 | a photo of a small [filewords]
28 |
--------------------------------------------------------------------------------
/textual_inversion_templates/style_filewords.txt:
--------------------------------------------------------------------------------
1 | a painting of [filewords], art by [name]
2 | a rendering of [filewords], art by [name]
3 | a cropped painting of [filewords], art by [name]
4 | the painting of [filewords], art by [name]
5 | a clean painting of [filewords], art by [name]
6 | a dirty painting of [filewords], art by [name]
7 | a dark painting of [filewords], art by [name]
8 | a picture of [filewords], art by [name]
9 | a cool painting of [filewords], art by [name]
10 | a close-up painting of [filewords], art by [name]
11 | a bright painting of [filewords], art by [name]
12 | a cropped painting of [filewords], art by [name]
13 | a good painting of [filewords], art by [name]
14 | a close-up painting of [filewords], art by [name]
15 | a rendition of [filewords], art by [name]
16 | a nice painting of [filewords], art by [name]
17 | a small painting of [filewords], art by [name]
18 | a weird painting of [filewords], art by [name]
19 | a large painting of [filewords], art by [name]
20 |
--------------------------------------------------------------------------------
/test/test_utils.py:
--------------------------------------------------------------------------------
1 | import pytest
2 | import requests
3 |
4 |
5 | def test_options_write(base_url):
6 |     url_options = f"{base_url}/sdapi/v1/options"
7 |     response = requests.get(url_options)
8 |     assert response.status_code == 200
9 |
10 |     pre_value = response.json()["send_seed"]
11 |
12 |     assert requests.post(url_options, json={'send_seed': (not pre_value)}).status_code == 200
13 |
14 |     response = requests.get(url_options)
15 |     assert response.status_code == 200
16 |     assert response.json()['send_seed'] == (not pre_value)
17 |
18 |     requests.post(url_options, json={"send_seed": pre_value})
19 |
20 |
21 | @pytest.mark.parametrize("url", [
22 |     "sdapi/v1/cmd-flags",
23 |     "sdapi/v1/samplers",
24 |     "sdapi/v1/upscalers",
25 |     "sdapi/v1/sd-models",
26 |     "sdapi/v1/hypernetworks",
27 |     "sdapi/v1/face-restorers",
28 |     "sdapi/v1/realesrgan-models",
29 |     "sdapi/v1/prompt-styles",
30 |     "sdapi/v1/embeddings",
31 | ])
32 | def test_get_api_url(base_url, url):
33 |     assert requests.get(f"{base_url}/{url}").status_code == 200
34 |
--------------------------------------------------------------------------------
/extensions-builtin/Lora/network_ia3.py:
--------------------------------------------------------------------------------
1 | import network
2 |
3 |
4 | class ModuleTypeIa3(network.ModuleType):
5 |     def create_module(self, net: network.Network, weights: network.NetworkWeights):
6 |         if all(x in weights.w for x in ["weight"]):
7 |             return NetworkModuleIa3(net, weights)
8 |
9 |         return None
10 |
11 |
12 | class NetworkModuleIa3(network.NetworkModule):
13 |     def __init__(self, net: network.Network, weights: network.NetworkWeights):
14 |         super().__init__(net, weights)
15 |
16 |         self.w = weights.w["weight"]
17 |         self.on_input = weights.w["on_input"].item()
18 |
19 |     def calc_updown(self, orig_weight):
20 |         w = self.w.to(orig_weight.device, dtype=orig_weight.dtype)
21 |
22 |         output_shape = [w.size(0), orig_weight.size(1)]
23 |         if self.on_input:
24 |             output_shape.reverse()
25 |         else:
26 |             w = w.reshape(-1, 1)
27 |
28 |         updown = orig_weight * w
29 |
30 |         return self.finalize_updown(updown, orig_weight, output_shape)
31 |
--------------------------------------------------------------------------------
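A numeric sketch (not part of the repository) of the calc_updown() branch above for on_input == False: the learned vector w rescales the original weight per output channel.

    import torch

    orig_weight = torch.ones(4, 3)
    w = torch.tensor([0.5, 1.0, 2.0, 4.0]).reshape(-1, 1)  # w.reshape(-1, 1), as in the module

    updown = orig_weight * w   # row i of the weight is scaled by w[i]
    print(updown[:, 0])        # tensor([0.5000, 1.0000, 2.0000, 4.0000])
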
/extensions-builtin/Lora/network_norm.py:
--------------------------------------------------------------------------------
1 | import network
2 |
3 |
4 | class ModuleTypeNorm(network.ModuleType):
5 |     def create_module(self, net: network.Network, weights: network.NetworkWeights):
6 |         if all(x in weights.w for x in ["w_norm", "b_norm"]):
7 |             return NetworkModuleNorm(net, weights)
8 |
9 |         return None
10 |
11 |
12 | class NetworkModuleNorm(network.NetworkModule):
13 |     def __init__(self, net: network.Network, weights: network.NetworkWeights):
14 |         super().__init__(net, weights)
15 |
16 |         self.w_norm = weights.w.get("w_norm")
17 |         self.b_norm = weights.w.get("b_norm")
18 |
19 |     def calc_updown(self, orig_weight):
20 |         output_shape = self.w_norm.shape
21 |         updown = self.w_norm.to(orig_weight.device, dtype=orig_weight.dtype)
22 |
23 |         if self.b_norm is not None:
24 |             ex_bias = self.b_norm.to(orig_weight.device, dtype=orig_weight.dtype)
25 |         else:
26 |             ex_bias = None
27 |
28 |         return self.finalize_updown(updown, orig_weight, output_shape, ex_bias)
29 |
--------------------------------------------------------------------------------
/extensions-builtin/Lora/network_full.py:
--------------------------------------------------------------------------------
1 | import network
2 |
3 |
4 | class ModuleTypeFull(network.ModuleType):
5 |     def create_module(self, net: network.Network, weights: network.NetworkWeights):
6 |         if all(x in weights.w for x in ["diff"]):
7 |             return NetworkModuleFull(net, weights)
8 |
9 |         return None
10 |
11 |
12 | class NetworkModuleFull(network.NetworkModule):
13 |     def __init__(self, net: network.Network, weights: network.NetworkWeights):
14 |         super().__init__(net, weights)
15 |
16 |         self.weight = weights.w.get("diff")
17 |         self.ex_bias = weights.w.get("diff_b")
18 |
19 |     def calc_updown(self, orig_weight):
20 |         output_shape = self.weight.shape
21 |         updown = self.weight.to(orig_weight.device, dtype=orig_weight.dtype)
22 |         if self.ex_bias is not None:
23 |             ex_bias = self.ex_bias.to(orig_weight.device, dtype=orig_weight.dtype)
24 |         else:
25 |             ex_bias = None
26 |
27 |         return self.finalize_updown(updown, orig_weight, output_shape, ex_bias)
28 |
--------------------------------------------------------------------------------
/modules/localization.py:
--------------------------------------------------------------------------------
1 | import json
2 | import os
3 |
4 | from modules import errors, scripts
5 |
6 | localizations = {}
7 |
8 |
9 | def list_localizations(dirname):
10 |     localizations.clear()
11 |
12 |     for file in os.listdir(dirname):
13 |         fn, ext = os.path.splitext(file)
14 |         if ext.lower() != ".json":
15 |             continue
16 |
17 |         localizations[fn] = os.path.join(dirname, file)
18 |
19 |     for file in scripts.list_scripts("localizations", ".json"):
20 |         fn, ext = os.path.splitext(file.filename)
21 |         localizations[fn] = file.path
22 |
23 |
24 | def localization_js(current_localization_name: str) -> str:
25 |     fn = localizations.get(current_localization_name, None)
26 |     data = {}
27 |     if fn is not None:
28 |         try:
29 |             with open(fn, "r", encoding="utf8") as file:
30 |                 data = json.load(file)
31 |         except Exception:
32 |             errors.report(f"Error loading localization from {fn}", exc_info=True)
33 |
34 |     return f"window.localization = {json.dumps(data)}"
35 |
--------------------------------------------------------------------------------
/modules/script_loading.py:
--------------------------------------------------------------------------------
1 | import os
2 | import importlib.util
3 |
4 | from modules import errors
5 |
6 |
7 | def load_module(path):
8 |     module_spec = importlib.util.spec_from_file_location(os.path.basename(path), path)
9 |     module = importlib.util.module_from_spec(module_spec)
10 |     module_spec.loader.exec_module(module)
11 |
12 |     return module
13 |
14 |
15 | def preload_extensions(extensions_dir, parser, extension_list=None):
16 |     if not os.path.isdir(extensions_dir):
17 |         return
18 |
19 |     extensions = extension_list if extension_list is not None else os.listdir(extensions_dir)
20 |     for dirname in sorted(extensions):
21 |         preload_script = os.path.join(extensions_dir, dirname, "preload.py")
22 |         if not os.path.isfile(preload_script):
23 |             continue
24 |
25 |         try:
26 |             module = load_module(preload_script)
27 |             if hasattr(module, 'preload'):
28 |                 module.preload(parser)
29 |
30 |         except Exception:
31 |             errors.report(f"Error running preload() for {preload_script}", exc_info=True)
32 |
--------------------------------------------------------------------------------
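For reference, the contract preload_extensions() expects: an extension directory may contain a preload.py exposing preload(parser), mirroring the builtin examples earlier in this dump. A hypothetical extension's file might look like:

    import os
    from modules import paths


    def preload(parser):
        # "--my-models-path" is a made-up flag for illustration
        parser.add_argument("--my-models-path", type=str, default=os.path.join(paths.models_path, 'MyModels'), help="Path to directory with this extension's model file(s).")
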
/extensions-builtin/mobile/javascript/mobile.js:
--------------------------------------------------------------------------------
1 | var isSetupForMobile = false;
2 |
3 | function isMobile() {
4 |     for (var tab of ["txt2img", "img2img"]) {
5 |         var imageTab = gradioApp().getElementById(tab + '_results');
6 |         if (imageTab && imageTab.offsetParent && imageTab.offsetLeft == 0) {
7 |             return true;
8 |         }
9 |     }
10 |
11 |     return false;
12 | }
13 |
14 | function reportWindowSize() {
15 |     var currentlyMobile = isMobile();
16 |     if (currentlyMobile == isSetupForMobile) return;
17 |     isSetupForMobile = currentlyMobile;
18 |
19 |     for (var tab of ["txt2img", "img2img"]) {
20 |         var button = gradioApp().getElementById(tab + '_generate_box');
21 |         var target = gradioApp().getElementById(currentlyMobile ? tab + '_results' : tab + '_actions_column');
22 |         target.insertBefore(button, target.firstElementChild);
23 |
24 |         gradioApp().getElementById(tab + '_results').classList.toggle('mobile', currentlyMobile);
25 |     }
26 | }
27 |
28 | window.addEventListener("resize", reportWindowSize);
29 |
30 | onUiLoaded(function() {
31 |     reportWindowSize();
32 | });
33 |
--------------------------------------------------------------------------------
/modules/sd_models_types.py:
--------------------------------------------------------------------------------
1 | from ldm.models.diffusion.ddpm import LatentDiffusion
2 | from typing import TYPE_CHECKING
3 |
4 |
5 | if TYPE_CHECKING:
6 | from modules.sd_models import CheckpointInfo
7 |
8 |
9 | class WebuiSdModel(LatentDiffusion):
10 | """This class is not actually instantinated, but its fields are created and fieeld by webui"""
11 |
12 | lowvram: bool
13 | """True if lowvram/medvram optimizations are enabled -- see modules.lowvram for more info"""
14 |
15 | sd_model_hash: str
16 | """short hash, 10 first characters of SHA1 hash of the model file; may be None if --no-hashing flag is used"""
17 |
18 | sd_model_checkpoint: str
19 | """path to the file on disk that model weights were obtained from"""
20 |
21 | sd_checkpoint_info: 'CheckpointInfo'
22 | """structure with additional information about the file with model's weights"""
23 |
24 | is_sdxl: bool
25 | """True if the model's architecture is SDXL"""
26 |
27 | is_sd2: bool
28 | """True if the model's architecture is SD 2.x"""
29 |
30 | is_sd1: bool
31 | """True if the model's architecture is SD 1.x"""
32 |
--------------------------------------------------------------------------------
/modules/shared_total_tqdm.py:
--------------------------------------------------------------------------------
1 | import tqdm
2 |
3 | from modules import shared
4 |
5 |
6 | class TotalTQDM:
7 |     def __init__(self):
8 |         self._tqdm = None
9 |
10 |     def reset(self):
11 |         self._tqdm = tqdm.tqdm(
12 |             desc="Total progress",
13 |             total=shared.state.job_count * shared.state.sampling_steps,
14 |             position=1,
15 |             file=shared.progress_print_out
16 |         )
17 |
18 |     def update(self):
19 |         if not shared.opts.multiple_tqdm or shared.cmd_opts.disable_console_progressbars:
20 |             return
21 |         if self._tqdm is None:
22 |             self.reset()
23 |         self._tqdm.update()
24 |
25 |     def updateTotal(self, new_total):
26 |         if not shared.opts.multiple_tqdm or shared.cmd_opts.disable_console_progressbars:
27 |             return
28 |         if self._tqdm is None:
29 |             self.reset()
30 |         self._tqdm.total = new_total
31 |
32 |     def clear(self):
33 |         if self._tqdm is not None:
34 |             self._tqdm.refresh()
35 |             self._tqdm.close()
36 |             self._tqdm = None
37 |
38 |
--------------------------------------------------------------------------------
/modules/fifo_lock.py:
--------------------------------------------------------------------------------
1 | import threading
2 | import collections
3 |
4 |
5 | # reference: https://gist.github.com/vitaliyp/6d54dd76ca2c3cdfc1149d33007dc34a
6 | class FIFOLock(object):
7 |     def __init__(self):
8 |         self._lock = threading.Lock()
9 |         self._inner_lock = threading.Lock()
10 |         self._pending_threads = collections.deque()
11 |
12 |     def acquire(self, blocking=True):
13 |         with self._inner_lock:
14 |             lock_acquired = self._lock.acquire(False)
15 |             if lock_acquired:
16 |                 return True
17 |             elif not blocking:
18 |                 return False
19 |
20 |             release_event = threading.Event()
21 |             self._pending_threads.append(release_event)
22 |
23 |         release_event.wait()
24 |         return self._lock.acquire()
25 |
26 |     def release(self):
27 |         with self._inner_lock:
28 |             if self._pending_threads:
29 |                 release_event = self._pending_threads.popleft()
30 |                 release_event.set()
31 |
32 |             self._lock.release()
33 |
34 |     __enter__ = acquire
35 |
36 |     def __exit__(self, t, v, tb):
37 |         self.release()
38 |
--------------------------------------------------------------------------------
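A usage sketch (not part of the repository; import path assumed): FIFOLock works as a context manager via __enter__/__exit__, and waiters that block are granted the lock in arrival order.

    import threading
    from modules.fifo_lock import FIFOLock  # import path assumed

    lock = FIFOLock()
    order = []

    def worker(i):
        with lock:  # __enter__ is acquire()
            order.append(i)

    threads = [threading.Thread(target=worker, args=(i,)) for i in range(4)]
    for t in threads:
        t.start()
    for t in threads:
        t.join()
    print(order)  # blocked waiters are released first-in, first-out
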
/textual_inversion_templates/subject_filewords.txt:
--------------------------------------------------------------------------------
1 | a photo of a [name], [filewords]
2 | a rendering of a [name], [filewords]
3 | a cropped photo of the [name], [filewords]
4 | the photo of a [name], [filewords]
5 | a photo of a clean [name], [filewords]
6 | a photo of a dirty [name], [filewords]
7 | a dark photo of the [name], [filewords]
8 | a photo of my [name], [filewords]
9 | a photo of the cool [name], [filewords]
10 | a close-up photo of a [name], [filewords]
11 | a bright photo of the [name], [filewords]
12 | a cropped photo of a [name], [filewords]
13 | a photo of the [name], [filewords]
14 | a good photo of the [name], [filewords]
15 | a photo of one [name], [filewords]
16 | a close-up photo of the [name], [filewords]
17 | a rendition of the [name], [filewords]
18 | a photo of the clean [name], [filewords]
19 | a rendition of a [name], [filewords]
20 | a photo of a nice [name], [filewords]
21 | a good photo of a [name], [filewords]
22 | a photo of the nice [name], [filewords]
23 | a photo of the small [name], [filewords]
24 | a photo of the weird [name], [filewords]
25 | a photo of the large [name], [filewords]
26 | a photo of a cool [name], [filewords]
27 | a photo of a small [name], [filewords]
28 |
--------------------------------------------------------------------------------
/scripts/postprocessing_gfpgan.py:
--------------------------------------------------------------------------------
1 | from PIL import Image
2 | import numpy as np
3 |
4 | from modules import scripts_postprocessing, gfpgan_model
5 | import gradio as gr
6 |
7 | from modules.ui_components import FormRow
8 |
9 |
10 | class ScriptPostprocessingGfpGan(scripts_postprocessing.ScriptPostprocessing):
11 |     name = "GFPGAN"
12 |     order = 2000
13 |
14 |     def ui(self):
15 |         with FormRow():
16 |             gfpgan_visibility = gr.Slider(minimum=0.0, maximum=1.0, step=0.001, label="GFPGAN visibility", value=0, elem_id="extras_gfpgan_visibility")
17 |
18 |         return {
19 |             "gfpgan_visibility": gfpgan_visibility,
20 |         }
21 |
22 |     def process(self, pp: scripts_postprocessing.PostprocessedImage, gfpgan_visibility):
23 |         if gfpgan_visibility == 0:
24 |             return
25 |
26 |         restored_img = gfpgan_model.gfpgan_fix_faces(np.array(pp.image, dtype=np.uint8))
27 |         res = Image.fromarray(restored_img)
28 |
29 |         if gfpgan_visibility < 1.0:
30 |             res = Image.blend(pp.image, res, gfpgan_visibility)
31 |
32 |         pp.image = res
33 |         pp.info["GFPGAN visibility"] = round(gfpgan_visibility, 3)
34 |
--------------------------------------------------------------------------------
/modules/ngrok.py:
--------------------------------------------------------------------------------
1 | import ngrok
2 |
3 | # Connect to ngrok for ingress
4 | def connect(token, port, options):
5 |     account = None
6 |     if token is None:
7 |         token = 'None'
8 |     else:
9 |         if ':' in token:
10 |             # token = authtoken:username:password
11 |             token, username, password = token.split(':', 2)
12 |             account = f"{username}:{password}"
13 |
14 |     # For all options see: https://github.com/ngrok/ngrok-py/blob/main/examples/ngrok-connect-full.py
15 |     if not options.get('authtoken_from_env'):
16 |         options['authtoken'] = token
17 |     if account:
18 |         options['basic_auth'] = account
19 |     if not options.get('session_metadata'):
20 |         options['session_metadata'] = 'stable-diffusion-webui'
21 |
22 |
23 |     try:
24 |         public_url = ngrok.connect(f"127.0.0.1:{port}", **options).url()
25 |     except Exception as e:
26 |         print(f'Invalid ngrok authtoken? ngrok connection aborted due to: {e}\n'
27 |               f'Your token: {token}, get the right one on https://dashboard.ngrok.com/get-started/your-authtoken')
28 |     else:
29 |         print(f'ngrok connected to localhost:{port}! URL: {public_url}\n'
30 |               'You can use this link after the launch is complete.')
31 |
--------------------------------------------------------------------------------
/modules/extra_networks_hypernet.py:
--------------------------------------------------------------------------------
1 | from modules import extra_networks, shared
2 | from modules.hypernetworks import hypernetwork
3 |
4 |
5 | class ExtraNetworkHypernet(extra_networks.ExtraNetwork):
6 |     def __init__(self):
7 |         super().__init__('hypernet')
8 |
9 |     def activate(self, p, params_list):
10 |         additional = shared.opts.sd_hypernetwork
11 |
12 |         if additional != "None" and additional in shared.hypernetworks and not any(x for x in params_list if x.items[0] == additional):
13 |             hypernet_prompt_text = f"<hypernet:{additional}:{shared.opts.extra_networks_default_multiplier}>"
14 |             p.all_prompts = [f"{prompt}{hypernet_prompt_text}" for prompt in p.all_prompts]
15 |             params_list.append(extra_networks.ExtraNetworkParams(items=[additional, shared.opts.extra_networks_default_multiplier]))
16 |
17 |         names = []
18 |         multipliers = []
19 |         for params in params_list:
20 |             assert params.items
21 |
22 |             names.append(params.items[0])
23 |             multipliers.append(float(params.items[1]) if len(params.items) > 1 else 1.0)
24 |
25 |         hypernetwork.load_hypernetworks(names, multipliers)
26 |
27 |     def deactivate(self, p):
28 |         pass
29 |
--------------------------------------------------------------------------------
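For reference, activate() is driven by the extra-network prompt syntax <hypernet:name:multiplier>; a sketch (network name hypothetical) of the params_list entry such a prompt fragment produces:

    from modules import extra_networks

    # what the prompt fragment "<hypernet:mynet:0.8>" is parsed into:
    params = extra_networks.ExtraNetworkParams(items=["mynet", "0.8"])
    print(params.items[0], float(params.items[1]))  # mynet 0.8
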
/test/test_extras.py:
--------------------------------------------------------------------------------
1 | import requests
2 |
3 |
4 | def test_simple_upscaling_performed(base_url, img2img_basic_image_base64):
5 |     payload = {
6 |         "resize_mode": 0,
7 |         "show_extras_results": True,
8 |         "gfpgan_visibility": 0,
9 |         "codeformer_visibility": 0,
10 |         "codeformer_weight": 0,
11 |         "upscaling_resize": 2,
12 |         "upscaling_resize_w": 128,
13 |         "upscaling_resize_h": 128,
14 |         "upscaling_crop": True,
15 |         "upscaler_1": "Lanczos",
16 |         "upscaler_2": "None",
17 |         "extras_upscaler_2_visibility": 0,
18 |         "image": img2img_basic_image_base64,
19 |     }
20 |     assert requests.post(f"{base_url}/sdapi/v1/extra-single-image", json=payload).status_code == 200
21 |
22 |
23 | def test_png_info_performed(base_url, img2img_basic_image_base64):
24 |     payload = {
25 |         "image": img2img_basic_image_base64,
26 |     }
27 |     assert requests.post(f"{base_url}/sdapi/v1/png-info", json=payload).status_code == 200
28 |
29 |
30 | def test_interrogate_performed(base_url, img2img_basic_image_base64):
31 |     payload = {
32 |         "image": img2img_basic_image_base64,
33 |         "model": "clip",
34 |     }
35 |     assert requests.post(f"{base_url}/sdapi/v1/interrogate", json=payload).status_code == 200
36 |
--------------------------------------------------------------------------------
/.github/workflows/on_pull_request.yaml:
--------------------------------------------------------------------------------
1 | name: Linter
2 |
3 | on:
4 |   - push
5 |   - pull_request
6 |
7 | jobs:
8 |   lint-python:
9 |     name: ruff
10 |     runs-on: ubuntu-latest
11 |     if: github.event_name != 'pull_request' || github.event.pull_request.head.repo.full_name != github.event.pull_request.base.repo.full_name
12 |     steps:
13 |       - name: Checkout Code
14 |         uses: actions/checkout@v3
15 |       - uses: actions/setup-python@v4
16 |         with:
17 |           python-version: 3.11
18 |           # NB: there's no cache: pip here since we're not installing anything
19 |           # from the requirements.txt file(s) in the repository; it's faster
20 |           # not to have GHA download an (at the time of writing) 4 GB cache
21 |           # of PyTorch and other dependencies.
22 |       - name: Install Ruff
23 |         run: pip install ruff==0.0.272
24 |       - name: Run Ruff
25 |         run: ruff .
26 |   lint-js:
27 |     name: eslint
28 |     runs-on: ubuntu-latest
29 |     if: github.event_name != 'pull_request' || github.event.pull_request.head.repo.full_name != github.event.pull_request.base.repo.full_name
30 |     steps:
31 |       - name: Checkout Code
32 |         uses: actions/checkout@v3
33 |       - name: Install Node.js
34 |         uses: actions/setup-node@v3
35 |         with:
36 |           node-version: 18
37 |       - run: npm i --ci
38 |       - run: npm run lint
39 |
--------------------------------------------------------------------------------
/modules/sd_hijack_utils.py:
--------------------------------------------------------------------------------
1 | import importlib
2 |
3 | class CondFunc:
4 |     def __new__(cls, orig_func, sub_func, cond_func):
5 |         self = super(CondFunc, cls).__new__(cls)
6 |         if isinstance(orig_func, str):
7 |             func_path = orig_func.split('.')
8 |             for i in range(len(func_path)-1, -1, -1):
9 |                 try:
10 |                     resolved_obj = importlib.import_module('.'.join(func_path[:i]))
11 |                     break
12 |                 except ImportError:
13 |                     pass
14 |             for attr_name in func_path[i:-1]:
15 |                 resolved_obj = getattr(resolved_obj, attr_name)
16 |             orig_func = getattr(resolved_obj, func_path[-1])
17 |             setattr(resolved_obj, func_path[-1], lambda *args, **kwargs: self(*args, **kwargs))
18 |         self.__init__(orig_func, sub_func, cond_func)
19 |         return lambda *args, **kwargs: self(*args, **kwargs)
20 |     def __init__(self, orig_func, sub_func, cond_func):
21 |         self.__orig_func = orig_func
22 |         self.__sub_func = sub_func
23 |         self.__cond_func = cond_func
24 |     def __call__(self, *args, **kwargs):
25 |         if not self.__cond_func or self.__cond_func(self.__orig_func, *args, **kwargs):
26 |             return self.__sub_func(self.__orig_func, *args, **kwargs)
27 |         else:
28 |             return self.__orig_func(*args, **kwargs)
29 |
--------------------------------------------------------------------------------
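A usage sketch of CondFunc (not part of the repository; the wrapped function and condition are illustrative): substitute a function only when a predicate on its arguments holds, otherwise fall through to the original.

    from modules.sd_hijack_utils import CondFunc  # import path assumed

    # Wrap torch.nn.functional.silu: log when the input tensor is on CPU,
    # then delegate to the original implementation either way.
    CondFunc(
        'torch.nn.functional.silu',
        lambda orig, x, *args, **kwargs: print("silu on CPU") or orig(x, *args, **kwargs),
        lambda orig, x, *args, **kwargs: x.device.type == 'cpu',
    )
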
/extensions-builtin/canvas-zoom-and-pan/style.css:
--------------------------------------------------------------------------------
1 | .canvas-tooltip-info {
2 |     position: absolute;
3 |     top: 10px;
4 |     left: 10px;
5 |     cursor: help;
6 |     background-color: rgba(0, 0, 0, 0.3);
7 |     width: 20px;
8 |     height: 20px;
9 |     border-radius: 50%;
10 |     display: flex;
11 |     align-items: center;
12 |     justify-content: center;
13 |     flex-direction: column;
14 |
15 |     z-index: 100;
16 | }
17 |
18 | .canvas-tooltip-info::after {
19 |     content: '';
20 |     display: block;
21 |     width: 2px;
22 |     height: 7px;
23 |     background-color: white;
24 |     margin-top: 2px;
25 | }
26 |
27 | .canvas-tooltip-info::before {
28 |     content: '';
29 |     display: block;
30 |     width: 2px;
31 |     height: 2px;
32 |     background-color: white;
33 | }
34 |
35 | .canvas-tooltip-content {
36 |     display: none;
37 |     background-color: #f9f9f9;
38 |     color: #333;
39 |     border: 1px solid #ddd;
40 |     padding: 15px;
41 |     position: absolute;
42 |     top: 40px;
43 |     left: 10px;
44 |     width: 250px;
45 |     font-size: 16px;
46 |     opacity: 0;
47 |     border-radius: 8px;
48 |     box-shadow: 0px 8px 16px 0px rgba(0,0,0,0.2);
49 |
50 |     z-index: 100;
51 | }
52 |
53 | .canvas-tooltip:hover .canvas-tooltip-content {
54 |     display: block;
55 |     animation: fadeIn 0.5s;
56 |     opacity: 1;
57 | }
58 |
59 | @keyframes fadeIn {
60 |     from {opacity: 0;}
61 |     to {opacity: 1;}
62 | }
63 |
64 | .styler {
65 |     overflow: inherit !important;
66 | }
--------------------------------------------------------------------------------
/modules/paths_internal.py:
--------------------------------------------------------------------------------
1 | """this module defines internal paths used by program and is safe to import before dependencies are installed in launch.py"""
2 |
3 | import argparse
4 | import os
5 | import sys
6 | import shlex
7 |
8 | commandline_args = os.environ.get('COMMANDLINE_ARGS', "")
9 | sys.argv += shlex.split(commandline_args)
10 |
11 | modules_path = os.path.dirname(os.path.realpath(__file__))
12 | script_path = os.path.dirname(modules_path)
13 |
14 | sd_configs_path = os.path.join(script_path, "configs")
15 | sd_default_config = os.path.join(sd_configs_path, "v1-inference.yaml")
16 | sd_model_file = os.path.join(script_path, 'model.ckpt')
17 | default_sd_model_file = sd_model_file
18 |
19 | # Parse the --data-dir flag first so we can use it as a base for our other argument default values
20 | parser_pre = argparse.ArgumentParser(add_help=False)
21 | parser_pre.add_argument("--data-dir", type=str, default=os.path.dirname(modules_path), help="base path where all user data is stored", )
22 | cmd_opts_pre = parser_pre.parse_known_args()[0]
23 |
24 | data_path = cmd_opts_pre.data_dir
25 |
26 | models_path = os.path.join(data_path, "models")
27 | extensions_dir = os.path.join(data_path, "extensions")
28 | extensions_builtin_dir = os.path.join(script_path, "extensions-builtin")
29 | config_states_dir = os.path.join(script_path, "config_states")
30 |
31 | roboto_ttf_file = os.path.join(modules_path, 'Roboto-Regular.ttf')
32 |
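A minimal standalone sketch of the two-stage parse used above (hypothetical flags, not repo code): a throwaway pre-parser reads --data-dir via parse_known_args, so defaults for the remaining paths can be derived from it before the full parser ever runs.

    import argparse
    import os

    pre = argparse.ArgumentParser(add_help=False)
    pre.add_argument("--data-dir", type=str, default=".")
    known, _ = pre.parse_known_args(["--data-dir", "/tmp/webui", "--some-later-flag"])

    # Defaults for other locations are built from the pre-parsed base directory.
    models_path = os.path.join(known.data_dir, "models")
    print(models_path)  # /tmp/webui/models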
--------------------------------------------------------------------------------
/modules/sd_hijack_checkpoint.py:
--------------------------------------------------------------------------------
1 | from torch.utils.checkpoint import checkpoint
2 |
3 | import ldm.modules.attention
4 | import ldm.modules.diffusionmodules.openaimodel
5 |
6 |
7 | def BasicTransformerBlock_forward(self, x, context=None):
8 | return checkpoint(self._forward, x, context)
9 |
10 |
11 | def AttentionBlock_forward(self, x):
12 | return checkpoint(self._forward, x)
13 |
14 |
15 | def ResBlock_forward(self, x, emb):
16 | return checkpoint(self._forward, x, emb)
17 |
18 |
19 | stored = []
20 |
21 |
22 | def add():
23 | if len(stored) != 0:
24 | return
25 |
26 | stored.extend([
27 | ldm.modules.attention.BasicTransformerBlock.forward,
28 | ldm.modules.diffusionmodules.openaimodel.ResBlock.forward,
29 | ldm.modules.diffusionmodules.openaimodel.AttentionBlock.forward
30 | ])
31 |
32 | ldm.modules.attention.BasicTransformerBlock.forward = BasicTransformerBlock_forward
33 | ldm.modules.diffusionmodules.openaimodel.ResBlock.forward = ResBlock_forward
34 | ldm.modules.diffusionmodules.openaimodel.AttentionBlock.forward = AttentionBlock_forward
35 |
36 |
37 | def remove():
38 | if len(stored) == 0:
39 | return
40 |
41 | ldm.modules.attention.BasicTransformerBlock.forward = stored[0]
42 | ldm.modules.diffusionmodules.openaimodel.ResBlock.forward = stored[1]
43 | ldm.modules.diffusionmodules.openaimodel.AttentionBlock.forward = stored[2]
44 |
45 | stored.clear()
46 |
47 |
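A toy illustration (not repo code) of the same stash-and-restore idiom: the originals go into a module-level list, which makes add() a no-op when already patched and lets remove() undo it exactly.

    class Block:
        def forward(self):
            return "plain"

    stored = []

    def add():
        if stored:                    # already patched, do nothing
            return
        stored.append(Block.forward)  # stash the original method
        Block.forward = lambda self: "checkpointed"

    def remove():
        if not stored:                # nothing to undo
            return
        Block.forward = stored.pop()  # restore the original

    add()
    assert Block().forward() == "checkpointed"
    remove()
    assert Block().forward() == "plain"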
--------------------------------------------------------------------------------
/javascript/inputAccordion.js:
--------------------------------------------------------------------------------
1 | var observerAccordionOpen = new MutationObserver(function(mutations) {
2 | mutations.forEach(function(mutationRecord) {
3 | var elem = mutationRecord.target;
4 | var open = elem.classList.contains('open');
5 |
6 | var accordion = elem.parentNode;
7 | accordion.classList.toggle('input-accordion-open', open);
8 |
9 | var checkbox = gradioApp().querySelector('#' + accordion.id + "-checkbox input");
10 | checkbox.checked = open;
11 | updateInput(checkbox);
12 |
13 | var extra = gradioApp().querySelector('#' + accordion.id + "-extra");
14 | if (extra) {
15 | extra.style.display = open ? "" : "none";
16 | }
17 | });
18 | });
19 |
20 | function inputAccordionChecked(id, checked) {
21 | var label = gradioApp().querySelector('#' + id + " .label-wrap");
22 | if (label.classList.contains('open') != checked) {
23 | label.click();
24 | }
25 | }
26 |
27 | onUiLoaded(function() {
28 | for (var accordion of gradioApp().querySelectorAll('.input-accordion')) {
29 | var labelWrap = accordion.querySelector('.label-wrap');
30 | observerAccordionOpen.observe(labelWrap, {attributes: true, attributeFilter: ['class']});
31 |
32 | var extra = gradioApp().querySelector('#' + accordion.id + "-extra");
33 | if (extra) {
34 | labelWrap.insertBefore(extra, labelWrap.lastElementChild);
35 | }
36 | }
37 | });
38 |
--------------------------------------------------------------------------------
/launch.py:
--------------------------------------------------------------------------------
1 | from modules import launch_utils
2 |
3 | args = launch_utils.args
4 | python = launch_utils.python
5 | git = launch_utils.git
6 | index_url = launch_utils.index_url
7 | dir_repos = launch_utils.dir_repos
8 |
9 | commit_hash = launch_utils.commit_hash
10 | git_tag = launch_utils.git_tag
11 |
12 | run = launch_utils.run
13 | is_installed = launch_utils.is_installed
14 | repo_dir = launch_utils.repo_dir
15 |
16 | run_pip = launch_utils.run_pip
17 | check_run_python = launch_utils.check_run_python
18 | git_clone = launch_utils.git_clone
19 | git_pull_recursive = launch_utils.git_pull_recursive
20 | list_extensions = launch_utils.list_extensions
21 | run_extension_installer = launch_utils.run_extension_installer
22 | prepare_environment = launch_utils.prepare_environment
23 | configure_for_tests = launch_utils.configure_for_tests
24 | start = launch_utils.start
25 |
26 |
27 | def main():
28 | if args.dump_sysinfo:
29 | filename = launch_utils.dump_sysinfo()
30 |
31 | print(f"Sysinfo saved as {filename}. Exiting...")
32 |
33 | exit(0)
34 |
35 | launch_utils.startup_timer.record("initial startup")
36 |
37 | with launch_utils.startup_timer.subcategory("prepare environment"):
38 | if not args.skip_prepare_environment:
39 | prepare_environment()
40 |
41 | if args.test_server:
42 | configure_for_tests()
43 |
44 | start()
45 |
46 |
47 | if __name__ == "__main__":
48 | main()
49 |
--------------------------------------------------------------------------------
/webui-user.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 | #########################################################
3 | # Uncomment and change the variables below to your need:#
4 | #########################################################
5 |
6 | # Install directory without trailing slash
7 | #install_dir="/home/$(whoami)"
8 |
9 | # Name of the subdirectory
10 | #clone_dir="stable-diffusion-webui"
11 |
12 | # Commandline arguments for webui.py, for example: export COMMANDLINE_ARGS="--medvram --opt-split-attention"
13 | #export COMMANDLINE_ARGS=""
14 |
15 | # python3 executable
16 | #python_cmd="python3"
17 |
18 | # git executable
19 | #export GIT="git"
20 |
21 | # python3 venv without trailing slash (defaults to ${install_dir}/${clone_dir}/venv)
22 | #venv_dir="venv"
23 |
24 | # script to launch to start the app
25 | #export LAUNCH_SCRIPT="launch.py"
26 |
27 | # install command for torch
28 | #export TORCH_COMMAND="pip install torch==1.12.1+cu113 --extra-index-url https://download.pytorch.org/whl/cu113"
29 |
30 | # Requirements file to use for stable-diffusion-webui
31 | #export REQS_FILE="requirements_versions.txt"
32 |
33 | # Fixed git repos
34 | #export K_DIFFUSION_PACKAGE=""
35 | #export GFPGAN_PACKAGE=""
36 |
37 | # Fixed git commits
38 | #export STABLE_DIFFUSION_COMMIT_HASH=""
39 | #export CODEFORMER_COMMIT_HASH=""
40 | #export BLIP_COMMIT_HASH=""
41 |
42 | # Uncomment to enable accelerated launch
43 | #export ACCELERATE="True"
44 |
45 | # Uncomment to disable TCMalloc
46 | #export NO_TCMALLOC="True"
47 |
48 | ###########################################
49 |
--------------------------------------------------------------------------------
/javascript/imageMaskFix.js:
--------------------------------------------------------------------------------
1 | /**
2 | * temporary fix for https://github.com/AUTOMATIC1111/stable-diffusion-webui/issues/668
3 | * @see https://github.com/gradio-app/gradio/issues/1721
4 | */
5 | function imageMaskResize() {
6 | const canvases = gradioApp().querySelectorAll('#img2maskimg .touch-none canvas');
7 | if (!canvases.length) {
8 | window.removeEventListener('resize', imageMaskResize);
9 | return;
10 | }
11 |
12 | const wrapper = canvases[0].closest('.touch-none');
13 | const previewImage = wrapper.previousElementSibling;
14 |
15 | if (!previewImage.complete) {
16 | previewImage.addEventListener('load', imageMaskResize);
17 | return;
18 | }
19 |
20 | const w = previewImage.width;
21 | const h = previewImage.height;
22 | const nw = previewImage.naturalWidth;
23 | const nh = previewImage.naturalHeight;
24 | const portrait = nh > nw;
25 |
26 | const wW = Math.min(w, portrait ? h / nh * nw : w / nw * nw);
27 | const wH = Math.min(h, portrait ? h / nh * nh : w / nw * nh);
28 |
29 | wrapper.style.width = `${wW}px`;
30 | wrapper.style.height = `${wH}px`;
31 | wrapper.style.left = `0px`;
32 | wrapper.style.top = `0px`;
33 |
34 | canvases.forEach(c => {
35 | c.style.width = c.style.height = '';
36 | c.style.maxWidth = '100%';
37 | c.style.maxHeight = '100%';
38 | c.style.objectFit = 'contain';
39 | });
40 | }
41 |
42 | onAfterUiUpdate(imageMaskResize);
43 | window.addEventListener('resize', imageMaskResize);
44 |
--------------------------------------------------------------------------------
/modules/sd_hijack_xlmr.py:
--------------------------------------------------------------------------------
1 | import torch
2 |
3 | from modules import sd_hijack_clip, devices
4 |
5 |
6 | class FrozenXLMREmbedderWithCustomWords(sd_hijack_clip.FrozenCLIPEmbedderWithCustomWords):
7 | def __init__(self, wrapped, hijack):
8 | super().__init__(wrapped, hijack)
9 |
10 | self.id_start = wrapped.config.bos_token_id
11 | self.id_end = wrapped.config.eos_token_id
12 | self.id_pad = wrapped.config.pad_token_id
13 |
14 |         self.comma_token = self.tokenizer.get_vocab().get(',</w>', None)  # alt diffusion doesn't have </w> bits for comma
15 |
16 | def encode_with_transformers(self, tokens):
17 | # there's no CLIP Skip here because all hidden layers have size of 1024 and the last one uses a
18 | # trained layer to transform those 1024 into 768 for unet; so you can't choose which transformer
19 | # layer to work with - you have to use the last
20 |
21 | attention_mask = (tokens != self.id_pad).to(device=tokens.device, dtype=torch.int64)
22 | features = self.wrapped(input_ids=tokens, attention_mask=attention_mask)
23 | z = features['projection_state']
24 |
25 | return z
26 |
27 | def encode_embedding_init_text(self, init_text, nvpt):
28 | embedding_layer = self.wrapped.roberta.embeddings
29 | ids = self.wrapped.tokenizer(init_text, max_length=nvpt, return_tensors="pt", add_special_tokens=False)["input_ids"]
30 | embedded = embedding_layer.token_embedding.wrapped(ids.to(devices.device)).squeeze(0)
31 |
32 | return embedded
33 |
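A small standalone check of the pad-mask construction in encode_with_transformers above; the pad id of 1 here is a made-up value for illustration:

    import torch

    id_pad = 1
    tokens = torch.tensor([[101, 2009, 1, 1]])  # last two positions are padding
    attention_mask = (tokens != id_pad).to(device=tokens.device, dtype=torch.int64)
    print(attention_mask)  # tensor([[1, 1, 0, 0]])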
--------------------------------------------------------------------------------
/.github/ISSUE_TEMPLATE/feature_request.yml:
--------------------------------------------------------------------------------
1 | name: Feature request
2 | description: Suggest an idea for this project
3 | title: "[Feature Request]: "
4 | labels: ["enhancement"]
5 |
6 | body:
7 | - type: checkboxes
8 | attributes:
9 | label: Is there an existing issue for this?
10 |       description: Please search to see if an issue already exists for the feature you want, and check that it hasn't already been implemented in a recent build/commit.
11 | options:
12 | - label: I have searched the existing issues and checked the recent builds/commits
13 | required: true
14 | - type: markdown
15 | attributes:
16 | value: |
17 | *Please fill this form with as much information as possible, provide screenshots and/or illustrations of the feature if possible*
18 | - type: textarea
19 | id: feature
20 | attributes:
21 |       label: What would your feature do?
22 | description: Tell us about your feature in a very clear and simple way, and what problem it would solve
23 | validations:
24 | required: true
25 | - type: textarea
26 | id: workflow
27 | attributes:
28 | label: Proposed workflow
29 | description: Please provide us with step by step information on how you'd like the feature to be accessed and used
30 | value: |
31 | 1. Go to ....
32 | 2. Press ....
33 | 3. ...
34 | validations:
35 | required: true
36 | - type: textarea
37 | id: misc
38 | attributes:
39 | label: Additional information
40 | description: Add any other context or screenshots about the feature request here.
41 |
--------------------------------------------------------------------------------
/scripts/postprocessing_codeformer.py:
--------------------------------------------------------------------------------
1 | from PIL import Image
2 | import numpy as np
3 |
4 | from modules import scripts_postprocessing, codeformer_model
5 | import gradio as gr
6 |
7 | from modules.ui_components import FormRow
8 |
9 |
10 | class ScriptPostprocessingCodeFormer(scripts_postprocessing.ScriptPostprocessing):
11 | name = "CodeFormer"
12 | order = 3000
13 |
14 | def ui(self):
15 | with FormRow():
16 | codeformer_visibility = gr.Slider(minimum=0.0, maximum=1.0, step=0.001, label="CodeFormer visibility", value=0, elem_id="extras_codeformer_visibility")
17 | codeformer_weight = gr.Slider(minimum=0.0, maximum=1.0, step=0.001, label="CodeFormer weight (0 = maximum effect, 1 = minimum effect)", value=0, elem_id="extras_codeformer_weight")
18 |
19 | return {
20 | "codeformer_visibility": codeformer_visibility,
21 | "codeformer_weight": codeformer_weight,
22 | }
23 |
24 | def process(self, pp: scripts_postprocessing.PostprocessedImage, codeformer_visibility, codeformer_weight):
25 | if codeformer_visibility == 0:
26 | return
27 |
28 | restored_img = codeformer_model.codeformer.restore(np.array(pp.image, dtype=np.uint8), w=codeformer_weight)
29 | res = Image.fromarray(restored_img)
30 |
31 | if codeformer_visibility < 1.0:
32 | res = Image.blend(pp.image, res, codeformer_visibility)
33 |
34 | pp.image = res
35 | pp.info["CodeFormer visibility"] = round(codeformer_visibility, 3)
36 | pp.info["CodeFormer weight"] = round(codeformer_weight, 3)
37 |
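A quick standalone check of the visibility blend above: Image.blend(a, b, alpha) interpolates linearly, so visibility 0 keeps the input image and 1 keeps the fully restored one.

    from PIL import Image

    original = Image.new("RGB", (8, 8), (0, 0, 0))
    restored = Image.new("RGB", (8, 8), (200, 200, 200))

    half = Image.blend(original, restored, 0.5)  # 50% visibility
    print(half.getpixel((0, 0)))  # (100, 100, 100)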
--------------------------------------------------------------------------------
/modules/gitpython_hack.py:
--------------------------------------------------------------------------------
1 | from __future__ import annotations
2 |
3 | import io
4 | import subprocess
5 |
6 | import git
7 |
8 |
9 | class Git(git.Git):
10 | """
11 | Git subclassed to never use persistent processes.
12 | """
13 |
14 | def _get_persistent_cmd(self, attr_name, cmd_name, *args, **kwargs):
15 | raise NotImplementedError(f"Refusing to use persistent process: {attr_name} ({cmd_name} {args} {kwargs})")
16 |
17 | def get_object_header(self, ref: str | bytes) -> tuple[str, str, int]:
18 | ret = subprocess.check_output(
19 | [self.GIT_PYTHON_GIT_EXECUTABLE, "cat-file", "--batch-check"],
20 | input=self._prepare_ref(ref),
21 | cwd=self._working_dir,
22 | timeout=2,
23 | )
24 | return self._parse_object_header(ret)
25 |
26 | def stream_object_data(self, ref: str) -> tuple[str, str, int, "Git.CatFileContentStream"]:
27 | # Not really streaming, per se; this buffers the entire object in memory.
28 | # Shouldn't be a problem for our use case, since we're only using this for
29 | # object headers (commit objects).
30 | ret = subprocess.check_output(
31 | [self.GIT_PYTHON_GIT_EXECUTABLE, "cat-file", "--batch"],
32 | input=self._prepare_ref(ref),
33 | cwd=self._working_dir,
34 | timeout=30,
35 | )
36 | bio = io.BytesIO(ret)
37 | hexsha, typename, size = self._parse_object_header(bio.readline())
38 | return (hexsha, typename, size, self.CatFileContentStream(size, bio))
39 |
40 |
41 | class Repo(git.Repo):
42 | GitCommandWrapperType = Git
43 |
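A standalone sketch of the same `git cat-file --batch-check` round trip; run it inside any git checkout, with HEAD standing in for the ref:

    import subprocess

    out = subprocess.check_output(
        ["git", "cat-file", "--batch-check"],
        input=b"HEAD\n",
        timeout=2,
    )
    # Output format is "<hexsha> <type> <size>", e.g. "1a2b3c... commit 257".
    hexsha, typename, size = out.split()[:3]
    print(hexsha.decode(), typename.decode(), int(size))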
--------------------------------------------------------------------------------
/javascript/generationParams.js:
--------------------------------------------------------------------------------
1 | // attaches listeners to the txt2img and img2img galleries to update displayed generation param text when the image changes
2 |
3 | let txt2img_gallery, img2img_gallery, modal = undefined;
4 | onAfterUiUpdate(function() {
5 | if (!txt2img_gallery) {
6 | txt2img_gallery = attachGalleryListeners("txt2img");
7 | }
8 | if (!img2img_gallery) {
9 | img2img_gallery = attachGalleryListeners("img2img");
10 | }
11 | if (!modal) {
12 | modal = gradioApp().getElementById('lightboxModal');
13 | modalObserver.observe(modal, {attributes: true, attributeFilter: ['style']});
14 | }
15 | });
16 |
17 | let modalObserver = new MutationObserver(function(mutations) {
18 | mutations.forEach(function(mutationRecord) {
19 | let selectedTab = gradioApp().querySelector('#tabs div button.selected')?.innerText;
20 | if (mutationRecord.target.style.display === 'none' && (selectedTab === 'txt2img' || selectedTab === 'img2img')) {
21 | gradioApp().getElementById(selectedTab + "_generation_info_button")?.click();
22 | }
23 | });
24 | });
25 |
26 | function attachGalleryListeners(tab_name) {
27 | var gallery = gradioApp().querySelector('#' + tab_name + '_gallery');
28 | gallery?.addEventListener('click', () => gradioApp().getElementById(tab_name + "_generation_info_button").click());
29 | gallery?.addEventListener('keydown', (e) => {
30 | if (e.keyCode == 37 || e.keyCode == 39) { // left or right arrow
31 | gradioApp().getElementById(tab_name + "_generation_info_button").click();
32 | }
33 | });
34 | return gallery;
35 | }
36 |
--------------------------------------------------------------------------------
/modules/ui_extra_networks_hypernets.py:
--------------------------------------------------------------------------------
1 | import os
2 |
3 | from modules import shared, ui_extra_networks
4 | from modules.ui_extra_networks import quote_js
5 | from modules.hashes import sha256_from_cache
6 |
7 |
8 | class ExtraNetworksPageHypernetworks(ui_extra_networks.ExtraNetworksPage):
9 | def __init__(self):
10 | super().__init__('Hypernetworks')
11 |
12 | def refresh(self):
13 | shared.reload_hypernetworks()
14 |
15 | def create_item(self, name, index=None, enable_filter=True):
16 | full_path = shared.hypernetworks[name]
17 | path, ext = os.path.splitext(full_path)
18 | sha256 = sha256_from_cache(full_path, f'hypernet/{name}')
19 | shorthash = sha256[0:10] if sha256 else None
20 |
21 | return {
22 | "name": name,
23 | "filename": full_path,
24 | "shorthash": shorthash,
25 | "preview": self.find_preview(path),
26 | "description": self.find_description(path),
27 | "search_term": self.search_terms_from_path(path) + " " + (sha256 or ""),
28 |             "prompt": quote_js(f"<hypernet:{name}:") + " + opts.extra_networks_default_multiplier + " + quote_js(">"),
29 | "local_preview": f"{path}.preview.{shared.opts.samples_format}",
30 | "sort_keys": {'default': index, **self.get_sort_keys(path + ext)},
31 | }
32 |
33 | def list_items(self):
34 | for index, name in enumerate(shared.hypernetworks):
35 | yield self.create_item(name, index)
36 |
37 | def allowed_directories_for_previews(self):
38 | return [shared.cmd_opts.hypernetwork_dir]
39 |
40 |
--------------------------------------------------------------------------------
/modules/hypernetworks/ui.py:
--------------------------------------------------------------------------------
1 | import html
2 |
3 | import gradio as gr
4 | import modules.hypernetworks.hypernetwork
5 | from modules import devices, sd_hijack, shared
6 |
7 | not_available = ["hardswish", "multiheadattention"]
8 | keys = [x for x in modules.hypernetworks.hypernetwork.HypernetworkModule.activation_dict if x not in not_available]
9 |
10 |
11 | def create_hypernetwork(name, enable_sizes, overwrite_old, layer_structure=None, activation_func=None, weight_init=None, add_layer_norm=False, use_dropout=False, dropout_structure=None):
12 | filename = modules.hypernetworks.hypernetwork.create_hypernetwork(name, enable_sizes, overwrite_old, layer_structure, activation_func, weight_init, add_layer_norm, use_dropout, dropout_structure)
13 |
14 | return gr.Dropdown.update(choices=sorted(shared.hypernetworks)), f"Created: {filename}", ""
15 |
16 |
17 | def train_hypernetwork(*args):
18 | shared.loaded_hypernetworks = []
19 |
20 | assert not shared.cmd_opts.lowvram, 'Training models with lowvram is not possible'
21 |
22 | try:
23 | sd_hijack.undo_optimizations()
24 |
25 | hypernetwork, filename = modules.hypernetworks.hypernetwork.train_hypernetwork(*args)
26 |
27 | res = f"""
28 | Training {'interrupted' if shared.state.interrupted else 'finished'} at {hypernetwork.step} steps.
29 | Hypernetwork saved to {html.escape(filename)}
30 | """
31 | return res, ""
32 | except Exception:
33 | raise
34 | finally:
35 | shared.sd_model.cond_stage_model.to(devices.device)
36 | shared.sd_model.first_stage_model.to(devices.device)
37 | sd_hijack.apply_optimizations()
38 |
39 |
--------------------------------------------------------------------------------
/modules/scripts_auto_postprocessing.py:
--------------------------------------------------------------------------------
1 | from modules import scripts, scripts_postprocessing, shared
2 |
3 |
4 | class ScriptPostprocessingForMainUI(scripts.Script):
5 | def __init__(self, script_postproc):
6 | self.script: scripts_postprocessing.ScriptPostprocessing = script_postproc
7 | self.postprocessing_controls = None
8 |
9 | def title(self):
10 | return self.script.name
11 |
12 | def show(self, is_img2img):
13 | return scripts.AlwaysVisible
14 |
15 | def ui(self, is_img2img):
16 | self.postprocessing_controls = self.script.ui()
17 | return self.postprocessing_controls.values()
18 |
19 | def postprocess_image(self, p, script_pp, *args):
20 | args_dict = dict(zip(self.postprocessing_controls, args))
21 |
22 | pp = scripts_postprocessing.PostprocessedImage(script_pp.image)
23 | pp.info = {}
24 | self.script.process(pp, **args_dict)
25 | p.extra_generation_params.update(pp.info)
26 | script_pp.image = pp.image
27 |
28 |
29 | def create_auto_preprocessing_script_data():
30 | from modules import scripts
31 |
32 | res = []
33 |
34 | for name in shared.opts.postprocessing_enable_in_main_ui:
35 | script = next(iter([x for x in scripts.postprocessing_scripts_data if x.script_class.name == name]), None)
36 | if script is None:
37 | continue
38 |
39 | constructor = lambda s=script: ScriptPostprocessingForMainUI(s.script_class())
40 | res.append(scripts.ScriptClassData(script_class=constructor, path=script.path, basedir=script.basedir, module=script.module))
41 |
42 | return res
43 |
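The `lambda s=script:` default argument in create_auto_preprocessing_script_data is the usual fix for Python's late-binding closures inside a loop; a quick illustration of the difference:

    fns_late = [lambda: i for i in range(3)]       # every lambda sees the final i
    fns_bound = [lambda i=i: i for i in range(3)]  # default argument captures each i

    print([f() for f in fns_late])   # [2, 2, 2]
    print([f() for f in fns_bound])  # [0, 1, 2]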
--------------------------------------------------------------------------------
/modules/ui_extra_networks_textual_inversion.py:
--------------------------------------------------------------------------------
1 | import os
2 |
3 | from modules import ui_extra_networks, sd_hijack, shared
4 | from modules.ui_extra_networks import quote_js
5 |
6 |
7 | class ExtraNetworksPageTextualInversion(ui_extra_networks.ExtraNetworksPage):
8 | def __init__(self):
9 | super().__init__('Textual Inversion')
10 | self.allow_negative_prompt = True
11 |
12 | def refresh(self):
13 | sd_hijack.model_hijack.embedding_db.load_textual_inversion_embeddings(force_reload=True)
14 |
15 | def create_item(self, name, index=None, enable_filter=True):
16 | embedding = sd_hijack.model_hijack.embedding_db.word_embeddings.get(name)
17 |
18 | path, ext = os.path.splitext(embedding.filename)
19 | return {
20 | "name": name,
21 | "filename": embedding.filename,
22 | "shorthash": embedding.shorthash,
23 | "preview": self.find_preview(path),
24 | "description": self.find_description(path),
25 | "search_term": self.search_terms_from_path(embedding.filename) + " " + (embedding.hash or ""),
26 | "prompt": quote_js(embedding.name),
27 | "local_preview": f"{path}.preview.{shared.opts.samples_format}",
28 | "sort_keys": {'default': index, **self.get_sort_keys(embedding.filename)},
29 | }
30 |
31 | def list_items(self):
32 | for index, name in enumerate(sd_hijack.model_hijack.embedding_db.word_embeddings):
33 | yield self.create_item(name, index)
34 |
35 | def allowed_directories_for_previews(self):
36 | return list(sd_hijack.model_hijack.embedding_db.embedding_dirs)
37 |
--------------------------------------------------------------------------------
/modules/textual_inversion/ui.py:
--------------------------------------------------------------------------------
1 | import html
2 |
3 | import gradio as gr
4 |
5 | import modules.textual_inversion.textual_inversion
6 | import modules.textual_inversion.preprocess
7 | from modules import sd_hijack, shared
8 |
9 |
10 | def create_embedding(name, initialization_text, nvpt, overwrite_old):
11 | filename = modules.textual_inversion.textual_inversion.create_embedding(name, nvpt, overwrite_old, init_text=initialization_text)
12 |
13 | sd_hijack.model_hijack.embedding_db.load_textual_inversion_embeddings()
14 |
15 | return gr.Dropdown.update(choices=sorted(sd_hijack.model_hijack.embedding_db.word_embeddings.keys())), f"Created: {filename}", ""
16 |
17 |
18 | def preprocess(*args):
19 | modules.textual_inversion.preprocess.preprocess(*args)
20 |
21 | return f"Preprocessing {'interrupted' if shared.state.interrupted else 'finished'}.", ""
22 |
23 |
24 | def train_embedding(*args):
25 |
26 |     assert not shared.cmd_opts.lowvram, 'Training models with lowvram is not possible'
27 |
28 | apply_optimizations = shared.opts.training_xattention_optimizations
29 | try:
30 | if not apply_optimizations:
31 | sd_hijack.undo_optimizations()
32 |
33 | embedding, filename = modules.textual_inversion.textual_inversion.train_embedding(*args)
34 |
35 | res = f"""
36 | Training {'interrupted' if shared.state.interrupted else 'finished'} at {embedding.step} steps.
37 | Embedding saved to {html.escape(filename)}
38 | """
39 | return res, ""
40 | except Exception:
41 | raise
42 | finally:
43 | if not apply_optimizations:
44 | sd_hijack.apply_optimizations()
45 |
46 |
--------------------------------------------------------------------------------
/javascript/notification.js:
--------------------------------------------------------------------------------
1 | // Monitors the gallery and sends a browser notification when the leading image is new.
2 |
3 | let lastHeadImg = null;
4 |
5 | let notificationButton = null;
6 |
7 | onAfterUiUpdate(function() {
8 | if (notificationButton == null) {
9 | notificationButton = gradioApp().getElementById('request_notifications');
10 |
11 | if (notificationButton != null) {
12 | notificationButton.addEventListener('click', () => {
13 | void Notification.requestPermission();
14 | }, true);
15 | }
16 | }
17 |
18 | const galleryPreviews = gradioApp().querySelectorAll('div[id^="tab_"] div[id$="_results"] .thumbnail-item > img');
19 |
20 | if (galleryPreviews == null) return;
21 |
22 | const headImg = galleryPreviews[0]?.src;
23 |
24 | if (headImg == null || headImg == lastHeadImg) return;
25 |
26 | lastHeadImg = headImg;
27 |
28 | // play notification sound if available
29 | gradioApp().querySelector('#audio_notification audio')?.play();
30 |
31 | if (document.hasFocus()) return;
32 |
33 | // Multiple copies of the images are in the DOM when one is selected. Dedup with a Set to get the real number generated.
34 | const imgs = new Set(Array.from(galleryPreviews).map(img => img.src));
35 |
36 | const notification = new Notification(
37 | 'Stable Diffusion',
38 | {
39 | body: `Generated ${imgs.size > 1 ? imgs.size - opts.return_grid : 1} image${imgs.size > 1 ? 's' : ''}`,
40 | icon: headImg,
41 | image: headImg,
42 | }
43 | );
44 |
45 | notification.onclick = function(_) {
46 | parent.focus();
47 | this.close();
48 | };
49 | });
50 |
--------------------------------------------------------------------------------
/javascript/edit-order.js:
--------------------------------------------------------------------------------
1 | /* alt+left/right moves text in prompt */
2 |
3 | function keyupEditOrder(event) {
4 | if (!opts.keyedit_move) return;
5 |
6 | let target = event.originalTarget || event.composedPath()[0];
7 | if (!target.matches("*:is([id*='_toprow'] [id*='_prompt'], .prompt) textarea")) return;
8 | if (!event.altKey) return;
9 |
10 | let isLeft = event.key == "ArrowLeft";
11 | let isRight = event.key == "ArrowRight";
12 | if (!isLeft && !isRight) return;
13 | event.preventDefault();
14 |
15 | let selectionStart = target.selectionStart;
16 | let selectionEnd = target.selectionEnd;
17 | let text = target.value;
18 | let items = text.split(",");
19 | let indexStart = (text.slice(0, selectionStart).match(/,/g) || []).length;
20 | let indexEnd = (text.slice(0, selectionEnd).match(/,/g) || []).length;
21 | let range = indexEnd - indexStart + 1;
22 |
23 | if (isLeft && indexStart > 0) {
24 | items.splice(indexStart - 1, 0, ...items.splice(indexStart, range));
25 | target.value = items.join();
26 | target.selectionStart = items.slice(0, indexStart - 1).join().length + (indexStart == 1 ? 0 : 1);
27 | target.selectionEnd = items.slice(0, indexEnd).join().length;
28 | } else if (isRight && indexEnd < items.length - 1) {
29 | items.splice(indexStart + 1, 0, ...items.splice(indexStart, range));
30 | target.value = items.join();
31 | target.selectionStart = items.slice(0, indexStart + 1).join().length + 1;
32 | target.selectionEnd = items.slice(0, indexEnd + 2).join().length;
33 | }
34 |
35 | event.preventDefault();
36 | updateInput(target);
37 | }
38 |
39 | addEventListener('keydown', (event) => {
40 | keyupEditOrder(event);
41 | });
42 |
--------------------------------------------------------------------------------
/extensions-builtin/canvas-zoom-and-pan/scripts/hotkey_config.py:
--------------------------------------------------------------------------------
1 | import gradio as gr
2 | from modules import shared
3 |
4 | shared.options_templates.update(shared.options_section(('canvas_hotkey', "Canvas Hotkeys"), {
5 |     "canvas_hotkey_zoom": shared.OptionInfo("Alt", "Zoom canvas", gr.Radio, {"choices": ["Shift","Ctrl", "Alt"]}).info("If you choose 'Shift' you cannot scroll horizontally, 'Alt' can cause a little trouble in Firefox"),
6 |     "canvas_hotkey_adjust": shared.OptionInfo("Ctrl", "Adjust brush size", gr.Radio, {"choices": ["Shift","Ctrl", "Alt"]}).info("If you choose 'Shift' you cannot scroll horizontally, 'Alt' can cause a little trouble in Firefox"),
7 |     "canvas_hotkey_move": shared.OptionInfo("F", "Moving the canvas").info("To work correctly in Firefox, turn off 'Automatically search the page text when typing' in the browser settings"),
8 |     "canvas_hotkey_fullscreen": shared.OptionInfo("S", "Fullscreen Mode, maximizes the picture so that it fits into the screen and stretches it to its full width"),
9 |     "canvas_hotkey_reset": shared.OptionInfo("R", "Reset zoom and canvas position"),
10 |     "canvas_hotkey_overlap": shared.OptionInfo("O", "Toggle overlap").info("Technical button, needed for testing"),
11 |     "canvas_show_tooltip": shared.OptionInfo(True, "Enable tooltip on the canvas"),
12 |     "canvas_auto_expand": shared.OptionInfo(True, "Automatically expands an image that does not fit completely in the canvas area, similar to manually pressing the S and R buttons"),
13 |     "canvas_blur_prompt": shared.OptionInfo(False, "Take the focus off the prompt when working with a canvas"),
14 |     "canvas_disabled_functions": shared.OptionInfo(["Overlap"], "Disable functions that you don't use", gr.CheckboxGroup, {"choices": ["Zoom","Adjust brush size", "Moving canvas","Fullscreen","Reset Zoom","Overlap"]}),
15 | }))
16 |
--------------------------------------------------------------------------------
/modules/util.py:
--------------------------------------------------------------------------------
1 | import os
2 | import re
3 |
4 | from modules import shared
5 | from modules.paths_internal import script_path
6 |
7 |
8 | def natural_sort_key(s, regex=re.compile('([0-9]+)')):
9 | return [int(text) if text.isdigit() else text.lower() for text in regex.split(s)]
10 |
11 |
12 | def listfiles(dirname):
13 | filenames = [os.path.join(dirname, x) for x in sorted(os.listdir(dirname), key=natural_sort_key) if not x.startswith(".")]
14 | return [file for file in filenames if os.path.isfile(file)]
15 |
16 |
17 | def html_path(filename):
18 | return os.path.join(script_path, "html", filename)
19 |
20 |
21 | def html(filename):
22 | path = html_path(filename)
23 |
24 | if os.path.exists(path):
25 | with open(path, encoding="utf8") as file:
26 | return file.read()
27 |
28 | return ""
29 |
30 |
31 | def walk_files(path, allowed_extensions=None):
32 | if not os.path.exists(path):
33 | return
34 |
35 | if allowed_extensions is not None:
36 | allowed_extensions = set(allowed_extensions)
37 |
38 | items = list(os.walk(path, followlinks=True))
39 | items = sorted(items, key=lambda x: natural_sort_key(x[0]))
40 |
41 | for root, _, files in items:
42 | for filename in sorted(files, key=natural_sort_key):
43 | if allowed_extensions is not None:
44 | _, ext = os.path.splitext(filename)
45 | if ext not in allowed_extensions:
46 | continue
47 |
48 | if not shared.opts.list_hidden_files and ("/." in root or "\\." in root):
49 | continue
50 |
51 | yield os.path.join(root, filename)
52 |
53 |
54 | def ldm_print(*args, **kwargs):
55 | if shared.opts.hide_ldm_prints:
56 | return
57 |
58 | print(*args, **kwargs)
59 |
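A quick check of natural_sort_key above: digit runs compare as integers, so "img10" sorts after "img2" instead of landing between "img1" and "img2" as plain string sorting would have it:

    import re

    def natural_sort_key(s, regex=re.compile('([0-9]+)')):
        return [int(text) if text.isdigit() else text.lower() for text in regex.split(s)]

    print(sorted(["img10.png", "img2.png", "img1.png"], key=natural_sort_key))
    # ['img1.png', 'img2.png', 'img10.png']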
--------------------------------------------------------------------------------
/modules/sd_samplers.py:
--------------------------------------------------------------------------------
1 | from modules import sd_samplers_kdiffusion, sd_samplers_timesteps, shared
2 |
3 | # imports for functions that previously were here and are used by other modules
4 | from modules.sd_samplers_common import samples_to_image_grid, sample_to_image # noqa: F401
5 |
6 | all_samplers = [
7 | *sd_samplers_kdiffusion.samplers_data_k_diffusion,
8 | *sd_samplers_timesteps.samplers_data_timesteps,
9 | ]
10 | all_samplers_map = {x.name: x for x in all_samplers}
11 |
12 | samplers = []
13 | samplers_for_img2img = []
14 | samplers_map = {}
15 | samplers_hidden = {}
16 |
17 |
18 | def find_sampler_config(name):
19 | if name is not None:
20 | config = all_samplers_map.get(name, None)
21 | else:
22 | config = all_samplers[0]
23 |
24 | return config
25 |
26 |
27 | def create_sampler(name, model):
28 | config = find_sampler_config(name)
29 |
30 | assert config is not None, f'bad sampler name: {name}'
31 |
32 | if model.is_sdxl and config.options.get("no_sdxl", False):
33 | raise Exception(f"Sampler {config.name} is not supported for SDXL")
34 |
35 | sampler = config.constructor(model)
36 | sampler.config = config
37 |
38 | return sampler
39 |
40 |
41 | def set_samplers():
42 | global samplers, samplers_for_img2img, samplers_hidden
43 |
44 | samplers_hidden = set(shared.opts.hide_samplers)
45 | samplers = all_samplers
46 | samplers_for_img2img = all_samplers
47 |
48 | samplers_map.clear()
49 | for sampler in all_samplers:
50 | samplers_map[sampler.name.lower()] = sampler.name
51 | for alias in sampler.aliases:
52 | samplers_map[alias.lower()] = sampler.name
53 |
54 |
55 | def visible_sampler_names():
56 | return [x.name for x in samplers if x.name not in samplers_hidden]
57 |
58 |
59 | set_samplers()
60 |
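A toy sketch, with made-up sampler data, of the lookup table set_samplers builds: every canonical name and every alias, lowercased, maps back to the canonical name.

    from types import SimpleNamespace

    all_samplers = [SimpleNamespace(name="Euler a", aliases=["k_euler_a", "k_euler_ancestral"])]

    samplers_map = {}
    for sampler in all_samplers:
        samplers_map[sampler.name.lower()] = sampler.name
        for alias in sampler.aliases:
            samplers_map[alias.lower()] = sampler.name

    print(samplers_map["k_euler_ancestral"])  # Euler a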
--------------------------------------------------------------------------------
/modules/textual_inversion/logging.py:
--------------------------------------------------------------------------------
1 | import datetime
2 | import json
3 | import os
4 |
5 | saved_params_shared = {
6 | "batch_size",
7 | "clip_grad_mode",
8 | "clip_grad_value",
9 | "create_image_every",
10 | "data_root",
11 | "gradient_step",
12 | "initial_step",
13 | "latent_sampling_method",
14 | "learn_rate",
15 | "log_directory",
16 | "model_hash",
17 | "model_name",
18 | "num_of_dataset_images",
19 | "steps",
20 | "template_file",
21 | "training_height",
22 | "training_width",
23 | }
24 | saved_params_ti = {
25 | "embedding_name",
26 | "num_vectors_per_token",
27 | "save_embedding_every",
28 | "save_image_with_stored_embedding",
29 | }
30 | saved_params_hypernet = {
31 | "activation_func",
32 | "add_layer_norm",
33 | "hypernetwork_name",
34 | "layer_structure",
35 | "save_hypernetwork_every",
36 | "use_dropout",
37 | "weight_init",
38 | }
39 | saved_params_all = saved_params_shared | saved_params_ti | saved_params_hypernet
40 | saved_params_previews = {
41 | "preview_cfg_scale",
42 | "preview_height",
43 | "preview_negative_prompt",
44 | "preview_prompt",
45 | "preview_sampler_index",
46 | "preview_seed",
47 | "preview_steps",
48 | "preview_width",
49 | }
50 |
51 |
52 | def save_settings_to_file(log_directory, all_params):
53 | now = datetime.datetime.now()
54 | params = {"datetime": now.strftime("%Y-%m-%d %H:%M:%S")}
55 |
56 | keys = saved_params_all
57 | if all_params.get('preview_from_txt2img'):
58 | keys = keys | saved_params_previews
59 |
60 | params.update({k: v for k, v in all_params.items() if k in keys})
61 |
62 | filename = f'settings-{now.strftime("%Y-%m-%d-%H-%M-%S")}.json'
63 | with open(os.path.join(log_directory, filename), "w") as file:
64 | json.dump(params, file, indent=4)
65 |
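A minimal sketch, with made-up parameters, of the key filtering in save_settings_to_file: the set union decides which keys survive into the JSON file.

    saved_params_all = {"learn_rate", "steps"}
    saved_params_previews = {"preview_prompt"}

    all_params = {"learn_rate": 0.005, "steps": 100, "preview_prompt": "a cat", "extra": "dropped"}

    keys = saved_params_all
    if all_params.get("preview_prompt"):  # stand-in for the 'preview_from_txt2img' check
        keys = keys | saved_params_previews

    print({k: v for k, v in all_params.items() if k in keys})
    # {'learn_rate': 0.005, 'steps': 100, 'preview_prompt': 'a cat'}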
--------------------------------------------------------------------------------
/extensions-builtin/prompt-bracket-checker/javascript/prompt-bracket-checker.js:
--------------------------------------------------------------------------------
1 | // Stable Diffusion WebUI - Bracket checker
2 | // By Hingashi no Florin/Bwin4L & @akx
3 | // Counts open and closed brackets (round, square, curly) in the prompt and negative prompt text boxes in the txt2img and img2img tabs.
4 | // If there's a mismatch, the keyword counter turns red and if you hover on it, a tooltip tells you what's wrong.
5 |
6 | function checkBrackets(textArea, counterElt) {
7 | var counts = {};
8 | (textArea.value.match(/[(){}[\]]/g) || []).forEach(bracket => {
9 | counts[bracket] = (counts[bracket] || 0) + 1;
10 | });
11 | var errors = [];
12 |
13 | function checkPair(open, close, kind) {
14 | if (counts[open] !== counts[close]) {
15 | errors.push(
16 | `${open}...${close} - Detected ${counts[open] || 0} opening and ${counts[close] || 0} closing ${kind}.`
17 | );
18 | }
19 | }
20 |
21 | checkPair('(', ')', 'round brackets');
22 | checkPair('[', ']', 'square brackets');
23 | checkPair('{', '}', 'curly brackets');
24 | counterElt.title = errors.join('\n');
25 | counterElt.classList.toggle('error', errors.length !== 0);
26 | }
27 |
28 | function setupBracketChecking(id_prompt, id_counter) {
29 | var textarea = gradioApp().querySelector("#" + id_prompt + " > label > textarea");
30 | var counter = gradioApp().getElementById(id_counter);
31 |
32 | if (textarea && counter) {
33 | textarea.addEventListener("input", () => checkBrackets(textarea, counter));
34 | }
35 | }
36 |
37 | onUiLoaded(function() {
38 | setupBracketChecking('txt2img_prompt', 'txt2img_token_counter');
39 | setupBracketChecking('txt2img_neg_prompt', 'txt2img_negative_token_counter');
40 | setupBracketChecking('img2img_prompt', 'img2img_token_counter');
41 | setupBracketChecking('img2img_neg_prompt', 'img2img_negative_token_counter');
42 | });
43 |
--------------------------------------------------------------------------------
/modules/ui_extra_networks_checkpoints.py:
--------------------------------------------------------------------------------
1 | import html
2 | import os
3 |
4 | from modules import shared, ui_extra_networks, sd_models
5 | from modules.ui_extra_networks import quote_js
6 | from modules.ui_extra_networks_checkpoints_user_metadata import CheckpointUserMetadataEditor
7 |
8 |
9 | class ExtraNetworksPageCheckpoints(ui_extra_networks.ExtraNetworksPage):
10 | def __init__(self):
11 | super().__init__('Checkpoints')
12 |
13 | def refresh(self):
14 | shared.refresh_checkpoints()
15 |
16 | def create_item(self, name, index=None, enable_filter=True):
17 | checkpoint: sd_models.CheckpointInfo = sd_models.checkpoint_aliases.get(name)
18 | path, ext = os.path.splitext(checkpoint.filename)
19 | return {
20 | "name": checkpoint.name_for_extra,
21 | "filename": checkpoint.filename,
22 | "shorthash": checkpoint.shorthash,
23 | "preview": self.find_preview(path),
24 | "description": self.find_description(path),
25 | "search_term": self.search_terms_from_path(checkpoint.filename) + " " + (checkpoint.sha256 or ""),
26 | "onclick": '"' + html.escape(f"""return selectCheckpoint({quote_js(name)})""") + '"',
27 | "local_preview": f"{path}.{shared.opts.samples_format}",
28 | "metadata": checkpoint.metadata,
29 | "sort_keys": {'default': index, **self.get_sort_keys(checkpoint.filename)},
30 | }
31 |
32 | def list_items(self):
33 | names = list(sd_models.checkpoints_list)
34 | for index, name in enumerate(names):
35 | yield self.create_item(name, index)
36 |
37 | def allowed_directories_for_previews(self):
38 | return [v for v in [shared.cmd_opts.ckpt_dir, sd_models.model_path] if v is not None]
39 |
40 | def create_user_metadata_editor(self, ui, tabname):
41 | return CheckpointUserMetadataEditor(ui, tabname, self)
42 |
--------------------------------------------------------------------------------
/modules/patches.py:
--------------------------------------------------------------------------------
1 | from collections import defaultdict
2 |
3 |
4 | def patch(key, obj, field, replacement):
5 | """Replaces a function in a module or a class.
6 |
7 |     Also stores the original function in this module, so it can be retrieved via original(key, obj, field).
8 | If the function is already replaced by this caller (key), an exception is raised -- use undo() before that.
9 |
10 | Arguments:
11 | key: identifying information for who is doing the replacement. You can use __name__.
12 | obj: the module or the class
13 | field: name of the function as a string
14 | replacement: the new function
15 |
16 | Returns:
17 | the original function
18 | """
19 |
20 | patch_key = (obj, field)
21 | if patch_key in originals[key]:
22 | raise RuntimeError(f"patch for {field} is already applied")
23 |
24 | original_func = getattr(obj, field)
25 | originals[key][patch_key] = original_func
26 |
27 | setattr(obj, field, replacement)
28 |
29 | return original_func
30 |
31 |
32 | def undo(key, obj, field):
33 |     """Undoes the replacement by the patch().
34 |
35 | If the function is not replaced, raises an exception.
36 |
37 | Arguments:
38 | key: identifying information for who is doing the replacement. You can use __name__.
39 | obj: the module or the class
40 | field: name of the function as a string
41 |
42 | Returns:
43 | Always None
44 | """
45 |
46 | patch_key = (obj, field)
47 |
48 | if patch_key not in originals[key]:
49 | raise RuntimeError(f"there is no patch for {field} to undo")
50 |
51 | original_func = originals[key].pop(patch_key)
52 | setattr(obj, field, original_func)
53 |
54 | return None
55 |
56 |
57 | def original(key, obj, field):
58 | """Returns the original function for the patch created by the patch() function"""
59 | patch_key = (obj, field)
60 |
61 | return originals[key].get(patch_key, None)
62 |
63 |
64 | originals = defaultdict(dict)
65 |
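A hypothetical use of the patch/undo/original API above; json.dumps is targeted purely for illustration, and the import assumes the repo's modules package is on the path:

    import json

    from modules import patches

    original_dumps = patches.patch(__name__, json, "dumps", lambda *a, **kw: "patched")
    assert json.dumps({}) == "patched"
    assert patches.original(__name__, json, "dumps") is original_dumps

    patches.undo(__name__, json, "dumps")
    assert json.dumps({}) == "{}"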
--------------------------------------------------------------------------------
/javascript/imageviewerGamepad.js:
--------------------------------------------------------------------------------
1 | let gamepads = [];
2 |
3 | window.addEventListener('gamepadconnected', (e) => {
4 | const index = e.gamepad.index;
5 | let isWaiting = false;
6 | gamepads[index] = setInterval(async() => {
7 | if (!opts.js_modal_lightbox_gamepad || isWaiting) return;
8 | const gamepad = navigator.getGamepads()[index];
9 | const xValue = gamepad.axes[0];
10 | if (xValue <= -0.3) {
11 | modalPrevImage(e);
12 | isWaiting = true;
13 | } else if (xValue >= 0.3) {
14 | modalNextImage(e);
15 | isWaiting = true;
16 | }
17 | if (isWaiting) {
18 | await sleepUntil(() => {
19 | const xValue = navigator.getGamepads()[index].axes[0];
20 | if (xValue < 0.3 && xValue > -0.3) {
21 | return true;
22 | }
23 | }, opts.js_modal_lightbox_gamepad_repeat);
24 | isWaiting = false;
25 | }
26 | }, 10);
27 | });
28 |
29 | window.addEventListener('gamepaddisconnected', (e) => {
30 | clearInterval(gamepads[e.gamepad.index]);
31 | });
32 |
33 | /*
34 | Primarily for vr controller type pointer devices.
35 | I use the wheel event because there's currently no way to do it properly with web xr.
36 | */
37 | let isScrolling = false;
38 | window.addEventListener('wheel', (e) => {
39 | if (!opts.js_modal_lightbox_gamepad || isScrolling) return;
40 | isScrolling = true;
41 |
42 | if (e.deltaX <= -0.6) {
43 | modalPrevImage(e);
44 | } else if (e.deltaX >= 0.6) {
45 | modalNextImage(e);
46 | }
47 |
48 | setTimeout(() => {
49 | isScrolling = false;
50 | }, opts.js_modal_lightbox_gamepad_repeat);
51 | });
52 |
53 | function sleepUntil(f, timeout) {
54 | return new Promise((resolve) => {
55 | const timeStart = new Date();
56 | const wait = setInterval(function() {
57 | if (f() || new Date() - timeStart > timeout) {
58 | clearInterval(wait);
59 | resolve();
60 | }
61 | }, 20);
62 | });
63 | }
64 |
--------------------------------------------------------------------------------
/modules/shared_init.py:
--------------------------------------------------------------------------------
1 | import os
2 |
3 | import torch
4 |
5 | from modules import shared
6 | from modules.shared import cmd_opts
7 |
8 |
9 | def initialize():
10 | """Initializes fields inside the shared module in a controlled manner.
11 |
12 |     Should be called early because some other modules you can import might need these fields to be already set.
13 | """
14 |
15 | os.makedirs(cmd_opts.hypernetwork_dir, exist_ok=True)
16 |
17 | from modules import options, shared_options
18 | shared.options_templates = shared_options.options_templates
19 | shared.opts = options.Options(shared_options.options_templates, shared_options.restricted_opts)
20 | shared.restricted_opts = shared_options.restricted_opts
21 | if os.path.exists(shared.config_filename):
22 | shared.opts.load(shared.config_filename)
23 |
24 | from modules import devices
25 | devices.device, devices.device_interrogate, devices.device_gfpgan, devices.device_esrgan, devices.device_codeformer = \
26 | (devices.cpu if any(y in cmd_opts.use_cpu for y in [x, 'all']) else devices.get_optimal_device() for x in ['sd', 'interrogate', 'gfpgan', 'esrgan', 'codeformer'])
27 |
28 | devices.dtype = torch.float32 if cmd_opts.no_half else torch.float16
29 | devices.dtype_vae = torch.float32 if cmd_opts.no_half or cmd_opts.no_half_vae else torch.float16
30 |
31 | shared.device = devices.device
32 | shared.weight_load_location = None if cmd_opts.lowram else "cpu"
33 |
34 | from modules import shared_state
35 | shared.state = shared_state.State()
36 |
37 | from modules import styles
38 | shared.prompt_styles = styles.StyleDatabase(shared.styles_filename)
39 |
40 | from modules import interrogate
41 | shared.interrogator = interrogate.InterrogateModels("interrogate")
42 |
43 | from modules import shared_total_tqdm
44 | shared.total_tqdm = shared_total_tqdm.TotalTQDM()
45 |
46 | from modules import memmon, devices
47 | shared.mem_mon = memmon.MemUsageMonitor("MemMon", devices.device, shared.opts)
48 | shared.mem_mon.start()
49 |
50 |
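A toy sketch of the per-component device selection in initialize(): a component falls back to CPU when either its own name or 'all' appears in the --use-cpu list.

    use_cpu = ["esrgan"]  # as if started with --use-cpu esrgan
    names = ["sd", "interrogate", "gfpgan", "esrgan", "codeformer"]

    chosen = ["cpu" if any(y in use_cpu for y in [x, "all"]) else "gpu" for x in names]
    print(dict(zip(names, chosen)))
    # {'sd': 'gpu', 'interrogate': 'gpu', 'gfpgan': 'gpu', 'esrgan': 'cpu', 'codeformer': 'gpu'}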
--------------------------------------------------------------------------------
/configs/v1-inference.yaml:
--------------------------------------------------------------------------------
1 | model:
2 | base_learning_rate: 1.0e-04
3 | target: ldm.models.diffusion.ddpm.LatentDiffusion
4 | params:
5 | linear_start: 0.00085
6 | linear_end: 0.0120
7 | num_timesteps_cond: 1
8 | log_every_t: 200
9 | timesteps: 1000
10 | first_stage_key: "jpg"
11 | cond_stage_key: "txt"
12 | image_size: 64
13 | channels: 4
14 | cond_stage_trainable: false # Note: different from the one we trained before
15 | conditioning_key: crossattn
16 | monitor: val/loss_simple_ema
17 | scale_factor: 0.18215
18 | use_ema: False
19 |
20 | scheduler_config: # 10000 warmup steps
21 | target: ldm.lr_scheduler.LambdaLinearScheduler
22 | params:
23 | warm_up_steps: [ 10000 ]
24 | cycle_lengths: [ 10000000000000 ] # incredibly large number to prevent corner cases
25 | f_start: [ 1.e-6 ]
26 | f_max: [ 1. ]
27 | f_min: [ 1. ]
28 |
29 | unet_config:
30 | target: ldm.modules.diffusionmodules.openaimodel.UNetModel
31 | params:
32 | image_size: 32 # unused
33 | in_channels: 4
34 | out_channels: 4
35 | model_channels: 320
36 | attention_resolutions: [ 4, 2, 1 ]
37 | num_res_blocks: 2
38 | channel_mult: [ 1, 2, 4, 4 ]
39 | num_heads: 8
40 | use_spatial_transformer: True
41 | transformer_depth: 1
42 | context_dim: 768
43 | use_checkpoint: True
44 | legacy: False
45 |
46 | first_stage_config:
47 | target: ldm.models.autoencoder.AutoencoderKL
48 | params:
49 | embed_dim: 4
50 | monitor: val/rec_loss
51 | ddconfig:
52 | double_z: true
53 | z_channels: 4
54 | resolution: 256
55 | in_channels: 3
56 | out_ch: 3
57 | ch: 128
58 | ch_mult:
59 | - 1
60 | - 2
61 | - 4
62 | - 4
63 | num_res_blocks: 2
64 | attn_resolutions: []
65 | dropout: 0.0
66 | lossconfig:
67 | target: torch.nn.Identity
68 |
69 | cond_stage_config:
70 | target: ldm.modules.encoders.modules.FrozenCLIPEmbedder
71 |
--------------------------------------------------------------------------------
/configs/alt-diffusion-inference.yaml:
--------------------------------------------------------------------------------
1 | model:
2 | base_learning_rate: 1.0e-04
3 | target: ldm.models.diffusion.ddpm.LatentDiffusion
4 | params:
5 | linear_start: 0.00085
6 | linear_end: 0.0120
7 | num_timesteps_cond: 1
8 | log_every_t: 200
9 | timesteps: 1000
10 | first_stage_key: "jpg"
11 | cond_stage_key: "txt"
12 | image_size: 64
13 | channels: 4
14 | cond_stage_trainable: false # Note: different from the one we trained before
15 | conditioning_key: crossattn
16 | monitor: val/loss_simple_ema
17 | scale_factor: 0.18215
18 | use_ema: False
19 |
20 | scheduler_config: # 10000 warmup steps
21 | target: ldm.lr_scheduler.LambdaLinearScheduler
22 | params:
23 | warm_up_steps: [ 10000 ]
24 | cycle_lengths: [ 10000000000000 ] # incredibly large number to prevent corner cases
25 | f_start: [ 1.e-6 ]
26 | f_max: [ 1. ]
27 | f_min: [ 1. ]
28 |
29 | unet_config:
30 | target: ldm.modules.diffusionmodules.openaimodel.UNetModel
31 | params:
32 | image_size: 32 # unused
33 | in_channels: 4
34 | out_channels: 4
35 | model_channels: 320
36 | attention_resolutions: [ 4, 2, 1 ]
37 | num_res_blocks: 2
38 | channel_mult: [ 1, 2, 4, 4 ]
39 | num_heads: 8
40 | use_spatial_transformer: True
41 | transformer_depth: 1
42 | context_dim: 768
43 | use_checkpoint: True
44 | legacy: False
45 |
46 | first_stage_config:
47 | target: ldm.models.autoencoder.AutoencoderKL
48 | params:
49 | embed_dim: 4
50 | monitor: val/rec_loss
51 | ddconfig:
52 | double_z: true
53 | z_channels: 4
54 | resolution: 256
55 | in_channels: 3
56 | out_ch: 3
57 | ch: 128
58 | ch_mult:
59 | - 1
60 | - 2
61 | - 4
62 | - 4
63 | num_res_blocks: 2
64 | attn_resolutions: []
65 | dropout: 0.0
66 | lossconfig:
67 | target: torch.nn.Identity
68 |
69 | cond_stage_config:
70 | target: modules.xlmr.BertSeriesModelWithTransformation
71 | params:
72 | name: "XLMR-Large"
--------------------------------------------------------------------------------
/configs/v1-inpainting-inference.yaml:
--------------------------------------------------------------------------------
1 | model:
2 | base_learning_rate: 7.5e-05
3 | target: ldm.models.diffusion.ddpm.LatentInpaintDiffusion
4 | params:
5 | linear_start: 0.00085
6 | linear_end: 0.0120
7 | num_timesteps_cond: 1
8 | log_every_t: 200
9 | timesteps: 1000
10 | first_stage_key: "jpg"
11 | cond_stage_key: "txt"
12 | image_size: 64
13 | channels: 4
14 | cond_stage_trainable: false # Note: different from the one we trained before
15 | conditioning_key: hybrid # important
16 | monitor: val/loss_simple_ema
17 | scale_factor: 0.18215
18 | finetune_keys: null
19 |
20 | scheduler_config: # 10000 warmup steps
21 | target: ldm.lr_scheduler.LambdaLinearScheduler
22 | params:
23 | warm_up_steps: [ 2500 ] # NOTE for resuming. use 10000 if starting from scratch
24 | cycle_lengths: [ 10000000000000 ] # incredibly large number to prevent corner cases
25 | f_start: [ 1.e-6 ]
26 | f_max: [ 1. ]
27 | f_min: [ 1. ]
28 |
29 | unet_config:
30 | target: ldm.modules.diffusionmodules.openaimodel.UNetModel
31 | params:
32 | image_size: 32 # unused
33 | in_channels: 9 # 4 data + 4 downscaled image + 1 mask
34 | out_channels: 4
35 | model_channels: 320
36 | attention_resolutions: [ 4, 2, 1 ]
37 | num_res_blocks: 2
38 | channel_mult: [ 1, 2, 4, 4 ]
39 | num_heads: 8
40 | use_spatial_transformer: True
41 | transformer_depth: 1
42 | context_dim: 768
43 | use_checkpoint: True
44 | legacy: False
45 |
46 | first_stage_config:
47 | target: ldm.models.autoencoder.AutoencoderKL
48 | params:
49 | embed_dim: 4
50 | monitor: val/rec_loss
51 | ddconfig:
52 | double_z: true
53 | z_channels: 4
54 | resolution: 256
55 | in_channels: 3
56 | out_ch: 3
57 | ch: 128
58 | ch_mult:
59 | - 1
60 | - 2
61 | - 4
62 | - 4
63 | num_res_blocks: 2
64 | attn_resolutions: []
65 | dropout: 0.0
66 | lossconfig:
67 | target: torch.nn.Identity
68 |
69 | cond_stage_config:
70 | target: ldm.modules.encoders.modules.FrozenCLIPEmbedder
71 |
--------------------------------------------------------------------------------
/extensions-builtin/Lora/network_hada.py:
--------------------------------------------------------------------------------
1 | import lyco_helpers
2 | import network
3 |
4 |
5 | class ModuleTypeHada(network.ModuleType):
6 | def create_module(self, net: network.Network, weights: network.NetworkWeights):
7 | if all(x in weights.w for x in ["hada_w1_a", "hada_w1_b", "hada_w2_a", "hada_w2_b"]):
8 | return NetworkModuleHada(net, weights)
9 |
10 | return None
11 |
12 |
13 | class NetworkModuleHada(network.NetworkModule):
14 | def __init__(self, net: network.Network, weights: network.NetworkWeights):
15 | super().__init__(net, weights)
16 |
17 | if hasattr(self.sd_module, 'weight'):
18 | self.shape = self.sd_module.weight.shape
19 |
20 | self.w1a = weights.w["hada_w1_a"]
21 | self.w1b = weights.w["hada_w1_b"]
22 | self.dim = self.w1b.shape[0]
23 | self.w2a = weights.w["hada_w2_a"]
24 | self.w2b = weights.w["hada_w2_b"]
25 |
26 | self.t1 = weights.w.get("hada_t1")
27 | self.t2 = weights.w.get("hada_t2")
28 |
29 | def calc_updown(self, orig_weight):
30 | w1a = self.w1a.to(orig_weight.device, dtype=orig_weight.dtype)
31 | w1b = self.w1b.to(orig_weight.device, dtype=orig_weight.dtype)
32 | w2a = self.w2a.to(orig_weight.device, dtype=orig_weight.dtype)
33 | w2b = self.w2b.to(orig_weight.device, dtype=orig_weight.dtype)
34 |
35 | output_shape = [w1a.size(0), w1b.size(1)]
36 |
37 | if self.t1 is not None:
38 | output_shape = [w1a.size(1), w1b.size(1)]
39 | t1 = self.t1.to(orig_weight.device, dtype=orig_weight.dtype)
40 | updown1 = lyco_helpers.make_weight_cp(t1, w1a, w1b)
41 | output_shape += t1.shape[2:]
42 | else:
43 | if len(w1b.shape) == 4:
44 | output_shape += w1b.shape[2:]
45 | updown1 = lyco_helpers.rebuild_conventional(w1a, w1b, output_shape)
46 |
47 | if self.t2 is not None:
48 | t2 = self.t2.to(orig_weight.device, dtype=orig_weight.dtype)
49 | updown2 = lyco_helpers.make_weight_cp(t2, w2a, w2b)
50 | else:
51 | updown2 = lyco_helpers.rebuild_conventional(w2a, w2b, output_shape)
52 |
53 | updown = updown1 * updown2
54 |
55 | return self.finalize_updown(updown, orig_weight, output_shape)
56 |
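A numeric sketch of the update in the plain (non-CP) branch of calc_updown above: two low-rank factor pairs are each rebuilt to the full weight shape and then combined elementwise, the Hadamard product that gives the method its name.

    import torch

    out_dim, in_dim, rank = 6, 4, 2
    w1a, w1b = torch.randn(out_dim, rank), torch.randn(rank, in_dim)
    w2a, w2b = torch.randn(out_dim, rank), torch.randn(rank, in_dim)

    updown = (w1a @ w1b) * (w2a @ w2b)  # elementwise product of two rank-2 updates
    print(updown.shape)  # torch.Size([6, 4]), same as the target weight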
--------------------------------------------------------------------------------
/modules/processing_scripts/refiner.py:
--------------------------------------------------------------------------------
1 | import gradio as gr
2 |
3 | from modules import scripts, sd_models
4 | from modules.ui_common import create_refresh_button
5 | from modules.ui_components import InputAccordion
6 |
7 |
8 | class ScriptRefiner(scripts.ScriptBuiltinUI):
9 | section = "accordions"
10 | create_group = False
11 |
12 | def __init__(self):
13 | pass
14 |
15 | def title(self):
16 | return "Refiner"
17 |
18 | def show(self, is_img2img):
19 | return scripts.AlwaysVisible
20 |
21 | def ui(self, is_img2img):
22 | with InputAccordion(False, label="Refiner", elem_id=self.elem_id("enable")) as enable_refiner:
23 | with gr.Row():
24 | refiner_checkpoint = gr.Dropdown(label='Checkpoint', elem_id=self.elem_id("checkpoint"), choices=sd_models.checkpoint_tiles(), value='', tooltip="switch to another model in the middle of generation")
25 | create_refresh_button(refiner_checkpoint, sd_models.list_models, lambda: {"choices": sd_models.checkpoint_tiles()}, self.elem_id("checkpoint_refresh"))
26 |
27 | refiner_switch_at = gr.Slider(value=0.8, label="Switch at", minimum=0.01, maximum=1.0, step=0.01, elem_id=self.elem_id("switch_at"), tooltip="fraction of sampling steps when the switch to refiner model should happen; 1=never, 0.5=switch in the middle of generation")
28 |
29 | def lookup_checkpoint(title):
30 | info = sd_models.get_closet_checkpoint_match(title)
31 | return None if info is None else info.title
32 |
33 | self.infotext_fields = [
34 | (enable_refiner, lambda d: 'Refiner' in d),
35 | (refiner_checkpoint, lambda d: lookup_checkpoint(d.get('Refiner'))),
36 | (refiner_switch_at, 'Refiner switch at'),
37 | ]
38 |
39 | return enable_refiner, refiner_checkpoint, refiner_switch_at
40 |
41 | def setup(self, p, enable_refiner, refiner_checkpoint, refiner_switch_at):
42 | # the actual implementation is in sd_samplers_common.py, apply_refiner
43 |
44 | if not enable_refiner or refiner_checkpoint in (None, "", "None"):
45 | p.refiner_checkpoint = None
46 | p.refiner_switch_at = None
47 | else:
48 | p.refiner_checkpoint = refiner_checkpoint
49 | p.refiner_switch_at = refiner_switch_at
50 |
--------------------------------------------------------------------------------
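refiner.py above only records the user's choices on the processing object; as its comment notes, the model swap itself happens mid-sampling in sd_samplers_common.py's apply_refiner. A hedged sketch of driving the same feature over the web API, assuming the txt2img endpoint accepts the same field names as the processing attributes set in setup() (the checkpoint title and server URL are illustrative):

import requests

payload = {
    "prompt": "example prompt",
    "steps": 20,
    "refiner_checkpoint": "sd_xl_refiner_1.0.safetensors",  # hypothetical title
    "refiner_switch_at": 0.8,  # hand off to the refiner at 80% of the steps
}

r = requests.post("http://127.0.0.1:7860/sdapi/v1/txt2img", json=payload)
r.raise_for_status()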
/test/test_img2img.py:
--------------------------------------------------------------------------------
1 |
2 | import pytest
3 | import requests
4 |
5 |
6 | @pytest.fixture()
7 | def url_img2img(base_url):
8 | return f"{base_url}/sdapi/v1/img2img"
9 |
10 |
11 | @pytest.fixture()
12 | def simple_img2img_request(img2img_basic_image_base64):
13 | return {
14 | "batch_size": 1,
15 | "cfg_scale": 7,
16 | "denoising_strength": 0.75,
17 | "eta": 0,
18 | "height": 64,
19 | "include_init_images": False,
20 | "init_images": [img2img_basic_image_base64],
21 | "inpaint_full_res": False,
22 | "inpaint_full_res_padding": 0,
23 | "inpainting_fill": 0,
24 | "inpainting_mask_invert": False,
25 | "mask": None,
26 | "mask_blur": 4,
27 | "n_iter": 1,
28 | "negative_prompt": "",
29 | "override_settings": {},
30 | "prompt": "example prompt",
31 | "resize_mode": 0,
32 | "restore_faces": False,
33 | "s_churn": 0,
34 | "s_noise": 1,
35 | "s_tmax": 0,
36 | "s_tmin": 0,
37 | "sampler_index": "Euler a",
38 | "seed": -1,
39 | "seed_resize_from_h": -1,
40 | "seed_resize_from_w": -1,
41 | "steps": 3,
42 | "styles": [],
43 | "subseed": -1,
44 | "subseed_strength": 0,
45 | "tiling": False,
46 | "width": 64,
47 | }
48 |
49 |
50 | def test_img2img_simple_performed(url_img2img, simple_img2img_request):
51 | assert requests.post(url_img2img, json=simple_img2img_request).status_code == 200
52 |
53 |
54 | def test_inpainting_masked_performed(url_img2img, simple_img2img_request, mask_basic_image_base64):
55 | simple_img2img_request["mask"] = mask_basic_image_base64
56 | assert requests.post(url_img2img, json=simple_img2img_request).status_code == 200
57 |
58 |
59 | def test_inpainting_with_inverted_masked_performed(url_img2img, simple_img2img_request, mask_basic_image_base64):
60 | simple_img2img_request["mask"] = mask_basic_image_base64
61 | simple_img2img_request["inpainting_mask_invert"] = True
62 | assert requests.post(url_img2img, json=simple_img2img_request).status_code == 200
63 |
64 |
65 | def test_img2img_sd_upscale_performed(url_img2img, simple_img2img_request):
66 | simple_img2img_request["script_name"] = "sd upscale"
67 | simple_img2img_request["script_args"] = ["", 8, "Lanczos", 2.0]
68 | assert requests.post(url_img2img, json=simple_img2img_request).status_code == 200
69 |
--------------------------------------------------------------------------------
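The tests above run against a live server (the base_url fixture comes from conftest.py), so the suite exercises the real /sdapi/v1/img2img endpoint rather than mocks. A self-contained sketch of the same round trip, assuming a local instance started with --api (the URL and image size are illustrative):

import base64
import io

import requests
from PIL import Image

# encode a 64x64 black PNG, much as the img2img_basic_image_base64 fixture would
buf = io.BytesIO()
Image.new("RGB", (64, 64)).save(buf, format="PNG")
init_image = base64.b64encode(buf.getvalue()).decode()

payload = {
    "prompt": "example prompt",
    "init_images": [init_image],
    "denoising_strength": 0.75,
    "steps": 3,
    "width": 64,
    "height": 64,
}

r = requests.post("http://127.0.0.1:7860/sdapi/v1/img2img", json=payload)
r.raise_for_status()
png_bytes = base64.b64decode(r.json()["images"][0])  # first generated image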
/modules/hashes.py:
--------------------------------------------------------------------------------
1 | import hashlib
2 | import os.path
3 |
4 | from modules import shared
5 | import modules.cache
6 |
7 | dump_cache = modules.cache.dump_cache
8 | cache = modules.cache.cache
9 |
10 |
11 | def calculate_sha256(filename):
12 | hash_sha256 = hashlib.sha256()
13 | blksize = 1024 * 1024
14 |
15 | with open(filename, "rb") as f:
16 | for chunk in iter(lambda: f.read(blksize), b""):
17 | hash_sha256.update(chunk)
18 |
19 | return hash_sha256.hexdigest()
20 |
21 |
22 | def sha256_from_cache(filename, title, use_addnet_hash=False):
23 | hashes = cache("hashes-addnet") if use_addnet_hash else cache("hashes")
24 | ondisk_mtime = os.path.getmtime(filename)
25 |
26 | if title not in hashes:
27 | return None
28 |
29 | cached_sha256 = hashes[title].get("sha256", None)
30 | cached_mtime = hashes[title].get("mtime", 0)
31 |
32 | if ondisk_mtime > cached_mtime or cached_sha256 is None:
33 | return None
34 |
35 | return cached_sha256
36 |
37 |
38 | def sha256(filename, title, use_addnet_hash=False):
39 | hashes = cache("hashes-addnet") if use_addnet_hash else cache("hashes")
40 |
41 | sha256_value = sha256_from_cache(filename, title, use_addnet_hash)
42 | if sha256_value is not None:
43 | return sha256_value
44 |
45 | if shared.cmd_opts.no_hashing:
46 | return None
47 |
48 | print(f"Calculating sha256 for {filename}: ", end='')
49 | if use_addnet_hash:
50 | with open(filename, "rb") as file:
51 | sha256_value = addnet_hash_safetensors(file)
52 | else:
53 | sha256_value = calculate_sha256(filename)
54 | print(f"{sha256_value}")
55 |
56 | hashes[title] = {
57 | "mtime": os.path.getmtime(filename),
58 | "sha256": sha256_value,
59 | }
60 |
61 | dump_cache()
62 |
63 | return sha256_value
64 |
65 |
66 | def addnet_hash_safetensors(b):
67 | """kohya-ss hash for safetensors from https://github.com/kohya-ss/sd-scripts/blob/main/library/train_util.py"""
68 | hash_sha256 = hashlib.sha256()
69 | blksize = 1024 * 1024
70 |
71 | b.seek(0)
72 | header = b.read(8)
73 | n = int.from_bytes(header, "little")
74 |
75 | offset = n + 8
76 | b.seek(offset)
77 | for chunk in iter(lambda: b.read(blksize), b""):
78 | hash_sha256.update(chunk)
79 |
80 | return hash_sha256.hexdigest()
81 |
82 |
--------------------------------------------------------------------------------
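addnet_hash_safetensors above relies on the safetensors layout: the file begins with an 8-byte little-endian integer n, followed by an n-byte JSON header, followed by the raw tensor data. Hashing only the data region keeps the hash stable when header metadata changes. A usage sketch (the file path is hypothetical; the 12-character short form matches how addnet-style tools commonly display hashes):

from modules.hashes import addnet_hash_safetensors

with open("models/Lora/example.safetensors", "rb") as f:
    payload_hash = addnet_hash_safetensors(f)

print(payload_hash)       # sha256 over the tensor data only, header skipped
print(payload_hash[:12])  # common short form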
/modules/shared_gradio_themes.py:
--------------------------------------------------------------------------------
1 | import os
2 |
3 | import gradio as gr
4 |
5 | from modules import errors, shared
6 | from modules.paths_internal import script_path
7 |
8 |
9 | # https://huggingface.co/datasets/freddyaboulton/gradio-theme-subdomains/resolve/main/subdomains.json
10 | gradio_hf_hub_themes = [
11 | "gradio/base",
12 | "gradio/glass",
13 | "gradio/monochrome",
14 | "gradio/seafoam",
15 | "gradio/soft",
16 | "gradio/dracula_test",
17 | "abidlabs/dracula_test",
18 | "abidlabs/Lime",
19 | "abidlabs/pakistan",
20 | "Ama434/neutral-barlow",
21 | "dawood/microsoft_windows",
22 | "finlaymacklon/smooth_slate",
23 | "Franklisi/darkmode",
24 | "freddyaboulton/dracula_revamped",
25 | "freddyaboulton/test-blue",
26 | "gstaff/xkcd",
27 | "Insuz/Mocha",
28 | "Insuz/SimpleIndigo",
29 | "JohnSmith9982/small_and_pretty",
30 | "nota-ai/theme",
31 | "nuttea/Softblue",
32 | "ParityError/Anime",
33 | "reilnuud/polite",
34 | "remilia/Ghostly",
35 | "rottenlittlecreature/Moon_Goblin",
36 | "step-3-profit/Midnight-Deep",
37 | "Taithrah/Minimal",
38 | "ysharma/huggingface",
39 | "ysharma/steampunk",
40 | "NoCrypt/miku"
41 | ]
42 |
43 |
44 | def reload_gradio_theme(theme_name=None):
45 | if not theme_name:
46 | theme_name = shared.opts.gradio_theme
47 |
48 | default_theme_args = dict(
49 | font=["Source Sans Pro", 'ui-sans-serif', 'system-ui', 'sans-serif'],
50 | font_mono=['IBM Plex Mono', 'ui-monospace', 'Consolas', 'monospace'],
51 | )
52 |
53 | if theme_name == "Default":
54 | shared.gradio_theme = gr.themes.Default(**default_theme_args)
55 | else:
56 | try:
57 | theme_cache_dir = os.path.join(script_path, 'tmp', 'gradio_themes')
58 | theme_cache_path = os.path.join(theme_cache_dir, f'{theme_name.replace("/", "_")}.json')
59 | if shared.opts.gradio_themes_cache and os.path.exists(theme_cache_path):
60 | shared.gradio_theme = gr.themes.ThemeClass.load(theme_cache_path)
61 | else:
62 | os.makedirs(theme_cache_dir, exist_ok=True)
63 | shared.gradio_theme = gr.themes.ThemeClass.from_hub(theme_name)
64 | shared.gradio_theme.dump(theme_cache_path)
65 | except Exception as e:
66 | errors.display(e, "changing gradio theme")
67 | shared.gradio_theme = gr.themes.Default(**default_theme_args)
68 |
--------------------------------------------------------------------------------
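reload_gradio_theme above caches Hub themes as JSON under tmp/gradio_themes so later startups can skip the network fetch. The same cache-or-fetch pattern, reduced to standalone Gradio calls (gr.themes.ThemeClass.load / .from_hub / .dump are the same APIs used above; the paths are illustrative):

import os

import gradio as gr

theme_name = "gradio/soft"
cache_path = os.path.join("tmp", "gradio_themes", theme_name.replace("/", "_") + ".json")

if os.path.exists(cache_path):
    theme = gr.themes.ThemeClass.load(cache_path)      # offline: reuse cached JSON
else:
    os.makedirs(os.path.dirname(cache_path), exist_ok=True)
    theme = gr.themes.ThemeClass.from_hub(theme_name)  # one-time network fetch
    theme.dump(cache_path)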
/modules/ui_gradio_extensions.py:
--------------------------------------------------------------------------------
1 | import os
2 | import gradio as gr
3 |
4 | from modules import localization, shared, scripts
5 | from modules.paths import script_path, data_path
6 |
7 |
8 | def webpath(fn):
9 | if fn.startswith(script_path):
10 | web_path = os.path.relpath(fn, script_path).replace('\\', '/')
11 | else:
12 | web_path = os.path.abspath(fn)
13 |
14 | return f'file={web_path}?{os.path.getmtime(fn)}'
15 |
16 |
17 | def javascript_html():
18 | # Ensure localization is in `window` before scripts
19 |     head = f'<script type="text/javascript">{localization.localization_js(shared.opts.localization)}</script>\n'
20 |
21 | script_js = os.path.join(script_path, "script.js")
22 |     head += f'<script type="text/javascript" src="{webpath(script_js)}"></script>\n'
23 |
24 | for script in scripts.list_scripts("javascript", ".js"):
25 |         head += f'<script type="text/javascript" src="{webpath(script.path)}"></script>\n'
26 |
27 | for script in scripts.list_scripts("javascript", ".mjs"):
28 |         head += f'<script type="module" src="{webpath(script.path)}"></script>\n'
29 |
30 | if shared.cmd_opts.theme:
31 |         head += f'<script type="text/javascript">set_theme(\"{shared.cmd_opts.theme}\");</script>\n'
32 |
33 | return head
34 |
35 |
36 | def css_html():
37 | head = ""
38 |
39 | def stylesheet(fn):
40 |         return f'<link rel="stylesheet" property="stylesheet" href="{webpath(fn)}">'
41 |
42 | for cssfile in scripts.list_files_with_name("style.css"):
43 | if not os.path.isfile(cssfile):
44 | continue
45 |
46 | head += stylesheet(cssfile)
47 |
48 | if os.path.exists(os.path.join(data_path, "user.css")):
49 | head += stylesheet(os.path.join(data_path, "user.css"))
50 |
51 | return head
52 |
53 |
54 | def reload_javascript():
55 | js = javascript_html()
56 | css = css_html()
57 |
58 | def template_response(*args, **kwargs):
59 | res = shared.GradioTemplateResponseOriginal(*args, **kwargs)
60 |         res.body = res.body.replace(b'</head>', f'{js}</head>'.encode("utf8"))
61 |         res.body = res.body.replace(b'</body>', f'{css}</body>'.encode("utf8"))