├── .eslintignore ├── .eslintrc.js ├── .git-blame-ignore-revs ├── .github ├── ISSUE_TEMPLATE │ ├── bug_report.yml │ ├── config.yml │ └── feature_request.yml ├── pull_request_template.md └── workflows │ ├── on_pull_request.yaml │ ├── run_tests.yaml │ └── warns_merge_master.yml ├── .gitignore ├── .pylintrc ├── CHANGELOG.md ├── CITATION.cff ├── CODEOWNERS ├── LICENSE.txt ├── README.md ├── configs ├── alt-diffusion-inference.yaml ├── instruct-pix2pix.yaml ├── v1-inference.yaml └── v1-inpainting-inference.yaml ├── embeddings └── Place Textual Inversion embeddings here.txt ├── environment-wsl2.yaml ├── extensions-builtin ├── LDSR │ ├── ldsr_model_arch.py │ ├── preload.py │ ├── scripts │ │ └── ldsr_model.py │ ├── sd_hijack_autoencoder.py │ ├── sd_hijack_ddpm_v1.py │ └── vqvae_quantize.py ├── Lora │ ├── extra_networks_lora.py │ ├── lora.py │ ├── lora_patches.py │ ├── lyco_helpers.py │ ├── network.py │ ├── network_full.py │ ├── network_hada.py │ ├── network_ia3.py │ ├── network_lokr.py │ ├── network_lora.py │ ├── network_norm.py │ ├── networks.py │ ├── preload.py │ ├── scripts │ │ └── lora_script.py │ ├── ui_edit_user_metadata.py │ └── ui_extra_networks_lora.py ├── ScuNET │ ├── preload.py │ ├── scripts │ │ └── scunet_model.py │ └── scunet_model_arch.py ├── SwinIR │ ├── preload.py │ ├── scripts │ │ └── swinir_model.py │ ├── swinir_model_arch.py │ └── swinir_model_arch_v2.py ├── canvas-zoom-and-pan │ ├── javascript │ │ └── zoom.js │ ├── scripts │ │ └── hotkey_config.py │ └── style.css ├── extra-options-section │ └── scripts │ │ └── extra_options_section.py ├── mobile │ └── javascript │ │ └── mobile.js └── prompt-bracket-checker │ └── javascript │ └── prompt-bracket-checker.js ├── extensions └── put extensions here.txt ├── first-time-runner.bat ├── html ├── card-no-preview.png ├── extra-networks-card.html ├── extra-networks-no-cards.html ├── footer.html └── licenses.html ├── javascript ├── aspectRatioOverlay.js ├── contextMenus.js ├── dragdrop.js ├── edit-attention.js ├── edit-order.js ├── extensions.js ├── extraNetworks.js ├── generationParams.js ├── hints.js ├── hires_fix.js ├── imageMaskFix.js ├── imageviewer.js ├── imageviewerGamepad.js ├── inputAccordion.js ├── localStorage.js ├── localization.js ├── notification.js ├── profilerVisualization.js ├── progressbar.js ├── resizeHandle.js ├── textualInversion.js ├── token-counters.js ├── ui.js └── ui_settings_hints.js ├── launch.py ├── localizations └── Put localization files here.txt ├── models ├── Stable-diffusion │ └── Put Stable Diffusion checkpoints here.txt ├── VAE-approx │ └── model.pt ├── VAE │ └── Put VAE here.txt ├── deepbooru │ └── Put your deepbooru release project folder here.txt └── karlo │ └── ViT-L-14_stats.th ├── modules ├── Roboto-Regular.ttf ├── api │ ├── api.py │ └── models.py ├── cache.py ├── call_queue.py ├── cmd_args.py ├── codeformer │ ├── codeformer_arch.py │ └── vqgan_arch.py ├── codeformer_model.py ├── config_states.py ├── deepbooru.py ├── deepbooru_model.py ├── devices.py ├── errors.py ├── esrgan_model.py ├── esrgan_model_arch.py ├── extensions.py ├── extra_networks.py ├── extra_networks_hypernet.py ├── extras.py ├── face_restoration.py ├── fifo_lock.py ├── generation_parameters_copypaste.py ├── gfpgan_model.py ├── gitpython_hack.py ├── gradio_extensons.py ├── hashes.py ├── hypernetworks │ ├── hypernetwork.py │ └── ui.py ├── images.py ├── img2img.py ├── import_hook.py ├── initialize.py ├── initialize_util.py ├── interrogate.py ├── launch_utils.py ├── localization.py ├── logging_config.py ├── lowvram.py ├── mac_specific.py 
├── masking.py ├── memmon.py ├── modelloader.py ├── models │ └── diffusion │ │ ├── ddpm_edit.py │ │ └── uni_pc │ │ ├── __init__.py │ │ ├── sampler.py │ │ └── uni_pc.py ├── ngrok.py ├── options.py ├── patches.py ├── paths.py ├── paths_internal.py ├── postprocessing.py ├── processing.py ├── processing_scripts │ ├── refiner.py │ └── seed.py ├── progress.py ├── prompt_parser.py ├── realesrgan_model.py ├── restart.py ├── rng.py ├── rng_philox.py ├── safe.py ├── script_callbacks.py ├── script_loading.py ├── scripts.py ├── scripts_auto_postprocessing.py ├── scripts_postprocessing.py ├── sd_disable_initialization.py ├── sd_hijack.py ├── sd_hijack_checkpoint.py ├── sd_hijack_clip.py ├── sd_hijack_clip_old.py ├── sd_hijack_ip2p.py ├── sd_hijack_open_clip.py ├── sd_hijack_optimizations.py ├── sd_hijack_unet.py ├── sd_hijack_utils.py ├── sd_hijack_xlmr.py ├── sd_models.py ├── sd_models_config.py ├── sd_models_types.py ├── sd_models_xl.py ├── sd_samplers.py ├── sd_samplers_cfg_denoiser.py ├── sd_samplers_common.py ├── sd_samplers_compvis.py ├── sd_samplers_extra.py ├── sd_samplers_kdiffusion.py ├── sd_samplers_timesteps.py ├── sd_samplers_timesteps_impl.py ├── sd_unet.py ├── sd_vae.py ├── sd_vae_approx.py ├── sd_vae_taesd.py ├── shared.py ├── shared_cmd_options.py ├── shared_gradio_themes.py ├── shared_init.py ├── shared_items.py ├── shared_options.py ├── shared_state.py ├── shared_total_tqdm.py ├── styles.py ├── sub_quadratic_attention.py ├── sysinfo.py ├── textual_inversion │ ├── autocrop.py │ ├── dataset.py │ ├── image_embedding.py │ ├── learn_schedule.py │ ├── logging.py │ ├── preprocess.py │ ├── test_embedding.png │ ├── textual_inversion.py │ └── ui.py ├── timer.py ├── txt2img.py ├── ui.py ├── ui_checkpoint_merger.py ├── ui_common.py ├── ui_components.py ├── ui_extensions.py ├── ui_extra_networks.py ├── ui_extra_networks_checkpoints.py ├── ui_extra_networks_checkpoints_user_metadata.py ├── ui_extra_networks_hypernets.py ├── ui_extra_networks_textual_inversion.py ├── ui_extra_networks_user_metadata.py ├── ui_gradio_extensions.py ├── ui_loadsave.py ├── ui_postprocessing.py ├── ui_prompt_styles.py ├── ui_settings.py ├── ui_tempdir.py ├── upscaler.py ├── util.py └── xlmr.py ├── package.json ├── pyproject.toml ├── requirements-test.txt ├── requirements.txt ├── requirements_versions.txt ├── screenshot.png ├── screenshot_OpenVINO.png ├── script.js ├── scripts ├── custom_code.py ├── img2imgalt.py ├── loopback.py ├── openvino_accelerate.py ├── outpainting_mk_2.py ├── poor_mans_outpainting.py ├── postprocessing_codeformer.py ├── postprocessing_gfpgan.py ├── postprocessing_upscale.py ├── prompt_matrix.py ├── prompts_from_file.py ├── sd_upscale.py └── xyz_grid.py ├── style.css ├── test ├── __init__.py ├── conftest.py ├── test_extras.py ├── test_files │ ├── empty.pt │ ├── img2img_basic.png │ └── mask_basic.png ├── test_img2img.py ├── test_txt2img.py └── test_utils.py ├── textual_inversion_templates ├── hypernetwork.txt ├── none.txt ├── style.txt ├── style_filewords.txt ├── subject.txt └── subject_filewords.txt ├── webui-macos-env.sh ├── webui-user.bat ├── webui-user.sh ├── webui.bat ├── webui.py └── webui.sh /.eslintignore: -------------------------------------------------------------------------------- 1 | extensions 2 | extensions-disabled 3 | repositories 4 | venv -------------------------------------------------------------------------------- /.eslintrc.js: -------------------------------------------------------------------------------- 1 | /* global module */ 2 | module.exports = { 3 | env: { 4 | browser: 
true, 5 | es2021: true, 6 | }, 7 | extends: "eslint:recommended", 8 | parserOptions: { 9 | ecmaVersion: "latest", 10 | }, 11 | rules: { 12 | "arrow-spacing": "error", 13 | "block-spacing": "error", 14 | "brace-style": "error", 15 | "comma-dangle": ["error", "only-multiline"], 16 | "comma-spacing": "error", 17 | "comma-style": ["error", "last"], 18 | "curly": ["error", "multi-line", "consistent"], 19 | "eol-last": "error", 20 | "func-call-spacing": "error", 21 | "function-call-argument-newline": ["error", "consistent"], 22 | "function-paren-newline": ["error", "consistent"], 23 | "indent": ["error", 4], 24 | "key-spacing": "error", 25 | "keyword-spacing": "error", 26 | "linebreak-style": ["error", "unix"], 27 | "no-extra-semi": "error", 28 | "no-mixed-spaces-and-tabs": "error", 29 | "no-multi-spaces": "error", 30 | "no-redeclare": ["error", {builtinGlobals: false}], 31 | "no-trailing-spaces": "error", 32 | "no-unused-vars": "off", 33 | "no-whitespace-before-property": "error", 34 | "object-curly-newline": ["error", {consistent: true, multiline: true}], 35 | "object-curly-spacing": ["error", "never"], 36 | "operator-linebreak": ["error", "after"], 37 | "quote-props": ["error", "consistent-as-needed"], 38 | "semi": ["error", "always"], 39 | "semi-spacing": "error", 40 | "semi-style": ["error", "last"], 41 | "space-before-blocks": "error", 42 | "space-before-function-paren": ["error", "never"], 43 | "space-in-parens": ["error", "never"], 44 | "space-infix-ops": "error", 45 | "space-unary-ops": "error", 46 | "switch-colon-spacing": "error", 47 | "template-curly-spacing": ["error", "never"], 48 | "unicode-bom": "error", 49 | }, 50 | globals: { 51 | //script.js 52 | gradioApp: "readonly", 53 | executeCallbacks: "readonly", 54 | onAfterUiUpdate: "readonly", 55 | onOptionsChanged: "readonly", 56 | onUiLoaded: "readonly", 57 | onUiUpdate: "readonly", 58 | uiCurrentTab: "writable", 59 | uiElementInSight: "readonly", 60 | uiElementIsVisible: "readonly", 61 | //ui.js 62 | opts: "writable", 63 | all_gallery_buttons: "readonly", 64 | selected_gallery_button: "readonly", 65 | selected_gallery_index: "readonly", 66 | switch_to_txt2img: "readonly", 67 | switch_to_img2img_tab: "readonly", 68 | switch_to_img2img: "readonly", 69 | switch_to_sketch: "readonly", 70 | switch_to_inpaint: "readonly", 71 | switch_to_inpaint_sketch: "readonly", 72 | switch_to_extras: "readonly", 73 | get_tab_index: "readonly", 74 | create_submit_args: "readonly", 75 | restart_reload: "readonly", 76 | updateInput: "readonly", 77 | //extraNetworks.js 78 | requestGet: "readonly", 79 | popup: "readonly", 80 | // from python 81 | localization: "readonly", 82 | // progressbar.js 83 | randomId: "readonly", 84 | requestProgress: "readonly", 85 | // imageviewer.js 86 | modalPrevImage: "readonly", 87 | modalNextImage: "readonly", 88 | // token-counters.js 89 | setupTokenCounters: "readonly", 90 | // localStorage.js 91 | localSet: "readonly", 92 | localGet: "readonly", 93 | localRemove: "readonly", 94 | // resizeHandle.js 95 | setupResizeHandle: "writable" 96 | } 97 | }; 98 | -------------------------------------------------------------------------------- /.git-blame-ignore-revs: -------------------------------------------------------------------------------- 1 | # Apply ESLint 2 | 9c54b78d9dde5601e916f308d9a9d6953ec39430 -------------------------------------------------------------------------------- /.github/ISSUE_TEMPLATE/bug_report.yml: -------------------------------------------------------------------------------- 1 | name: Bug Report 2 | 
description: You think something is broken in the UI 3 | title: "[Bug]: " 4 | labels: ["bug-report"] 5 | 6 | body: 7 | - type: checkboxes 8 | attributes: 9 | label: Is there an existing issue for this? 10 | description: Please search to see if an issue already exists for the bug you encountered, and that it hasn't been fixed in a recent build/commit. 11 | options: 12 | - label: I have searched the existing issues and checked the recent builds/commits 13 | required: true 14 | - type: markdown 15 | attributes: 16 | value: | 17 | *Please fill this form with as much information as possible; don't forget to fill in "What OS..." and "What browsers", and provide screenshots if possible* 18 | - type: textarea 19 | id: what-did 20 | attributes: 21 | label: What happened? 22 | description: Tell us what happened in a very clear and simple way 23 | validations: 24 | required: true 25 | - type: textarea 26 | id: steps 27 | attributes: 28 | label: Steps to reproduce the problem 29 | description: Please provide us with precise step-by-step instructions on how to reproduce the bug 30 | value: | 31 | 1. Go to .... 32 | 2. Press .... 33 | 3. ... 34 | validations: 35 | required: true 36 | - type: textarea 37 | id: what-should 38 | attributes: 39 | label: What should have happened? 40 | description: Tell us what you think the normal behavior should be 41 | validations: 42 | required: true 43 | - type: textarea 44 | id: sysinfo 45 | attributes: 46 | label: Sysinfo 47 | description: System info file, generated by WebUI. You can generate it in settings, on the Sysinfo page. Drag the file into the field to upload it. If you submit your report without including the sysinfo file, the report will be closed. If needed, review the report to make sure it includes no personal information you don't want to share. If you can't start WebUI, you can use the --dump-sysinfo command-line argument to generate the file. 48 | validations: 49 | required: true 50 | - type: dropdown 51 | id: browsers 52 | attributes: 53 | label: What browsers do you use to access the UI? 54 | multiple: true 55 | options: 56 | - Mozilla Firefox 57 | - Google Chrome 58 | - Brave 59 | - Apple Safari 60 | - Microsoft Edge 61 | - Other 62 | - type: textarea 63 | id: logs 64 | attributes: 65 | label: Console logs 66 | description: Please provide **full** cmd/terminal logs from the moment you started UI to the end of it, after your bug happened. If it's very long, provide a link to pastebin or similar service. 67 | render: Shell 68 | validations: 69 | required: true 70 | - type: textarea 71 | id: misc 72 | attributes: 73 | label: Additional information 74 | description: Please provide us with any relevant additional info or context. 75 | -------------------------------------------------------------------------------- /.github/ISSUE_TEMPLATE/config.yml: -------------------------------------------------------------------------------- 1 | blank_issues_enabled: false 2 | contact_links: 3 | - name: WebUI Community Support 4 | url: https://github.com/AUTOMATIC1111/stable-diffusion-webui/discussions 5 | about: Please ask and answer questions here. 
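# blank_issues_enabled: false (set above) forces every new issue through one of the two templates in this directory; free-form questions are redirected to the Discussions link instead.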
6 | -------------------------------------------------------------------------------- /.github/ISSUE_TEMPLATE/feature_request.yml: -------------------------------------------------------------------------------- 1 | name: Feature request 2 | description: Suggest an idea for this project 3 | title: "[Feature Request]: " 4 | labels: ["enhancement"] 5 | 6 | body: 7 | - type: checkboxes 8 | attributes: 9 | label: Is there an existing issue for this? 10 | description: Please search to see if an issue already exists for the feature you want, and that it's not implemented in a recent build/commit. 11 | options: 12 | - label: I have searched the existing issues and checked the recent builds/commits 13 | required: true 14 | - type: markdown 15 | attributes: 16 | value: | 17 | *Please fill this form with as much information as possible, provide screenshots and/or illustrations of the feature if possible* 18 | - type: textarea 19 | id: feature 20 | attributes: 21 | label: What would your feature do? 22 | description: Tell us about your feature in a very clear and simple way, and what problem it would solve 23 | validations: 24 | required: true 25 | - type: textarea 26 | id: workflow 27 | attributes: 28 | label: Proposed workflow 29 | description: Please provide us with step-by-step information on how you'd like the feature to be accessed and used 30 | value: | 31 | 1. Go to .... 32 | 2. Press .... 33 | 3. ... 34 | validations: 35 | required: true 36 | - type: textarea 37 | id: misc 38 | attributes: 39 | label: Additional information 40 | description: Add any other context or screenshots about the feature request here. 41 | -------------------------------------------------------------------------------- /.github/pull_request_template.md: -------------------------------------------------------------------------------- 1 | ## Description 2 | 3 | * a simple description of what you're trying to accomplish 4 | * a summary of changes in code 5 | * which issues it fixes, if any 6 | 7 | ## Screenshots/videos: 8 | 9 | 10 | ## Checklist: 11 | 12 | - [ ] I have read the [contributing wiki page](https://github.com/AUTOMATIC1111/stable-diffusion-webui/wiki/Contributing) 13 | - [ ] I have performed a self-review of my own code 14 | - [ ] My code follows the [style guidelines](https://github.com/AUTOMATIC1111/stable-diffusion-webui/wiki/Contributing#code-style) 15 | - [ ] My code passes [tests](https://github.com/AUTOMATIC1111/stable-diffusion-webui/wiki/Tests) 16 | -------------------------------------------------------------------------------- /.github/workflows/on_pull_request.yaml: -------------------------------------------------------------------------------- 1 | name: Linter 2 | 3 | on: 4 | - push 5 | - pull_request 6 | 7 | jobs: 8 | lint-python: 9 | name: ruff 10 | runs-on: ubuntu-latest 11 | if: github.event_name != 'pull_request' || github.event.pull_request.head.repo.full_name != github.event.pull_request.base.repo.full_name 12 | steps: 13 | - name: Checkout Code 14 | uses: actions/checkout@v3 15 | - uses: actions/setup-python@v4 16 | with: 17 | python-version: 3.11 18 | # NB: there's no cache: pip here since we're not installing anything 19 | # from the requirements.txt file(s) in the repository; it's faster 20 | # not to have GHA download an (at the time of writing) 4 GB cache 21 | # of PyTorch and other dependencies. 22 | - name: Install Ruff 23 | run: pip install ruff==0.0.272 24 | - name: Run Ruff 25 | run: ruff . 
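# Both lint jobs in this workflow can be reproduced locally with the same pinned tools: `pip install ruff==0.0.272 && ruff .` for Python, and `npm i --ci && npm run lint` for JavaScript (commands taken verbatim from the steps in this file).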
26 | lint-js: 27 | name: eslint 28 | runs-on: ubuntu-latest 29 | if: github.event_name != 'pull_request' || github.event.pull_request.head.repo.full_name != github.event.pull_request.base.repo.full_name 30 | steps: 31 | - name: Checkout Code 32 | uses: actions/checkout@v3 33 | - name: Install Node.js 34 | uses: actions/setup-node@v3 35 | with: 36 | node-version: 18 37 | - run: npm i --ci 38 | - run: npm run lint 39 | -------------------------------------------------------------------------------- /.github/workflows/run_tests.yaml: -------------------------------------------------------------------------------- 1 | name: Tests 2 | 3 | on: 4 | - push 5 | - pull_request 6 | 7 | jobs: 8 | test: 9 | name: tests on CPU with empty model 10 | runs-on: ubuntu-latest 11 | if: github.event_name != 'pull_request' || github.event.pull_request.head.repo.full_name != github.event.pull_request.base.repo.full_name 12 | steps: 13 | - name: Checkout Code 14 | uses: actions/checkout@v3 15 | - name: Set up Python 3.10 16 | uses: actions/setup-python@v4 17 | with: 18 | python-version: 3.10.6 19 | cache: pip 20 | cache-dependency-path: | 21 | **/requirements*txt 22 | launch.py 23 | - name: Install test dependencies 24 | run: pip install wait-for-it -r requirements-test.txt 25 | env: 26 | PIP_DISABLE_PIP_VERSION_CHECK: "1" 27 | PIP_PROGRESS_BAR: "off" 28 | - name: Setup environment 29 | run: python launch.py --skip-torch-cuda-test --exit 30 | env: 31 | PIP_DISABLE_PIP_VERSION_CHECK: "1" 32 | PIP_PROGRESS_BAR: "off" 33 | TORCH_INDEX_URL: https://download.pytorch.org/whl/cpu 34 | WEBUI_LAUNCH_LIVE_OUTPUT: "1" 35 | PYTHONUNBUFFERED: "1" 36 | - name: Start test server 37 | run: > 38 | python -m coverage run 39 | --data-file=.coverage.server 40 | launch.py 41 | --skip-prepare-environment 42 | --skip-torch-cuda-test 43 | --test-server 44 | --do-not-download-clip 45 | --no-half 46 | --disable-opt-split-attention 47 | --use-cpu all 48 | --api-server-stop 49 | 2>&1 | tee output.txt & 50 | - name: Run tests 51 | run: | 52 | wait-for-it --service 127.0.0.1:7860 -t 600 53 | python -m pytest -vv --junitxml=test/results.xml --cov . --cov-report=xml --verify-base-url test 54 | - name: Kill test server 55 | if: always() 56 | run: curl -vv -XPOST http://127.0.0.1:7860/sdapi/v1/server-stop && sleep 10 57 | - name: Show coverage 58 | run: | 59 | python -m coverage combine .coverage* 60 | python -m coverage report -i 61 | python -m coverage html -i 62 | - name: Upload main app output 63 | uses: actions/upload-artifact@v3 64 | if: always() 65 | with: 66 | name: output 67 | path: output.txt 68 | - name: Upload coverage HTML 69 | uses: actions/upload-artifact@v3 70 | if: always() 71 | with: 72 | name: htmlcov 73 | path: htmlcov 74 | -------------------------------------------------------------------------------- /.github/workflows/warns_merge_master.yml: -------------------------------------------------------------------------------- 1 | name: Pull requests can't target master branch 2 | 3 | "on": 4 | pull_request: 5 | types: 6 | - opened 7 | - synchronize 8 | - reopened 9 | branches: 10 | - master 11 | 12 | jobs: 13 | check: 14 | runs-on: ubuntu-latest 15 | steps: 16 | - name: Warn about merge into master 17 | run: | 18 | echo -e "::warning::This pull request merges directly into the \"master\" branch; normally, development happens on the \"dev\" branch." 
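# The `exit 1` below is what makes this check fail: the ::warning:: annotation above is informational only, while the non-zero exit turns the job red so PRs targeting master are visibly flagged.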
19 | exit 1 20 | -------------------------------------------------------------------------------- /.gitignore: -------------------------------------------------------------------------------- 1 | __pycache__ 2 | *.ckpt 3 | *.safetensors 4 | *.pth 5 | /ESRGAN/* 6 | /SwinIR/* 7 | /repositories 8 | /venv 9 | /tmp 10 | /model.ckpt 11 | /models/**/* 12 | /GFPGANv1.3.pth 13 | /gfpgan/weights/*.pth 14 | /ui-config.json 15 | /outputs 16 | /config.json 17 | /log 18 | /webui.settings.bat 19 | /embeddings 20 | /styles.csv 21 | /params.txt 22 | /styles.csv.bak 23 | /webui-user.bat 24 | /webui-user.sh 25 | /interrogate 26 | /user.css 27 | /.idea 28 | notification.mp3 29 | /SwinIR 30 | /textual_inversion 31 | .vscode 32 | /extensions 33 | /test/stdout.txt 34 | /test/stderr.txt 35 | /cache.json* 36 | /config_states/ 37 | /node_modules 38 | /package-lock.json 39 | /.coverage* 40 | -------------------------------------------------------------------------------- /.pylintrc: -------------------------------------------------------------------------------- 1 | # See https://pylint.pycqa.org/en/latest/user_guide/messages/message_control.html 2 | [MESSAGES CONTROL] 3 | disable=C,R,W,E,I 4 | -------------------------------------------------------------------------------- /CITATION.cff: -------------------------------------------------------------------------------- 1 | cff-version: 1.2.0 2 | message: "If you use this software, please cite it as below." 3 | authors: 4 | - given-names: AUTOMATIC1111 5 | title: "Stable Diffusion Web UI" 6 | date-released: 2022-08-22 7 | url: "https://github.com/AUTOMATIC1111/stable-diffusion-webui" 8 | -------------------------------------------------------------------------------- /CODEOWNERS: -------------------------------------------------------------------------------- 1 | * @AUTOMATIC1111 2 | 3 | # if you were managing a localization and were removed from this file, this is because 4 | # the intended way to do localizations now is via extensions. See: 5 | # https://github.com/AUTOMATIC1111/stable-diffusion-webui/wiki/Developing-extensions 6 | # Make a repo with your localization and since you are still listed as a collaborator 7 | # you can add it to the wiki page yourself. This change is because some people complained 8 | # the git commit log is cluttered with things unrelated to almost everyone and 9 | # because I believe this is the best overall for the project to handle localizations almost 10 | # entirely without my oversight. 11 | 12 | 13 | -------------------------------------------------------------------------------- /configs/alt-diffusion-inference.yaml: -------------------------------------------------------------------------------- 1 | model: 2 | base_learning_rate: 1.0e-04 3 | target: ldm.models.diffusion.ddpm.LatentDiffusion 4 | params: 5 | linear_start: 0.00085 6 | linear_end: 0.0120 7 | num_timesteps_cond: 1 8 | log_every_t: 200 9 | timesteps: 1000 10 | first_stage_key: "jpg" 11 | cond_stage_key: "txt" 12 | image_size: 64 13 | channels: 4 14 | cond_stage_trainable: false # Note: different from the one we trained before 15 | conditioning_key: crossattn 16 | monitor: val/loss_simple_ema 17 | scale_factor: 0.18215 18 | use_ema: False 19 | 20 | scheduler_config: # 10000 warmup steps 21 | target: ldm.lr_scheduler.LambdaLinearScheduler 22 | params: 23 | warm_up_steps: [ 10000 ] 24 | cycle_lengths: [ 10000000000000 ] # incredibly large number to prevent corner cases 25 | f_start: [ 1.e-6 ] 26 | f_max: [ 1. ] 27 | f_min: [ 1. 
] 28 | 29 | unet_config: 30 | target: ldm.modules.diffusionmodules.openaimodel.UNetModel 31 | params: 32 | image_size: 32 # unused 33 | in_channels: 4 34 | out_channels: 4 35 | model_channels: 320 36 | attention_resolutions: [ 4, 2, 1 ] 37 | num_res_blocks: 2 38 | channel_mult: [ 1, 2, 4, 4 ] 39 | num_heads: 8 40 | use_spatial_transformer: True 41 | transformer_depth: 1 42 | context_dim: 768 43 | use_checkpoint: True 44 | legacy: False 45 | 46 | first_stage_config: 47 | target: ldm.models.autoencoder.AutoencoderKL 48 | params: 49 | embed_dim: 4 50 | monitor: val/rec_loss 51 | ddconfig: 52 | double_z: true 53 | z_channels: 4 54 | resolution: 256 55 | in_channels: 3 56 | out_ch: 3 57 | ch: 128 58 | ch_mult: 59 | - 1 60 | - 2 61 | - 4 62 | - 4 63 | num_res_blocks: 2 64 | attn_resolutions: [] 65 | dropout: 0.0 66 | lossconfig: 67 | target: torch.nn.Identity 68 | 69 | cond_stage_config: 70 | target: modules.xlmr.BertSeriesModelWithTransformation 71 | params: 72 | name: "XLMR-Large" -------------------------------------------------------------------------------- /configs/instruct-pix2pix.yaml: -------------------------------------------------------------------------------- 1 | # File modified by authors of InstructPix2Pix from original (https://github.com/CompVis/stable-diffusion). 2 | # See more details in LICENSE. 3 | 4 | model: 5 | base_learning_rate: 1.0e-04 6 | target: modules.models.diffusion.ddpm_edit.LatentDiffusion 7 | params: 8 | linear_start: 0.00085 9 | linear_end: 0.0120 10 | num_timesteps_cond: 1 11 | log_every_t: 200 12 | timesteps: 1000 13 | first_stage_key: edited 14 | cond_stage_key: edit 15 | # image_size: 64 16 | # image_size: 32 17 | image_size: 16 18 | channels: 4 19 | cond_stage_trainable: false # Note: different from the one we trained before 20 | conditioning_key: hybrid 21 | monitor: val/loss_simple_ema 22 | scale_factor: 0.18215 23 | use_ema: false 24 | 25 | scheduler_config: # 10000 warmup steps 26 | target: ldm.lr_scheduler.LambdaLinearScheduler 27 | params: 28 | warm_up_steps: [ 0 ] 29 | cycle_lengths: [ 10000000000000 ] # incredibly large number to prevent corner cases 30 | f_start: [ 1.e-6 ] 31 | f_max: [ 1. ] 32 | f_min: [ 1. 
] 33 | 34 | unet_config: 35 | target: ldm.modules.diffusionmodules.openaimodel.UNetModel 36 | params: 37 | image_size: 32 # unused 38 | in_channels: 8 39 | out_channels: 4 40 | model_channels: 320 41 | attention_resolutions: [ 4, 2, 1 ] 42 | num_res_blocks: 2 43 | channel_mult: [ 1, 2, 4, 4 ] 44 | num_heads: 8 45 | use_spatial_transformer: True 46 | transformer_depth: 1 47 | context_dim: 768 48 | use_checkpoint: True 49 | legacy: False 50 | 51 | first_stage_config: 52 | target: ldm.models.autoencoder.AutoencoderKL 53 | params: 54 | embed_dim: 4 55 | monitor: val/rec_loss 56 | ddconfig: 57 | double_z: true 58 | z_channels: 4 59 | resolution: 256 60 | in_channels: 3 61 | out_ch: 3 62 | ch: 128 63 | ch_mult: 64 | - 1 65 | - 2 66 | - 4 67 | - 4 68 | num_res_blocks: 2 69 | attn_resolutions: [] 70 | dropout: 0.0 71 | lossconfig: 72 | target: torch.nn.Identity 73 | 74 | cond_stage_config: 75 | target: ldm.modules.encoders.modules.FrozenCLIPEmbedder 76 | 77 | data: 78 | target: main.DataModuleFromConfig 79 | params: 80 | batch_size: 128 81 | num_workers: 1 82 | wrap: false 83 | validation: 84 | target: edit_dataset.EditDataset 85 | params: 86 | path: data/clip-filtered-dataset 87 | cache_dir: data/ 88 | cache_name: data_10k 89 | split: val 90 | min_text_sim: 0.2 91 | min_image_sim: 0.75 92 | min_direction_sim: 0.2 93 | max_samples_per_prompt: 1 94 | min_resize_res: 512 95 | max_resize_res: 512 96 | crop_res: 512 97 | output_as_edit: False 98 | real_input: True 99 | -------------------------------------------------------------------------------- /configs/v1-inference.yaml: -------------------------------------------------------------------------------- 1 | model: 2 | base_learning_rate: 1.0e-04 3 | target: ldm.models.diffusion.ddpm.LatentDiffusion 4 | params: 5 | linear_start: 0.00085 6 | linear_end: 0.0120 7 | num_timesteps_cond: 1 8 | log_every_t: 200 9 | timesteps: 1000 10 | first_stage_key: "jpg" 11 | cond_stage_key: "txt" 12 | image_size: 64 13 | channels: 4 14 | cond_stage_trainable: false # Note: different from the one we trained before 15 | conditioning_key: crossattn 16 | monitor: val/loss_simple_ema 17 | scale_factor: 0.18215 18 | use_ema: False 19 | 20 | scheduler_config: # 10000 warmup steps 21 | target: ldm.lr_scheduler.LambdaLinearScheduler 22 | params: 23 | warm_up_steps: [ 10000 ] 24 | cycle_lengths: [ 10000000000000 ] # incredibly large number to prevent corner cases 25 | f_start: [ 1.e-6 ] 26 | f_max: [ 1. ] 27 | f_min: [ 1. 
] 28 | 29 | unet_config: 30 | target: ldm.modules.diffusionmodules.openaimodel.UNetModel 31 | params: 32 | image_size: 32 # unused 33 | in_channels: 4 34 | out_channels: 4 35 | model_channels: 320 36 | attention_resolutions: [ 4, 2, 1 ] 37 | num_res_blocks: 2 38 | channel_mult: [ 1, 2, 4, 4 ] 39 | num_heads: 8 40 | use_spatial_transformer: True 41 | transformer_depth: 1 42 | context_dim: 768 43 | use_checkpoint: True 44 | legacy: False 45 | 46 | first_stage_config: 47 | target: ldm.models.autoencoder.AutoencoderKL 48 | params: 49 | embed_dim: 4 50 | monitor: val/rec_loss 51 | ddconfig: 52 | double_z: true 53 | z_channels: 4 54 | resolution: 256 55 | in_channels: 3 56 | out_ch: 3 57 | ch: 128 58 | ch_mult: 59 | - 1 60 | - 2 61 | - 4 62 | - 4 63 | num_res_blocks: 2 64 | attn_resolutions: [] 65 | dropout: 0.0 66 | lossconfig: 67 | target: torch.nn.Identity 68 | 69 | cond_stage_config: 70 | target: ldm.modules.encoders.modules.FrozenCLIPEmbedder 71 | -------------------------------------------------------------------------------- /configs/v1-inpainting-inference.yaml: -------------------------------------------------------------------------------- 1 | model: 2 | base_learning_rate: 7.5e-05 3 | target: ldm.models.diffusion.ddpm.LatentInpaintDiffusion 4 | params: 5 | linear_start: 0.00085 6 | linear_end: 0.0120 7 | num_timesteps_cond: 1 8 | log_every_t: 200 9 | timesteps: 1000 10 | first_stage_key: "jpg" 11 | cond_stage_key: "txt" 12 | image_size: 64 13 | channels: 4 14 | cond_stage_trainable: false # Note: different from the one we trained before 15 | conditioning_key: hybrid # important 16 | monitor: val/loss_simple_ema 17 | scale_factor: 0.18215 18 | finetune_keys: null 19 | 20 | scheduler_config: # 10000 warmup steps 21 | target: ldm.lr_scheduler.LambdaLinearScheduler 22 | params: 23 | warm_up_steps: [ 2500 ] # NOTE for resuming. use 10000 if starting from scratch 24 | cycle_lengths: [ 10000000000000 ] # incredibly large number to prevent corner cases 25 | f_start: [ 1.e-6 ] 26 | f_max: [ 1. ] 27 | f_min: [ 1. 
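# As configured, the lambda scheduler ramps the learning-rate multiplier linearly from f_start (1e-6) to f_max (1.0) over warm_up_steps; because f_max equals f_min, the multiplier then stays flat for the rest of training. The same pattern is used by all four configs in this directory.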
] 28 | 29 | unet_config: 30 | target: ldm.modules.diffusionmodules.openaimodel.UNetModel 31 | params: 32 | image_size: 32 # unused 33 | in_channels: 9 # 4 data + 4 downscaled image + 1 mask 34 | out_channels: 4 35 | model_channels: 320 36 | attention_resolutions: [ 4, 2, 1 ] 37 | num_res_blocks: 2 38 | channel_mult: [ 1, 2, 4, 4 ] 39 | num_heads: 8 40 | use_spatial_transformer: True 41 | transformer_depth: 1 42 | context_dim: 768 43 | use_checkpoint: True 44 | legacy: False 45 | 46 | first_stage_config: 47 | target: ldm.models.autoencoder.AutoencoderKL 48 | params: 49 | embed_dim: 4 50 | monitor: val/rec_loss 51 | ddconfig: 52 | double_z: true 53 | z_channels: 4 54 | resolution: 256 55 | in_channels: 3 56 | out_ch: 3 57 | ch: 128 58 | ch_mult: 59 | - 1 60 | - 2 61 | - 4 62 | - 4 63 | num_res_blocks: 2 64 | attn_resolutions: [] 65 | dropout: 0.0 66 | lossconfig: 67 | target: torch.nn.Identity 68 | 69 | cond_stage_config: 70 | target: ldm.modules.encoders.modules.FrozenCLIPEmbedder 71 | -------------------------------------------------------------------------------- /embeddings/Place Textual Inversion embeddings here.txt: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/openvinotoolkit/stable-diffusion-webui/e5a634da06c62d72dbdc764b16c65ef3408aa588/embeddings/Place Textual Inversion embeddings here.txt -------------------------------------------------------------------------------- /environment-wsl2.yaml: -------------------------------------------------------------------------------- 1 | name: automatic 2 | channels: 3 | - pytorch 4 | - defaults 5 | dependencies: 6 | - python=3.10 7 | - pip=23.0 8 | - cudatoolkit=11.8 9 | - pytorch=2.0 10 | - torchvision=0.15 11 | - numpy=1.23 12 | -------------------------------------------------------------------------------- /extensions-builtin/LDSR/preload.py: -------------------------------------------------------------------------------- 1 | import os 2 | from modules import paths 3 | 4 | 5 | def preload(parser): 6 | parser.add_argument("--ldsr-models-path", type=str, help="Path to directory with LDSR model file(s).", default=os.path.join(paths.models_path, 'LDSR')) 7 | -------------------------------------------------------------------------------- /extensions-builtin/LDSR/scripts/ldsr_model.py: -------------------------------------------------------------------------------- 1 | import os 2 | 3 | from modules.modelloader import load_file_from_url 4 | from modules.upscaler import Upscaler, UpscalerData 5 | from ldsr_model_arch import LDSR 6 | from modules import shared, script_callbacks, errors 7 | import sd_hijack_autoencoder # noqa: F401 8 | import sd_hijack_ddpm_v1 # noqa: F401 9 | 10 | 11 | class UpscalerLDSR(Upscaler): 12 | def __init__(self, user_path): 13 | self.name = "LDSR" 14 | self.user_path = user_path 15 | self.model_url = "https://heibox.uni-heidelberg.de/f/578df07c8fc04ffbadf3/?dl=1" 16 | self.yaml_url = "https://heibox.uni-heidelberg.de/f/31a76b13ea27482981b4/?dl=1" 17 | super().__init__() 18 | scaler_data = UpscalerData("LDSR", None, self) 19 | self.scalers = [scaler_data] 20 | 21 | def load_model(self, path: str): 22 | # Remove incorrect project.yaml file if too big 23 | yaml_path = os.path.join(self.model_path, "project.yaml") 24 | old_model_path = os.path.join(self.model_path, "model.pth") 25 | new_model_path = os.path.join(self.model_path, "model.ckpt") 26 | 27 | local_model_paths = self.find_models(ext_filter=[".ckpt", ".safetensors"]) 28 | local_ckpt_path = 
next(iter([local_model for local_model in local_model_paths if local_model.endswith("model.ckpt")]), None) 29 | local_safetensors_path = next(iter([local_model for local_model in local_model_paths if local_model.endswith("model.safetensors")]), None) 30 | local_yaml_path = next(iter([local_model for local_model in local_model_paths if local_model.endswith("project.yaml")]), None) 31 | 32 | if os.path.exists(yaml_path): 33 | statinfo = os.stat(yaml_path) 34 | if statinfo.st_size >= 10485760: 35 | print("Removing invalid LDSR YAML file.") 36 | os.remove(yaml_path) 37 | 38 | if os.path.exists(old_model_path): 39 | print("Renaming model from model.pth to model.ckpt") 40 | os.rename(old_model_path, new_model_path) 41 | 42 | if local_safetensors_path is not None and os.path.exists(local_safetensors_path): 43 | model = local_safetensors_path 44 | else: 45 | model = local_ckpt_path or load_file_from_url(self.model_url, model_dir=self.model_download_path, file_name="model.ckpt") 46 | 47 | yaml = local_yaml_path or load_file_from_url(self.yaml_url, model_dir=self.model_download_path, file_name="project.yaml") 48 | 49 | return LDSR(model, yaml) 50 | 51 | def do_upscale(self, img, path): 52 | try: 53 | ldsr = self.load_model(path) 54 | except Exception: 55 | errors.report(f"Failed loading LDSR model {path}", exc_info=True) 56 | return img 57 | ddim_steps = shared.opts.ldsr_steps 58 | return ldsr.super_resolution(img, ddim_steps, self.scale) 59 | 60 | 61 | def on_ui_settings(): 62 | import gradio as gr 63 | 64 | shared.opts.add_option("ldsr_steps", shared.OptionInfo(100, "LDSR processing steps. Lower = faster", gr.Slider, {"minimum": 1, "maximum": 200, "step": 1}, section=('upscaling', "Upscaling"))) 65 | shared.opts.add_option("ldsr_cached", shared.OptionInfo(False, "Cache LDSR model in memory", gr.Checkbox, {"interactive": True}, section=('upscaling', "Upscaling"))) 66 | 67 | 68 | script_callbacks.on_ui_settings(on_ui_settings) 69 | -------------------------------------------------------------------------------- /extensions-builtin/Lora/extra_networks_lora.py: -------------------------------------------------------------------------------- 1 | from modules import extra_networks, shared 2 | import networks 3 | 4 | 5 | class ExtraNetworkLora(extra_networks.ExtraNetwork): 6 | def __init__(self): 7 | super().__init__('lora') 8 | 9 | self.errors = {} 10 | """mapping of network names to the number of errors the network had during operation""" 11 | 12 | def activate(self, p, params_list): 13 | additional = shared.opts.sd_lora 14 | 15 | self.errors.clear() 16 | 17 | if additional != "None" and additional in networks.available_networks and not any(x for x in params_list if x.items[0] == additional): 18 | p.all_prompts = [x + f"<lora:{additional}:{shared.opts.extra_networks_default_multiplier}>" for x in p.all_prompts] 19 | params_list.append(extra_networks.ExtraNetworkParams(items=[additional, shared.opts.extra_networks_default_multiplier])) 20 | 21 | names = [] 22 | te_multipliers = [] 23 | unet_multipliers = [] 24 | dyn_dims = [] 25 | for params in params_list: 26 | assert params.items 27 | 28 | names.append(params.positional[0]) 29 | 30 | te_multiplier = float(params.positional[1]) if len(params.positional) > 1 else 1.0 31 | te_multiplier = float(params.named.get("te", te_multiplier)) 32 | 33 | unet_multiplier = float(params.positional[2]) if len(params.positional) > 2 else te_multiplier 34 | unet_multiplier = float(params.named.get("unet", unet_multiplier)) 35 | 36 | dyn_dim = int(params.positional[3]) if len(params.positional) > 3 else None 37 | dyn_dim = 
int(params.named["dyn"]) if "dyn" in params.named else dyn_dim 38 | 39 | te_multipliers.append(te_multiplier) 40 | unet_multipliers.append(unet_multiplier) 41 | dyn_dims.append(dyn_dim) 42 | 43 | networks.load_networks(names, te_multipliers, unet_multipliers, dyn_dims) 44 | 45 | if shared.opts.lora_add_hashes_to_infotext: 46 | network_hashes = [] 47 | for item in networks.loaded_networks: 48 | shorthash = item.network_on_disk.shorthash 49 | if not shorthash: 50 | continue 51 | 52 | alias = item.mentioned_name 53 | if not alias: 54 | continue 55 | 56 | alias = alias.replace(":", "").replace(",", "") 57 | 58 | network_hashes.append(f"{alias}: {shorthash}") 59 | 60 | if network_hashes: 61 | p.extra_generation_params["Lora hashes"] = ", ".join(network_hashes) 62 | 63 | def deactivate(self, p): 64 | if self.errors: 65 | p.comment("Networks with errors: " + ", ".join(f"{k} ({v})" for k, v in self.errors.items())) 66 | 67 | self.errors.clear() 68 | -------------------------------------------------------------------------------- /extensions-builtin/Lora/lora.py: -------------------------------------------------------------------------------- 1 | import networks 2 | 3 | list_available_loras = networks.list_available_networks 4 | 5 | available_loras = networks.available_networks 6 | available_lora_aliases = networks.available_network_aliases 7 | available_lora_hash_lookup = networks.available_network_hash_lookup 8 | forbidden_lora_aliases = networks.forbidden_network_aliases 9 | loaded_loras = networks.loaded_networks 10 | -------------------------------------------------------------------------------- /extensions-builtin/Lora/lora_patches.py: -------------------------------------------------------------------------------- 1 | import torch 2 | 3 | import networks 4 | from modules import patches 5 | 6 | 7 | class LoraPatches: 8 | def __init__(self): 9 | self.Linear_forward = patches.patch(__name__, torch.nn.Linear, 'forward', networks.network_Linear_forward) 10 | self.Linear_load_state_dict = patches.patch(__name__, torch.nn.Linear, '_load_from_state_dict', networks.network_Linear_load_state_dict) 11 | self.Conv2d_forward = patches.patch(__name__, torch.nn.Conv2d, 'forward', networks.network_Conv2d_forward) 12 | self.Conv2d_load_state_dict = patches.patch(__name__, torch.nn.Conv2d, '_load_from_state_dict', networks.network_Conv2d_load_state_dict) 13 | self.GroupNorm_forward = patches.patch(__name__, torch.nn.GroupNorm, 'forward', networks.network_GroupNorm_forward) 14 | self.GroupNorm_load_state_dict = patches.patch(__name__, torch.nn.GroupNorm, '_load_from_state_dict', networks.network_GroupNorm_load_state_dict) 15 | self.LayerNorm_forward = patches.patch(__name__, torch.nn.LayerNorm, 'forward', networks.network_LayerNorm_forward) 16 | self.LayerNorm_load_state_dict = patches.patch(__name__, torch.nn.LayerNorm, '_load_from_state_dict', networks.network_LayerNorm_load_state_dict) 17 | self.MultiheadAttention_forward = patches.patch(__name__, torch.nn.MultiheadAttention, 'forward', networks.network_MultiheadAttention_forward) 18 | self.MultiheadAttention_load_state_dict = patches.patch(__name__, torch.nn.MultiheadAttention, '_load_from_state_dict', networks.network_MultiheadAttention_load_state_dict) 19 | 20 | def undo(self): 21 | self.Linear_forward = patches.undo(__name__, torch.nn.Linear, 'forward') 22 | self.Linear_load_state_dict = patches.undo(__name__, torch.nn.Linear, '_load_from_state_dict') 23 | self.Conv2d_forward = patches.undo(__name__, torch.nn.Conv2d, 'forward') 24 | 
self.Conv2d_load_state_dict = patches.undo(__name__, torch.nn.Conv2d, '_load_from_state_dict') 25 | self.GroupNorm_forward = patches.undo(__name__, torch.nn.GroupNorm, 'forward') 26 | self.GroupNorm_load_state_dict = patches.undo(__name__, torch.nn.GroupNorm, '_load_from_state_dict') 27 | self.LayerNorm_forward = patches.undo(__name__, torch.nn.LayerNorm, 'forward') 28 | self.LayerNorm_load_state_dict = patches.undo(__name__, torch.nn.LayerNorm, '_load_from_state_dict') 29 | self.MultiheadAttention_forward = patches.undo(__name__, torch.nn.MultiheadAttention, 'forward') 30 | self.MultiheadAttention_load_state_dict = patches.undo(__name__, torch.nn.MultiheadAttention, '_load_from_state_dict') 31 | 32 | -------------------------------------------------------------------------------- /extensions-builtin/Lora/lyco_helpers.py: -------------------------------------------------------------------------------- 1 | import torch 2 | 3 | 4 | def make_weight_cp(t, wa, wb): 5 | temp = torch.einsum('i j k l, j r -> i r k l', t, wb) 6 | return torch.einsum('i j k l, i r -> r j k l', temp, wa) 7 | 8 | 9 | def rebuild_conventional(up, down, shape, dyn_dim=None): 10 | up = up.reshape(up.size(0), -1) 11 | down = down.reshape(down.size(0), -1) 12 | if dyn_dim is not None: 13 | up = up[:, :dyn_dim] 14 | down = down[:dyn_dim, :] 15 | return (up @ down).reshape(shape) 16 | 17 | 18 | def rebuild_cp_decomposition(up, down, mid): 19 | up = up.reshape(up.size(0), -1) 20 | down = down.reshape(down.size(0), -1) 21 | return torch.einsum('n m k l, i n, m j -> i j k l', mid, up, down) 22 | -------------------------------------------------------------------------------- /extensions-builtin/Lora/network_full.py: -------------------------------------------------------------------------------- 1 | import network 2 | 3 | 4 | class ModuleTypeFull(network.ModuleType): 5 | def create_module(self, net: network.Network, weights: network.NetworkWeights): 6 | if all(x in weights.w for x in ["diff"]): 7 | return NetworkModuleFull(net, weights) 8 | 9 | return None 10 | 11 | 12 | class NetworkModuleFull(network.NetworkModule): 13 | def __init__(self, net: network.Network, weights: network.NetworkWeights): 14 | super().__init__(net, weights) 15 | 16 | self.weight = weights.w.get("diff") 17 | self.ex_bias = weights.w.get("diff_b") 18 | 19 | def calc_updown(self, orig_weight): 20 | output_shape = self.weight.shape 21 | updown = self.weight.to(orig_weight.device, dtype=orig_weight.dtype) 22 | if self.ex_bias is not None: 23 | ex_bias = self.ex_bias.to(orig_weight.device, dtype=orig_weight.dtype) 24 | else: 25 | ex_bias = None 26 | 27 | return self.finalize_updown(updown, orig_weight, output_shape, ex_bias) 28 | -------------------------------------------------------------------------------- /extensions-builtin/Lora/network_hada.py: -------------------------------------------------------------------------------- 1 | import lyco_helpers 2 | import network 3 | 4 | 5 | class ModuleTypeHada(network.ModuleType): 6 | def create_module(self, net: network.Network, weights: network.NetworkWeights): 7 | if all(x in weights.w for x in ["hada_w1_a", "hada_w1_b", "hada_w2_a", "hada_w2_b"]): 8 | return NetworkModuleHada(net, weights) 9 | 10 | return None 11 | 12 | 13 | class NetworkModuleHada(network.NetworkModule): 14 | def __init__(self, net: network.Network, weights: network.NetworkWeights): 15 | super().__init__(net, weights) 16 | 17 | if hasattr(self.sd_module, 'weight'): 18 | self.shape = self.sd_module.weight.shape 19 | 20 | self.w1a = 
weights.w["hada_w1_a"] 21 | self.w1b = weights.w["hada_w1_b"] 22 | self.dim = self.w1b.shape[0] 23 | self.w2a = weights.w["hada_w2_a"] 24 | self.w2b = weights.w["hada_w2_b"] 25 | 26 | self.t1 = weights.w.get("hada_t1") 27 | self.t2 = weights.w.get("hada_t2") 28 | 29 | def calc_updown(self, orig_weight): 30 | w1a = self.w1a.to(orig_weight.device, dtype=orig_weight.dtype) 31 | w1b = self.w1b.to(orig_weight.device, dtype=orig_weight.dtype) 32 | w2a = self.w2a.to(orig_weight.device, dtype=orig_weight.dtype) 33 | w2b = self.w2b.to(orig_weight.device, dtype=orig_weight.dtype) 34 | 35 | output_shape = [w1a.size(0), w1b.size(1)] 36 | 37 | if self.t1 is not None: 38 | output_shape = [w1a.size(1), w1b.size(1)] 39 | t1 = self.t1.to(orig_weight.device, dtype=orig_weight.dtype) 40 | updown1 = lyco_helpers.make_weight_cp(t1, w1a, w1b) 41 | output_shape += t1.shape[2:] 42 | else: 43 | if len(w1b.shape) == 4: 44 | output_shape += w1b.shape[2:] 45 | updown1 = lyco_helpers.rebuild_conventional(w1a, w1b, output_shape) 46 | 47 | if self.t2 is not None: 48 | t2 = self.t2.to(orig_weight.device, dtype=orig_weight.dtype) 49 | updown2 = lyco_helpers.make_weight_cp(t2, w2a, w2b) 50 | else: 51 | updown2 = lyco_helpers.rebuild_conventional(w2a, w2b, output_shape) 52 | 53 | updown = updown1 * updown2 54 | 55 | return self.finalize_updown(updown, orig_weight, output_shape) 56 | -------------------------------------------------------------------------------- /extensions-builtin/Lora/network_ia3.py: -------------------------------------------------------------------------------- 1 | import network 2 | 3 | 4 | class ModuleTypeIa3(network.ModuleType): 5 | def create_module(self, net: network.Network, weights: network.NetworkWeights): 6 | if all(x in weights.w for x in ["weight"]): 7 | return NetworkModuleIa3(net, weights) 8 | 9 | return None 10 | 11 | 12 | class NetworkModuleIa3(network.NetworkModule): 13 | def __init__(self, net: network.Network, weights: network.NetworkWeights): 14 | super().__init__(net, weights) 15 | 16 | self.w = weights.w["weight"] 17 | self.on_input = weights.w["on_input"].item() 18 | 19 | def calc_updown(self, orig_weight): 20 | w = self.w.to(orig_weight.device, dtype=orig_weight.dtype) 21 | 22 | output_shape = [w.size(0), orig_weight.size(1)] 23 | if self.on_input: 24 | output_shape.reverse() 25 | else: 26 | w = w.reshape(-1, 1) 27 | 28 | updown = orig_weight * w 29 | 30 | return self.finalize_updown(updown, orig_weight, output_shape) 31 | -------------------------------------------------------------------------------- /extensions-builtin/Lora/network_lokr.py: -------------------------------------------------------------------------------- 1 | import torch 2 | 3 | import lyco_helpers 4 | import network 5 | 6 | 7 | class ModuleTypeLokr(network.ModuleType): 8 | def create_module(self, net: network.Network, weights: network.NetworkWeights): 9 | has_1 = "lokr_w1" in weights.w or ("lokr_w1_a" in weights.w and "lokr_w1_b" in weights.w) 10 | has_2 = "lokr_w2" in weights.w or ("lokr_w2_a" in weights.w and "lokr_w2_b" in weights.w) 11 | if has_1 and has_2: 12 | return NetworkModuleLokr(net, weights) 13 | 14 | return None 15 | 16 | 17 | def make_kron(orig_shape, w1, w2): 18 | if len(w2.shape) == 4: 19 | w1 = w1.unsqueeze(2).unsqueeze(2) 20 | w2 = w2.contiguous() 21 | return torch.kron(w1, w2).reshape(orig_shape) 22 | 23 | 24 | class NetworkModuleLokr(network.NetworkModule): 25 | def __init__(self, net: network.Network, weights: network.NetworkWeights): 26 | super().__init__(net, weights) 27 | 28 | self.w1 = 
weights.w.get("lokr_w1") 29 | self.w1a = weights.w.get("lokr_w1_a") 30 | self.w1b = weights.w.get("lokr_w1_b") 31 | self.dim = self.w1b.shape[0] if self.w1b is not None else self.dim 32 | self.w2 = weights.w.get("lokr_w2") 33 | self.w2a = weights.w.get("lokr_w2_a") 34 | self.w2b = weights.w.get("lokr_w2_b") 35 | self.dim = self.w2b.shape[0] if self.w2b is not None else self.dim 36 | self.t2 = weights.w.get("lokr_t2") 37 | 38 | def calc_updown(self, orig_weight): 39 | if self.w1 is not None: 40 | w1 = self.w1.to(orig_weight.device, dtype=orig_weight.dtype) 41 | else: 42 | w1a = self.w1a.to(orig_weight.device, dtype=orig_weight.dtype) 43 | w1b = self.w1b.to(orig_weight.device, dtype=orig_weight.dtype) 44 | w1 = w1a @ w1b 45 | 46 | if self.w2 is not None: 47 | w2 = self.w2.to(orig_weight.device, dtype=orig_weight.dtype) 48 | elif self.t2 is None: 49 | w2a = self.w2a.to(orig_weight.device, dtype=orig_weight.dtype) 50 | w2b = self.w2b.to(orig_weight.device, dtype=orig_weight.dtype) 51 | w2 = w2a @ w2b 52 | else: 53 | t2 = self.t2.to(orig_weight.device, dtype=orig_weight.dtype) 54 | w2a = self.w2a.to(orig_weight.device, dtype=orig_weight.dtype) 55 | w2b = self.w2b.to(orig_weight.device, dtype=orig_weight.dtype) 56 | w2 = lyco_helpers.make_weight_cp(t2, w2a, w2b) 57 | 58 | output_shape = [w1.size(0) * w2.size(0), w1.size(1) * w2.size(1)] 59 | if len(orig_weight.shape) == 4: 60 | output_shape = orig_weight.shape 61 | 62 | updown = make_kron(output_shape, w1, w2) 63 | 64 | return self.finalize_updown(updown, orig_weight, output_shape) 65 | -------------------------------------------------------------------------------- /extensions-builtin/Lora/network_norm.py: -------------------------------------------------------------------------------- 1 | import network 2 | 3 | 4 | class ModuleTypeNorm(network.ModuleType): 5 | def create_module(self, net: network.Network, weights: network.NetworkWeights): 6 | if all(x in weights.w for x in ["w_norm", "b_norm"]): 7 | return NetworkModuleNorm(net, weights) 8 | 9 | return None 10 | 11 | 12 | class NetworkModuleNorm(network.NetworkModule): 13 | def __init__(self, net: network.Network, weights: network.NetworkWeights): 14 | super().__init__(net, weights) 15 | 16 | self.w_norm = weights.w.get("w_norm") 17 | self.b_norm = weights.w.get("b_norm") 18 | 19 | def calc_updown(self, orig_weight): 20 | output_shape = self.w_norm.shape 21 | updown = self.w_norm.to(orig_weight.device, dtype=orig_weight.dtype) 22 | 23 | if self.b_norm is not None: 24 | ex_bias = self.b_norm.to(orig_weight.device, dtype=orig_weight.dtype) 25 | else: 26 | ex_bias = None 27 | 28 | return self.finalize_updown(updown, orig_weight, output_shape, ex_bias) 29 | -------------------------------------------------------------------------------- /extensions-builtin/Lora/preload.py: -------------------------------------------------------------------------------- 1 | import os 2 | from modules import paths 3 | 4 | 5 | def preload(parser): 6 | parser.add_argument("--lora-dir", type=str, help="Path to directory with Lora networks.", default=os.path.join(paths.models_path, 'Lora')) 7 | parser.add_argument("--lyco-dir-backcompat", type=str, help="Path to directory with LyCORIS networks (for backawards compatibility; can also use --lyco-dir).", default=os.path.join(paths.models_path, 'LyCORIS')) 8 | -------------------------------------------------------------------------------- /extensions-builtin/Lora/ui_extra_networks_lora.py: -------------------------------------------------------------------------------- 1 | 
import os 2 | 3 | import network 4 | import networks 5 | 6 | from modules import shared, ui_extra_networks 7 | from modules.ui_extra_networks import quote_js 8 | from ui_edit_user_metadata import LoraUserMetadataEditor 9 | 10 | 11 | class ExtraNetworksPageLora(ui_extra_networks.ExtraNetworksPage): 12 | def __init__(self): 13 | super().__init__('Lora') 14 | 15 | def refresh(self): 16 | networks.list_available_networks() 17 | 18 | def create_item(self, name, index=None, enable_filter=True): 19 | lora_on_disk = networks.available_networks.get(name) 20 | 21 | path, ext = os.path.splitext(lora_on_disk.filename) 22 | 23 | alias = lora_on_disk.get_alias() 24 | 25 | item = { 26 | "name": name, 27 | "filename": lora_on_disk.filename, 28 | "shorthash": lora_on_disk.shorthash, 29 | "preview": self.find_preview(path), 30 | "description": self.find_description(path), 31 | "search_term": self.search_terms_from_path(lora_on_disk.filename) + " " + (lora_on_disk.hash or ""), 32 | "local_preview": f"{path}.{shared.opts.samples_format}", 33 | "metadata": lora_on_disk.metadata, 34 | "sort_keys": {'default': index, **self.get_sort_keys(lora_on_disk.filename)}, 35 | "sd_version": lora_on_disk.sd_version.name, 36 | } 37 | 38 | self.read_user_metadata(item) 39 | activation_text = item["user_metadata"].get("activation text") 40 | preferred_weight = item["user_metadata"].get("preferred weight", 0.0) 41 | item["prompt"] = quote_js(f"<lora:{alias}:") + " + " + (str(preferred_weight) if preferred_weight else "opts.extra_networks_default_multiplier") + " + " + quote_js(">") 42 | 43 | if activation_text: 44 | item["prompt"] += " + " + quote_js(" " + activation_text) 45 | 46 | sd_version = item["user_metadata"].get("sd version") 47 | if sd_version in network.SdVersion.__members__: 48 | item["sd_version"] = sd_version 49 | sd_version = network.SdVersion[sd_version] 50 | else: 51 | sd_version = lora_on_disk.sd_version 52 | 53 | if shared.opts.lora_show_all or not enable_filter: 54 | pass 55 | elif sd_version == network.SdVersion.Unknown: 56 | model_version = network.SdVersion.SDXL if shared.sd_model.is_sdxl else network.SdVersion.SD2 if shared.sd_model.is_sd2 else network.SdVersion.SD1 57 | if model_version.name in shared.opts.lora_hide_unknown_for_versions: 58 | return None 59 | elif shared.sd_model.is_sdxl and sd_version != network.SdVersion.SDXL: 60 | return None 61 | elif shared.sd_model.is_sd2 and sd_version != network.SdVersion.SD2: 62 | return None 63 | elif shared.sd_model.is_sd1 and sd_version != network.SdVersion.SD1: 64 | return None 65 | 66 | return item 67 | 68 | def list_items(self): 69 | for index, name in enumerate(networks.available_networks): 70 | item = self.create_item(name, index) 71 | 72 | if item is not None: 73 | yield item 74 | 75 | def allowed_directories_for_previews(self): 76 | return [shared.cmd_opts.lora_dir, shared.cmd_opts.lyco_dir_backcompat] 77 | 78 | def create_user_metadata_editor(self, ui, tabname): 79 | return LoraUserMetadataEditor(ui, tabname, self) 80 | -------------------------------------------------------------------------------- /extensions-builtin/ScuNET/preload.py: -------------------------------------------------------------------------------- 1 | import os 2 | from modules import paths 3 | 4 | 5 | def preload(parser): 6 | parser.add_argument("--scunet-models-path", type=str, help="Path to directory with ScuNET model file(s).", default=os.path.join(paths.models_path, 'ScuNET')) 7 | -------------------------------------------------------------------------------- /extensions-builtin/SwinIR/preload.py: -------------------------------------------------------------------------------- 1 | import os 2 | from modules 
import paths 3 | 4 | 5 | def preload(parser): 6 | parser.add_argument("--swinir-models-path", type=str, help="Path to directory with SwinIR model file(s).", default=os.path.join(paths.models_path, 'SwinIR')) 7 | -------------------------------------------------------------------------------- /extensions-builtin/canvas-zoom-and-pan/scripts/hotkey_config.py: -------------------------------------------------------------------------------- 1 | import gradio as gr 2 | from modules import shared 3 | 4 | shared.options_templates.update(shared.options_section(('canvas_hotkey', "Canvas Hotkeys"), { 5 | "canvas_hotkey_zoom": shared.OptionInfo("Alt", "Zoom canvas", gr.Radio, {"choices": ["Shift","Ctrl", "Alt"]}).info("If you choose 'Shift' you cannot scroll horizontally; 'Alt' can cause a little trouble in Firefox"), 6 | "canvas_hotkey_adjust": shared.OptionInfo("Ctrl", "Adjust brush size", gr.Radio, {"choices": ["Shift","Ctrl", "Alt"]}).info("If you choose 'Shift' you cannot scroll horizontally; 'Alt' can cause a little trouble in Firefox"), 7 | "canvas_hotkey_move": shared.OptionInfo("F", "Moving the canvas").info("To work correctly in Firefox, turn off 'Automatically search the page text when typing' in the browser settings"), 8 | "canvas_hotkey_fullscreen": shared.OptionInfo("S", "Fullscreen mode: maximizes the picture so that it fits into the screen and stretches it to its full width"), 9 | "canvas_hotkey_reset": shared.OptionInfo("R", "Reset zoom and canvas position"), 10 | "canvas_hotkey_overlap": shared.OptionInfo("O", "Toggle overlap").info("Technical button, needed for testing"), 11 | "canvas_show_tooltip": shared.OptionInfo(True, "Enable tooltip on the canvas"), 12 | "canvas_auto_expand": shared.OptionInfo(True, "Automatically expands an image that does not fit completely in the canvas area, similar to manually pressing the S and R buttons"), 13 | "canvas_blur_prompt": shared.OptionInfo(False, "Take the focus off the prompt when working with a canvas"), 14 | "canvas_disabled_functions": shared.OptionInfo(["Overlap"], "Disable functions that you don't use", gr.CheckboxGroup, {"choices": ["Zoom","Adjust brush size", "Moving canvas","Fullscreen","Reset Zoom","Overlap"]}), 15 | })) 16 | -------------------------------------------------------------------------------- /extensions-builtin/canvas-zoom-and-pan/style.css: -------------------------------------------------------------------------------- 1 | .canvas-tooltip-info { 2 | position: absolute; 3 | top: 10px; 4 | left: 10px; 5 | cursor: help; 6 | background-color: rgba(0, 0, 0, 0.3); 7 | width: 20px; 8 | height: 20px; 9 | border-radius: 50%; 10 | display: flex; 11 | align-items: center; 12 | justify-content: center; 13 | flex-direction: column; 14 | 15 | z-index: 100; 16 | } 17 | 18 | .canvas-tooltip-info::after { 19 | content: ''; 20 | display: block; 21 | width: 2px; 22 | height: 7px; 23 | background-color: white; 24 | margin-top: 2px; 25 | } 26 | 27 | .canvas-tooltip-info::before { 28 | content: ''; 29 | display: block; 30 | width: 2px; 31 | height: 2px; 32 | background-color: white; 33 | } 34 | 35 | .canvas-tooltip-content { 36 | display: none; 37 | background-color: #f9f9f9; 38 | color: #333; 39 | border: 1px solid #ddd; 40 | padding: 15px; 41 | position: absolute; 42 | top: 40px; 43 | left: 10px; 44 | width: 250px; 45 | font-size: 16px; 46 | opacity: 0; 47 | border-radius: 8px; 48 | box-shadow: 0px 8px 16px 0px rgba(0,0,0,0.2); 49 | 50 | z-index: 100; 51 | } 52 | 53 | .canvas-tooltip:hover .canvas-tooltip-content { 54 | display: block; 
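/* `display: block` above makes the tooltip visible on hover; the animation declared below replays the fadeIn keyframes (defined at the end of this file), ramping opacity from 0 to 1 so the tooltip eases in instead of popping. */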
55 | animation: fadeIn 0.5s; 56 | opacity: 1; 57 | } 58 | 59 | @keyframes fadeIn { 60 | from {opacity: 0;} 61 | to {opacity: 1;} 62 | } 63 | 64 | .styler { 65 | overflow:inherit !important; 66 | } -------------------------------------------------------------------------------- /extensions-builtin/extra-options-section/scripts/extra_options_section.py: -------------------------------------------------------------------------------- 1 | import math 2 | 3 | import gradio as gr 4 | from modules import scripts, shared, ui_components, ui_settings, generation_parameters_copypaste 5 | from modules.ui_components import FormColumn 6 | 7 | 8 | class ExtraOptionsSection(scripts.Script): 9 | section = "extra_options" 10 | 11 | def __init__(self): 12 | self.comps = None 13 | self.setting_names = None 14 | 15 | def title(self): 16 | return "Extra options" 17 | 18 | def show(self, is_img2img): 19 | return scripts.AlwaysVisible 20 | 21 | def ui(self, is_img2img): 22 | self.comps = [] 23 | self.setting_names = [] 24 | self.infotext_fields = [] 25 | extra_options = shared.opts.extra_options_img2img if is_img2img else shared.opts.extra_options_txt2img 26 | 27 | mapping = {k: v for v, k in generation_parameters_copypaste.infotext_to_setting_name_mapping} 28 | 29 | with gr.Blocks() as interface: 30 | with gr.Accordion("Options", open=False) if shared.opts.extra_options_accordion and extra_options else gr.Group(): 31 | 32 | row_count = math.ceil(len(extra_options) / shared.opts.extra_options_cols) 33 | 34 | for row in range(row_count): 35 | with gr.Row(): 36 | for col in range(shared.opts.extra_options_cols): 37 | index = row * shared.opts.extra_options_cols + col 38 | if index >= len(extra_options): 39 | break 40 | 41 | setting_name = extra_options[index] 42 | 43 | with FormColumn(): 44 | comp = ui_settings.create_setting_component(setting_name) 45 | 46 | self.comps.append(comp) 47 | self.setting_names.append(setting_name) 48 | 49 | setting_infotext_name = mapping.get(setting_name) 50 | if setting_infotext_name is not None: 51 | self.infotext_fields.append((comp, setting_infotext_name)) 52 | 53 | def get_settings_values(): 54 | res = [ui_settings.get_value_for_setting(key) for key in self.setting_names] 55 | return res[0] if len(res) == 1 else res 56 | 57 | interface.load(fn=get_settings_values, inputs=[], outputs=self.comps, queue=False, show_progress=False) 58 | 59 | return self.comps 60 | 61 | def before_process(self, p, *args): 62 | for name, value in zip(self.setting_names, args): 63 | if name not in p.override_settings: 64 | p.override_settings[name] = value 65 | 66 | 67 | shared.options_templates.update(shared.options_section(('ui', "User interface"), { 68 | "extra_options_txt2img": shared.OptionInfo([], "Options in main UI - txt2img", ui_components.DropdownMulti, lambda: {"choices": list(shared.opts.data_labels.keys())}).js("info", "settingsHintsShowQuicksettings").info("setting entries that also appear in txt2img interfaces").needs_reload_ui(), 69 | "extra_options_img2img": shared.OptionInfo([], "Options in main UI - img2img", ui_components.DropdownMulti, lambda: {"choices": list(shared.opts.data_labels.keys())}).js("info", "settingsHintsShowQuicksettings").info("setting entries that also appear in img2img interfaces").needs_reload_ui(), 70 | "extra_options_cols": shared.OptionInfo(1, "Options in main UI - number of columns", gr.Number, {"precision": 0}).needs_reload_ui(), 71 | "extra_options_accordion": shared.OptionInfo(False, "Options in main UI - place into an accordion").needs_reload_ui() 72 | })) 
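# Illustrative note (not part of the original file): values picked in this
# section do not change saved settings; before_process() copies each one into
# p.override_settings, so every choice acts as a one-generation override, e.g.:
#
#   p.override_settings["CLIP_stop_at_last_layers"] = 2   # existing option key, shown for illustration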
73 | 74 | 75 | -------------------------------------------------------------------------------- /extensions-builtin/mobile/javascript/mobile.js: -------------------------------------------------------------------------------- 1 | var isSetupForMobile = false; 2 | 3 | function isMobile() { 4 | for (var tab of ["txt2img", "img2img"]) { 5 | var imageTab = gradioApp().getElementById(tab + '_results'); 6 | if (imageTab && imageTab.offsetParent && imageTab.offsetLeft == 0) { 7 | return true; 8 | } 9 | } 10 | 11 | return false; 12 | } 13 | 14 | function reportWindowSize() { 15 | var currentlyMobile = isMobile(); 16 | if (currentlyMobile == isSetupForMobile) return; 17 | isSetupForMobile = currentlyMobile; 18 | 19 | for (var tab of ["txt2img", "img2img"]) { 20 | var button = gradioApp().getElementById(tab + '_generate_box'); 21 | var target = gradioApp().getElementById(currentlyMobile ? tab + '_results' : tab + '_actions_column'); 22 | target.insertBefore(button, target.firstElementChild); 23 | 24 | gradioApp().getElementById(tab + '_results').classList.toggle('mobile', currentlyMobile); 25 | } 26 | } 27 | 28 | window.addEventListener("resize", reportWindowSize); 29 | 30 | onUiLoaded(function() { 31 | reportWindowSize(); 32 | }); 33 | -------------------------------------------------------------------------------- /extensions-builtin/prompt-bracket-checker/javascript/prompt-bracket-checker.js: -------------------------------------------------------------------------------- 1 | // Stable Diffusion WebUI - Bracket checker 2 | // By Hingashi no Florin/Bwin4L & @akx 3 | // Counts open and closed brackets (round, square, curly) in the prompt and negative prompt text boxes in the txt2img and img2img tabs. 4 | // If there's a mismatch, the keyword counter turns red and if you hover on it, a tooltip tells you what's wrong. 
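// Illustrative example (not part of the original source): for the prompt
// "((masterpiece), [day" the regex below tallies {"(": 2, ")": 1, "[": 1},
// so checkPair() records mismatches for (...) and [...]; the counter element
// then gets the "error" class and its title lists both messages.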
5 | 6 | function checkBrackets(textArea, counterElt) { 7 | var counts = {}; 8 | (textArea.value.match(/[(){}[\]]/g) || []).forEach(bracket => { 9 | counts[bracket] = (counts[bracket] || 0) + 1; 10 | }); 11 | var errors = []; 12 | 13 | function checkPair(open, close, kind) { 14 | if (counts[open] !== counts[close]) { 15 | errors.push( 16 | `${open}...${close} - Detected ${counts[open] || 0} opening and ${counts[close] || 0} closing ${kind}.` 17 | ); 18 | } 19 | } 20 | 21 | checkPair('(', ')', 'round brackets'); 22 | checkPair('[', ']', 'square brackets'); 23 | checkPair('{', '}', 'curly brackets'); 24 | counterElt.title = errors.join('\n'); 25 | counterElt.classList.toggle('error', errors.length !== 0); 26 | } 27 | 28 | function setupBracketChecking(id_prompt, id_counter) { 29 | var textarea = gradioApp().querySelector("#" + id_prompt + " > label > textarea"); 30 | var counter = gradioApp().getElementById(id_counter); 31 | 32 | if (textarea && counter) { 33 | textarea.addEventListener("input", () => checkBrackets(textarea, counter)); 34 | } 35 | } 36 | 37 | onUiLoaded(function() { 38 | setupBracketChecking('txt2img_prompt', 'txt2img_token_counter'); 39 | setupBracketChecking('txt2img_neg_prompt', 'txt2img_negative_token_counter'); 40 | setupBracketChecking('img2img_prompt', 'img2img_token_counter'); 41 | setupBracketChecking('img2img_neg_prompt', 'img2img_negative_token_counter'); 42 | }); 43 | -------------------------------------------------------------------------------- /extensions/put extensions here.txt: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/openvinotoolkit/stable-diffusion-webui/e5a634da06c62d72dbdc764b16c65ef3408aa588/extensions/put extensions here.txt -------------------------------------------------------------------------------- /first-time-runner.bat: -------------------------------------------------------------------------------- 1 | @echo off 2 | 3 | set "filePath=%cd%\webui-user.bat" 4 | 5 | 6 | ( 7 | echo @echo off 8 | echo. 9 | echo set GIT= 10 | echo set VENV_DIR= 11 | echo set COMMANDLINE_ARGS=--skip-torch-cuda-test --precision full --no-half 12 | echo set PYTORCH_TRACING_MODE=TORCHFX 13 | echo. 14 | echo call webui.bat 15 | 16 | ) > "%filePath%" 17 | 18 | 19 | call webui-user.bat 20 | 21 | pause 22 | -------------------------------------------------------------------------------- /html/card-no-preview.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/openvinotoolkit/stable-diffusion-webui/e5a634da06c62d72dbdc764b16c65ef3408aa588/html/card-no-preview.png -------------------------------------------------------------------------------- /html/extra-networks-card.html: -------------------------------------------------------------------------------- 1 |
2 | {background_image} 3 |
4 | {metadata_button} 5 | {edit_button} 6 |
7 |
8 |
9 | 10 |
11 | {name} 12 | {description} 13 |
14 |
15 | -------------------------------------------------------------------------------- /html/extra-networks-no-cards.html: -------------------------------------------------------------------------------- 1 |
2 |

Nothing here. Add some content to the following directories:

3 | 4 |
    5 | {dirs} 6 |
7 |
8 | 9 | -------------------------------------------------------------------------------- /html/footer.html: -------------------------------------------------------------------------------- 1 |
2 | API 3 |  •  4 | Github 5 |  •  6 | Gradio 7 |  •  8 | Startup profile 9 |  •  10 | Reload UI 11 |
12 |
13 |
14 | {versions} 15 |
16 | -------------------------------------------------------------------------------- /javascript/edit-order.js: -------------------------------------------------------------------------------- 1 | /* alt+left/right moves text in prompt */ 2 | 3 | function keyupEditOrder(event) { 4 | if (!opts.keyedit_move) return; 5 | 6 | let target = event.originalTarget || event.composedPath()[0]; 7 | if (!target.matches("*:is([id*='_toprow'] [id*='_prompt'], .prompt) textarea")) return; 8 | if (!event.altKey) return; 9 | 10 | let isLeft = event.key == "ArrowLeft"; 11 | let isRight = event.key == "ArrowRight"; 12 | if (!isLeft && !isRight) return; 13 | event.preventDefault(); 14 | 15 | let selectionStart = target.selectionStart; 16 | let selectionEnd = target.selectionEnd; 17 | let text = target.value; 18 | let items = text.split(","); 19 | let indexStart = (text.slice(0, selectionStart).match(/,/g) || []).length; 20 | let indexEnd = (text.slice(0, selectionEnd).match(/,/g) || []).length; 21 | let range = indexEnd - indexStart + 1; 22 | 23 | if (isLeft && indexStart > 0) { 24 | items.splice(indexStart - 1, 0, ...items.splice(indexStart, range)); 25 | target.value = items.join(); 26 | target.selectionStart = items.slice(0, indexStart - 1).join().length + (indexStart == 1 ? 0 : 1); 27 | target.selectionEnd = items.slice(0, indexEnd).join().length; 28 | } else if (isRight && indexEnd < items.length - 1) { 29 | items.splice(indexStart + 1, 0, ...items.splice(indexStart, range)); 30 | target.value = items.join(); 31 | target.selectionStart = items.slice(0, indexStart + 1).join().length + 1; 32 | target.selectionEnd = items.slice(0, indexEnd + 2).join().length; 33 | } 34 | 35 | event.preventDefault(); 36 | updateInput(target); 37 | } 38 | 39 | addEventListener('keydown', (event) => { 40 | keyupEditOrder(event); 41 | }); 42 | -------------------------------------------------------------------------------- /javascript/extensions.js: -------------------------------------------------------------------------------- 1 | 2 | function extensions_apply(_disabled_list, _update_list, disable_all) { 3 | var disable = []; 4 | var update = []; 5 | 6 | gradioApp().querySelectorAll('#extensions input[type="checkbox"]').forEach(function(x) { 7 | if (x.name.startsWith("enable_") && !x.checked) { 8 | disable.push(x.name.substring(7)); 9 | } 10 | 11 | if (x.name.startsWith("update_") && x.checked) { 12 | update.push(x.name.substring(7)); 13 | } 14 | }); 15 | 16 | restart_reload(); 17 | 18 | return [JSON.stringify(disable), JSON.stringify(update), disable_all]; 19 | } 20 | 21 | function extensions_check() { 22 | var disable = []; 23 | 24 | gradioApp().querySelectorAll('#extensions input[type="checkbox"]').forEach(function(x) { 25 | if (x.name.startsWith("enable_") && !x.checked) { 26 | disable.push(x.name.substring(7)); 27 | } 28 | }); 29 | 30 | gradioApp().querySelectorAll('#extensions .extension_status').forEach(function(x) { 31 | x.innerHTML = "Loading..."; 32 | }); 33 | 34 | 35 | var id = randomId(); 36 | requestProgress(id, gradioApp().getElementById('extensions_installed_html'), null, function() { 37 | 38 | }); 39 | 40 | return [id, JSON.stringify(disable)]; 41 | } 42 | 43 | function install_extension_from_index(button, url) { 44 | button.disabled = "disabled"; 45 | button.value = "Installing..."; 46 | 47 | var textarea = gradioApp().querySelector('#extension_to_install textarea'); 48 | textarea.value = url; 49 | updateInput(textarea); 50 | 51 | gradioApp().querySelector('#install_extension_button').click(); 52 | } 53 | 54 
| function config_state_confirm_restore(_, config_state_name, config_restore_type) { 55 | if (config_state_name == "Current") { 56 | return [false, config_state_name, config_restore_type]; 57 | } 58 | let restored = ""; 59 | if (config_restore_type == "extensions") { 60 | restored = "all saved extension versions"; 61 | } else if (config_restore_type == "webui") { 62 | restored = "the webui version"; 63 | } else { 64 | restored = "the webui version and all saved extension versions"; 65 | } 66 | let confirmed = confirm("Are you sure you want to restore from this state?\nThis will reset " + restored + "."); 67 | if (confirmed) { 68 | restart_reload(); 69 | gradioApp().querySelectorAll('#extensions .extension_status').forEach(function(x) { 70 | x.innerHTML = "Loading..."; 71 | }); 72 | } 73 | return [confirmed, config_state_name, config_restore_type]; 74 | } 75 | 76 | function toggle_all_extensions(event) { 77 | gradioApp().querySelectorAll('#extensions .extension_toggle').forEach(function(checkbox_el) { 78 | checkbox_el.checked = event.target.checked; 79 | }); 80 | } 81 | 82 | function toggle_extension() { 83 | let all_extensions_toggled = true; 84 | for (const checkbox_el of gradioApp().querySelectorAll('#extensions .extension_toggle')) { 85 | if (!checkbox_el.checked) { 86 | all_extensions_toggled = false; 87 | break; 88 | } 89 | } 90 | 91 | gradioApp().querySelector('#extensions .all_extensions_toggle').checked = all_extensions_toggled; 92 | } 93 | -------------------------------------------------------------------------------- /javascript/generationParams.js: -------------------------------------------------------------------------------- 1 | // attaches listeners to the txt2img and img2img galleries to update displayed generation param text when the image changes 2 | 3 | let txt2img_gallery, img2img_gallery, modal = undefined; 4 | onAfterUiUpdate(function() { 5 | if (!txt2img_gallery) { 6 | txt2img_gallery = attachGalleryListeners("txt2img"); 7 | } 8 | if (!img2img_gallery) { 9 | img2img_gallery = attachGalleryListeners("img2img"); 10 | } 11 | if (!modal) { 12 | modal = gradioApp().getElementById('lightboxModal'); 13 | modalObserver.observe(modal, {attributes: true, attributeFilter: ['style']}); 14 | } 15 | }); 16 | 17 | let modalObserver = new MutationObserver(function(mutations) { 18 | mutations.forEach(function(mutationRecord) { 19 | let selectedTab = gradioApp().querySelector('#tabs div button.selected')?.innerText; 20 | if (mutationRecord.target.style.display === 'none' && (selectedTab === 'txt2img' || selectedTab === 'img2img')) { 21 | gradioApp().getElementById(selectedTab + "_generation_info_button")?.click(); 22 | } 23 | }); 24 | }); 25 | 26 | function attachGalleryListeners(tab_name) { 27 | var gallery = gradioApp().querySelector('#' + tab_name + '_gallery'); 28 | gallery?.addEventListener('click', () => gradioApp().getElementById(tab_name + "_generation_info_button").click()); 29 | gallery?.addEventListener('keydown', (e) => { 30 | if (e.keyCode == 37 || e.keyCode == 39) { // left or right arrow 31 | gradioApp().getElementById(tab_name + "_generation_info_button").click(); 32 | } 33 | }); 34 | return gallery; 35 | } 36 | -------------------------------------------------------------------------------- /javascript/hires_fix.js: -------------------------------------------------------------------------------- 1 | 2 | function onCalcResolutionHires(enable, width, height, hr_scale, hr_resize_x, hr_resize_y) { 3 | function setInactive(elem, inactive) { 4 | 
elem.classList.toggle('inactive', !!inactive); 5 | } 6 | 7 | var hrUpscaleBy = gradioApp().getElementById('txt2img_hr_scale'); 8 | var hrResizeX = gradioApp().getElementById('txt2img_hr_resize_x'); 9 | var hrResizeY = gradioApp().getElementById('txt2img_hr_resize_y'); 10 | 11 | gradioApp().getElementById('txt2img_hires_fix_row2').style.display = opts.use_old_hires_fix_width_height ? "none" : ""; 12 | 13 | setInactive(hrUpscaleBy, opts.use_old_hires_fix_width_height || hr_resize_x > 0 || hr_resize_y > 0); 14 | setInactive(hrResizeX, opts.use_old_hires_fix_width_height || hr_resize_x == 0); 15 | setInactive(hrResizeY, opts.use_old_hires_fix_width_height || hr_resize_y == 0); 16 | 17 | return [enable, width, height, hr_scale, hr_resize_x, hr_resize_y]; 18 | } 19 | -------------------------------------------------------------------------------- /javascript/imageMaskFix.js: -------------------------------------------------------------------------------- 1 | /** 2 | * temporary fix for https://github.com/AUTOMATIC1111/stable-diffusion-webui/issues/668 3 | * @see https://github.com/gradio-app/gradio/issues/1721 4 | */ 5 | function imageMaskResize() { 6 | const canvases = gradioApp().querySelectorAll('#img2maskimg .touch-none canvas'); 7 | if (!canvases.length) { 8 | window.removeEventListener('resize', imageMaskResize); 9 | return; 10 | } 11 | 12 | const wrapper = canvases[0].closest('.touch-none'); 13 | const previewImage = wrapper.previousElementSibling; 14 | 15 | if (!previewImage.complete) { 16 | previewImage.addEventListener('load', imageMaskResize); 17 | return; 18 | } 19 | 20 | const w = previewImage.width; 21 | const h = previewImage.height; 22 | const nw = previewImage.naturalWidth; 23 | const nh = previewImage.naturalHeight; 24 | const portrait = nh > nw; 25 | 26 | const wW = Math.min(w, portrait ? h / nh * nw : w / nw * nw); 27 | const wH = Math.min(h, portrait ? h / nh * nh : w / nw * nh); 28 | 29 | wrapper.style.width = `${wW}px`; 30 | wrapper.style.height = `${wH}px`; 31 | wrapper.style.left = `0px`; 32 | wrapper.style.top = `0px`; 33 | 34 | canvases.forEach(c => { 35 | c.style.width = c.style.height = ''; 36 | c.style.maxWidth = '100%'; 37 | c.style.maxHeight = '100%'; 38 | c.style.objectFit = 'contain'; 39 | }); 40 | } 41 | 42 | onAfterUiUpdate(imageMaskResize); 43 | window.addEventListener('resize', imageMaskResize); 44 | -------------------------------------------------------------------------------- /javascript/imageviewerGamepad.js: -------------------------------------------------------------------------------- 1 | let gamepads = []; 2 | 3 | window.addEventListener('gamepadconnected', (e) => { 4 | const index = e.gamepad.index; 5 | let isWaiting = false; 6 | gamepads[index] = setInterval(async() => { 7 | if (!opts.js_modal_lightbox_gamepad || isWaiting) return; 8 | const gamepad = navigator.getGamepads()[index]; 9 | const xValue = gamepad.axes[0]; 10 | if (xValue <= -0.3) { 11 | modalPrevImage(e); 12 | isWaiting = true; 13 | } else if (xValue >= 0.3) { 14 | modalNextImage(e); 15 | isWaiting = true; 16 | } 17 | if (isWaiting) { 18 | await sleepUntil(() => { 19 | const xValue = navigator.getGamepads()[index].axes[0]; 20 | if (xValue < 0.3 && xValue > -0.3) { 21 | return true; 22 | } 23 | }, opts.js_modal_lightbox_gamepad_repeat); 24 | isWaiting = false; 25 | } 26 | }, 10); 27 | }); 28 | 29 | window.addEventListener('gamepaddisconnected', (e) => { 30 | clearInterval(gamepads[e.gamepad.index]); 31 | }); 32 | 33 | /* 34 | Primarily for vr controller type pointer devices. 
35 | I use the wheel event because there's currently no way to do it properly with web xr. 36 | */ 37 | let isScrolling = false; 38 | window.addEventListener('wheel', (e) => { 39 | if (!opts.js_modal_lightbox_gamepad || isScrolling) return; 40 | isScrolling = true; 41 | 42 | if (e.deltaX <= -0.6) { 43 | modalPrevImage(e); 44 | } else if (e.deltaX >= 0.6) { 45 | modalNextImage(e); 46 | } 47 | 48 | setTimeout(() => { 49 | isScrolling = false; 50 | }, opts.js_modal_lightbox_gamepad_repeat); 51 | }); 52 | 53 | function sleepUntil(f, timeout) { 54 | return new Promise((resolve) => { 55 | const timeStart = new Date(); 56 | const wait = setInterval(function() { 57 | if (f() || new Date() - timeStart > timeout) { 58 | clearInterval(wait); 59 | resolve(); 60 | } 61 | }, 20); 62 | }); 63 | } 64 | -------------------------------------------------------------------------------- /javascript/inputAccordion.js: -------------------------------------------------------------------------------- 1 | var observerAccordionOpen = new MutationObserver(function(mutations) { 2 | mutations.forEach(function(mutationRecord) { 3 | var elem = mutationRecord.target; 4 | var open = elem.classList.contains('open'); 5 | 6 | var accordion = elem.parentNode; 7 | accordion.classList.toggle('input-accordion-open', open); 8 | 9 | var checkbox = gradioApp().querySelector('#' + accordion.id + "-checkbox input"); 10 | checkbox.checked = open; 11 | updateInput(checkbox); 12 | 13 | var extra = gradioApp().querySelector('#' + accordion.id + "-extra"); 14 | if (extra) { 15 | extra.style.display = open ? "" : "none"; 16 | } 17 | }); 18 | }); 19 | 20 | function inputAccordionChecked(id, checked) { 21 | var label = gradioApp().querySelector('#' + id + " .label-wrap"); 22 | if (label.classList.contains('open') != checked) { 23 | label.click(); 24 | } 25 | } 26 | 27 | onUiLoaded(function() { 28 | for (var accordion of gradioApp().querySelectorAll('.input-accordion')) { 29 | var labelWrap = accordion.querySelector('.label-wrap'); 30 | observerAccordionOpen.observe(labelWrap, {attributes: true, attributeFilter: ['class']}); 31 | 32 | var extra = gradioApp().querySelector('#' + accordion.id + "-extra"); 33 | if (extra) { 34 | labelWrap.insertBefore(extra, labelWrap.lastElementChild); 35 | } 36 | } 37 | }); 38 | -------------------------------------------------------------------------------- /javascript/localStorage.js: -------------------------------------------------------------------------------- 1 | 2 | function localSet(k, v) { 3 | try { 4 | localStorage.setItem(k, v); 5 | } catch (e) { 6 | console.warn(`Failed to save ${k} to localStorage: ${e}`); 7 | } 8 | } 9 | 10 | function localGet(k, def) { 11 | try { 12 | return localStorage.getItem(k); 13 | } catch (e) { 14 | console.warn(`Failed to load ${k} from localStorage: ${e}`); 15 | } 16 | 17 | return def; 18 | } 19 | 20 | function localRemove(k) { 21 | try { 22 | return localStorage.removeItem(k); 23 | } catch (e) { 24 | console.warn(`Failed to remove ${k} from localStorage: ${e}`); 25 | } 26 | } 27 | -------------------------------------------------------------------------------- /javascript/notification.js: -------------------------------------------------------------------------------- 1 | // Monitors the gallery and sends a browser notification when the leading image is new. 
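// (Descriptive note: the "leading image" is the first thumbnail in a results
// gallery; a change in its src is what signals that a new generation finished.)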
2 | 3 | let lastHeadImg = null; 4 | 5 | let notificationButton = null; 6 | 7 | onAfterUiUpdate(function() { 8 | if (notificationButton == null) { 9 | notificationButton = gradioApp().getElementById('request_notifications'); 10 | 11 | if (notificationButton != null) { 12 | notificationButton.addEventListener('click', () => { 13 | void Notification.requestPermission(); 14 | }, true); 15 | } 16 | } 17 | 18 | const galleryPreviews = gradioApp().querySelectorAll('div[id^="tab_"] div[id$="_results"] .thumbnail-item > img'); 19 | 20 | if (galleryPreviews == null) return; 21 | 22 | const headImg = galleryPreviews[0]?.src; 23 | 24 | if (headImg == null || headImg == lastHeadImg) return; 25 | 26 | lastHeadImg = headImg; 27 | 28 | // play notification sound if available 29 | gradioApp().querySelector('#audio_notification audio')?.play(); 30 | 31 | if (document.hasFocus()) return; 32 | 33 | // Multiple copies of the images are in the DOM when one is selected. Dedup with a Set to get the real number generated. 34 | const imgs = new Set(Array.from(galleryPreviews).map(img => img.src)); 35 | 36 | const notification = new Notification( 37 | 'Stable Diffusion', 38 | { 39 | body: `Generated ${imgs.size > 1 ? imgs.size - opts.return_grid : 1} image${imgs.size > 1 ? 's' : ''}`, 40 | icon: headImg, 41 | image: headImg, 42 | } 43 | ); 44 | 45 | notification.onclick = function(_) { 46 | parent.focus(); 47 | this.close(); 48 | }; 49 | }); 50 | -------------------------------------------------------------------------------- /javascript/textualInversion.js: -------------------------------------------------------------------------------- 1 | 2 | 3 | 4 | function start_training_textual_inversion() { 5 | gradioApp().querySelector('#ti_error').innerHTML = ''; 6 | 7 | var id = randomId(); 8 | requestProgress(id, gradioApp().getElementById('ti_output'), gradioApp().getElementById('ti_gallery'), function() {}, function(progress) { 9 | gradioApp().getElementById('ti_progress').innerHTML = progress.textinfo; 10 | }); 11 | 12 | var res = Array.from(arguments); 13 | 14 | res[0] = id; 15 | 16 | return res; 17 | } 18 | -------------------------------------------------------------------------------- /javascript/token-counters.js: -------------------------------------------------------------------------------- 1 | let promptTokenCountDebounceTime = 800; 2 | let promptTokenCountTimeouts = {}; 3 | var promptTokenCountUpdateFunctions = {}; 4 | 5 | function update_txt2img_tokens(...args) { 6 | // Called from Gradio 7 | update_token_counter("txt2img_token_button"); 8 | if (args.length == 2) { 9 | return args[0]; 10 | } 11 | return args; 12 | } 13 | 14 | function update_img2img_tokens(...args) { 15 | // Called from Gradio 16 | update_token_counter("img2img_token_button"); 17 | if (args.length == 2) { 18 | return args[0]; 19 | } 20 | return args; 21 | } 22 | 23 | function update_token_counter(button_id) { 24 | if (opts.disable_token_counters) { 25 | return; 26 | } 27 | if (promptTokenCountTimeouts[button_id]) { 28 | clearTimeout(promptTokenCountTimeouts[button_id]); 29 | } 30 | promptTokenCountTimeouts[button_id] = setTimeout( 31 | () => gradioApp().getElementById(button_id)?.click(), 32 | promptTokenCountDebounceTime, 33 | ); 34 | } 35 | 36 | 37 | function recalculatePromptTokens(name) { 38 | promptTokenCountUpdateFunctions[name]?.(); 39 | } 40 | 41 | function recalculate_prompts_txt2img() { 42 | // Called from Gradio 43 | recalculatePromptTokens('txt2img_prompt'); 44 | recalculatePromptTokens('txt2img_neg_prompt'); 45 | return 
Array.from(arguments); 46 | } 47 | 48 | function recalculate_prompts_img2img() { 49 | // Called from Gradio 50 | recalculatePromptTokens('img2img_prompt'); 51 | recalculatePromptTokens('img2img_neg_prompt'); 52 | return Array.from(arguments); 53 | } 54 | 55 | function setupTokenCounting(id, id_counter, id_button) { 56 | var prompt = gradioApp().getElementById(id); 57 | var counter = gradioApp().getElementById(id_counter); 58 | var textarea = gradioApp().querySelector(`#${id} > label > textarea`); 59 | 60 | if (opts.disable_token_counters) { 61 | counter.style.display = "none"; 62 | return; 63 | } 64 | 65 | if (counter.parentElement == prompt.parentElement) { 66 | return; 67 | } 68 | 69 | prompt.parentElement.insertBefore(counter, prompt); 70 | prompt.parentElement.style.position = "relative"; 71 | 72 | promptTokenCountUpdateFunctions[id] = function() { 73 | update_token_counter(id_button); 74 | }; 75 | textarea.addEventListener("input", promptTokenCountUpdateFunctions[id]); 76 | } 77 | 78 | function setupTokenCounters() { 79 | setupTokenCounting('txt2img_prompt', 'txt2img_token_counter', 'txt2img_token_button'); 80 | setupTokenCounting('txt2img_neg_prompt', 'txt2img_negative_token_counter', 'txt2img_negative_token_button'); 81 | setupTokenCounting('img2img_prompt', 'img2img_token_counter', 'img2img_token_button'); 82 | setupTokenCounting('img2img_neg_prompt', 'img2img_negative_token_counter', 'img2img_negative_token_button'); 83 | } 84 | -------------------------------------------------------------------------------- /javascript/ui_settings_hints.js: -------------------------------------------------------------------------------- 1 | // various hints and extra info for the settings tab 2 | 3 | var settingsHintsSetup = false; 4 | 5 | onOptionsChanged(function() { 6 | if (settingsHintsSetup) return; 7 | settingsHintsSetup = true; 8 | 9 | gradioApp().querySelectorAll('#settings [id^=setting_]').forEach(function(div) { 10 | var name = div.id.substr(8); 11 | var commentBefore = opts._comments_before[name]; 12 | var commentAfter = opts._comments_after[name]; 13 | 14 | if (!commentBefore && !commentAfter) return; 15 | 16 | var span = null; 17 | if (div.classList.contains('gradio-checkbox')) span = div.querySelector('label span'); 18 | else if (div.classList.contains('gradio-checkboxgroup')) span = div.querySelector('span').firstChild; 19 | else if (div.classList.contains('gradio-radio')) span = div.querySelector('span').firstChild; 20 | else span = div.querySelector('label span').firstChild; 21 | 22 | if (!span) return; 23 | 24 | if (commentBefore) { 25 | var comment = document.createElement('DIV'); 26 | comment.className = 'settings-comment'; 27 | comment.innerHTML = commentBefore; 28 | span.parentElement.insertBefore(document.createTextNode('\xa0'), span); 29 | span.parentElement.insertBefore(comment, span); 30 | span.parentElement.insertBefore(document.createTextNode('\xa0'), span); 31 | } 32 | if (commentAfter) { 33 | comment = document.createElement('DIV'); 34 | comment.className = 'settings-comment'; 35 | comment.innerHTML = commentAfter; 36 | span.parentElement.insertBefore(comment, span.nextSibling); 37 | span.parentElement.insertBefore(document.createTextNode('\xa0'), span.nextSibling); 38 | } 39 | }); 40 | }); 41 | 42 | function settingsHintsShowQuicksettings() { 43 | requestGet("./internal/quicksettings-hint", {}, function(data) { 44 | var table = document.createElement('table'); 45 | table.className = 'popup-table'; 46 | 47 | data.forEach(function(obj) { 48 | var tr = 
document.createElement('tr'); 49 | var td = document.createElement('td'); 50 | td.textContent = obj.name; 51 | tr.appendChild(td); 52 | 53 | td = document.createElement('td'); 54 | td.textContent = obj.label; 55 | tr.appendChild(td); 56 | 57 | table.appendChild(tr); 58 | }); 59 | 60 | popup(table); 61 | }); 62 | } 63 | -------------------------------------------------------------------------------- /launch.py: -------------------------------------------------------------------------------- 1 | from modules import launch_utils 2 | 3 | args = launch_utils.args 4 | python = launch_utils.python 5 | git = launch_utils.git 6 | index_url = launch_utils.index_url 7 | dir_repos = launch_utils.dir_repos 8 | 9 | commit_hash = launch_utils.commit_hash 10 | git_tag = launch_utils.git_tag 11 | 12 | run = launch_utils.run 13 | is_installed = launch_utils.is_installed 14 | repo_dir = launch_utils.repo_dir 15 | 16 | run_pip = launch_utils.run_pip 17 | check_run_python = launch_utils.check_run_python 18 | git_clone = launch_utils.git_clone 19 | git_pull_recursive = launch_utils.git_pull_recursive 20 | list_extensions = launch_utils.list_extensions 21 | run_extension_installer = launch_utils.run_extension_installer 22 | prepare_environment = launch_utils.prepare_environment 23 | configure_for_tests = launch_utils.configure_for_tests 24 | start = launch_utils.start 25 | 26 | 27 | def main(): 28 | if args.dump_sysinfo: 29 | filename = launch_utils.dump_sysinfo() 30 | 31 | print(f"Sysinfo saved as {filename}. Exiting...") 32 | 33 | exit(0) 34 | 35 | launch_utils.startup_timer.record("initial startup") 36 | 37 | with launch_utils.startup_timer.subcategory("prepare environment"): 38 | if not args.skip_prepare_environment: 39 | prepare_environment() 40 | 41 | if args.test_server: 42 | configure_for_tests() 43 | 44 | start() 45 | 46 | 47 | if __name__ == "__main__": 48 | main() 49 | -------------------------------------------------------------------------------- /localizations/Put localization files here.txt: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/openvinotoolkit/stable-diffusion-webui/e5a634da06c62d72dbdc764b16c65ef3408aa588/localizations/Put localization files here.txt -------------------------------------------------------------------------------- /models/Stable-diffusion/Put Stable Diffusion checkpoints here.txt: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/openvinotoolkit/stable-diffusion-webui/e5a634da06c62d72dbdc764b16c65ef3408aa588/models/Stable-diffusion/Put Stable Diffusion checkpoints here.txt -------------------------------------------------------------------------------- /models/VAE-approx/model.pt: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/openvinotoolkit/stable-diffusion-webui/e5a634da06c62d72dbdc764b16c65ef3408aa588/models/VAE-approx/model.pt -------------------------------------------------------------------------------- /models/VAE/Put VAE here.txt: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/openvinotoolkit/stable-diffusion-webui/e5a634da06c62d72dbdc764b16c65ef3408aa588/models/VAE/Put VAE here.txt -------------------------------------------------------------------------------- /models/deepbooru/Put your deepbooru release project folder here.txt: 
-------------------------------------------------------------------------------- https://raw.githubusercontent.com/openvinotoolkit/stable-diffusion-webui/e5a634da06c62d72dbdc764b16c65ef3408aa588/models/deepbooru/Put your deepbooru release project folder here.txt -------------------------------------------------------------------------------- /models/karlo/ViT-L-14_stats.th: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/openvinotoolkit/stable-diffusion-webui/e5a634da06c62d72dbdc764b16c65ef3408aa588/models/karlo/ViT-L-14_stats.th -------------------------------------------------------------------------------- /modules/Roboto-Regular.ttf: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/openvinotoolkit/stable-diffusion-webui/e5a634da06c62d72dbdc764b16c65ef3408aa588/modules/Roboto-Regular.ttf -------------------------------------------------------------------------------- /modules/deepbooru.py: -------------------------------------------------------------------------------- 1 | import os 2 | import re 3 | 4 | import torch 5 | import numpy as np 6 | 7 | from modules import modelloader, paths, deepbooru_model, devices, images, shared 8 | 9 | re_special = re.compile(r'([\\()])') 10 | 11 | 12 | class DeepDanbooru: 13 | def __init__(self): 14 | self.model = None 15 | 16 | def load(self): 17 | if self.model is not None: 18 | return 19 | 20 | files = modelloader.load_models( 21 | model_path=os.path.join(paths.models_path, "torch_deepdanbooru"), 22 | model_url='https://github.com/AUTOMATIC1111/TorchDeepDanbooru/releases/download/v1/model-resnet_custom_v3.pt', 23 | ext_filter=[".pt"], 24 | download_name='model-resnet_custom_v3.pt', 25 | ) 26 | 27 | self.model = deepbooru_model.DeepDanbooruModel() 28 | self.model.load_state_dict(torch.load(files[0], map_location="cpu")) 29 | 30 | self.model.eval() 31 | self.model.to(devices.cpu, devices.dtype) 32 | 33 | def start(self): 34 | self.load() 35 | self.model.to(devices.device) 36 | 37 | def stop(self): 38 | if not shared.opts.interrogate_keep_models_in_memory: 39 | self.model.to(devices.cpu) 40 | devices.torch_gc() 41 | 42 | def tag(self, pil_image): 43 | self.start() 44 | res = self.tag_multi(pil_image) 45 | self.stop() 46 | 47 | return res 48 | 49 | def tag_multi(self, pil_image, force_disable_ranks=False): 50 | threshold = shared.opts.interrogate_deepbooru_score_threshold 51 | use_spaces = shared.opts.deepbooru_use_spaces 52 | use_escape = shared.opts.deepbooru_escape 53 | alpha_sort = shared.opts.deepbooru_sort_alpha 54 | include_ranks = shared.opts.interrogate_return_ranks and not force_disable_ranks 55 | 56 | pic = images.resize_image(2, pil_image.convert("RGB"), 512, 512) 57 | a = np.expand_dims(np.array(pic, dtype=np.float32), 0) / 255 58 | 59 | with torch.no_grad(), devices.autocast(): 60 | x = torch.from_numpy(a).to(devices.device) 61 | y = self.model(x)[0].detach().cpu().numpy() 62 | 63 | probability_dict = {} 64 | 65 | for tag, probability in zip(self.model.tags, y): 66 | if probability < threshold: 67 | continue 68 | 69 | if tag.startswith("rating:"): 70 | continue 71 | 72 | probability_dict[tag] = probability 73 | 74 | if alpha_sort: 75 | tags = sorted(probability_dict) 76 | else: 77 | tags = [tag for tag, _ in sorted(probability_dict.items(), key=lambda x: -x[1])] 78 | 79 | res = [] 80 | 81 | filtertags = {x.strip().replace(' ', '_') for x in shared.opts.deepbooru_filter_tags.split(",")} 82 | 83 | for tag in [x 
for x in tags if x not in filtertags]: 84 | probability = probability_dict[tag] 85 | tag_outformat = tag 86 | if use_spaces: 87 | tag_outformat = tag_outformat.replace('_', ' ') 88 | if use_escape: 89 | tag_outformat = re.sub(re_special, r'\\\1', tag_outformat) 90 | if include_ranks: 91 | tag_outformat = f"({tag_outformat}:{probability:.3f})" 92 | 93 | res.append(tag_outformat) 94 | 95 | return ", ".join(res) 96 | 97 | 98 | model = DeepDanbooru() 99 | -------------------------------------------------------------------------------- /modules/extra_networks_hypernet.py: -------------------------------------------------------------------------------- 1 | from modules import extra_networks, shared 2 | from modules.hypernetworks import hypernetwork 3 | 4 | 5 | class ExtraNetworkHypernet(extra_networks.ExtraNetwork): 6 | def __init__(self): 7 | super().__init__('hypernet') 8 | 9 | def activate(self, p, params_list): 10 | additional = shared.opts.sd_hypernetwork 11 | 12 | if additional != "None" and additional in shared.hypernetworks and not any(x for x in params_list if x.items[0] == additional): 13 | hypernet_prompt_text = f"<hypernet:{additional}:{shared.opts.extra_networks_default_multiplier}>" 14 | p.all_prompts = [f"{prompt}{hypernet_prompt_text}" for prompt in p.all_prompts] 15 | params_list.append(extra_networks.ExtraNetworkParams(items=[additional, shared.opts.extra_networks_default_multiplier])) 16 | 17 | names = [] 18 | multipliers = [] 19 | for params in params_list: 20 | assert params.items 21 | 22 | names.append(params.items[0]) 23 | multipliers.append(float(params.items[1]) if len(params.items) > 1 else 1.0) 24 | 25 | hypernetwork.load_hypernetworks(names, multipliers) 26 | 27 | def deactivate(self, p): 28 | pass 29 | -------------------------------------------------------------------------------- /modules/face_restoration.py: -------------------------------------------------------------------------------- 1 | from modules import shared 2 | 3 | 4 | class FaceRestoration: 5 | def name(self): 6 | return "None" 7 | 8 | def restore(self, np_image): 9 | return np_image 10 | 11 | 12 | def restore_faces(np_image): 13 | face_restorers = [x for x in shared.face_restorers if x.name() == shared.opts.face_restoration_model or shared.opts.face_restoration_model is None] 14 | if len(face_restorers) == 0: 15 | return np_image 16 | 17 | face_restorer = face_restorers[0] 18 | 19 | return face_restorer.restore(np_image) 20 | -------------------------------------------------------------------------------- /modules/fifo_lock.py: -------------------------------------------------------------------------------- 1 | import threading 2 | import collections 3 | 4 | 5 | # reference: https://gist.github.com/vitaliyp/6d54dd76ca2c3cdfc1149d33007dc34a 6 | class FIFOLock(object): 7 | def __init__(self): 8 | self._lock = threading.Lock() 9 | self._inner_lock = threading.Lock() 10 | self._pending_threads = collections.deque() 11 | 12 | def acquire(self, blocking=True): 13 | with self._inner_lock: 14 | lock_acquired = self._lock.acquire(False) 15 | if lock_acquired: 16 | return True 17 | elif not blocking: 18 | return False 19 | 20 | release_event = threading.Event() 21 | self._pending_threads.append(release_event) 22 | 23 | release_event.wait() 24 | return self._lock.acquire() 25 | 26 | def release(self): 27 | with self._inner_lock: 28 | if self._pending_threads: 29 | release_event = self._pending_threads.popleft() 30 | release_event.set() 31 | 32 | self._lock.release() 33 | 34 | __enter__ = acquire 35 | 36 | def __exit__(self, t, v, tb): 37 | self.release() 38 |
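# Usage sketch (illustrative, not part of the original module): FIFOLock is a
# drop-in context manager like threading.Lock, except that threads blocked in
# acquire() are queued on events and woken in arrival order:
#
#   queue_lock = FIFOLock()
#
#   def worker():            # hypothetical caller
#       with queue_lock:     # waiters are released first-come, first-served
#           do_work()        # hypothetical critical section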
-------------------------------------------------------------------------------- /modules/gitpython_hack.py: -------------------------------------------------------------------------------- 1 | from __future__ import annotations 2 | 3 | import io 4 | import subprocess 5 | 6 | import git 7 | 8 | 9 | class Git(git.Git): 10 | """ 11 | Git subclassed to never use persistent processes. 12 | """ 13 | 14 | def _get_persistent_cmd(self, attr_name, cmd_name, *args, **kwargs): 15 | raise NotImplementedError(f"Refusing to use persistent process: {attr_name} ({cmd_name} {args} {kwargs})") 16 | 17 | def get_object_header(self, ref: str | bytes) -> tuple[str, str, int]: 18 | ret = subprocess.check_output( 19 | [self.GIT_PYTHON_GIT_EXECUTABLE, "cat-file", "--batch-check"], 20 | input=self._prepare_ref(ref), 21 | cwd=self._working_dir, 22 | timeout=2, 23 | ) 24 | return self._parse_object_header(ret) 25 | 26 | def stream_object_data(self, ref: str) -> tuple[str, str, int, "Git.CatFileContentStream"]: 27 | # Not really streaming, per se; this buffers the entire object in memory. 28 | # Shouldn't be a problem for our use case, since we're only using this for 29 | # object headers (commit objects). 30 | ret = subprocess.check_output( 31 | [self.GIT_PYTHON_GIT_EXECUTABLE, "cat-file", "--batch"], 32 | input=self._prepare_ref(ref), 33 | cwd=self._working_dir, 34 | timeout=30, 35 | ) 36 | bio = io.BytesIO(ret) 37 | hexsha, typename, size = self._parse_object_header(bio.readline()) 38 | return (hexsha, typename, size, self.CatFileContentStream(size, bio)) 39 | 40 | 41 | class Repo(git.Repo): 42 | GitCommandWrapperType = Git 43 | -------------------------------------------------------------------------------- /modules/gradio_extensons.py: -------------------------------------------------------------------------------- 1 | import gradio as gr 2 | 3 | from modules import scripts, ui_tempdir, patches 4 | 5 | 6 | def add_classes_to_gradio_component(comp): 7 | """ 8 | this adds gradio-* to the component for css styling (ie gradio-button to gr.Button), as well as some others 9 | """ 10 | 11 | comp.elem_classes = [f"gradio-{comp.get_block_name()}", *(comp.elem_classes or [])] 12 | 13 | if getattr(comp, 'multiselect', False): 14 | comp.elem_classes.append('multiselect') 15 | 16 | 17 | def IOComponent_init(self, *args, **kwargs): 18 | self.webui_tooltip = kwargs.pop('tooltip', None) 19 | 20 | if scripts.scripts_current is not None: 21 | scripts.scripts_current.before_component(self, **kwargs) 22 | 23 | scripts.script_callbacks.before_component_callback(self, **kwargs) 24 | 25 | res = original_IOComponent_init(self, *args, **kwargs) 26 | 27 | add_classes_to_gradio_component(self) 28 | 29 | scripts.script_callbacks.after_component_callback(self, **kwargs) 30 | 31 | if scripts.scripts_current is not None: 32 | scripts.scripts_current.after_component(self, **kwargs) 33 | 34 | return res 35 | 36 | 37 | def Block_get_config(self): 38 | config = original_Block_get_config(self) 39 | 40 | webui_tooltip = getattr(self, 'webui_tooltip', None) 41 | if webui_tooltip: 42 | config["webui_tooltip"] = webui_tooltip 43 | 44 | config.pop('example_inputs', None) 45 | 46 | return config 47 | 48 | 49 | def BlockContext_init(self, *args, **kwargs): 50 | res = original_BlockContext_init(self, *args, **kwargs) 51 | 52 | add_classes_to_gradio_component(self) 53 | 54 | return res 55 | 56 | 57 | def Blocks_get_config_file(self, *args, **kwargs): 58 | config = original_Blocks_get_config_file(self, *args, **kwargs) 59 | 60 | for comp_config in 
config["components"]: 61 | if "example_inputs" in comp_config: 62 | comp_config["example_inputs"] = {"serialized": []} 63 | 64 | return config 65 | 66 | 67 | original_IOComponent_init = patches.patch(__name__, obj=gr.components.IOComponent, field="__init__", replacement=IOComponent_init) 68 | original_Block_get_config = patches.patch(__name__, obj=gr.blocks.Block, field="get_config", replacement=Block_get_config) 69 | original_BlockContext_init = patches.patch(__name__, obj=gr.blocks.BlockContext, field="__init__", replacement=BlockContext_init) 70 | original_Blocks_get_config_file = patches.patch(__name__, obj=gr.blocks.Blocks, field="get_config_file", replacement=Blocks_get_config_file) 71 | 72 | 73 | ui_tempdir.install_ui_tempdir_override() 74 | -------------------------------------------------------------------------------- /modules/hashes.py: -------------------------------------------------------------------------------- 1 | import hashlib 2 | import os.path 3 | 4 | from modules import shared 5 | import modules.cache 6 | 7 | dump_cache = modules.cache.dump_cache 8 | cache = modules.cache.cache 9 | 10 | 11 | def calculate_sha256(filename): 12 | hash_sha256 = hashlib.sha256() 13 | blksize = 1024 * 1024 14 | 15 | with open(filename, "rb") as f: 16 | for chunk in iter(lambda: f.read(blksize), b""): 17 | hash_sha256.update(chunk) 18 | 19 | return hash_sha256.hexdigest() 20 | 21 | 22 | def sha256_from_cache(filename, title, use_addnet_hash=False): 23 | hashes = cache("hashes-addnet") if use_addnet_hash else cache("hashes") 24 | ondisk_mtime = os.path.getmtime(filename) 25 | 26 | if title not in hashes: 27 | return None 28 | 29 | cached_sha256 = hashes[title].get("sha256", None) 30 | cached_mtime = hashes[title].get("mtime", 0) 31 | 32 | if ondisk_mtime > cached_mtime or cached_sha256 is None: 33 | return None 34 | 35 | return cached_sha256 36 | 37 | 38 | def sha256(filename, title, use_addnet_hash=False): 39 | hashes = cache("hashes-addnet") if use_addnet_hash else cache("hashes") 40 | 41 | sha256_value = sha256_from_cache(filename, title, use_addnet_hash) 42 | if sha256_value is not None: 43 | return sha256_value 44 | 45 | if shared.cmd_opts.no_hashing: 46 | return None 47 | 48 | print(f"Calculating sha256 for {filename}: ", end='') 49 | if use_addnet_hash: 50 | with open(filename, "rb") as file: 51 | sha256_value = addnet_hash_safetensors(file) 52 | else: 53 | sha256_value = calculate_sha256(filename) 54 | print(f"{sha256_value}") 55 | 56 | hashes[title] = { 57 | "mtime": os.path.getmtime(filename), 58 | "sha256": sha256_value, 59 | } 60 | 61 | dump_cache() 62 | 63 | return sha256_value 64 | 65 | 66 | def addnet_hash_safetensors(b): 67 | """kohya-ss hash for safetensors from https://github.com/kohya-ss/sd-scripts/blob/main/library/train_util.py""" 68 | hash_sha256 = hashlib.sha256() 69 | blksize = 1024 * 1024 70 | 71 | b.seek(0) 72 | header = b.read(8) 73 | n = int.from_bytes(header, "little") 74 | 75 | offset = n + 8 76 | b.seek(offset) 77 | for chunk in iter(lambda: b.read(blksize), b""): 78 | hash_sha256.update(chunk) 79 | 80 | return hash_sha256.hexdigest() 81 | 82 | -------------------------------------------------------------------------------- /modules/hypernetworks/ui.py: -------------------------------------------------------------------------------- 1 | import html 2 | 3 | import gradio as gr 4 | import modules.hypernetworks.hypernetwork 5 | from modules import devices, sd_hijack, shared 6 | 7 | not_available = ["hardswish", "multiheadattention"] 8 | keys = [x for x in 
modules.hypernetworks.hypernetwork.HypernetworkModule.activation_dict if x not in not_available] 9 | 10 | 11 | def create_hypernetwork(name, enable_sizes, overwrite_old, layer_structure=None, activation_func=None, weight_init=None, add_layer_norm=False, use_dropout=False, dropout_structure=None): 12 | filename = modules.hypernetworks.hypernetwork.create_hypernetwork(name, enable_sizes, overwrite_old, layer_structure, activation_func, weight_init, add_layer_norm, use_dropout, dropout_structure) 13 | 14 | return gr.Dropdown.update(choices=sorted(shared.hypernetworks)), f"Created: {filename}", "" 15 | 16 | 17 | def train_hypernetwork(*args): 18 | shared.loaded_hypernetworks = [] 19 | 20 | assert not shared.cmd_opts.lowvram, 'Training models with lowvram is not possible' 21 | 22 | try: 23 | sd_hijack.undo_optimizations() 24 | 25 | hypernetwork, filename = modules.hypernetworks.hypernetwork.train_hypernetwork(*args) 26 | 27 | res = f""" 28 | Training {'interrupted' if shared.state.interrupted else 'finished'} at {hypernetwork.step} steps. 29 | Hypernetwork saved to {html.escape(filename)} 30 | """ 31 | return res, "" 32 | except Exception: 33 | raise 34 | finally: 35 | shared.sd_model.cond_stage_model.to(devices.device) 36 | shared.sd_model.first_stage_model.to(devices.device) 37 | sd_hijack.apply_optimizations() 38 | 39 | -------------------------------------------------------------------------------- /modules/import_hook.py: -------------------------------------------------------------------------------- 1 | import sys 2 | 3 | # this will break any attempt to import xformers, which prevents the Stable Diffusion repo from trying to use it 4 | if "--xformers" not in "".join(sys.argv): 5 | sys.modules["xformers"] = None 6 | -------------------------------------------------------------------------------- /modules/localization.py: -------------------------------------------------------------------------------- 1 | import json 2 | import os 3 | 4 | from modules import errors, scripts 5 | 6 | localizations = {} 7 | 8 | 9 | def list_localizations(dirname): 10 | localizations.clear() 11 | 12 | for file in os.listdir(dirname): 13 | fn, ext = os.path.splitext(file) 14 | if ext.lower() != ".json": 15 | continue 16 | 17 | localizations[fn] = os.path.join(dirname, file) 18 | 19 | for file in scripts.list_scripts("localizations", ".json"): 20 | fn, ext = os.path.splitext(file.filename) 21 | localizations[fn] = file.path 22 | 23 | 24 | def localization_js(current_localization_name: str) -> str: 25 | fn = localizations.get(current_localization_name, None) 26 | data = {} 27 | if fn is not None: 28 | try: 29 | with open(fn, "r", encoding="utf8") as file: 30 | data = json.load(file) 31 | except Exception: 32 | errors.report(f"Error loading localization from {fn}", exc_info=True) 33 | 34 | return f"window.localization = {json.dumps(data)}" 35 | -------------------------------------------------------------------------------- /modules/logging_config.py: -------------------------------------------------------------------------------- 1 | import os 2 | import logging 3 | 4 | 5 | def setup_logging(loglevel): 6 | if loglevel is None: 7 | loglevel = os.environ.get("SD_WEBUI_LOG_LEVEL") 8 | 9 | if loglevel: 10 | log_level = getattr(logging, loglevel.upper(), None) or logging.INFO 11 | logging.basicConfig( 12 | level=log_level, 13 | format='%(asctime)s %(levelname)s [%(name)s] %(message)s', 14 | datefmt='%Y-%m-%d %H:%M:%S', 15 | ) 16 | 17 | --------------------------------------------------------------------------------
/modules/memmon.py: -------------------------------------------------------------------------------- 1 | import threading 2 | import time 3 | from collections import defaultdict 4 | 5 | import torch 6 | 7 | 8 | class MemUsageMonitor(threading.Thread): 9 | run_flag = None 10 | device = None 11 | disabled = False 12 | opts = None 13 | data = None 14 | 15 | def __init__(self, name, device, opts): 16 | threading.Thread.__init__(self) 17 | self.name = name 18 | self.device = device 19 | self.opts = opts 20 | 21 | self.daemon = True 22 | self.run_flag = threading.Event() 23 | self.data = defaultdict(int) 24 | 25 | try: 26 | self.cuda_mem_get_info() 27 | torch.cuda.memory_stats(self.device) 28 | except Exception as e: # AMD or whatever 29 | print(f"Warning: caught exception '{e}', memory monitor disabled") 30 | self.disabled = True 31 | 32 | def cuda_mem_get_info(self): 33 | index = self.device.index if self.device.index is not None else torch.cuda.current_device() 34 | return torch.cuda.mem_get_info(index) 35 | 36 | def run(self): 37 | if self.disabled: 38 | return 39 | 40 | while True: 41 | self.run_flag.wait() 42 | 43 | torch.cuda.reset_peak_memory_stats() 44 | self.data.clear() 45 | 46 | if self.opts.memmon_poll_rate <= 0: 47 | self.run_flag.clear() 48 | continue 49 | 50 | self.data["min_free"] = self.cuda_mem_get_info()[0] 51 | 52 | while self.run_flag.is_set(): 53 | free, total = self.cuda_mem_get_info() 54 | self.data["min_free"] = min(self.data["min_free"], free) 55 | 56 | time.sleep(1 / self.opts.memmon_poll_rate) 57 | 58 | def dump_debug(self): 59 | print(self, 'recorded data:') 60 | for k, v in self.read().items(): 61 | print(k, -(v // -(1024 ** 2))) 62 | 63 | print(self, 'raw torch memory stats:') 64 | tm = torch.cuda.memory_stats(self.device) 65 | for k, v in tm.items(): 66 | if 'bytes' not in k: 67 | continue 68 | print('\t' if 'peak' in k else '', k, -(v // -(1024 ** 2))) 69 | 70 | print(torch.cuda.memory_summary()) 71 | 72 | def monitor(self): 73 | self.run_flag.set() 74 | 75 | def read(self): 76 | if not self.disabled: 77 | free, total = self.cuda_mem_get_info() 78 | self.data["free"] = free 79 | self.data["total"] = total 80 | 81 | torch_stats = torch.cuda.memory_stats(self.device) 82 | self.data["active"] = torch_stats["active.all.current"] 83 | self.data["active_peak"] = torch_stats["active_bytes.all.peak"] 84 | self.data["reserved"] = torch_stats["reserved_bytes.all.current"] 85 | self.data["reserved_peak"] = torch_stats["reserved_bytes.all.peak"] 86 | self.data["system_peak"] = total - self.data["min_free"] 87 | 88 | return self.data 89 | 90 | def stop(self): 91 | self.run_flag.clear() 92 | return self.read() 93 | -------------------------------------------------------------------------------- /modules/models/diffusion/uni_pc/__init__.py: -------------------------------------------------------------------------------- 1 | from .sampler import UniPCSampler # noqa: F401 2 | -------------------------------------------------------------------------------- /modules/ngrok.py: -------------------------------------------------------------------------------- 1 | import ngrok 2 | 3 | # Connect to ngrok for ingress 4 | def connect(token, port, options): 5 | account = None 6 | if token is None: 7 | token = 'None' 8 | else: 9 | if ':' in token: 10 | # token = authtoken:username:password 11 | token, username, password = token.split(':', 2) 12 | account = f"{username}:{password}" 13 | 14 | # For all options see: https://github.com/ngrok/ngrok-py/blob/main/examples/ngrok-connect-full.py 15 
| if not options.get('authtoken_from_env'): 16 | options['authtoken'] = token 17 | if account: 18 | options['basic_auth'] = account 19 | if not options.get('session_metadata'): 20 | options['session_metadata'] = 'stable-diffusion-webui' 21 | 22 | 23 | try: 24 | public_url = ngrok.connect(f"127.0.0.1:{port}", **options).url() 25 | except Exception as e: 26 | print(f'Invalid ngrok authtoken? ngrok connection aborted due to: {e}\n' 27 | f'Your token: {token}, get the right one at https://dashboard.ngrok.com/get-started/your-authtoken') 28 | else: 29 | print(f'ngrok connected to localhost:{port}! URL: {public_url}\n' 30 | 'You can use this link after the launch is complete.') 31 | -------------------------------------------------------------------------------- /modules/patches.py: -------------------------------------------------------------------------------- 1 | from collections import defaultdict 2 | 3 | 4 | def patch(key, obj, field, replacement): 5 | """Replaces a function in a module or a class. 6 | 7 | Also stores the original function in this module, which can be retrieved via original(key, obj, field). 8 | If the function is already replaced by this caller (key), an exception is raised -- use undo() before that. 9 | 10 | Arguments: 11 | key: identifying information for who is doing the replacement. You can use __name__. 12 | obj: the module or the class 13 | field: name of the function as a string 14 | replacement: the new function 15 | 16 | Returns: 17 | the original function 18 | """ 19 | 20 | patch_key = (obj, field) 21 | if patch_key in originals[key]: 22 | raise RuntimeError(f"patch for {field} is already applied") 23 | 24 | original_func = getattr(obj, field) 25 | originals[key][patch_key] = original_func 26 | 27 | setattr(obj, field, replacement) 28 | 29 | return original_func 30 | 31 | 32 | def undo(key, obj, field): 33 | """Undoes the replacement made by patch(). 34 | 35 | If the function is not replaced, raises an exception. 36 | 37 | Arguments: 38 | key: identifying information for who is doing the replacement. You can use __name__.
39 | obj: the module or the class 40 | field: name of the function as a string 41 | 42 | Returns: 43 | Always None 44 | """ 45 | 46 | patch_key = (obj, field) 47 | 48 | if patch_key not in originals[key]: 49 | raise RuntimeError(f"there is no patch for {field} to undo") 50 | 51 | original_func = originals[key].pop(patch_key) 52 | setattr(obj, field, original_func) 53 | 54 | return None 55 | 56 | 57 | def original(key, obj, field): 58 | """Returns the original function for the patch created by the patch() function""" 59 | patch_key = (obj, field) 60 | 61 | return originals[key].get(patch_key, None) 62 | 63 | 64 | originals = defaultdict(dict) 65 | -------------------------------------------------------------------------------- /modules/paths.py: -------------------------------------------------------------------------------- 1 | import os 2 | import sys 3 | from modules.paths_internal import models_path, script_path, data_path, extensions_dir, extensions_builtin_dir # noqa: F401 4 | 5 | import modules.safe # noqa: F401 6 | 7 | 8 | def mute_sdxl_imports(): 9 | """create fake modules that SDXL wants to import but doesn't actually use for our purposes""" 10 | 11 | class Dummy: 12 | pass 13 | 14 | module = Dummy() 15 | module.LPIPS = None 16 | sys.modules['taming.modules.losses.lpips'] = module 17 | 18 | module = Dummy() 19 | module.StableDataModuleFromConfig = None 20 | sys.modules['sgm.data'] = module 21 | 22 | 23 | # data_path = cmd_opts_pre.data 24 | sys.path.insert(0, script_path) 25 | 26 | # search for directory of stable diffusion in the following places 27 | sd_path = None 28 | possible_sd_paths = [os.path.join(script_path, 'repositories/stable-diffusion-stability-ai'), '.', os.path.dirname(script_path)] 29 | for possible_sd_path in possible_sd_paths: 30 | if os.path.exists(os.path.join(possible_sd_path, 'ldm/models/diffusion/ddpm.py')): 31 | sd_path = os.path.abspath(possible_sd_path) 32 | break 33 | 34 | assert sd_path is not None, f"Couldn't find Stable Diffusion in any of: {possible_sd_paths}" 35 | 36 | mute_sdxl_imports() 37 | 38 | path_dirs = [ 39 | (sd_path, 'ldm', 'Stable Diffusion', []), 40 | (os.path.join(sd_path, '../generative-models'), 'sgm', 'Stable Diffusion XL', ["sgm"]), 41 | (os.path.join(sd_path, '../CodeFormer'), 'inference_codeformer.py', 'CodeFormer', []), 42 | (os.path.join(sd_path, '../BLIP'), 'models/blip.py', 'BLIP', []), 43 | (os.path.join(sd_path, '../k-diffusion'), 'k_diffusion/sampling.py', 'k_diffusion', ["atstart"]), 44 | ] 45 | 46 | paths = {} 47 | 48 | for d, must_exist, what, options in path_dirs: 49 | must_exist_path = os.path.abspath(os.path.join(script_path, d, must_exist)) 50 | if not os.path.exists(must_exist_path): 51 | print(f"Warning: {what} not found at path {must_exist_path}", file=sys.stderr) 52 | else: 53 | d = os.path.abspath(d) 54 | if "atstart" in options: 55 | sys.path.insert(0, d) 56 | elif "sgm" in options: 57 | # the Stable Diffusion XL repo has a scripts dir with __init__.py in it, which ruins every extension's scripts dir, so we 58 | # import sgm and remove it from sys.path so that when a script imports scripts.something, it doesn't use sgm's scripts dir.
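Editor's note: a minimal usage sketch for modules/patches.py shown above. This is an illustration, not code from the repo; it assumes the webui root is on sys.path so that `modules.patches` imports, and the patched target (math.sqrt) is chosen arbitrarily.

```
import math

from modules import patches

# Swap math.sqrt for a stub; __name__ identifies who owns the patch.
original_sqrt = patches.patch(__name__, math, 'sqrt', lambda x: -1.0)

assert math.sqrt(4) == -1.0                                    # replacement is live
assert patches.original(__name__, math, 'sqrt') is original_sqrt

patches.undo(__name__, math, 'sqrt')                           # restore the original
assert math.sqrt(4) == 2.0
```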
59 | 60 | sys.path.insert(0, d) 61 | import sgm # noqa: F401 62 | sys.path.pop(0) 63 | else: 64 | sys.path.append(d) 65 | paths[what] = d 66 | -------------------------------------------------------------------------------- /modules/paths_internal.py: -------------------------------------------------------------------------------- 1 | """this module defines internal paths used by the program and is safe to import before dependencies are installed in launch.py""" 2 | 3 | import argparse 4 | import os 5 | import sys 6 | import shlex 7 | 8 | commandline_args = os.environ.get('COMMANDLINE_ARGS', "") 9 | sys.argv += shlex.split(commandline_args) 10 | 11 | modules_path = os.path.dirname(os.path.realpath(__file__)) 12 | script_path = os.path.dirname(modules_path) 13 | 14 | sd_configs_path = os.path.join(script_path, "configs") 15 | sd_default_config = os.path.join(sd_configs_path, "v1-inference.yaml") 16 | sd_model_file = os.path.join(script_path, 'model.ckpt') 17 | default_sd_model_file = sd_model_file 18 | 19 | # Parse the --data-dir flag first so we can use it as a base for our other argument default values 20 | parser_pre = argparse.ArgumentParser(add_help=False) 21 | parser_pre.add_argument("--data-dir", type=str, default=os.path.dirname(modules_path), help="base path where all user data is stored", ) 22 | cmd_opts_pre = parser_pre.parse_known_args()[0] 23 | 24 | data_path = cmd_opts_pre.data_dir 25 | 26 | models_path = os.path.join(data_path, "models") 27 | extensions_dir = os.path.join(data_path, "extensions") 28 | extensions_builtin_dir = os.path.join(script_path, "extensions-builtin") 29 | config_states_dir = os.path.join(script_path, "config_states") 30 | 31 | roboto_ttf_file = os.path.join(modules_path, 'Roboto-Regular.ttf') 32 | -------------------------------------------------------------------------------- /modules/processing_scripts/refiner.py: -------------------------------------------------------------------------------- 1 | import gradio as gr 2 | 3 | from modules import scripts, sd_models 4 | from modules.ui_common import create_refresh_button 5 | from modules.ui_components import InputAccordion 6 | 7 | 8 | class ScriptRefiner(scripts.ScriptBuiltinUI): 9 | section = "accordions" 10 | create_group = False 11 | 12 | def __init__(self): 13 | pass 14 | 15 | def title(self): 16 | return "Refiner" 17 | 18 | def show(self, is_img2img): 19 | return scripts.AlwaysVisible 20 | 21 | def ui(self, is_img2img): 22 | with InputAccordion(False, label="Refiner", elem_id=self.elem_id("enable")) as enable_refiner: 23 | with gr.Row(): 24 | refiner_checkpoint = gr.Dropdown(label='Checkpoint', elem_id=self.elem_id("checkpoint"), choices=sd_models.checkpoint_tiles(), value='', tooltip="switch to another model in the middle of generation") 25 | create_refresh_button(refiner_checkpoint, sd_models.list_models, lambda: {"choices": sd_models.checkpoint_tiles()}, self.elem_id("checkpoint_refresh")) 26 | 27 | refiner_switch_at = gr.Slider(value=0.8, label="Switch at", minimum=0.01, maximum=1.0, step=0.01, elem_id=self.elem_id("switch_at"), tooltip="fraction of sampling steps when the switch to refiner model should happen; 1=never, 0.5=switch in the middle of generation") 28 | 29 | def lookup_checkpoint(title): 30 | info = sd_models.get_closet_checkpoint_match(title) 31 | return None if info is None else info.title 32 | 33 | self.infotext_fields = [ 34 | (enable_refiner, lambda d: 'Refiner' in d), 35 | (refiner_checkpoint, lambda d: lookup_checkpoint(d.get('Refiner'))), 36 | (refiner_switch_at, 'Refiner switch
at'), 37 | ] 38 | 39 | return enable_refiner, refiner_checkpoint, refiner_switch_at 40 | 41 | def setup(self, p, enable_refiner, refiner_checkpoint, refiner_switch_at): 42 | # the actual implementation is in sd_samplers_common.py, apply_refiner 43 | 44 | if not enable_refiner or refiner_checkpoint in (None, "", "None"): 45 | p.refiner_checkpoint = None 46 | p.refiner_switch_at = None 47 | else: 48 | p.refiner_checkpoint = refiner_checkpoint 49 | p.refiner_switch_at = refiner_switch_at 50 | -------------------------------------------------------------------------------- /modules/restart.py: -------------------------------------------------------------------------------- 1 | import os 2 | from pathlib import Path 3 | 4 | from modules.paths_internal import script_path 5 | 6 | 7 | def is_restartable() -> bool: 8 | """ 9 | Return True if the webui is restartable (i.e. there is something watching to restart it with) 10 | """ 11 | return bool(os.environ.get('SD_WEBUI_RESTART')) 12 | 13 | 14 | def restart_program() -> None: 15 | """creates file tmp/restart and immediately stops the process, which webui.bat/webui.sh interpret as a command to start webui again""" 16 | 17 | (Path(script_path) / "tmp" / "restart").touch() 18 | 19 | stop_program() 20 | 21 | 22 | def stop_program() -> None: 23 | os._exit(0) 24 | -------------------------------------------------------------------------------- /modules/rng_philox.py: -------------------------------------------------------------------------------- 1 | """RNG imitating torch cuda randn on CPU. You are welcome. 2 | 3 | Usage: 4 | 5 | ``` 6 | g = Generator(seed=0) 7 | print(g.randn(shape=(3, 4))) 8 | ``` 9 | 10 | Expected output: 11 | ``` 12 | [[-0.92466259 -0.42534415 -2.6438457 0.14518388] 13 | [-0.12086647 -0.57972564 -0.62285122 -0.32838709] 14 | [-1.07454231 -0.36314407 -1.67105067 2.26550497]] 15 | ``` 16 | """ 17 | 18 | import numpy as np 19 | 20 | philox_m = [0xD2511F53, 0xCD9E8D57] 21 | philox_w = [0x9E3779B9, 0xBB67AE85] 22 | 23 | two_pow32_inv = np.array([2.3283064e-10], dtype=np.float32) 24 | two_pow32_inv_2pi = np.array([2.3283064e-10 * 6.2831855], dtype=np.float32) 25 | 26 | 27 | def uint32(x): 28 | """Converts (N,) np.uint64 array into (2, N) np.uint32 array.""" 29 | return x.view(np.uint32).reshape(-1, 2).transpose(1, 0) 30 | 31 | 32 | def philox4_round(counter, key): 33 | """A single round of the Philox 4x32 random number generator.""" 34 | 35 | v1 = uint32(counter[0].astype(np.uint64) * philox_m[0]) 36 | v2 = uint32(counter[2].astype(np.uint64) * philox_m[1]) 37 | 38 | counter[0] = v2[1] ^ counter[1] ^ key[0] 39 | counter[1] = v2[0] 40 | counter[2] = v1[1] ^ counter[3] ^ key[1] 41 | counter[3] = v1[0] 42 | 43 | 44 | def philox4_32(counter, key, rounds=10): 45 | """Generates 32-bit random numbers using the Philox 4x32 random number generator. 46 | 47 | Parameters: 48 | counter (numpy.ndarray): A 4xN array of 32-bit integers representing the counter values (offset into generation). 49 | key (numpy.ndarray): A 2xN array of 32-bit integers representing the key values (seed). 50 | rounds (int): The number of rounds to perform. 51 | 52 | Returns: 53 | numpy.ndarray: A 4xN array of 32-bit integers containing the generated random numbers.
54 | """ 55 | 56 | for _ in range(rounds - 1): 57 | philox4_round(counter, key) 58 | 59 | key[0] = key[0] + philox_w[0] 60 | key[1] = key[1] + philox_w[1] 61 | 62 | philox4_round(counter, key) 63 | return counter 64 | 65 | 66 | def box_muller(x, y): 67 | """Returns just the first of the two numbers generated by the Box–Muller transform.""" 68 | u = x * two_pow32_inv + two_pow32_inv / 2 69 | v = y * two_pow32_inv_2pi + two_pow32_inv_2pi / 2 70 | 71 | s = np.sqrt(-2.0 * np.log(u)) 72 | 73 | r1 = s * np.sin(v) 74 | return r1.astype(np.float32) 75 | 76 | 77 | class Generator: 78 | """RNG that produces the same outputs as torch.randn(..., device='cuda') on CPU""" 79 | 80 | def __init__(self, seed): 81 | self.seed = seed 82 | self.offset = 0 83 | 84 | def randn(self, shape): 85 | """Generate a sequence of n standard normal random variables using the Philox 4x32 random number generator and the Box-Muller transform.""" 86 | 87 | n = 1 88 | for x in shape: 89 | n *= x 90 | 91 | counter = np.zeros((4, n), dtype=np.uint32) 92 | counter[0] = self.offset 93 | counter[2] = np.arange(n, dtype=np.uint32) # up to 2^32 numbers can be generated - if you want more you'd need to spill into counter[3] 94 | self.offset += 1 95 | 96 | key = np.empty(n, dtype=np.uint64) 97 | key.fill(self.seed) 98 | key = uint32(key) 99 | 100 | g = philox4_32(counter, key) 101 | 102 | return box_muller(g[0], g[1]).reshape(shape) # discard g[2] and g[3] 103 | -------------------------------------------------------------------------------- /modules/script_loading.py: -------------------------------------------------------------------------------- 1 | import os 2 | import importlib.util 3 | 4 | from modules import errors 5 | 6 | 7 | def load_module(path): 8 | module_spec = importlib.util.spec_from_file_location(os.path.basename(path), path) 9 | module = importlib.util.module_from_spec(module_spec) 10 | module_spec.loader.exec_module(module) 11 | 12 | return module 13 | 14 | 15 | def preload_extensions(extensions_dir, parser, extension_list=None): 16 | if not os.path.isdir(extensions_dir): 17 | return 18 | 19 | extensions = extension_list if extension_list is not None else os.listdir(extensions_dir) 20 | for dirname in sorted(extensions): 21 | preload_script = os.path.join(extensions_dir, dirname, "preload.py") 22 | if not os.path.isfile(preload_script): 23 | continue 24 | 25 | try: 26 | module = load_module(preload_script) 27 | if hasattr(module, 'preload'): 28 | module.preload(parser) 29 | 30 | except Exception: 31 | errors.report(f"Error running preload() for {preload_script}", exc_info=True) 32 | -------------------------------------------------------------------------------- /modules/scripts_auto_postprocessing.py: -------------------------------------------------------------------------------- 1 | from modules import scripts, scripts_postprocessing, shared 2 | 3 | 4 | class ScriptPostprocessingForMainUI(scripts.Script): 5 | def __init__(self, script_postproc): 6 | self.script: scripts_postprocessing.ScriptPostprocessing = script_postproc 7 | self.postprocessing_controls = None 8 | 9 | def title(self): 10 | return self.script.name 11 | 12 | def show(self, is_img2img): 13 | return scripts.AlwaysVisible 14 | 15 | def ui(self, is_img2img): 16 | self.postprocessing_controls = self.script.ui() 17 | return self.postprocessing_controls.values() 18 | 19 | def postprocess_image(self, p, script_pp, *args): 20 | args_dict = dict(zip(self.postprocessing_controls, args)) 21 | 22 | pp =
scripts_postprocessing.PostprocessedImage(script_pp.image) 23 | pp.info = {} 24 | self.script.process(pp, **args_dict) 25 | p.extra_generation_params.update(pp.info) 26 | script_pp.image = pp.image 27 | 28 | 29 | def create_auto_preprocessing_script_data(): 30 | from modules import scripts 31 | 32 | res = [] 33 | 34 | for name in shared.opts.postprocessing_enable_in_main_ui: 35 | script = next(iter([x for x in scripts.postprocessing_scripts_data if x.script_class.name == name]), None) 36 | if script is None: 37 | continue 38 | 39 | constructor = lambda s=script: ScriptPostprocessingForMainUI(s.script_class()) 40 | res.append(scripts.ScriptClassData(script_class=constructor, path=script.path, basedir=script.basedir, module=script.module)) 41 | 42 | return res 43 | -------------------------------------------------------------------------------- /modules/sd_hijack_checkpoint.py: -------------------------------------------------------------------------------- 1 | from torch.utils.checkpoint import checkpoint 2 | 3 | import ldm.modules.attention 4 | import ldm.modules.diffusionmodules.openaimodel 5 | 6 | 7 | def BasicTransformerBlock_forward(self, x, context=None): 8 | return checkpoint(self._forward, x, context) 9 | 10 | 11 | def AttentionBlock_forward(self, x): 12 | return checkpoint(self._forward, x) 13 | 14 | 15 | def ResBlock_forward(self, x, emb): 16 | return checkpoint(self._forward, x, emb) 17 | 18 | 19 | stored = [] 20 | 21 | 22 | def add(): 23 | if len(stored) != 0: 24 | return 25 | 26 | stored.extend([ 27 | ldm.modules.attention.BasicTransformerBlock.forward, 28 | ldm.modules.diffusionmodules.openaimodel.ResBlock.forward, 29 | ldm.modules.diffusionmodules.openaimodel.AttentionBlock.forward 30 | ]) 31 | 32 | ldm.modules.attention.BasicTransformerBlock.forward = BasicTransformerBlock_forward 33 | ldm.modules.diffusionmodules.openaimodel.ResBlock.forward = ResBlock_forward 34 | ldm.modules.diffusionmodules.openaimodel.AttentionBlock.forward = AttentionBlock_forward 35 | 36 | 37 | def remove(): 38 | if len(stored) == 0: 39 | return 40 | 41 | ldm.modules.attention.BasicTransformerBlock.forward = stored[0] 42 | ldm.modules.diffusionmodules.openaimodel.ResBlock.forward = stored[1] 43 | ldm.modules.diffusionmodules.openaimodel.AttentionBlock.forward = stored[2] 44 | 45 | stored.clear() 46 | 47 | -------------------------------------------------------------------------------- /modules/sd_hijack_ip2p.py: -------------------------------------------------------------------------------- 1 | import os.path 2 | 3 | 4 | def should_hijack_ip2p(checkpoint_info): 5 | from modules import sd_models_config 6 | 7 | ckpt_basename = os.path.basename(checkpoint_info.filename).lower() 8 | cfg_basename = os.path.basename(sd_models_config.find_checkpoint_config_near_filename(checkpoint_info)).lower() 9 | 10 | return "pix2pix" in ckpt_basename and "pix2pix" not in cfg_basename 11 | -------------------------------------------------------------------------------- /modules/sd_hijack_open_clip.py: -------------------------------------------------------------------------------- 1 | import open_clip.tokenizer 2 | import torch 3 | 4 | from modules import sd_hijack_clip, devices 5 | from modules.shared import opts 6 | 7 | tokenizer = open_clip.tokenizer._tokenizer 8 | 9 | 10 | class FrozenOpenCLIPEmbedderWithCustomWords(sd_hijack_clip.FrozenCLIPEmbedderWithCustomWordsBase): 11 | def __init__(self, wrapped, hijack): 12 | super().__init__(wrapped, hijack) 13 | 14 | self.comma_token = [v for k, v in 
tokenizer.encoder.items() if k == ',</w>'][0] 15 | self.id_start = tokenizer.encoder["<start_of_text>"] 16 | self.id_end = tokenizer.encoder["<end_of_text>"] 17 | self.id_pad = 0 18 | 19 | def tokenize(self, texts): 20 | assert not opts.use_old_emphasis_implementation, 'Old emphasis implementation not supported for Open Clip' 21 | 22 | tokenized = [tokenizer.encode(text) for text in texts] 23 | 24 | return tokenized 25 | 26 | def encode_with_transformers(self, tokens): 27 | # set self.wrapped.layer_idx here according to opts.CLIP_stop_at_last_layers 28 | z = self.wrapped.encode_with_transformer(tokens) 29 | 30 | return z 31 | 32 | def encode_embedding_init_text(self, init_text, nvpt): 33 | ids = tokenizer.encode(init_text) 34 | ids = torch.asarray([ids], device=devices.device, dtype=torch.int) 35 | embedded = self.wrapped.model.token_embedding.wrapped(ids).squeeze(0) 36 | 37 | return embedded 38 | 39 | 40 | class FrozenOpenCLIPEmbedder2WithCustomWords(sd_hijack_clip.FrozenCLIPEmbedderWithCustomWordsBase): 41 | def __init__(self, wrapped, hijack): 42 | super().__init__(wrapped, hijack) 43 | 44 | self.comma_token = [v for k, v in tokenizer.encoder.items() if k == ',</w>'][0] 45 | self.id_start = tokenizer.encoder["<start_of_text>"] 46 | self.id_end = tokenizer.encoder["<end_of_text>"] 47 | self.id_pad = 0 48 | 49 | def tokenize(self, texts): 50 | assert not opts.use_old_emphasis_implementation, 'Old emphasis implementation not supported for Open Clip' 51 | 52 | tokenized = [tokenizer.encode(text) for text in texts] 53 | 54 | return tokenized 55 | 56 | def encode_with_transformers(self, tokens): 57 | d = self.wrapped.encode_with_transformer(tokens) 58 | z = d[self.wrapped.layer] 59 | 60 | pooled = d.get("pooled") 61 | if pooled is not None: 62 | z.pooled = pooled 63 | 64 | return z 65 | 66 | def encode_embedding_init_text(self, init_text, nvpt): 67 | ids = tokenizer.encode(init_text) 68 | ids = torch.asarray([ids], device=devices.device, dtype=torch.int) 69 | embedded = self.wrapped.model.token_embedding.wrapped(ids.to(self.wrapped.model.token_embedding.wrapped.weight.device)).squeeze(0) 70 | 71 | return embedded 72 | -------------------------------------------------------------------------------- /modules/sd_hijack_utils.py: -------------------------------------------------------------------------------- 1 | import importlib 2 | 3 | class CondFunc: 4 | def __new__(cls, orig_func, sub_func, cond_func): 5 | self = super(CondFunc, cls).__new__(cls) 6 | if isinstance(orig_func, str): 7 | func_path = orig_func.split('.') 8 | for i in range(len(func_path)-1, -1, -1): 9 | try: 10 | resolved_obj = importlib.import_module('.'.join(func_path[:i])) 11 | break 12 | except ImportError: 13 | pass 14 | for attr_name in func_path[i:-1]: 15 | resolved_obj = getattr(resolved_obj, attr_name) 16 | orig_func = getattr(resolved_obj, func_path[-1]) 17 | setattr(resolved_obj, func_path[-1], lambda *args, **kwargs: self(*args, **kwargs)) 18 | self.__init__(orig_func, sub_func, cond_func) 19 | return lambda *args, **kwargs: self(*args, **kwargs) 20 | def __init__(self, orig_func, sub_func, cond_func): 21 | self.__orig_func = orig_func 22 | self.__sub_func = sub_func 23 | self.__cond_func = cond_func 24 | def __call__(self, *args, **kwargs): 25 | if not self.__cond_func or self.__cond_func(self.__orig_func, *args, **kwargs): 26 | return self.__sub_func(self.__orig_func, *args, **kwargs) 27 | else: 28 | return self.__orig_func(*args, **kwargs) 29 | -------------------------------------------------------------------------------- /modules/sd_hijack_xlmr.py:
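Editor's note: CondFunc in modules/sd_hijack_utils.py above is the webui's conditional monkey-patch helper -- it resolves a dotted path, replaces the target with a wrapper, and routes each call to the substitute only when the condition holds. A hedged sketch (the target and the upcast-fp16 policy below are invented for illustration, loosely modeled on how the webui patches torch functions):

```
import torch

from modules.sd_hijack_utils import CondFunc

CondFunc(
    'torch.nn.functional.silu',
    # substitute: receives the original callable as its first argument
    lambda orig, x, *args, **kwargs: orig(x.float(), *args, **kwargs).to(x.dtype),
    # condition: only reroute half-precision inputs; everything else hits the original
    lambda orig, x, *args, **kwargs: x.dtype == torch.float16,
)
```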
-------------------------------------------------------------------------------- 1 | import torch 2 | 3 | from modules import sd_hijack_clip, devices 4 | 5 | 6 | class FrozenXLMREmbedderWithCustomWords(sd_hijack_clip.FrozenCLIPEmbedderWithCustomWords): 7 | def __init__(self, wrapped, hijack): 8 | super().__init__(wrapped, hijack) 9 | 10 | self.id_start = wrapped.config.bos_token_id 11 | self.id_end = wrapped.config.eos_token_id 12 | self.id_pad = wrapped.config.pad_token_id 13 | 14 | self.comma_token = self.tokenizer.get_vocab().get(',', None) # alt diffusion doesn't have bits for comma 15 | 16 | def encode_with_transformers(self, tokens): 17 | # there's no CLIP Skip here because all hidden layers have size of 1024 and the last one uses a 18 | # trained layer to transform those 1024 into 768 for unet; so you can't choose which transformer 19 | # layer to work with - you have to use the last 20 | 21 | attention_mask = (tokens != self.id_pad).to(device=tokens.device, dtype=torch.int64) 22 | features = self.wrapped(input_ids=tokens, attention_mask=attention_mask) 23 | z = features['projection_state'] 24 | 25 | return z 26 | 27 | def encode_embedding_init_text(self, init_text, nvpt): 28 | embedding_layer = self.wrapped.roberta.embeddings 29 | ids = self.wrapped.tokenizer(init_text, max_length=nvpt, return_tensors="pt", add_special_tokens=False)["input_ids"] 30 | embedded = embedding_layer.token_embedding.wrapped(ids.to(devices.device)).squeeze(0) 31 | 32 | return embedded 33 | -------------------------------------------------------------------------------- /modules/sd_models_types.py: -------------------------------------------------------------------------------- 1 | from ldm.models.diffusion.ddpm import LatentDiffusion 2 | from typing import TYPE_CHECKING 3 | 4 | 5 | if TYPE_CHECKING: 6 | from modules.sd_models import CheckpointInfo 7 | 8 | 9 | class WebuiSdModel(LatentDiffusion): 10 | """This class is not actually instantiated, but its fields are created and filled by webui""" 11 | 12 | lowvram: bool 13 | """True if lowvram/medvram optimizations are enabled -- see modules.lowvram for more info""" 14 | 15 | sd_model_hash: str 16 | """short hash, first 10 characters of SHA1 hash of the model file; may be None if --no-hashing flag is used""" 17 | 18 | sd_model_checkpoint: str 19 | """path to the file on disk that model weights were obtained from""" 20 | 21 | sd_checkpoint_info: 'CheckpointInfo' 22 | """structure with additional information about the file with model's weights""" 23 | 24 | is_sdxl: bool 25 | """True if the model's architecture is SDXL""" 26 | 27 | is_sd2: bool 28 | """True if the model's architecture is SD 2.x""" 29 | 30 | is_sd1: bool 31 | """True if the model's architecture is SD 1.x""" 32 | -------------------------------------------------------------------------------- /modules/sd_samplers.py: -------------------------------------------------------------------------------- 1 | from modules import sd_samplers_kdiffusion, sd_samplers_timesteps, shared 2 | 3 | # imports for functions that previously were here and are used by other modules 4 | from modules.sd_samplers_common import samples_to_image_grid, sample_to_image # noqa: F401 5 | 6 | all_samplers = [ 7 | *sd_samplers_kdiffusion.samplers_data_k_diffusion, 8 | *sd_samplers_timesteps.samplers_data_timesteps, 9 | ] 10 | all_samplers_map = {x.name: x for x in all_samplers} 11 | 12 | samplers = [] 13 | samplers_for_img2img = [] 14 | samplers_map = {} 15 | samplers_hidden = {} 16 | 17 | 18 | def find_sampler_config(name):
if name is not None: 20 | config = all_samplers_map.get(name, None) 21 | else: 22 | config = all_samplers[0] 23 | 24 | return config 25 | 26 | 27 | def create_sampler(name, model): 28 | config = find_sampler_config(name) 29 | 30 | assert config is not None, f'bad sampler name: {name}' 31 | 32 | if model.is_sdxl and config.options.get("no_sdxl", False): 33 | raise Exception(f"Sampler {config.name} is not supported for SDXL") 34 | 35 | sampler = config.constructor(model) 36 | sampler.config = config 37 | 38 | return sampler 39 | 40 | 41 | def set_samplers(): 42 | global samplers, samplers_for_img2img, samplers_hidden 43 | 44 | samplers_hidden = set(shared.opts.hide_samplers) 45 | samplers = all_samplers 46 | samplers_for_img2img = all_samplers 47 | 48 | samplers_map.clear() 49 | for sampler in all_samplers: 50 | samplers_map[sampler.name.lower()] = sampler.name 51 | for alias in sampler.aliases: 52 | samplers_map[alias.lower()] = sampler.name 53 | 54 | 55 | def visible_sampler_names(): 56 | return [x.name for x in samplers if x.name not in samplers_hidden] 57 | 58 | 59 | set_samplers() 60 | -------------------------------------------------------------------------------- /modules/sd_samplers_compvis.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/openvinotoolkit/stable-diffusion-webui/e5a634da06c62d72dbdc764b16c65ef3408aa588/modules/sd_samplers_compvis.py -------------------------------------------------------------------------------- /modules/sd_samplers_extra.py: -------------------------------------------------------------------------------- 1 | import torch 2 | import tqdm 3 | import k_diffusion.sampling 4 | 5 | 6 | @torch.no_grad() 7 | def restart_sampler(model, x, sigmas, extra_args=None, callback=None, disable=None, s_noise=1., restart_list=None): 8 | """Implements restart sampling in Restart Sampling for Improving Generative Processes (2023) 9 | Restart_list format: {min_sigma: [ restart_steps, restart_times, max_sigma]} 10 | If restart_list is None: will choose restart_list automatically, otherwise will use the given restart_list 11 | """ 12 | extra_args = {} if extra_args is None else extra_args 13 | s_in = x.new_ones([x.shape[0]]) 14 | step_id = 0 15 | from k_diffusion.sampling import to_d, get_sigmas_karras 16 | 17 | def heun_step(x, old_sigma, new_sigma, second_order=True): 18 | nonlocal step_id 19 | denoised = model(x, old_sigma * s_in, **extra_args) 20 | d = to_d(x, old_sigma, denoised) 21 | if callback is not None: 22 | callback({'x': x, 'i': step_id, 'sigma': new_sigma, 'sigma_hat': old_sigma, 'denoised': denoised}) 23 | dt = new_sigma - old_sigma 24 | if new_sigma == 0 or not second_order: 25 | # Euler method 26 | x = x + d * dt 27 | else: 28 | # Heun's method 29 | x_2 = x + d * dt 30 | denoised_2 = model(x_2, new_sigma * s_in, **extra_args) 31 | d_2 = to_d(x_2, new_sigma, denoised_2) 32 | d_prime = (d + d_2) / 2 33 | x = x + d_prime * dt 34 | step_id += 1 35 | return x 36 | 37 | steps = sigmas.shape[0] - 1 38 | if restart_list is None: 39 | if steps >= 20: 40 | restart_steps = 9 41 | restart_times = 1 42 | if steps >= 36: 43 | restart_steps = steps // 4 44 | restart_times = 2 45 | sigmas = get_sigmas_karras(steps - restart_steps * restart_times, sigmas[-2].item(), sigmas[0].item(), device=sigmas.device) 46 | restart_list = {0.1: [restart_steps + 1, restart_times, 2]} 47 | else: 48 | restart_list = {} 49 | 50 | restart_list = {int(torch.argmin(abs(sigmas - key), dim=0)): value for key, value in 
restart_list.items()} 51 | 52 | step_list = [] 53 | for i in range(len(sigmas) - 1): 54 | step_list.append((sigmas[i], sigmas[i + 1])) 55 | if i + 1 in restart_list: 56 | restart_steps, restart_times, restart_max = restart_list[i + 1] 57 | min_idx = i + 1 58 | max_idx = int(torch.argmin(abs(sigmas - restart_max), dim=0)) 59 | if max_idx < min_idx: 60 | sigma_restart = get_sigmas_karras(restart_steps, sigmas[min_idx].item(), sigmas[max_idx].item(), device=sigmas.device)[:-1] 61 | while restart_times > 0: 62 | restart_times -= 1 63 | step_list.extend([(old_sigma, new_sigma) for (old_sigma, new_sigma) in zip(sigma_restart[:-1], sigma_restart[1:])]) 64 | 65 | last_sigma = None 66 | for old_sigma, new_sigma in tqdm.tqdm(step_list, disable=disable): 67 | if last_sigma is None: 68 | last_sigma = old_sigma 69 | elif last_sigma < old_sigma: 70 | x = x + k_diffusion.sampling.torch.randn_like(x) * s_noise * (old_sigma ** 2 - last_sigma ** 2) ** 0.5 71 | x = heun_step(x, old_sigma, new_sigma) 72 | last_sigma = new_sigma 73 | 74 | return x 75 | -------------------------------------------------------------------------------- /modules/sd_unet.py: -------------------------------------------------------------------------------- 1 | import torch.nn 2 | import ldm.modules.diffusionmodules.openaimodel 3 | 4 | from modules import script_callbacks, shared, devices 5 | 6 | unet_options = [] 7 | current_unet_option = None 8 | current_unet = None 9 | 10 | 11 | def list_unets(): 12 | new_unets = script_callbacks.list_unets_callback() 13 | 14 | unet_options.clear() 15 | unet_options.extend(new_unets) 16 | 17 | 18 | def get_unet_option(option=None): 19 | option = option or shared.opts.sd_unet 20 | 21 | if option == "None": 22 | return None 23 | 24 | if option == "Automatic": 25 | name = shared.sd_model.sd_checkpoint_info.model_name 26 | 27 | options = [x for x in unet_options if x.model_name == name] 28 | 29 | option = options[0].label if options else "None" 30 | 31 | return next(iter([x for x in unet_options if x.label == option]), None) 32 | 33 | 34 | def apply_unet(option=None): 35 | global current_unet_option 36 | global current_unet 37 | 38 | new_option = get_unet_option(option) 39 | if new_option == current_unet_option: 40 | return 41 | 42 | if current_unet is not None: 43 | print(f"Deactivating unet: {current_unet.option.label}") 44 | current_unet.deactivate() 45 | 46 | current_unet_option = new_option 47 | if current_unet_option is None: 48 | current_unet = None 49 | 50 | if not shared.sd_model.lowvram: 51 | shared.sd_model.model.diffusion_model.to(devices.device) 52 | 53 | return 54 | 55 | shared.sd_model.model.diffusion_model.to(devices.cpu) 56 | devices.torch_gc() 57 | 58 | current_unet = current_unet_option.create_unet() 59 | current_unet.option = current_unet_option 60 | print(f"Activating unet: {current_unet.option.label}") 61 | current_unet.activate() 62 | 63 | 64 | class SdUnetOption: 65 | model_name = None 66 | """name of related checkpoint - this option will be selected automatically for unet if the name of checkpoint matches this""" 67 | 68 | label = None 69 | """name of the unet in UI""" 70 | 71 | def create_unet(self): 72 | """returns SdUnet object to be used as a Unet instead of built-in unet when making pictures""" 73 | raise NotImplementedError() 74 | 75 | 76 | class SdUnet(torch.nn.Module): 77 | def forward(self, x, timesteps, context, *args, **kwargs): 78 | raise NotImplementedError() 79 | 80 | def activate(self): 81 | pass 82 | 83 | def deactivate(self): 84 | pass 85 | 86 | 87 | def
UNetModel_forward(self, x, timesteps=None, context=None, *args, **kwargs): 88 | if current_unet is not None: 89 | return current_unet.forward(x, timesteps, context, *args, **kwargs) 90 | 91 | return ldm.modules.diffusionmodules.openaimodel.copy_of_UNetModel_forward_for_webui(self, x, timesteps, context, *args, **kwargs) 92 | 93 | -------------------------------------------------------------------------------- /modules/sd_vae_approx.py: -------------------------------------------------------------------------------- 1 | import os 2 | 3 | import torch 4 | from torch import nn 5 | from modules import devices, paths, shared 6 | 7 | sd_vae_approx_models = {} 8 | 9 | 10 | class VAEApprox(nn.Module): 11 | def __init__(self): 12 | super(VAEApprox, self).__init__() 13 | self.conv1 = nn.Conv2d(4, 8, (7, 7)) 14 | self.conv2 = nn.Conv2d(8, 16, (5, 5)) 15 | self.conv3 = nn.Conv2d(16, 32, (3, 3)) 16 | self.conv4 = nn.Conv2d(32, 64, (3, 3)) 17 | self.conv5 = nn.Conv2d(64, 32, (3, 3)) 18 | self.conv6 = nn.Conv2d(32, 16, (3, 3)) 19 | self.conv7 = nn.Conv2d(16, 8, (3, 3)) 20 | self.conv8 = nn.Conv2d(8, 3, (3, 3)) 21 | 22 | def forward(self, x): 23 | extra = 11 24 | x = nn.functional.interpolate(x, (x.shape[2] * 2, x.shape[3] * 2)) 25 | x = nn.functional.pad(x, (extra, extra, extra, extra)) 26 | 27 | for layer in [self.conv1, self.conv2, self.conv3, self.conv4, self.conv5, self.conv6, self.conv7, self.conv8, ]: 28 | x = layer(x) 29 | x = nn.functional.leaky_relu(x, 0.1) 30 | 31 | return x 32 | 33 | 34 | def download_model(model_path, model_url): 35 | if not os.path.exists(model_path): 36 | os.makedirs(os.path.dirname(model_path), exist_ok=True) 37 | 38 | print(f'Downloading VAEApprox model to: {model_path}') 39 | torch.hub.download_url_to_file(model_url, model_path) 40 | 41 | 42 | def model(): 43 | model_name = "vaeapprox-sdxl.pt" if getattr(shared.sd_model, 'is_sdxl', False) else "model.pt" 44 | loaded_model = sd_vae_approx_models.get(model_name) 45 | 46 | if loaded_model is None: 47 | model_path = os.path.join(paths.models_path, "VAE-approx", model_name) 48 | if not os.path.exists(model_path): 49 | model_path = os.path.join(paths.script_path, "models", "VAE-approx", model_name) 50 | 51 | if not os.path.exists(model_path): 52 | model_path = os.path.join(paths.models_path, "VAE-approx", model_name) 53 | download_model(model_path, 'https://github.com/AUTOMATIC1111/stable-diffusion-webui/releases/download/v1.0.0-pre/' + model_name) 54 | 55 | loaded_model = VAEApprox() 56 | loaded_model.load_state_dict(torch.load(model_path, map_location='cpu' if devices.device.type != 'cuda' else None)) 57 | loaded_model.eval() 58 | loaded_model.to(devices.device, devices.dtype) 59 | sd_vae_approx_models[model_name] = loaded_model 60 | 61 | return loaded_model 62 | 63 | 64 | def cheap_approximation(sample): 65 | # https://discuss.huggingface.co/t/decoding-latents-to-rgb-without-upscaling/23204/2 66 | 67 | if shared.sd_model.is_sdxl: 68 | coeffs = [ 69 | [ 0.3448, 0.4168, 0.4395], 70 | [-0.1953, -0.0290, 0.0250], 71 | [ 0.1074, 0.0886, -0.0163], 72 | [-0.3730, -0.2499, -0.2088], 73 | ] 74 | else: 75 | coeffs = [ 76 | [ 0.298, 0.207, 0.208], 77 | [ 0.187, 0.286, 0.173], 78 | [-0.158, 0.189, 0.264], 79 | [-0.184, -0.271, -0.473], 80 | ] 81 | 82 | coefs = torch.tensor(coeffs).to(sample.device) 83 | 84 | x_sample = torch.einsum("...lxy,lr -> ...rxy", sample, coefs) 85 | 86 | return x_sample 87 | -------------------------------------------------------------------------------- /modules/shared.py: 
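Editor's note: cheap_approximation() in modules/sd_vae_approx.py above turns a 4-channel latent into RGB with one fixed linear map instead of a full VAE decode, which is why it is cheap. A standalone sketch of the same einsum, reusing the SD 1.x coefficients from the code above (the random latent is just a stand-in for a real sample):

```
import torch

latent = torch.randn(4, 64, 64)    # fake [4, H, W] latent; a real one comes from sampling

coefs = torch.tensor([
    [ 0.298,  0.207,  0.208],
    [ 0.187,  0.286,  0.173],
    [-0.158,  0.189,  0.264],
    [-0.184, -0.271, -0.473],
])

# Per-pixel 4 -> 3 projection: each RGB channel is a fixed linear
# combination of the four latent channels.
rgb = torch.einsum("...lxy,lr -> ...rxy", latent, coefs)

print(rgb.shape)                   # torch.Size([3, 64, 64])
```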
-------------------------------------------------------------------------------- 1 | import sys 2 | 3 | import gradio as gr 4 | 5 | from modules import shared_cmd_options, shared_gradio_themes, options, shared_items, sd_models_types 6 | from modules.paths_internal import models_path, script_path, data_path, sd_configs_path, sd_default_config, sd_model_file, default_sd_model_file, extensions_dir, extensions_builtin_dir # noqa: F401 7 | from modules import util 8 | 9 | cmd_opts = shared_cmd_options.cmd_opts 10 | parser = shared_cmd_options.parser 11 | 12 | batch_cond_uncond = True # old field, unused now in favor of shared.opts.batch_cond_uncond 13 | parallel_processing_allowed = True 14 | styles_filename = cmd_opts.styles_file 15 | config_filename = cmd_opts.ui_settings_file 16 | hide_dirs = {"visible": not cmd_opts.hide_ui_dir_config} 17 | 18 | demo = None 19 | 20 | device = None 21 | 22 | weight_load_location = None 23 | 24 | xformers_available = False 25 | 26 | hypernetworks = {} 27 | 28 | loaded_hypernetworks = [] 29 | 30 | state = None 31 | 32 | prompt_styles = None 33 | 34 | interrogator = None 35 | 36 | face_restorers = [] 37 | 38 | options_templates = None 39 | opts = None 40 | restricted_opts = None 41 | 42 | sd_model: sd_models_types.WebuiSdModel = None 43 | 44 | settings_components = None 45 | """assigned from ui.py, a mapping of setting names to gradio components responsible for those settings""" 46 | 47 | tab_names = [] 48 | 49 | latent_upscale_default_mode = "Latent" 50 | latent_upscale_modes = { 51 | "Latent": {"mode": "bilinear", "antialias": False}, 52 | "Latent (antialiased)": {"mode": "bilinear", "antialias": True}, 53 | "Latent (bicubic)": {"mode": "bicubic", "antialias": False}, 54 | "Latent (bicubic antialiased)": {"mode": "bicubic", "antialias": True}, 55 | "Latent (nearest)": {"mode": "nearest", "antialias": False}, 56 | "Latent (nearest-exact)": {"mode": "nearest-exact", "antialias": False}, 57 | } 58 | 59 | sd_upscalers = [] 60 | 61 | clip_model = None 62 | 63 | progress_print_out = sys.stdout 64 | 65 | gradio_theme = gr.themes.Base() 66 | 67 | total_tqdm = None 68 | 69 | mem_mon = None 70 | 71 | options_section = options.options_section 72 | OptionInfo = options.OptionInfo 73 | OptionHTML = options.OptionHTML 74 | 75 | natural_sort_key = util.natural_sort_key 76 | listfiles = util.listfiles 77 | html_path = util.html_path 78 | html = util.html 79 | walk_files = util.walk_files 80 | ldm_print = util.ldm_print 81 | 82 | reload_gradio_theme = shared_gradio_themes.reload_gradio_theme 83 | 84 | list_checkpoint_tiles = shared_items.list_checkpoint_tiles 85 | refresh_checkpoints = shared_items.refresh_checkpoints 86 | list_samplers = shared_items.list_samplers 87 | reload_hypernetworks = shared_items.reload_hypernetworks 88 | -------------------------------------------------------------------------------- /modules/shared_cmd_options.py: -------------------------------------------------------------------------------- 1 | import os 2 | 3 | import launch 4 | from modules import cmd_args, script_loading 5 | from modules.paths_internal import models_path, script_path, data_path, sd_configs_path, sd_default_config, sd_model_file, default_sd_model_file, extensions_dir, extensions_builtin_dir # noqa: F401 6 | 7 | parser = cmd_args.parser 8 | 9 | script_loading.preload_extensions(extensions_dir, parser, extension_list=launch.list_extensions(launch.args.ui_settings_file)) 10 | script_loading.preload_extensions(extensions_builtin_dir, parser) 11 | 12 | if
os.environ.get('IGNORE_CMD_ARGS_ERRORS', None) is None: 13 | cmd_opts = parser.parse_args() 14 | else: 15 | cmd_opts, _ = parser.parse_known_args() 16 | 17 | 18 | cmd_opts.disable_extension_access = any([cmd_opts.share, cmd_opts.listen, cmd_opts.ngrok, cmd_opts.server_name]) and not cmd_opts.enable_insecure_extension_access 19 | -------------------------------------------------------------------------------- /modules/shared_gradio_themes.py: -------------------------------------------------------------------------------- 1 | import os 2 | 3 | import gradio as gr 4 | 5 | from modules import errors, shared 6 | from modules.paths_internal import script_path 7 | 8 | 9 | # https://huggingface.co/datasets/freddyaboulton/gradio-theme-subdomains/resolve/main/subdomains.json 10 | gradio_hf_hub_themes = [ 11 | "gradio/base", 12 | "gradio/glass", 13 | "gradio/monochrome", 14 | "gradio/seafoam", 15 | "gradio/soft", 16 | "gradio/dracula_test", 17 | "abidlabs/dracula_test", 18 | "abidlabs/Lime", 19 | "abidlabs/pakistan", 20 | "Ama434/neutral-barlow", 21 | "dawood/microsoft_windows", 22 | "finlaymacklon/smooth_slate", 23 | "Franklisi/darkmode", 24 | "freddyaboulton/dracula_revamped", 25 | "freddyaboulton/test-blue", 26 | "gstaff/xkcd", 27 | "Insuz/Mocha", 28 | "Insuz/SimpleIndigo", 29 | "JohnSmith9982/small_and_pretty", 30 | "nota-ai/theme", 31 | "nuttea/Softblue", 32 | "ParityError/Anime", 33 | "reilnuud/polite", 34 | "remilia/Ghostly", 35 | "rottenlittlecreature/Moon_Goblin", 36 | "step-3-profit/Midnight-Deep", 37 | "Taithrah/Minimal", 38 | "ysharma/huggingface", 39 | "ysharma/steampunk", 40 | "NoCrypt/miku" 41 | ] 42 | 43 | 44 | def reload_gradio_theme(theme_name=None): 45 | if not theme_name: 46 | theme_name = shared.opts.gradio_theme 47 | 48 | default_theme_args = dict( 49 | font=["Source Sans Pro", 'ui-sans-serif', 'system-ui', 'sans-serif'], 50 | font_mono=['IBM Plex Mono', 'ui-monospace', 'Consolas', 'monospace'], 51 | ) 52 | 53 | if theme_name == "Default": 54 | shared.gradio_theme = gr.themes.Default(**default_theme_args) 55 | else: 56 | try: 57 | theme_cache_dir = os.path.join(script_path, 'tmp', 'gradio_themes') 58 | theme_cache_path = os.path.join(theme_cache_dir, f'{theme_name.replace("/", "_")}.json') 59 | if shared.opts.gradio_themes_cache and os.path.exists(theme_cache_path): 60 | shared.gradio_theme = gr.themes.ThemeClass.load(theme_cache_path) 61 | else: 62 | os.makedirs(theme_cache_dir, exist_ok=True) 63 | shared.gradio_theme = gr.themes.ThemeClass.from_hub(theme_name) 64 | shared.gradio_theme.dump(theme_cache_path) 65 | except Exception as e: 66 | errors.display(e, "changing gradio theme") 67 | shared.gradio_theme = gr.themes.Default(**default_theme_args) 68 | -------------------------------------------------------------------------------- /modules/shared_init.py: -------------------------------------------------------------------------------- 1 | import os 2 | 3 | import torch 4 | 5 | from modules import shared 6 | from modules.shared import cmd_opts 7 | 8 | 9 | def initialize(): 10 | """Initializes fields inside the shared module in a controlled manner. 11 | 12 | Should be called early because some other modules you can import might need these fields to be already set.
13 | """ 14 | 15 | os.makedirs(cmd_opts.hypernetwork_dir, exist_ok=True) 16 | 17 | from modules import options, shared_options 18 | shared.options_templates = shared_options.options_templates 19 | shared.opts = options.Options(shared_options.options_templates, shared_options.restricted_opts) 20 | shared.restricted_opts = shared_options.restricted_opts 21 | if os.path.exists(shared.config_filename): 22 | shared.opts.load(shared.config_filename) 23 | 24 | from modules import devices 25 | devices.device, devices.device_interrogate, devices.device_gfpgan, devices.device_esrgan, devices.device_codeformer = \ 26 | (devices.cpu if any(y in cmd_opts.use_cpu for y in [x, 'all']) else devices.get_optimal_device() for x in ['sd', 'interrogate', 'gfpgan', 'esrgan', 'codeformer']) 27 | 28 | devices.dtype = torch.float32 if cmd_opts.no_half else torch.float16 29 | devices.dtype_vae = torch.float32 if cmd_opts.no_half or cmd_opts.no_half_vae else torch.float16 30 | 31 | shared.device = devices.device 32 | shared.weight_load_location = None if cmd_opts.lowram else "cpu" 33 | 34 | from modules import shared_state 35 | shared.state = shared_state.State() 36 | 37 | from modules import styles 38 | shared.prompt_styles = styles.StyleDatabase(shared.styles_filename) 39 | 40 | from modules import interrogate 41 | shared.interrogator = interrogate.InterrogateModels("interrogate") 42 | 43 | from modules import shared_total_tqdm 44 | shared.total_tqdm = shared_total_tqdm.TotalTQDM() 45 | 46 | from modules import memmon, devices 47 | shared.mem_mon = memmon.MemUsageMonitor("MemMon", devices.device, shared.opts) 48 | shared.mem_mon.start() 49 | 50 | -------------------------------------------------------------------------------- /modules/shared_items.py: -------------------------------------------------------------------------------- 1 | import sys 2 | 3 | from modules.shared_cmd_options import cmd_opts 4 | 5 | 6 | def realesrgan_models_names(): 7 | import modules.realesrgan_model 8 | return [x.name for x in modules.realesrgan_model.get_realesrgan_models(None)] 9 | 10 | 11 | def postprocessing_scripts(): 12 | import modules.scripts 13 | 14 | return modules.scripts.scripts_postproc.scripts 15 | 16 | 17 | def sd_vae_items(): 18 | import modules.sd_vae 19 | 20 | return ["Automatic", "None"] + list(modules.sd_vae.vae_dict) 21 | 22 | 23 | def refresh_vae_list(): 24 | import modules.sd_vae 25 | 26 | modules.sd_vae.refresh_vae_list() 27 | 28 | 29 | def cross_attention_optimizations(): 30 | import modules.sd_hijack 31 | 32 | return ["Automatic"] + [x.title() for x in modules.sd_hijack.optimizers] + ["None"] 33 | 34 | 35 | def sd_unet_items(): 36 | import modules.sd_unet 37 | 38 | return ["Automatic"] + [x.label for x in modules.sd_unet.unet_options] + ["None"] 39 | 40 | 41 | def refresh_unet_list(): 42 | import modules.sd_unet 43 | 44 | modules.sd_unet.list_unets() 45 | 46 | 47 | def list_checkpoint_tiles(): 48 | import modules.sd_models 49 | return modules.sd_models.checkpoint_tiles() 50 | 51 | 52 | def refresh_checkpoints(): 53 | import modules.sd_models 54 | return modules.sd_models.list_models() 55 | 56 | 57 | def list_samplers(): 58 | import modules.sd_samplers 59 | return modules.sd_samplers.all_samplers 60 | 61 | 62 | def reload_hypernetworks(): 63 | from modules.hypernetworks import hypernetwork 64 | from modules import shared 65 | 66 | shared.hypernetworks = hypernetwork.list_hypernetworks(cmd_opts.hypernetwork_dir) 67 | 68 | 69 | ui_reorder_categories_builtin_items = [ 70 | "inpaint", 71 | "sampler", 72 | 
"accordions", 73 | "checkboxes", 74 | "dimensions", 75 | "cfg", 76 | "denoising", 77 | "seed", 78 | "batch", 79 | "override_settings", 80 | ] 81 | 82 | 83 | def ui_reorder_categories(): 84 | from modules import scripts 85 | 86 | yield from ui_reorder_categories_builtin_items 87 | 88 | sections = {} 89 | for script in scripts.scripts_txt2img.scripts + scripts.scripts_img2img.scripts: 90 | if isinstance(script.section, str) and script.section not in ui_reorder_categories_builtin_items: 91 | sections[script.section] = 1 92 | 93 | yield from sections 94 | 95 | yield "scripts" 96 | 97 | 98 | class Shared(sys.modules[__name__].__class__): 99 | """ 100 | this class is here to provide sd_model field as a property, so that it can be created and loaded on demand rather than 101 | at program startup. 102 | """ 103 | 104 | sd_model_val = None 105 | 106 | @property 107 | def sd_model(self): 108 | import modules.sd_models 109 | 110 | return modules.sd_models.model_data.get_sd_model() 111 | 112 | @sd_model.setter 113 | def sd_model(self, value): 114 | import modules.sd_models 115 | 116 | modules.sd_models.model_data.set_sd_model(value) 117 | 118 | 119 | sys.modules['modules.shared'].__class__ = Shared 120 | -------------------------------------------------------------------------------- /modules/shared_total_tqdm.py: -------------------------------------------------------------------------------- 1 | import tqdm 2 | 3 | from modules import shared 4 | 5 | 6 | class TotalTQDM: 7 | def __init__(self): 8 | self._tqdm = None 9 | 10 | def reset(self): 11 | self._tqdm = tqdm.tqdm( 12 | desc="Total progress", 13 | total=shared.state.job_count * shared.state.sampling_steps, 14 | position=1, 15 | file=shared.progress_print_out 16 | ) 17 | 18 | def update(self): 19 | if not shared.opts.multiple_tqdm or shared.cmd_opts.disable_console_progressbars: 20 | return 21 | if self._tqdm is None: 22 | self.reset() 23 | self._tqdm.update() 24 | 25 | def updateTotal(self, new_total): 26 | if not shared.opts.multiple_tqdm or shared.cmd_opts.disable_console_progressbars: 27 | return 28 | if self._tqdm is None: 29 | self.reset() 30 | self._tqdm.total = new_total 31 | 32 | def clear(self): 33 | if self._tqdm is not None: 34 | self._tqdm.refresh() 35 | self._tqdm.close() 36 | self._tqdm = None 37 | 38 | -------------------------------------------------------------------------------- /modules/textual_inversion/learn_schedule.py: -------------------------------------------------------------------------------- 1 | import tqdm 2 | 3 | 4 | class LearnScheduleIterator: 5 | def __init__(self, learn_rate, max_steps, cur_step=0): 6 | """ 7 | specify learn_rate as "0.001:100, 0.00001:1000, 1e-5:10000" to have lr of 0.001 until step 100, 0.00001 until 1000, and 1e-5 until 10000 8 | """ 9 | 10 | pairs = learn_rate.split(',') 11 | self.rates = [] 12 | self.it = 0 13 | self.maxit = 0 14 | try: 15 | for pair in pairs: 16 | if not pair.strip(): 17 | continue 18 | tmp = pair.split(':') 19 | if len(tmp) == 2: 20 | step = int(tmp[1]) 21 | if step > cur_step: 22 | self.rates.append((float(tmp[0]), min(step, max_steps))) 23 | self.maxit += 1 24 | if step > max_steps: 25 | return 26 | elif step == -1: 27 | self.rates.append((float(tmp[0]), max_steps)) 28 | self.maxit += 1 29 | return 30 | else: 31 | self.rates.append((float(tmp[0]), max_steps)) 32 | self.maxit += 1 33 | return 34 | assert self.rates 35 | except (ValueError, AssertionError) as e: 36 | raise Exception('Invalid learning rate schedule. 
It should be a number or, for example, like "0.001:100, 0.00001:1000, 1e-5:10000" to have lr of 0.001 until step 100, 0.00001 until 1000, and 1e-5 until 10000.') from e 37 | 38 | 39 | def __iter__(self): 40 | return self 41 | 42 | def __next__(self): 43 | if self.it < self.maxit: 44 | self.it += 1 45 | return self.rates[self.it - 1] 46 | else: 47 | raise StopIteration 48 | 49 | 50 | class LearnRateScheduler: 51 | def __init__(self, learn_rate, max_steps, cur_step=0, verbose=True): 52 | self.schedules = LearnScheduleIterator(learn_rate, max_steps, cur_step) 53 | (self.learn_rate, self.end_step) = next(self.schedules) 54 | self.verbose = verbose 55 | 56 | if self.verbose: 57 | print(f'Training at rate of {self.learn_rate} until step {self.end_step}') 58 | 59 | self.finished = False 60 | 61 | def step(self, step_number): 62 | if step_number < self.end_step: 63 | return False 64 | 65 | try: 66 | (self.learn_rate, self.end_step) = next(self.schedules) 67 | except StopIteration: 68 | self.finished = True 69 | return False 70 | return True 71 | 72 | def apply(self, optimizer, step_number): 73 | if not self.step(step_number): 74 | return 75 | 76 | if self.verbose: 77 | tqdm.tqdm.write(f'Training at rate of {self.learn_rate} until step {self.end_step}') 78 | 79 | for pg in optimizer.param_groups: 80 | pg['lr'] = self.learn_rate 81 | 82 | -------------------------------------------------------------------------------- /modules/textual_inversion/logging.py: -------------------------------------------------------------------------------- 1 | import datetime 2 | import json 3 | import os 4 | 5 | saved_params_shared = { 6 | "batch_size", 7 | "clip_grad_mode", 8 | "clip_grad_value", 9 | "create_image_every", 10 | "data_root", 11 | "gradient_step", 12 | "initial_step", 13 | "latent_sampling_method", 14 | "learn_rate", 15 | "log_directory", 16 | "model_hash", 17 | "model_name", 18 | "num_of_dataset_images", 19 | "steps", 20 | "template_file", 21 | "training_height", 22 | "training_width", 23 | } 24 | saved_params_ti = { 25 | "embedding_name", 26 | "num_vectors_per_token", 27 | "save_embedding_every", 28 | "save_image_with_stored_embedding", 29 | } 30 | saved_params_hypernet = { 31 | "activation_func", 32 | "add_layer_norm", 33 | "hypernetwork_name", 34 | "layer_structure", 35 | "save_hypernetwork_every", 36 | "use_dropout", 37 | "weight_init", 38 | } 39 | saved_params_all = saved_params_shared | saved_params_ti | saved_params_hypernet 40 | saved_params_previews = { 41 | "preview_cfg_scale", 42 | "preview_height", 43 | "preview_negative_prompt", 44 | "preview_prompt", 45 | "preview_sampler_index", 46 | "preview_seed", 47 | "preview_steps", 48 | "preview_width", 49 | } 50 | 51 | 52 | def save_settings_to_file(log_directory, all_params): 53 | now = datetime.datetime.now() 54 | params = {"datetime": now.strftime("%Y-%m-%d %H:%M:%S")} 55 | 56 | keys = saved_params_all 57 | if all_params.get('preview_from_txt2img'): 58 | keys = keys | saved_params_previews 59 | 60 | params.update({k: v for k, v in all_params.items() if k in keys}) 61 | 62 | filename = f'settings-{now.strftime("%Y-%m-%d-%H-%M-%S")}.json' 63 | with open(os.path.join(log_directory, filename), "w") as file: 64 | json.dump(params, file, indent=4) 65 | -------------------------------------------------------------------------------- /modules/textual_inversion/test_embedding.png: -------------------------------------------------------------------------------- 
https://raw.githubusercontent.com/openvinotoolkit/stable-diffusion-webui/e5a634da06c62d72dbdc764b16c65ef3408aa588/modules/textual_inversion/test_embedding.png -------------------------------------------------------------------------------- /modules/textual_inversion/ui.py: -------------------------------------------------------------------------------- 1 | import html 2 | 3 | import gradio as gr 4 | 5 | import modules.textual_inversion.textual_inversion 6 | import modules.textual_inversion.preprocess 7 | from modules import sd_hijack, shared 8 | 9 | 10 | def create_embedding(name, initialization_text, nvpt, overwrite_old): 11 | filename = modules.textual_inversion.textual_inversion.create_embedding(name, nvpt, overwrite_old, init_text=initialization_text) 12 | 13 | sd_hijack.model_hijack.embedding_db.load_textual_inversion_embeddings() 14 | 15 | return gr.Dropdown.update(choices=sorted(sd_hijack.model_hijack.embedding_db.word_embeddings.keys())), f"Created: {filename}", "" 16 | 17 | 18 | def preprocess(*args): 19 | modules.textual_inversion.preprocess.preprocess(*args) 20 | 21 | return f"Preprocessing {'interrupted' if shared.state.interrupted else 'finished'}.", "" 22 | 23 | 24 | def train_embedding(*args): 25 | 26 | assert not shared.cmd_opts.lowvram, 'Training models with lowvram not possible' 27 | 28 | apply_optimizations = shared.opts.training_xattention_optimizations 29 | try: 30 | if not apply_optimizations: 31 | sd_hijack.undo_optimizations() 32 | 33 | embedding, filename = modules.textual_inversion.textual_inversion.train_embedding(*args) 34 | 35 | res = f""" 36 | Training {'interrupted' if shared.state.interrupted else 'finished'} at {embedding.step} steps. 37 | Embedding saved to {html.escape(filename)} 38 | """ 39 | return res, "" 40 | except Exception: 41 | raise 42 | finally: 43 | if not apply_optimizations: 44 | sd_hijack.apply_optimizations() 45 | 46 | -------------------------------------------------------------------------------- /modules/timer.py: -------------------------------------------------------------------------------- 1 | import time 2 | import argparse 3 | 4 | 5 | class TimerSubcategory: 6 | def __init__(self, timer, category): 7 | self.timer = timer 8 | self.category = category 9 | self.start = None 10 | self.original_base_category = timer.base_category 11 | 12 | def __enter__(self): 13 | self.start = time.time() 14 | self.timer.base_category = self.original_base_category + self.category + "/" 15 | self.timer.subcategory_level += 1 16 | 17 | if self.timer.print_log: 18 | print(f"{' ' * self.timer.subcategory_level}{self.category}:") 19 | 20 | def __exit__(self, exc_type, exc_val, exc_tb): 21 | elapsed_for_subcategory = time.time() - self.start 22 | self.timer.base_category = self.original_base_category 23 | self.timer.add_time_to_record(self.original_base_category + self.category, elapsed_for_subcategory) 24 | self.timer.subcategory_level -= 1 25 | self.timer.record(self.category, disable_log=True) 26 | 27 | 28 | class Timer: 29 | def __init__(self, print_log=False): 30 | self.start = time.time() 31 | self.records = {} 32 | self.total = 0 33 | self.base_category = '' 34 | self.print_log = print_log 35 | self.subcategory_level = 0 36 | 37 | def elapsed(self): 38 | end = time.time() 39 | res = end - self.start 40 | self.start = end 41 | return res 42 | 43 | def add_time_to_record(self, category, amount): 44 | if category not in self.records: 45 | self.records[category] = 0 46 | 47 | self.records[category] += amount 48 | 49 | def record(self, category,
extra_time=0, disable_log=False): 50 | e = self.elapsed() 51 | 52 | self.add_time_to_record(self.base_category + category, e + extra_time) 53 | 54 | self.total += e + extra_time 55 | 56 | if self.print_log and not disable_log: 57 | print(f"{' ' * self.subcategory_level}{category}: done in {e + extra_time:.3f}s") 58 | 59 | def subcategory(self, name): 60 | self.elapsed() 61 | 62 | subcat = TimerSubcategory(self, name) 63 | return subcat 64 | 65 | def summary(self): 66 | res = f"{self.total:.1f}s" 67 | 68 | additions = [(category, time_taken) for category, time_taken in self.records.items() if time_taken >= 0.1 and '/' not in category] 69 | if not additions: 70 | return res 71 | 72 | res += " (" 73 | res += ", ".join([f"{category}: {time_taken:.1f}s" for category, time_taken in additions]) 74 | res += ")" 75 | 76 | return res 77 | 78 | def dump(self): 79 | return {'total': self.total, 'records': self.records} 80 | 81 | def reset(self): 82 | self.__init__() 83 | 84 | 85 | parser = argparse.ArgumentParser(add_help=False) 86 | parser.add_argument("--log-startup", action='store_true', help="print a detailed log of what's happening at startup") 87 | args = parser.parse_known_args()[0] 88 | 89 | startup_timer = Timer(print_log=args.log_startup) 90 | 91 | startup_record = None 92 | -------------------------------------------------------------------------------- /modules/txt2img.py: -------------------------------------------------------------------------------- 1 | from contextlib import closing 2 | 3 | import modules.scripts 4 | from modules import processing 5 | from modules.generation_parameters_copypaste import create_override_settings_dict 6 | from modules.shared import opts, cmd_opts 7 | import modules.shared as shared 8 | from modules.ui import plaintext_to_html 9 | import gradio as gr 10 | 11 | 12 | def txt2img(id_task: str, prompt: str, negative_prompt: str, prompt_styles, steps: int, sampler_name: str, n_iter: int, batch_size: int, cfg_scale: float, height: int, width: int, enable_hr: bool, denoising_strength: float, hr_scale: float, hr_upscaler: str, hr_second_pass_steps: int, hr_resize_x: int, hr_resize_y: int, hr_checkpoint_name: str, hr_sampler_name: str, hr_prompt: str, hr_negative_prompt, override_settings_texts, request: gr.Request, *args): 13 | override_settings = create_override_settings_dict(override_settings_texts) 14 | 15 | p = processing.StableDiffusionProcessingTxt2Img( 16 | sd_model=shared.sd_model, 17 | outpath_samples=opts.outdir_samples or opts.outdir_txt2img_samples, 18 | outpath_grids=opts.outdir_grids or opts.outdir_txt2img_grids, 19 | prompt=prompt, 20 | styles=prompt_styles, 21 | negative_prompt=negative_prompt, 22 | sampler_name=sampler_name, 23 | batch_size=batch_size, 24 | n_iter=n_iter, 25 | steps=steps, 26 | cfg_scale=cfg_scale, 27 | width=width, 28 | height=height, 29 | enable_hr=enable_hr, 30 | denoising_strength=denoising_strength if enable_hr else None, 31 | hr_scale=hr_scale, 32 | hr_upscaler=hr_upscaler, 33 | hr_second_pass_steps=hr_second_pass_steps, 34 | hr_resize_x=hr_resize_x, 35 | hr_resize_y=hr_resize_y, 36 | hr_checkpoint_name=None if hr_checkpoint_name == 'Use same checkpoint' else hr_checkpoint_name, 37 | hr_sampler_name=None if hr_sampler_name == 'Use same sampler' else hr_sampler_name, 38 | hr_prompt=hr_prompt, 39 | hr_negative_prompt=hr_negative_prompt, 40 | override_settings=override_settings, 41 | ) 42 | 43 | p.scripts = modules.scripts.scripts_txt2img 44 | p.script_args = args 45 | 46 | p.user = request.username 47 | 48 | if 
cmd_opts.enable_console_prompts: 49 | print(f"\ntxt2img: {prompt}", file=shared.progress_print_out) 50 | 51 | with closing(p): 52 | processed = modules.scripts.scripts_txt2img.run(p, *args) 53 | 54 | if processed is None: 55 | processed = processing.process_images(p) 56 | 57 | shared.total_tqdm.clear() 58 | 59 | generation_info_js = processed.js() 60 | if opts.samples_log_stdout: 61 | print(generation_info_js) 62 | 63 | if opts.do_not_show_images: 64 | processed.images = [] 65 | 66 | return processed.images, generation_info_js, plaintext_to_html(processed.info), plaintext_to_html(processed.comments, classname="comments") 67 | -------------------------------------------------------------------------------- /modules/ui_extra_networks_checkpoints.py: -------------------------------------------------------------------------------- 1 | import html 2 | import os 3 | 4 | from modules import shared, ui_extra_networks, sd_models 5 | from modules.ui_extra_networks import quote_js 6 | from modules.ui_extra_networks_checkpoints_user_metadata import CheckpointUserMetadataEditor 7 | 8 | 9 | class ExtraNetworksPageCheckpoints(ui_extra_networks.ExtraNetworksPage): 10 | def __init__(self): 11 | super().__init__('Checkpoints') 12 | 13 | def refresh(self): 14 | shared.refresh_checkpoints() 15 | 16 | def create_item(self, name, index=None, enable_filter=True): 17 | checkpoint: sd_models.CheckpointInfo = sd_models.checkpoint_aliases.get(name) 18 | path, ext = os.path.splitext(checkpoint.filename) 19 | return { 20 | "name": checkpoint.name_for_extra, 21 | "filename": checkpoint.filename, 22 | "shorthash": checkpoint.shorthash, 23 | "preview": self.find_preview(path), 24 | "description": self.find_description(path), 25 | "search_term": self.search_terms_from_path(checkpoint.filename) + " " + (checkpoint.sha256 or ""), 26 | "onclick": '"' + html.escape(f"""return selectCheckpoint({quote_js(name)})""") + '"', 27 | "local_preview": f"{path}.{shared.opts.samples_format}", 28 | "metadata": checkpoint.metadata, 29 | "sort_keys": {'default': index, **self.get_sort_keys(checkpoint.filename)}, 30 | } 31 | 32 | def list_items(self): 33 | names = list(sd_models.checkpoints_list) 34 | for index, name in enumerate(names): 35 | yield self.create_item(name, index) 36 | 37 | def allowed_directories_for_previews(self): 38 | return [v for v in [shared.cmd_opts.ckpt_dir, sd_models.model_path] if v is not None] 39 | 40 | def create_user_metadata_editor(self, ui, tabname): 41 | return CheckpointUserMetadataEditor(ui, tabname, self) 42 | -------------------------------------------------------------------------------- /modules/ui_extra_networks_checkpoints_user_metadata.py: -------------------------------------------------------------------------------- 1 | import gradio as gr 2 | 3 | from modules import ui_extra_networks_user_metadata, sd_vae, shared 4 | from modules.ui_common import create_refresh_button 5 | 6 | 7 | class CheckpointUserMetadataEditor(ui_extra_networks_user_metadata.UserMetadataEditor): 8 | def __init__(self, ui, tabname, page): 9 | super().__init__(ui, tabname, page) 10 | 11 | self.select_vae = None 12 | 13 | def save_user_metadata(self, name, desc, notes, vae): 14 | user_metadata = self.get_user_metadata(name) 15 | user_metadata["description"] = desc 16 | user_metadata["notes"] = notes 17 | user_metadata["vae"] = vae 18 | 19 | self.write_user_metadata(name, user_metadata) 20 | 21 | def update_vae(self, name): 22 | if name == shared.sd_model.sd_checkpoint_info.name_for_extra: 23 | sd_vae.reload_vae_weights() 24 | 25 
| def put_values_into_components(self, name): 26 | user_metadata = self.get_user_metadata(name) 27 | values = super().put_values_into_components(name) 28 | 29 | return [ 30 | *values[0:5], 31 | user_metadata.get('vae', ''), 32 | ] 33 | 34 | def create_editor(self): 35 | self.create_default_editor_elems() 36 | 37 | with gr.Row(): 38 | self.select_vae = gr.Dropdown(choices=["Automatic", "None"] + list(sd_vae.vae_dict), value="None", label="Preferred VAE", elem_id="checkpoint_edit_user_metadata_preferred_vae") 39 | create_refresh_button(self.select_vae, sd_vae.refresh_vae_list, lambda: {"choices": ["Automatic", "None"] + list(sd_vae.vae_dict)}, "checkpoint_edit_user_metadata_refresh_preferred_vae") 40 | 41 | self.edit_notes = gr.TextArea(label='Notes', lines=4) 42 | 43 | self.create_default_buttons() 44 | 45 | viewed_components = [ 46 | self.edit_name, 47 | self.edit_description, 48 | self.html_filedata, 49 | self.html_preview, 50 | self.edit_notes, 51 | self.select_vae, 52 | ] 53 | 54 | self.button_edit\ 55 | .click(fn=self.put_values_into_components, inputs=[self.edit_name_input], outputs=viewed_components)\ 56 | .then(fn=lambda: gr.update(visible=True), inputs=[], outputs=[self.box]) 57 | 58 | edited_components = [ 59 | self.edit_description, 60 | self.edit_notes, 61 | self.select_vae, 62 | ] 63 | 64 | self.setup_save_handler(self.button_save, self.save_user_metadata, edited_components) 65 | self.button_save.click(fn=self.update_vae, inputs=[self.edit_name_input]) 66 | 67 | -------------------------------------------------------------------------------- /modules/ui_extra_networks_hypernets.py: -------------------------------------------------------------------------------- 1 | import os 2 | 3 | from modules import shared, ui_extra_networks 4 | from modules.ui_extra_networks import quote_js 5 | from modules.hashes import sha256_from_cache 6 | 7 | 8 | class ExtraNetworksPageHypernetworks(ui_extra_networks.ExtraNetworksPage): 9 | def __init__(self): 10 | super().__init__('Hypernetworks') 11 | 12 | def refresh(self): 13 | shared.reload_hypernetworks() 14 | 15 | def create_item(self, name, index=None, enable_filter=True): 16 | full_path = shared.hypernetworks[name] 17 | path, ext = os.path.splitext(full_path) 18 | sha256 = sha256_from_cache(full_path, f'hypernet/{name}') 19 | shorthash = sha256[0:10] if sha256 else None 20 | 21 | return { 22 | "name": name, 23 | "filename": full_path, 24 | "shorthash": shorthash, 25 | "preview": self.find_preview(path), 26 | "description": self.find_description(path), 27 | "search_term": self.search_terms_from_path(path) + " " + (sha256 or ""), 28 | "prompt": quote_js(f"<hypernet:{name}:") + " + opts.extra_networks_default_multiplier + " + quote_js(">"), 29 | "local_preview": f"{path}.preview.{shared.opts.samples_format}", 30 | "sort_keys": {'default': index, **self.get_sort_keys(path + ext)}, 31 | } 32 | 33 | def list_items(self): 34 | for index, name in enumerate(shared.hypernetworks): 35 | yield self.create_item(name, index) 36 | 37 | def allowed_directories_for_previews(self): 38 | return [shared.cmd_opts.hypernetwork_dir] 39 | 40 | -------------------------------------------------------------------------------- /modules/ui_extra_networks_textual_inversion.py: -------------------------------------------------------------------------------- 1 | import os 2 | 3 | from modules import ui_extra_networks, sd_hijack, shared 4 | from modules.ui_extra_networks import quote_js 5 | 6 | 7 | class ExtraNetworksPageTextualInversion(ui_extra_networks.ExtraNetworksPage): 8 | def __init__(self): 9 | super().__init__('Textual Inversion') 10 |
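# (annotation added to this dump: the flag on the next line is what lets this page's cards insert an embedding into the negative prompt as well as the positive one in the extra-networks UI; other pages keep the ExtraNetworksPage default of False)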
self.allow_negative_prompt = True 11 | 12 | def refresh(self): 13 | sd_hijack.model_hijack.embedding_db.load_textual_inversion_embeddings(force_reload=True) 14 | 15 | def create_item(self, name, index=None, enable_filter=True): 16 | embedding = sd_hijack.model_hijack.embedding_db.word_embeddings.get(name) 17 | 18 | path, ext = os.path.splitext(embedding.filename) 19 | return { 20 | "name": name, 21 | "filename": embedding.filename, 22 | "shorthash": embedding.shorthash, 23 | "preview": self.find_preview(path), 24 | "description": self.find_description(path), 25 | "search_term": self.search_terms_from_path(embedding.filename) + " " + (embedding.hash or ""), 26 | "prompt": quote_js(embedding.name), 27 | "local_preview": f"{path}.preview.{shared.opts.samples_format}", 28 | "sort_keys": {'default': index, **self.get_sort_keys(embedding.filename)}, 29 | } 30 | 31 | def list_items(self): 32 | for index, name in enumerate(sd_hijack.model_hijack.embedding_db.word_embeddings): 33 | yield self.create_item(name, index) 34 | 35 | def allowed_directories_for_previews(self): 36 | return list(sd_hijack.model_hijack.embedding_db.embedding_dirs) 37 | -------------------------------------------------------------------------------- /modules/ui_gradio_extensions.py: -------------------------------------------------------------------------------- 1 | import os 2 | import gradio as gr 3 | 4 | from modules import localization, shared, scripts 5 | from modules.paths import script_path, data_path 6 | 7 | 8 | def webpath(fn): 9 | if fn.startswith(script_path): 10 | web_path = os.path.relpath(fn, script_path).replace('\\', '/') 11 | else: 12 | web_path = os.path.abspath(fn) 13 | 14 | return f'file={web_path}?{os.path.getmtime(fn)}' 15 | 16 | 17 | def javascript_html(): 18 | # Ensure localization is in `window` before scripts 19 | head = f'<script type="text/javascript">{localization.localization_js(shared.opts.localization)}</script>\n' 20 | 21 | script_js = os.path.join(script_path, "script.js") 22 | head += f'<script type="text/javascript" src="{webpath(script_js)}"></script>\n' 23 | 24 | for script in scripts.list_scripts("javascript", ".js"): 25 | head += f'<script type="text/javascript" src="{webpath(script.path)}"></script>\n' 26 | 27 | for script in scripts.list_scripts("javascript", ".mjs"): 28 | head += f'<script type="module" src="{webpath(script.path)}"></script>\n' 29 | 30 | if shared.cmd_opts.theme: 31 | head += f'<script type="text/javascript">set_theme("{shared.cmd_opts.theme}");</script>\n' 32 | 33 | return head 34 | 35 | 36 | def css_html(): 37 | head = "" 38 | 39 | def stylesheet(fn): 40 | return f'<link rel="stylesheet" property="stylesheet" href="{webpath(fn)}">' 41 | 42 | for cssfile in scripts.list_files_with_name("style.css"): 43 | if not os.path.isfile(cssfile): 44 | continue 45 | 46 | head += stylesheet(cssfile) 47 | 48 | if os.path.exists(os.path.join(data_path, "user.css")): 49 | head += stylesheet(os.path.join(data_path, "user.css")) 50 | 51 | return head 52 | 53 | 54 | def reload_javascript(): 55 | js = javascript_html() 56 | css = css_html() 57 | 58 | def template_response(*args, **kwargs): 59 | res = shared.GradioTemplateResponseOriginal(*args, **kwargs) 60 | res.body = res.body.replace(b'</head>', f'{js}</head>'.encode("utf8")) 61 | res.body = res.body.replace(b'</body>', f'{css}</body>'.encode("utf8")) 62 | res.init_headers() 63 | return res 64 | 65 | gr.routes.templates.TemplateResponse = template_response 66 | 67 | 68 | if not hasattr(shared, 'GradioTemplateResponseOriginal'): 69 | shared.GradioTemplateResponseOriginal = gr.routes.templates.TemplateResponse 70 | -------------------------------------------------------------------------------- /modules/ui_postprocessing.py: -------------------------------------------------------------------------------- 1 | import gradio as gr 2 | from modules import scripts, shared, ui_common, postprocessing, call_queue 3 | import modules.generation_parameters_copypaste as parameters_copypaste 4 | 5 | 6
| def create_ui(): 7 | tab_index = gr.State(value=0) 8 | 9 | with gr.Row(equal_height=False, variant='compact'): 10 | with gr.Column(variant='compact'): 11 | with gr.Tabs(elem_id="mode_extras"): 12 | with gr.TabItem('Single Image', id="single_image", elem_id="extras_single_tab") as tab_single: 13 | extras_image = gr.Image(label="Source", source="upload", interactive=True, type="pil", elem_id="extras_image") 14 | 15 | with gr.TabItem('Batch Process', id="batch_process", elem_id="extras_batch_process_tab") as tab_batch: 16 | image_batch = gr.Files(label="Batch Process", interactive=True, elem_id="extras_image_batch") 17 | 18 | with gr.TabItem('Batch from Directory', id="batch_from_directory", elem_id="extras_batch_directory_tab") as tab_batch_dir: 19 | extras_batch_input_dir = gr.Textbox(label="Input directory", **shared.hide_dirs, placeholder="A directory on the same machine where the server is running.", elem_id="extras_batch_input_dir") 20 | extras_batch_output_dir = gr.Textbox(label="Output directory", **shared.hide_dirs, placeholder="Leave blank to save images to the default path.", elem_id="extras_batch_output_dir") 21 | show_extras_results = gr.Checkbox(label='Show result images', value=True, elem_id="extras_show_extras_results") 22 | 23 | submit = gr.Button('Generate', elem_id="extras_generate", variant='primary') 24 | 25 | script_inputs = scripts.scripts_postproc.setup_ui() 26 | 27 | with gr.Column(): 28 | result_images, html_info_x, html_info, html_log = ui_common.create_output_panel("extras", shared.opts.outdir_extras_samples) 29 | 30 | tab_single.select(fn=lambda: 0, inputs=[], outputs=[tab_index]) 31 | tab_batch.select(fn=lambda: 1, inputs=[], outputs=[tab_index]) 32 | tab_batch_dir.select(fn=lambda: 2, inputs=[], outputs=[tab_index]) 33 | 34 | submit.click( 35 | fn=call_queue.wrap_gradio_gpu_call(postprocessing.run_postprocessing, extra_outputs=[None, '']), 36 | inputs=[ 37 | tab_index, 38 | extras_image, 39 | image_batch, 40 | extras_batch_input_dir, 41 | extras_batch_output_dir, 42 | show_extras_results, 43 | *script_inputs 44 | ], 45 | outputs=[ 46 | result_images, 47 | html_info_x, 48 | html_info, 49 | ] 50 | ) 51 | 52 | parameters_copypaste.add_paste_fields("extras", extras_image, None) 53 | 54 | extras_image.change( 55 | fn=scripts.scripts_postproc.image_changed, 56 | inputs=[], outputs=[] 57 | ) 58 | -------------------------------------------------------------------------------- /modules/ui_tempdir.py: -------------------------------------------------------------------------------- 1 | import os 2 | import tempfile 3 | from collections import namedtuple 4 | from pathlib import Path 5 | 6 | import gradio.components 7 | 8 | from PIL import PngImagePlugin 9 | 10 | from modules import shared 11 | 12 | 13 | Savedfile = namedtuple("Savedfile", ["name"]) 14 | 15 | 16 | def register_tmp_file(gradio, filename): 17 | if hasattr(gradio, 'temp_file_sets'): # gradio 3.15 18 | gradio.temp_file_sets[0] = gradio.temp_file_sets[0] | {os.path.abspath(filename)} 19 | 20 | if hasattr(gradio, 'temp_dirs'): # gradio 3.9 21 | gradio.temp_dirs = gradio.temp_dirs | {os.path.abspath(os.path.dirname(filename))} 22 | 23 | 24 | def check_tmp_file(gradio, filename): 25 | if hasattr(gradio, 'temp_file_sets'): 26 | return any(filename in fileset for fileset in gradio.temp_file_sets) 27 | 28 | if hasattr(gradio, 'temp_dirs'): 29 | return any(Path(temp_dir).resolve() in Path(filename).resolve().parents for temp_dir in gradio.temp_dirs) 30 | 31 | return False 32 | 33 | 34 | def save_pil_to_file(self, 
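# (annotation added to this dump, on save_pil_to_file, whose definition begins here: install_ui_tempdir_override() further down patches it over gradio's IOComponent.pil_to_temp_file so that images served from the temp dir keep their PNG generation parameters; `already_saved_as` is an attribute the webui's image-saving code sets on images it has already written to disk, letting that existing file be reused instead of saving a second copy)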
pil_image, dir=None, format="png"): 35 | already_saved_as = getattr(pil_image, 'already_saved_as', None) 36 | if already_saved_as and os.path.isfile(already_saved_as): 37 | register_tmp_file(shared.demo, already_saved_as) 38 | filename = already_saved_as 39 | 40 | if not shared.opts.save_images_add_number: 41 | filename += f'?{os.path.getmtime(already_saved_as)}' 42 | 43 | return filename 44 | 45 | if shared.opts.temp_dir != "": 46 | dir = shared.opts.temp_dir 47 | else: 48 | os.makedirs(dir, exist_ok=True) 49 | 50 | use_metadata = False 51 | metadata = PngImagePlugin.PngInfo() 52 | for key, value in pil_image.info.items(): 53 | if isinstance(key, str) and isinstance(value, str): 54 | metadata.add_text(key, value) 55 | use_metadata = True 56 | 57 | file_obj = tempfile.NamedTemporaryFile(delete=False, suffix=".png", dir=dir) 58 | pil_image.save(file_obj, pnginfo=(metadata if use_metadata else None)) 59 | return file_obj.name 60 | 61 | 62 | def install_ui_tempdir_override(): 63 | """override save to file function so that it also writes PNG info""" 64 | gradio.components.IOComponent.pil_to_temp_file = save_pil_to_file 65 | 66 | 67 | def on_tmpdir_changed(): 68 | if shared.opts.temp_dir == "" or shared.demo is None: 69 | return 70 | 71 | os.makedirs(shared.opts.temp_dir, exist_ok=True) 72 | 73 | register_tmp_file(shared.demo, os.path.join(shared.opts.temp_dir, "x")) 74 | 75 | 76 | def cleanup_tmpdr(): 77 | temp_dir = shared.opts.temp_dir 78 | if temp_dir == "" or not os.path.isdir(temp_dir): 79 | return 80 | 81 | for root, _, files in os.walk(temp_dir, topdown=False): 82 | for name in files: 83 | _, extension = os.path.splitext(name) 84 | if extension != ".png": 85 | continue 86 | 87 | filename = os.path.join(root, name) 88 | os.remove(filename) 89 | -------------------------------------------------------------------------------- /modules/util.py: -------------------------------------------------------------------------------- 1 | import os 2 | import re 3 | 4 | from modules import shared 5 | from modules.paths_internal import script_path 6 | 7 | 8 | def natural_sort_key(s, regex=re.compile('([0-9]+)')): 9 | return [int(text) if text.isdigit() else text.lower() for text in regex.split(s)] 10 | 11 | 12 | def listfiles(dirname): 13 | filenames = [os.path.join(dirname, x) for x in sorted(os.listdir(dirname), key=natural_sort_key) if not x.startswith(".")] 14 | return [file for file in filenames if os.path.isfile(file)] 15 | 16 | 17 | def html_path(filename): 18 | return os.path.join(script_path, "html", filename) 19 | 20 | 21 | def html(filename): 22 | path = html_path(filename) 23 | 24 | if os.path.exists(path): 25 | with open(path, encoding="utf8") as file: 26 | return file.read() 27 | 28 | return "" 29 | 30 | 31 | def walk_files(path, allowed_extensions=None): 32 | if not os.path.exists(path): 33 | return 34 | 35 | if allowed_extensions is not None: 36 | allowed_extensions = set(allowed_extensions) 37 | 38 | items = list(os.walk(path, followlinks=True)) 39 | items = sorted(items, key=lambda x: natural_sort_key(x[0])) 40 | 41 | for root, _, files in items: 42 | for filename in sorted(files, key=natural_sort_key): 43 | if allowed_extensions is not None: 44 | _, ext = os.path.splitext(filename) 45 | if ext not in allowed_extensions: 46 | continue 47 | 48 | if not shared.opts.list_hidden_files and ("/." in root or "\\." 
in root): 49 | continue 50 | 51 | yield os.path.join(root, filename) 52 | 53 | 54 | def ldm_print(*args, **kwargs): 55 | if shared.opts.hide_ldm_prints: 56 | return 57 | 58 | print(*args, **kwargs) 59 | -------------------------------------------------------------------------------- /package.json: -------------------------------------------------------------------------------- 1 | { 2 | "name": "stable-diffusion-webui", 3 | "version": "0.0.0", 4 | "devDependencies": { 5 | "eslint": "^8.40.0" 6 | }, 7 | "scripts": { 8 | "lint": "eslint .", 9 | "fix": "eslint --fix ." 10 | } 11 | } 12 | -------------------------------------------------------------------------------- /pyproject.toml: -------------------------------------------------------------------------------- 1 | [tool.ruff] 2 | 3 | target-version = "py39" 4 | 5 | extend-select = [ 6 | "B", 7 | "C", 8 | "I", 9 | "W", 10 | ] 11 | 12 | exclude = [ 13 | "extensions", 14 | "extensions-disabled", 15 | ] 16 | 17 | ignore = [ 18 | "E501", # Line too long 19 | "E731", # Do not assign a `lambda` expression, use a `def` 20 | 21 | "I001", # Import block is un-sorted or un-formatted 22 | "C901", # Function is too complex 23 | "C408", # Rewrite as a literal 24 | "W605", # invalid escape sequence, messes with some docstrings 25 | ] 26 | 27 | [tool.ruff.per-file-ignores] 28 | "webui.py" = ["E402"] # Module level import not at top of file 29 | 30 | [tool.ruff.flake8-bugbear] 31 | # Allow default arguments like, e.g., `data: List[str] = fastapi.Query(None)`. 32 | extend-immutable-calls = ["fastapi.Depends", "fastapi.security.HTTPBasic"] 33 | 34 | [tool.pytest.ini_options] 35 | base_url = "http://127.0.0.1:7860" 36 | -------------------------------------------------------------------------------- /requirements-test.txt: -------------------------------------------------------------------------------- 1 | pytest-base-url~=2.0 2 | pytest-cov~=4.0 3 | pytest~=7.3 4 | -------------------------------------------------------------------------------- /requirements.txt: -------------------------------------------------------------------------------- 1 | GitPython 2 | Pillow 3 | accelerate 4 | 5 | basicsr 6 | blendmodes 7 | clean-fid 8 | einops 9 | fastapi>=0.90.1 10 | gfpgan 11 | gradio==3.41.2 12 | inflection 13 | jsonmerge 14 | kornia 15 | lark 16 | numpy 17 | omegaconf 18 | open-clip-torch 19 | 20 | piexif 21 | psutil 22 | pytorch_lightning 23 | realesrgan 24 | requests 25 | resize-right 26 | 27 | safetensors 28 | scikit-image>=0.19 29 | timm 30 | tomesd 31 | torch 32 | torchdiffeq 33 | torchsde 34 | diffusers 35 | openvino 36 | invisible-watermark 37 | transformers 38 | 39 | -------------------------------------------------------------------------------- /requirements_versions.txt: -------------------------------------------------------------------------------- 1 | GitPython==3.1.37 2 | Pillow==10.0.1 3 | accelerate==0.21.0 4 | basicsr==1.4.2 5 | blendmodes==2023 6 | clean-fid==0.1.35 7 | einops==0.4.1 8 | fastapi==0.94.0 9 | gfpgan==1.3.8 10 | gradio==3.41.2 11 | httpcore==0.15 12 | httpx==0.24.1 13 | inflection==0.5.1 14 | jsonmerge==1.8.0 15 | kornia==0.6.7 16 | lark==1.1.2 17 | numpy==1.23.5 18 | omegaconf==2.2.3 19 | open-clip-torch==2.20.0 20 | piexif==1.1.3 21 | psutil==5.9.5 22 | pytorch_lightning==1.9.4 23 | realesrgan==0.3.0 24 | resize-right==0.0.2 25 | safetensors==0.3.1 26 | scikit-image==0.21.0 27 | timm==0.9.2 28 | tomesd==0.1.3 29 | torch 30 | torchdiffeq==0.2.3 31 | torchsde==0.2.5 32 | transformers==4.30.2 33 | diffusers==0.23.0 34 | 
openvino==2023.2.0 35 | invisible-watermark 36 | 37 | 38 | -------------------------------------------------------------------------------- /screenshot.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/openvinotoolkit/stable-diffusion-webui/e5a634da06c62d72dbdc764b16c65ef3408aa588/screenshot.png -------------------------------------------------------------------------------- /screenshot_OpenVINO.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/openvinotoolkit/stable-diffusion-webui/e5a634da06c62d72dbdc764b16c65ef3408aa588/screenshot_OpenVINO.png -------------------------------------------------------------------------------- /scripts/custom_code.py: -------------------------------------------------------------------------------- 1 | import modules.scripts as scripts 2 | import gradio as gr 3 | import ast 4 | import copy 5 | 6 | from modules.processing import Processed 7 | from modules.shared import cmd_opts 8 | 9 | 10 | def convertExpr2Expression(expr): 11 | expr.lineno = 0 12 | expr.col_offset = 0 13 | result = ast.Expression(expr.value, lineno=0, col_offset = 0) 14 | 15 | return result 16 | 17 | 18 | def exec_with_return(code, module): 19 | """ 20 | like exec() but can return values 21 | https://stackoverflow.com/a/52361938/5862977 22 | """ 23 | code_ast = ast.parse(code) 24 | 25 | init_ast = copy.deepcopy(code_ast) 26 | init_ast.body = code_ast.body[:-1] 27 | 28 | last_ast = copy.deepcopy(code_ast) 29 | last_ast.body = code_ast.body[-1:] 30 | 31 | exec(compile(init_ast, "<ast>", "exec"), module.__dict__) 32 | if type(last_ast.body[0]) == ast.Expr: 33 | return eval(compile(convertExpr2Expression(last_ast.body[0]), "<ast>", "eval"), module.__dict__) 34 | else: 35 | exec(compile(last_ast, "<ast>", "exec"), module.__dict__) 36 | 37 | 38 | class Script(scripts.Script): 39 | 40 | def title(self): 41 | return "Custom code" 42 | 43 | def show(self, is_img2img): 44 | return cmd_opts.allow_code 45 | 46 | def ui(self, is_img2img): 47 | example = """from modules.processing import process_images 48 | 49 | p.width = 768 50 | p.height = 768 51 | p.batch_size = 2 52 | p.steps = 10 53 | 54 | return process_images(p) 55 | """ 56 | 57 | 58 | code = gr.Code(value=example, language="python", label="Python code", elem_id=self.elem_id("code")) 59 | indent_level = gr.Number(label='Indent level', value=2, precision=0, elem_id=self.elem_id("indent_level")) 60 | 61 | return [code, indent_level] 62 | 63 | def run(self, p, code, indent_level): 64 | assert cmd_opts.allow_code, '--allow-code option must be enabled' 65 | 66 | display_result_data = [[], -1, ""] 67 | 68 | def display(imgs, s=display_result_data[1], i=display_result_data[2]): 69 | display_result_data[0] = imgs 70 | display_result_data[1] = s 71 | display_result_data[2] = i 72 | 73 | from types import ModuleType 74 | module = ModuleType("testmodule") 75 | module.__dict__.update(globals()) 76 | module.p = p 77 | module.display = display 78 | 79 | indent = " " * indent_level 80 | indented = code.replace('\n', f"\n{indent}") 81 | body = f"""def __webuitemp__(): 82 | {indent}{indented} 83 | __webuitemp__()""" 84 | 85 | result = exec_with_return(body, module) 86 | 87 | if isinstance(result, Processed): 88 | return result 89 | 90 | return Processed(p, *display_result_data) 91 | -------------------------------------------------------------------------------- /scripts/postprocessing_codeformer.py:
-------------------------------------------------------------------------------- 1 | from PIL import Image 2 | import numpy as np 3 | 4 | from modules import scripts_postprocessing, codeformer_model 5 | import gradio as gr 6 | 7 | from modules.ui_components import FormRow 8 | 9 | 10 | class ScriptPostprocessingCodeFormer(scripts_postprocessing.ScriptPostprocessing): 11 | name = "CodeFormer" 12 | order = 3000 13 | 14 | def ui(self): 15 | with FormRow(): 16 | codeformer_visibility = gr.Slider(minimum=0.0, maximum=1.0, step=0.001, label="CodeFormer visibility", value=0, elem_id="extras_codeformer_visibility") 17 | codeformer_weight = gr.Slider(minimum=0.0, maximum=1.0, step=0.001, label="CodeFormer weight (0 = maximum effect, 1 = minimum effect)", value=0, elem_id="extras_codeformer_weight") 18 | 19 | return { 20 | "codeformer_visibility": codeformer_visibility, 21 | "codeformer_weight": codeformer_weight, 22 | } 23 | 24 | def process(self, pp: scripts_postprocessing.PostprocessedImage, codeformer_visibility, codeformer_weight): 25 | if codeformer_visibility == 0: 26 | return 27 | 28 | restored_img = codeformer_model.codeformer.restore(np.array(pp.image, dtype=np.uint8), w=codeformer_weight) 29 | res = Image.fromarray(restored_img) 30 | 31 | if codeformer_visibility < 1.0: 32 | res = Image.blend(pp.image, res, codeformer_visibility) 33 | 34 | pp.image = res 35 | pp.info["CodeFormer visibility"] = round(codeformer_visibility, 3) 36 | pp.info["CodeFormer weight"] = round(codeformer_weight, 3) 37 | -------------------------------------------------------------------------------- /scripts/postprocessing_gfpgan.py: -------------------------------------------------------------------------------- 1 | from PIL import Image 2 | import numpy as np 3 | 4 | from modules import scripts_postprocessing, gfpgan_model 5 | import gradio as gr 6 | 7 | from modules.ui_components import FormRow 8 | 9 | 10 | class ScriptPostprocessingGfpGan(scripts_postprocessing.ScriptPostprocessing): 11 | name = "GFPGAN" 12 | order = 2000 13 | 14 | def ui(self): 15 | with FormRow(): 16 | gfpgan_visibility = gr.Slider(minimum=0.0, maximum=1.0, step=0.001, label="GFPGAN visibility", value=0, elem_id="extras_gfpgan_visibility") 17 | 18 | return { 19 | "gfpgan_visibility": gfpgan_visibility, 20 | } 21 | 22 | def process(self, pp: scripts_postprocessing.PostprocessedImage, gfpgan_visibility): 23 | if gfpgan_visibility == 0: 24 | return 25 | 26 | restored_img = gfpgan_model.gfpgan_fix_faces(np.array(pp.image, dtype=np.uint8)) 27 | res = Image.fromarray(restored_img) 28 | 29 | if gfpgan_visibility < 1.0: 30 | res = Image.blend(pp.image, res, gfpgan_visibility) 31 | 32 | pp.image = res 33 | pp.info["GFPGAN visibility"] = round(gfpgan_visibility, 3) 34 | -------------------------------------------------------------------------------- /test/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/openvinotoolkit/stable-diffusion-webui/e5a634da06c62d72dbdc764b16c65ef3408aa588/test/__init__.py -------------------------------------------------------------------------------- /test/conftest.py: -------------------------------------------------------------------------------- 1 | import os 2 | 3 | import pytest 4 | import base64 5 | 6 | 7 | test_files_path = os.path.dirname(__file__) + "/test_files" 8 | 9 | 10 | def file_to_base64(filename): 11 | with open(filename, "rb") as file: 12 | data = file.read() 13 | 14 | base64_str = str(base64.b64encode(data), "utf-8") 15 | return 
"data:image/png;base64," + base64_str 16 | 17 | 18 | @pytest.fixture(scope="session") # session so we don't read this over and over 19 | def img2img_basic_image_base64() -> str: 20 | return file_to_base64(os.path.join(test_files_path, "img2img_basic.png")) 21 | 22 | 23 | @pytest.fixture(scope="session") # session so we don't read this over and over 24 | def mask_basic_image_base64() -> str: 25 | return file_to_base64(os.path.join(test_files_path, "mask_basic.png")) 26 | -------------------------------------------------------------------------------- /test/test_extras.py: -------------------------------------------------------------------------------- 1 | import requests 2 | 3 | 4 | def test_simple_upscaling_performed(base_url, img2img_basic_image_base64): 5 | payload = { 6 | "resize_mode": 0, 7 | "show_extras_results": True, 8 | "gfpgan_visibility": 0, 9 | "codeformer_visibility": 0, 10 | "codeformer_weight": 0, 11 | "upscaling_resize": 2, 12 | "upscaling_resize_w": 128, 13 | "upscaling_resize_h": 128, 14 | "upscaling_crop": True, 15 | "upscaler_1": "Lanczos", 16 | "upscaler_2": "None", 17 | "extras_upscaler_2_visibility": 0, 18 | "image": img2img_basic_image_base64, 19 | } 20 | assert requests.post(f"{base_url}/sdapi/v1/extra-single-image", json=payload).status_code == 200 21 | 22 | 23 | def test_png_info_performed(base_url, img2img_basic_image_base64): 24 | payload = { 25 | "image": img2img_basic_image_base64, 26 | } 27 | assert requests.post(f"{base_url}/sdapi/v1/extra-single-image", json=payload).status_code == 200 28 | 29 | 30 | def test_interrogate_performed(base_url, img2img_basic_image_base64): 31 | payload = { 32 | "image": img2img_basic_image_base64, 33 | "model": "clip", 34 | } 35 | assert requests.post(f"{base_url}/sdapi/v1/extra-single-image", json=payload).status_code == 200 36 | -------------------------------------------------------------------------------- /test/test_files/empty.pt: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/openvinotoolkit/stable-diffusion-webui/e5a634da06c62d72dbdc764b16c65ef3408aa588/test/test_files/empty.pt -------------------------------------------------------------------------------- /test/test_files/img2img_basic.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/openvinotoolkit/stable-diffusion-webui/e5a634da06c62d72dbdc764b16c65ef3408aa588/test/test_files/img2img_basic.png -------------------------------------------------------------------------------- /test/test_files/mask_basic.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/openvinotoolkit/stable-diffusion-webui/e5a634da06c62d72dbdc764b16c65ef3408aa588/test/test_files/mask_basic.png -------------------------------------------------------------------------------- /test/test_img2img.py: -------------------------------------------------------------------------------- 1 | 2 | import pytest 3 | import requests 4 | 5 | 6 | @pytest.fixture() 7 | def url_img2img(base_url): 8 | return f"{base_url}/sdapi/v1/img2img" 9 | 10 | 11 | @pytest.fixture() 12 | def simple_img2img_request(img2img_basic_image_base64): 13 | return { 14 | "batch_size": 1, 15 | "cfg_scale": 7, 16 | "denoising_strength": 0.75, 17 | "eta": 0, 18 | "height": 64, 19 | "include_init_images": False, 20 | "init_images": [img2img_basic_image_base64], 21 | "inpaint_full_res": False, 22 | "inpaint_full_res_padding": 0, 23 | 
"inpainting_fill": 0, 24 | "inpainting_mask_invert": False, 25 | "mask": None, 26 | "mask_blur": 4, 27 | "n_iter": 1, 28 | "negative_prompt": "", 29 | "override_settings": {}, 30 | "prompt": "example prompt", 31 | "resize_mode": 0, 32 | "restore_faces": False, 33 | "s_churn": 0, 34 | "s_noise": 1, 35 | "s_tmax": 0, 36 | "s_tmin": 0, 37 | "sampler_index": "Euler a", 38 | "seed": -1, 39 | "seed_resize_from_h": -1, 40 | "seed_resize_from_w": -1, 41 | "steps": 3, 42 | "styles": [], 43 | "subseed": -1, 44 | "subseed_strength": 0, 45 | "tiling": False, 46 | "width": 64, 47 | } 48 | 49 | 50 | def test_img2img_simple_performed(url_img2img, simple_img2img_request): 51 | assert requests.post(url_img2img, json=simple_img2img_request).status_code == 200 52 | 53 | 54 | def test_inpainting_masked_performed(url_img2img, simple_img2img_request, mask_basic_image_base64): 55 | simple_img2img_request["mask"] = mask_basic_image_base64 56 | assert requests.post(url_img2img, json=simple_img2img_request).status_code == 200 57 | 58 | 59 | def test_inpainting_with_inverted_masked_performed(url_img2img, simple_img2img_request, mask_basic_image_base64): 60 | simple_img2img_request["mask"] = mask_basic_image_base64 61 | simple_img2img_request["inpainting_mask_invert"] = True 62 | assert requests.post(url_img2img, json=simple_img2img_request).status_code == 200 63 | 64 | 65 | def test_img2img_sd_upscale_performed(url_img2img, simple_img2img_request): 66 | simple_img2img_request["script_name"] = "sd upscale" 67 | simple_img2img_request["script_args"] = ["", 8, "Lanczos", 2.0] 68 | assert requests.post(url_img2img, json=simple_img2img_request).status_code == 200 69 | -------------------------------------------------------------------------------- /test/test_txt2img.py: -------------------------------------------------------------------------------- 1 | 2 | import pytest 3 | import requests 4 | 5 | 6 | @pytest.fixture() 7 | def url_txt2img(base_url): 8 | return f"{base_url}/sdapi/v1/txt2img" 9 | 10 | 11 | @pytest.fixture() 12 | def simple_txt2img_request(): 13 | return { 14 | "batch_size": 1, 15 | "cfg_scale": 7, 16 | "denoising_strength": 0, 17 | "enable_hr": False, 18 | "eta": 0, 19 | "firstphase_height": 0, 20 | "firstphase_width": 0, 21 | "height": 64, 22 | "n_iter": 1, 23 | "negative_prompt": "", 24 | "prompt": "example prompt", 25 | "restore_faces": False, 26 | "s_churn": 0, 27 | "s_noise": 1, 28 | "s_tmax": 0, 29 | "s_tmin": 0, 30 | "sampler_index": "Euler a", 31 | "seed": -1, 32 | "seed_resize_from_h": -1, 33 | "seed_resize_from_w": -1, 34 | "steps": 3, 35 | "styles": [], 36 | "subseed": -1, 37 | "subseed_strength": 0, 38 | "tiling": False, 39 | "width": 64, 40 | } 41 | 42 | 43 | def test_txt2img_simple_performed(url_txt2img, simple_txt2img_request): 44 | assert requests.post(url_txt2img, json=simple_txt2img_request).status_code == 200 45 | 46 | 47 | def test_txt2img_with_negative_prompt_performed(url_txt2img, simple_txt2img_request): 48 | simple_txt2img_request["negative_prompt"] = "example negative prompt" 49 | assert requests.post(url_txt2img, json=simple_txt2img_request).status_code == 200 50 | 51 | 52 | def test_txt2img_with_complex_prompt_performed(url_txt2img, simple_txt2img_request): 53 | simple_txt2img_request["prompt"] = "((emphasis)), (emphasis1:1.1), [to:1], [from::2], [from:to:0.3], [alt|alt1]" 54 | assert requests.post(url_txt2img, json=simple_txt2img_request).status_code == 200 55 | 56 | 57 | def test_txt2img_not_square_image_performed(url_txt2img, simple_txt2img_request): 58 | 
simple_txt2img_request["height"] = 128 59 | assert requests.post(url_txt2img, json=simple_txt2img_request).status_code == 200 60 | 61 | 62 | def test_txt2img_with_hrfix_performed(url_txt2img, simple_txt2img_request): 63 | simple_txt2img_request["enable_hr"] = True 64 | assert requests.post(url_txt2img, json=simple_txt2img_request).status_code == 200 65 | 66 | 67 | def test_txt2img_with_tiling_performed(url_txt2img, simple_txt2img_request): 68 | simple_txt2img_request["tiling"] = True 69 | assert requests.post(url_txt2img, json=simple_txt2img_request).status_code == 200 70 | 71 | 72 | def test_txt2img_with_restore_faces_performed(url_txt2img, simple_txt2img_request): 73 | simple_txt2img_request["restore_faces"] = True 74 | assert requests.post(url_txt2img, json=simple_txt2img_request).status_code == 200 75 | 76 | 77 | @pytest.mark.parametrize("sampler", ["PLMS", "DDIM", "UniPC"]) 78 | def test_txt2img_with_vanilla_sampler_performed(url_txt2img, simple_txt2img_request, sampler): 79 | simple_txt2img_request["sampler_index"] = sampler 80 | assert requests.post(url_txt2img, json=simple_txt2img_request).status_code == 200 81 | 82 | 83 | def test_txt2img_multiple_batches_performed(url_txt2img, simple_txt2img_request): 84 | simple_txt2img_request["n_iter"] = 2 85 | assert requests.post(url_txt2img, json=simple_txt2img_request).status_code == 200 86 | 87 | 88 | def test_txt2img_batch_performed(url_txt2img, simple_txt2img_request): 89 | simple_txt2img_request["batch_size"] = 2 90 | assert requests.post(url_txt2img, json=simple_txt2img_request).status_code == 200 91 | -------------------------------------------------------------------------------- /test/test_utils.py: -------------------------------------------------------------------------------- 1 | import pytest 2 | import requests 3 | 4 | 5 | def test_options_write(base_url): 6 | url_options = f"{base_url}/sdapi/v1/options" 7 | response = requests.get(url_options) 8 | assert response.status_code == 200 9 | 10 | pre_value = response.json()["send_seed"] 11 | 12 | assert requests.post(url_options, json={'send_seed': (not pre_value)}).status_code == 200 13 | 14 | response = requests.get(url_options) 15 | assert response.status_code == 200 16 | assert response.json()['send_seed'] == (not pre_value) 17 | 18 | requests.post(url_options, json={"send_seed": pre_value}) 19 | 20 | 21 | @pytest.mark.parametrize("url", [ 22 | "sdapi/v1/cmd-flags", 23 | "sdapi/v1/samplers", 24 | "sdapi/v1/upscalers", 25 | "sdapi/v1/sd-models", 26 | "sdapi/v1/hypernetworks", 27 | "sdapi/v1/face-restorers", 28 | "sdapi/v1/realesrgan-models", 29 | "sdapi/v1/prompt-styles", 30 | "sdapi/v1/embeddings", 31 | ]) 32 | def test_get_api_url(base_url, url): 33 | assert requests.get(f"{base_url}/{url}").status_code == 200 34 | -------------------------------------------------------------------------------- /textual_inversion_templates/hypernetwork.txt: -------------------------------------------------------------------------------- 1 | a photo of a [filewords] 2 | a rendering of a [filewords] 3 | a cropped photo of the [filewords] 4 | the photo of a [filewords] 5 | a photo of a clean [filewords] 6 | a photo of a dirty [filewords] 7 | a dark photo of the [filewords] 8 | a photo of my [filewords] 9 | a photo of the cool [filewords] 10 | a close-up photo of a [filewords] 11 | a bright photo of the [filewords] 12 | a cropped photo of a [filewords] 13 | a photo of the [filewords] 14 | a good photo of the [filewords] 15 | a photo of one [filewords] 16 | a close-up photo of the [filewords] 17 | 
a rendition of the [filewords] 18 | a photo of the clean [filewords] 19 | a rendition of a [filewords] 20 | a photo of a nice [filewords] 21 | a good photo of a [filewords] 22 | a photo of the nice [filewords] 23 | a photo of the small [filewords] 24 | a photo of the weird [filewords] 25 | a photo of the large [filewords] 26 | a photo of a cool [filewords] 27 | a photo of a small [filewords] 28 | -------------------------------------------------------------------------------- /textual_inversion_templates/none.txt: -------------------------------------------------------------------------------- 1 | picture 2 | -------------------------------------------------------------------------------- /textual_inversion_templates/style.txt: -------------------------------------------------------------------------------- 1 | a painting, art by [name] 2 | a rendering, art by [name] 3 | a cropped painting, art by [name] 4 | the painting, art by [name] 5 | a clean painting, art by [name] 6 | a dirty painting, art by [name] 7 | a dark painting, art by [name] 8 | a picture, art by [name] 9 | a cool painting, art by [name] 10 | a close-up painting, art by [name] 11 | a bright painting, art by [name] 12 | a cropped painting, art by [name] 13 | a good painting, art by [name] 14 | a close-up painting, art by [name] 15 | a rendition, art by [name] 16 | a nice painting, art by [name] 17 | a small painting, art by [name] 18 | a weird painting, art by [name] 19 | a large painting, art by [name] 20 | -------------------------------------------------------------------------------- /textual_inversion_templates/style_filewords.txt: -------------------------------------------------------------------------------- 1 | a painting of [filewords], art by [name] 2 | a rendering of [filewords], art by [name] 3 | a cropped painting of [filewords], art by [name] 4 | the painting of [filewords], art by [name] 5 | a clean painting of [filewords], art by [name] 6 | a dirty painting of [filewords], art by [name] 7 | a dark painting of [filewords], art by [name] 8 | a picture of [filewords], art by [name] 9 | a cool painting of [filewords], art by [name] 10 | a close-up painting of [filewords], art by [name] 11 | a bright painting of [filewords], art by [name] 12 | a cropped painting of [filewords], art by [name] 13 | a good painting of [filewords], art by [name] 14 | a close-up painting of [filewords], art by [name] 15 | a rendition of [filewords], art by [name] 16 | a nice painting of [filewords], art by [name] 17 | a small painting of [filewords], art by [name] 18 | a weird painting of [filewords], art by [name] 19 | a large painting of [filewords], art by [name] 20 | -------------------------------------------------------------------------------- /textual_inversion_templates/subject.txt: -------------------------------------------------------------------------------- 1 | a photo of a [name] 2 | a rendering of a [name] 3 | a cropped photo of the [name] 4 | the photo of a [name] 5 | a photo of a clean [name] 6 | a photo of a dirty [name] 7 | a dark photo of the [name] 8 | a photo of my [name] 9 | a photo of the cool [name] 10 | a close-up photo of a [name] 11 | a bright photo of the [name] 12 | a cropped photo of a [name] 13 | a photo of the [name] 14 | a good photo of the [name] 15 | a photo of one [name] 16 | a close-up photo of the [name] 17 | a rendition of the [name] 18 | a photo of the clean [name] 19 | a rendition of a [name] 20 | a photo of a nice [name] 21 | a good photo of a [name] 22 | a photo of the nice [name] 23 | a 
photo of the small [name] 24 | a photo of the weird [name] 25 | a photo of the large [name] 26 | a photo of a cool [name] 27 | a photo of a small [name] 28 | -------------------------------------------------------------------------------- /textual_inversion_templates/subject_filewords.txt: -------------------------------------------------------------------------------- 1 | a photo of a [name], [filewords] 2 | a rendering of a [name], [filewords] 3 | a cropped photo of the [name], [filewords] 4 | the photo of a [name], [filewords] 5 | a photo of a clean [name], [filewords] 6 | a photo of a dirty [name], [filewords] 7 | a dark photo of the [name], [filewords] 8 | a photo of my [name], [filewords] 9 | a photo of the cool [name], [filewords] 10 | a close-up photo of a [name], [filewords] 11 | a bright photo of the [name], [filewords] 12 | a cropped photo of a [name], [filewords] 13 | a photo of the [name], [filewords] 14 | a good photo of the [name], [filewords] 15 | a photo of one [name], [filewords] 16 | a close-up photo of the [name], [filewords] 17 | a rendition of the [name], [filewords] 18 | a photo of the clean [name], [filewords] 19 | a rendition of a [name], [filewords] 20 | a photo of a nice [name], [filewords] 21 | a good photo of a [name], [filewords] 22 | a photo of the nice [name], [filewords] 23 | a photo of the small [name], [filewords] 24 | a photo of the weird [name], [filewords] 25 | a photo of the large [name], [filewords] 26 | a photo of a cool [name], [filewords] 27 | a photo of a small [name], [filewords] 28 | -------------------------------------------------------------------------------- /webui-macos-env.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | #################################################################### 3 | # macOS defaults # 4 | # Please modify webui-user.sh to change these instead of this file # 5 | #################################################################### 6 | 7 | if [[ -x "$(command -v python3.10)" ]] 8 | then 9 | python_cmd="python3.10" 10 | fi 11 | 12 | export install_dir="$HOME" 13 | export COMMANDLINE_ARGS="--skip-torch-cuda-test --upcast-sampling --no-half-vae --use-cpu interrogate" 14 | export TORCH_COMMAND="pip install torch==2.0.1 torchvision==0.15.2" 15 | export PYTORCH_ENABLE_MPS_FALLBACK=1 16 | 17 | #################################################################### 18 | -------------------------------------------------------------------------------- /webui-user.bat: -------------------------------------------------------------------------------- 1 | 2 | @echo off 3 | 4 | set PYTHON= 5 | set GIT= 6 | set VENV_DIR= 7 | set COMMANDLINE_ARGS=--skip-torch-cuda-test --precision full --no-half 8 | set PYTORCH_TRACING_MODE=TORCHFX 9 | set USE_OPENVINO=1 10 | 11 | call webui.bat 12 | 13 | -------------------------------------------------------------------------------- /webui-user.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | ######################################################### 3 | # Uncomment and change the variables below to your need:# 4 | ######################################################### 5 | 6 | # Install directory without trailing slash 7 | #install_dir="/home/$(whoami)" 8 | 9 | # Name of the subdirectory 10 | #clone_dir="stable-diffusion-webui" 11 | 12 | # Commandline arguments for webui.py, for example: export COMMANDLINE_ARGS="--medvram --opt-split-attention" 13 | #export COMMANDLINE_ARGS="" 14 | 15 | 
# python3 executable 16 | #python_cmd="python3" 17 | 18 | # git executable 19 | #export GIT="git" 20 | 21 | # python3 venv without trailing slash (defaults to ${install_dir}/${clone_dir}/venv) 22 | #venv_dir="venv" 23 | 24 | # script to launch to start the app 25 | #export LAUNCH_SCRIPT="launch.py" 26 | 27 | # install command for torch 28 | #export TORCH_COMMAND="pip install torch==1.12.1+cu113 --extra-index-url https://download.pytorch.org/whl/cu113" 29 | 30 | # Requirements file to use for stable-diffusion-webui 31 | #export REQS_FILE="requirements_versions.txt" 32 | 33 | # Fixed git repos 34 | #export K_DIFFUSION_PACKAGE="" 35 | #export GFPGAN_PACKAGE="" 36 | 37 | # Fixed git commits 38 | #export STABLE_DIFFUSION_COMMIT_HASH="" 39 | #export CODEFORMER_COMMIT_HASH="" 40 | #export BLIP_COMMIT_HASH="" 41 | 42 | # Uncomment to enable accelerated launch 43 | #export ACCELERATE="True" 44 | 45 | # Uncomment to disable TCMalloc 46 | #export NO_TCMALLOC="True" 47 | 48 | ########################################### 49 | -------------------------------------------------------------------------------- /webui.bat: -------------------------------------------------------------------------------- 1 | @echo off 2 | 3 | if not defined PYTHON (set PYTHON=python) 4 | if not defined VENV_DIR (set "VENV_DIR=%~dp0%venv") 5 | 6 | set SD_WEBUI_RESTART=tmp/restart 7 | set ERROR_REPORTING=FALSE 8 | 9 | mkdir tmp 2>NUL 10 | 11 | %PYTHON% -c "" >tmp/stdout.txt 2>tmp/stderr.txt 12 | if %ERRORLEVEL% == 0 goto :check_pip 13 | echo Couldn't launch python 14 | goto :show_stdout_stderr 15 | 16 | :check_pip 17 | %PYTHON% -mpip --help >tmp/stdout.txt 2>tmp/stderr.txt 18 | if %ERRORLEVEL% == 0 goto :start_venv 19 | if "%PIP_INSTALLER_LOCATION%" == "" goto :show_stdout_stderr 20 | %PYTHON% "%PIP_INSTALLER_LOCATION%" >tmp/stdout.txt 2>tmp/stderr.txt 21 | if %ERRORLEVEL% == 0 goto :start_venv 22 | echo Couldn't install pip 23 | goto :show_stdout_stderr 24 | 25 | :start_venv 26 | if ["%VENV_DIR%"] == ["-"] goto :skip_venv 27 | if ["%SKIP_VENV%"] == ["1"] goto :skip_venv 28 | 29 | dir "%VENV_DIR%\Scripts\Python.exe" >tmp/stdout.txt 2>tmp/stderr.txt 30 | if %ERRORLEVEL% == 0 goto :activate_venv 31 | 32 | for /f "delims=" %%i in ('CALL %PYTHON% -c "import sys; print(sys.executable)"') do set PYTHON_FULLNAME="%%i" 33 | echo Creating venv in directory %VENV_DIR% using python %PYTHON_FULLNAME% 34 | %PYTHON_FULLNAME% -m venv "%VENV_DIR%" >tmp/stdout.txt 2>tmp/stderr.txt 35 | if %ERRORLEVEL% == 0 goto :activate_venv 36 | echo Unable to create venv in directory "%VENV_DIR%" 37 | goto :show_stdout_stderr 38 | 39 | :activate_venv 40 | set PYTHON="%VENV_DIR%\Scripts\Python.exe" 41 | echo venv %PYTHON% 42 | 43 | :skip_venv 44 | if [%ACCELERATE%] == ["True"] goto :accelerate 45 | goto :launch 46 | 47 | :accelerate 48 | echo Checking for accelerate 49 | set ACCELERATE="%VENV_DIR%\Scripts\accelerate.exe" 50 | if EXIST %ACCELERATE% goto :accelerate_launch 51 | 52 | :launch 53 | %PYTHON% launch.py %* 54 | if EXIST tmp/restart goto :skip_venv 55 | pause 56 | exit /b 57 | 58 | :accelerate_launch 59 | echo Accelerating 60 | %ACCELERATE% launch --num_cpu_threads_per_process=6 launch.py 61 | if EXIST tmp/restart goto :skip_venv 62 | pause 63 | exit /b 64 | 65 | :show_stdout_stderr 66 | 67 | echo. 68 | echo exit code: %errorlevel% 69 | 70 | for /f %%i in ("tmp\stdout.txt") do set size=%%~zi 71 | if %size% equ 0 goto :show_stderr 72 | echo. 
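rem (annotation added to this dump: the blocks around this point print tmp\stdout.txt and
rem tmp\stderr.txt when launch fails; "for /f %%i in ("tmp\stdout.txt") do set size=%%~zi"
rem uses the %%~zi modifier to expand the matched file name to its size in bytes, so empty
rem capture files are skipped rather than printed)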
73 | echo stdout: 74 | type tmp\stdout.txt 75 | 76 | :show_stderr 77 | for /f %%i in ("tmp\stderr.txt") do set size=%%~zi 78 | if %size% equ 0 goto :endofscript 79 | echo. 80 | echo stderr: 81 | type tmp\stderr.txt 82 | 83 | :endofscript 84 | 85 | echo. 86 | echo Launch unsuccessful. Exiting. 87 | pause 88 | --------------------------------------------------------------------------------
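A minimal standalone client for the HTTP API exercised by the tests above: a sketch, assuming the server was started with the --api flag and is listening on the default http://127.0.0.1:7860 (the same base_url configured in pyproject.toml).

import base64

import requests

# same shape as simple_txt2img_request in test/test_txt2img.py, trimmed to a few fields
payload = {"prompt": "example prompt", "steps": 3, "width": 64, "height": 64}

response = requests.post("http://127.0.0.1:7860/sdapi/v1/txt2img", json=payload)
response.raise_for_status()

# the endpoint returns base64-encoded PNGs in the "images" field of the JSON body
for i, image_b64 in enumerate(response.json()["images"]):
    with open(f"txt2img_out_{i}.png", "wb") as f:
        f.write(base64.b64decode(image_b64))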