├── .gitignore ├── LICENSE ├── README.md ├── backends ├── .gitignore ├── model_converter │ ├── .gitignore │ ├── constants.py │ ├── convert_model.py │ ├── fake_torch.py │ ├── safetensor_wrapper.py │ ├── sd_shapes.py │ ├── sd_shapes_consts.py │ └── tdict.py ├── stable_diffusion │ ├── .gitignore │ ├── WEIGHTS_LICENSE │ ├── applets │ │ ├── __init__.py │ │ ├── applets.py │ │ ├── form_utils.py │ │ ├── frame_interpolator.py │ │ ├── options.py │ │ └── sample_applet.py │ ├── diffusionbee_backend.py │ ├── fake_interface │ │ ├── __init__.py │ │ └── interface.py │ ├── requirements.txt │ └── stable_diffusion │ │ ├── __init__.py │ │ ├── clip_tokenizer │ │ ├── __init__.py │ │ ├── bpe_simple_vocab_16e6.txt.gz │ │ └── xl_text_projection_mat.npy │ │ ├── control_processors │ │ ├── __init__.py │ │ ├── process_body_pose.py │ │ ├── process_lineart.py │ │ └── process_midas_depth.py │ │ ├── plugins │ │ ├── __init__.py │ │ ├── base_plugin.py │ │ ├── controlnet.py │ │ ├── inpainting.py │ │ ├── plugin_system.py │ │ └── sd15_inpainting.py │ │ ├── schedulers │ │ ├── __init__.py │ │ ├── get_scheduler.py │ │ ├── k_euler.py │ │ ├── k_euler_ancestral.py │ │ ├── karras_scheduler.py │ │ ├── scheduler_mixin.py │ │ ├── scheduling_ddim.py │ │ ├── scheduling_lms_discrete.py │ │ └── scheduling_pndm.py │ │ ├── sd_run.py │ │ ├── stable_diffusion.py │ │ └── utils │ │ ├── __init__.py │ │ ├── extra_model_utils.py │ │ ├── image_preprocess.py │ │ ├── logging.py │ │ ├── model_interface.py │ │ ├── stdin_input.py │ │ └── utils.py └── stable_diffusion_tf_models │ ├── .gitignore │ ├── WEIGHTS_LICENSE │ ├── autoencoder_kl.py │ ├── clip_encoder.py │ ├── clip_encoder_v2.py │ ├── controlnet.py │ ├── diffusion_model.py │ ├── group_norm.py │ ├── interface.py │ ├── layers.py │ └── mapping_constants.py ├── docs ├── DOCUMENTATION.md └── Running_from_source.md ├── electron_app ├── .gitignore ├── README.md ├── babel.config.js ├── build │ ├── Icon-1024.png │ ├── Icon-128.png │ ├── Icon-256.png │ ├── Icon-32.png │ ├── 
Icon-512.png │ ├── Icon-64.png │ └── entitlements.mac.plist ├── download.data ├── package-lock.json ├── package.json ├── public │ ├── favicon.ico │ └── index.html ├── src │ ├── App.vue │ ├── AssetsManager.vue │ ├── SDManager.vue │ ├── StableDiffusion.vue │ ├── assets │ │ ├── app_logo.svg │ │ ├── blank_project.svg │ │ ├── css │ │ │ └── theme.css │ │ ├── imgs │ │ │ ├── blank_illus4.png │ │ │ ├── blank_illus4_dark.png │ │ │ └── page_icon_imgs │ │ │ │ ├── default.png │ │ │ │ ├── default1.png │ │ │ │ ├── deforum.gif │ │ │ │ ├── history.png │ │ │ │ ├── img2img.png │ │ │ │ ├── inpainting.png │ │ │ │ ├── inpainting_1.png │ │ │ │ ├── interpolate.gif │ │ │ │ ├── models.png │ │ │ │ ├── models0.png │ │ │ │ ├── outpainting.png │ │ │ │ ├── settings.png │ │ │ │ ├── training.png │ │ │ │ ├── txt2img.png │ │ │ │ └── upscale.png │ │ ├── logo_icon_raw.svg │ │ ├── logo_splash.png │ │ ├── logo_splash_dark.png │ │ ├── notification.mp3 │ │ ├── nsfw.png │ │ └── sample.png │ ├── background.js │ ├── bridge.js │ ├── clip_tokeniser │ │ ├── bpe_simple_vocab_16e6.mjs │ │ ├── clip_encoder.js │ │ └── html_entities.js │ ├── components │ │ ├── AppletPage.vue │ │ ├── BasicSDApplet.vue │ │ ├── DownloadButton.vue │ │ ├── GenerationGallery.vue │ │ ├── MainToolbar.vue │ │ ├── PagesRouter.vue │ │ ├── SDImageGenerationApplet.vue │ │ ├── _REM_ImageItem.vue │ │ └── image_menu_functions.js │ ├── components_bare │ │ ├── ApplicationFrame.vue │ │ ├── CircleProgress.vue │ │ ├── Form.vue │ │ ├── GalleryImage.vue │ │ ├── GalleryPane.vue │ │ ├── ImageCanvas.vue │ │ ├── LoaderModal.vue │ │ ├── SplashScreen.vue │ │ ├── TwoColAppletLayout.vue │ │ ├── empty_component.vue │ │ ├── icon_library.js │ │ └── inputform │ │ │ ├── Accordian.vue │ │ │ ├── BetterSelector.vue │ │ │ ├── Checkbox.vue │ │ │ ├── Dropdown.vue │ │ │ ├── FilePathTextBox.vue │ │ │ ├── FormInputMixin.vue │ │ │ ├── ImageInput.vue │ │ │ ├── InputWithDesc.vue │ │ │ ├── OutputImage.vue │ │ │ ├── OutputText.vue │ │ │ ├── ResolveInputComponent.vue │ │ │ ├── 
Slider.vue │ │ │ ├── Textarea.vue │ │ │ └── Textbox.vue │ ├── fake_backend.py │ ├── forms │ │ ├── sample_ext_applet.json │ │ └── sd_options_adv.json │ ├── init_vue_libs.js │ ├── main.js │ ├── main_demoui.js │ ├── menu_template.js │ ├── modifiers.json │ ├── native_functions.js │ ├── native_functions_vue_bridge.js │ ├── pages │ │ ├── BlankPage.vue │ │ ├── ContactUs.vue │ │ ├── History.vue │ │ ├── Homepage.vue │ │ ├── Img2Img.vue │ │ ├── Inpainting.vue │ │ ├── Logs.vue │ │ ├── ModelStore.vue │ │ ├── PostProcessImage.vue │ │ ├── Settings.vue │ │ ├── Training.vue │ │ └── Txt2Img.vue │ ├── preload.js │ ├── py_vue_bridge.js │ ├── stable_diffusion_bridge.js │ ├── utils.js │ └── utils │ │ ├── controlnet_frontend_utils.js │ │ └── in_out_paint_utils.js └── vue.config.js └── package-lock.json /.gitignore: -------------------------------------------------------------------------------- 1 | build_test/ 2 | optimized_stable_diffusion/ 3 | 4 | 5 | HF_weights/ 6 | outputs/ 7 | 8 | # Byte-compiled / optimized / DLL files 9 | __pycache__/ 10 | *.py[cod] 11 | *$py.class 12 | 13 | # C extensions 14 | *.so 15 | 16 | # Distribution / packaging 17 | .Python 18 | develop-eggs/ 19 | dist/ 20 | downloads/ 21 | eggs/ 22 | .eggs/ 23 | lib/ 24 | lib64/ 25 | parts/ 26 | sdist/ 27 | var/ 28 | wheels/ 29 | share/python-wheels/ 30 | *.egg-info/ 31 | .installed.cfg 32 | *.egg 33 | MANIFEST 34 | 35 | # PyInstaller 36 | # Usually these files are written by a python script from a template 37 | # before PyInstaller builds the exe, so as to inject date/other infos into it. 
38 | *.manifest 39 | *.spec 40 | 41 | # Installer logs 42 | pip-log.txt 43 | pip-delete-this-directory.txt 44 | 45 | # Unit test / coverage reports 46 | htmlcov/ 47 | .tox/ 48 | .nox/ 49 | .coverage 50 | .coverage.* 51 | .cache 52 | nosetests.xml 53 | coverage.xml 54 | *.cover 55 | *.py,cover 56 | .hypothesis/ 57 | .pytest_cache/ 58 | cover/ 59 | 60 | # Translations 61 | *.mo 62 | *.pot 63 | 64 | # Django stuff: 65 | *.log 66 | local_settings.py 67 | db.sqlite3 68 | db.sqlite3-journal 69 | 70 | # Flask stuff: 71 | instance/ 72 | .webassets-cache 73 | 74 | # Scrapy stuff: 75 | .scrapy 76 | 77 | # Sphinx documentation 78 | docs/_build/ 79 | 80 | # PyBuilder 81 | .pybuilder/ 82 | target/ 83 | 84 | # Jupyter Notebook 85 | .ipynb_checkpoints 86 | 87 | # IPython 88 | profile_default/ 89 | ipython_config.py 90 | 91 | # pyenv 92 | # For a library or package, you might want to ignore these files since the code is 93 | # intended to run in multiple environments; otherwise, check them in: 94 | # .python-version 95 | 96 | # pipenv 97 | # According to pypa/pipenv#598, it is recommended to include Pipfile.lock in version control. 98 | # However, in case of collaboration, if having platform-specific dependencies or dependencies 99 | # having no cross-platform support, pipenv may install dependencies that don't work, or not 100 | # install all needed dependencies. 101 | #Pipfile.lock 102 | 103 | # poetry 104 | # Similar to Pipfile.lock, it is generally recommended to include poetry.lock in version control. 105 | # This is especially recommended for binary packages to ensure reproducibility, and is more 106 | # commonly ignored for libraries. 107 | # https://python-poetry.org/docs/basic-usage/#commit-your-poetrylock-file-to-version-control 108 | #poetry.lock 109 | 110 | # PEP 582; used by e.g. 
github.com/David-OConnor/pyflow 111 | __pypackages__/ 112 | 113 | # Celery stuff 114 | celerybeat-schedule 115 | celerybeat.pid 116 | 117 | # SageMath parsed files 118 | *.sage.py 119 | 120 | # Environments 121 | .env 122 | .venv 123 | env/ 124 | venv/ 125 | ENV/ 126 | env.bak/ 127 | venv.bak/ 128 | 129 | # Spyder project settings 130 | .spyderproject 131 | .spyproject 132 | 133 | # Rope project settings 134 | .ropeproject 135 | 136 | # mkdocs documentation 137 | /site 138 | 139 | # mypy 140 | .mypy_cache/ 141 | .dmypy.json 142 | dmypy.json 143 | 144 | # Pyre type checker 145 | .pyre/ 146 | 147 | # pytype static type analyzer 148 | .pytype/ 149 | 150 | # Cython debug symbols 151 | cython_debug/ 152 | 153 | # PyCharm 154 | # JetBrains specific template is maintainted in a separate JetBrains.gitignore that can 155 | # be found at https://github.com/github/gitignore/blob/main/Global/JetBrains.gitignore 156 | # and can be added to the global gitignore or merged into this file. For a more nuclear 157 | # option (not recommended) you can uncomment the following to ignore the entire idea folder. 
158 | #.idea/ 159 | 160 | 161 | # General 162 | .DS_Store 163 | .AppleDouble 164 | .LSOverride 165 | 166 | # Icon must end with two \r 167 | Icon 168 | 169 | # Thumbnails 170 | ._* 171 | 172 | # Files that might appear in the root of a volume 173 | .DocumentRevisions-V100 174 | .fseventsd 175 | .Spotlight-V100 176 | .TemporaryItems 177 | .Trashes 178 | .VolumeIcon.icns 179 | .com.apple.timemachine.donotpresent 180 | 181 | # Directories potentially created on remote AFP share 182 | .AppleDB 183 | .AppleDesktop 184 | Network Trash Folder 185 | Temporary Items 186 | .apdisk 187 | 188 | 189 | 190 | pretrained_weights/ 191 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | # Diffusion Bee - Stable Diffusion GUI App for MacOS 2 | [![Twitter](https://img.shields.io/twitter/url.svg?label=Follow%20%40divamgupta&style=social&url=https%3A%2F%2Ftwitter.com%2Fdivamgupta)](https://twitter.com/divamgupta) 3 | 4 | 5 | 6 | ### Diffusion Bee is the easiest way to run Stable Diffusion locally on your Intel / M1 Mac. Comes with a one-click installer. No dependencies or technical knowledge needed. 7 | 8 | * Runs locally on your computer no data is sent to the cloud ( other than request to download the weights or unless you chose to upload an image ). 9 | * *If you like Diffusion Bee, consider checking https://Liner.ai , a one-click tool to train machine learning models* 10 | 11 |
12 | 13 | [![Download](https://user-images.githubusercontent.com/1890549/189538422-52d50488-c1fa-4924-bec6-186c9e0f307b.png)](https://diffusionbee.com/) 14 | 15 | Download at https://diffusionbee.com/ 16 | 17 | 18 |
19 | 20 | For prompt ideas visit https://arthub.ai 21 | 22 | Join discord server : https://discord.gg/t6rC5RaJQn 23 | 24 | 25 | ## Features 26 | * Full data privacy - nothing is sent to the cloud ( unless you chose to upload an image ) 27 | * Clean and easy to use UI with one-click installer 28 | * Image to image 29 | * Supported models : - SD 1.x, SD 2.x, SD XL, Inpainting, ControlNet, LoRA 30 | * Download models from the app 31 | * In-painting 32 | * Out-painting 33 | * Generation history 34 | * Upscaling images 35 | * Multiple image sizes 36 | * Optimized for M1/M2 Chips 37 | * Runs locally on your computer 38 | * Negative prompts 39 | * Advanced prompt options 40 | * ControlNet 41 | 42 | 43 | ## How to use 44 | 45 | 1) Download and start the application 46 | 47 | ![image](https://user-images.githubusercontent.com/1890549/198916443-c6a2e40a-3d1e-4000-882d-993aa1941391.png) 48 | 49 | 50 | 2) Enter a prompt and click generate 51 | 52 | Text to image: 53 | 54 | ![Screenshot 2023-12-13 at 1 19 38 PM](https://github.com/divamgupta/diffusionbee-stable-diffusion-ui/assets/1890549/3ee8e70b-ea17-4b26-8069-6d8c65aaa729) 55 | 56 | 57 | 58 | Image to image: 59 | 60 | ![Screenshot 2023-12-13 at 1 14 35 PM](https://github.com/divamgupta/diffusionbee-stable-diffusion-ui/assets/1890549/ceb4b799-5003-47c6-a689-1a5dcd110935) 61 | 62 | Multiple Apps: 63 | 64 | ![Screenshot 2023-12-13 at 1 11 14 PM](https://github.com/divamgupta/diffusionbee-stable-diffusion-ui/assets/1890549/5deb2129-b1c7-4f25-9718-754aa9a96008) 65 | 66 | 67 | Image to image with mask: 68 | 69 | ![image](https://user-images.githubusercontent.com/1890549/198915075-dba8e90f-47f6-4915-87b5-fd09c17a58e5.png) 70 | 71 | Inpainting: 72 | 73 | ![image](https://user-images.githubusercontent.com/1890549/198915349-6261dc9e-c24d-4fb0-98a2-973b429914b8.png) 74 | 75 | ![image](https://user-images.githubusercontent.com/1890549/198915395-71d4d278-2434-4e21-aea6-42988593941a.png) 76 | 77 | Advanced AI Canvas: 78 | 79 | 
![Screenshot 2023-12-13 at 1 13 59 PM](https://github.com/divamgupta/diffusionbee-stable-diffusion-ui/assets/1890549/f01273d3-6a01-4498-8d11-5f46b9946213) 80 | 81 | 82 | ![image](https://user-images.githubusercontent.com/1890549/198916091-62872915-af1d-4553-b657-934c1c8c7aca.png) 83 | 84 | ControlNet: 85 | 86 | ![Screenshot 2023-12-13 at 1 12 43 PM](https://github.com/divamgupta/diffusionbee-stable-diffusion-ui/assets/1890549/55f8d6b0-2f18-4706-9771-999f379a8e4d) 87 | 88 | ![Screenshot 2023-12-13 at 1 12 22 PM](https://github.com/divamgupta/diffusionbee-stable-diffusion-ui/assets/1890549/53f5e3ef-6398-4c43-aeb6-0f1ded052a9a) 89 | 90 | 91 | Download Models: 92 | 93 | ![Screenshot 2023-12-13 at 1 13 08 PM](https://github.com/divamgupta/diffusionbee-stable-diffusion-ui/assets/1890549/4202ba4e-f33f-47e7-bd27-26b1b93142db) 94 | 95 | 96 | History: 97 | 98 | ![image](https://user-images.githubusercontent.com/1890549/198916678-9061829c-69da-4eee-b28d-1989e01c11e0.png) 99 | 100 | 101 | ### To learn more, visit the [documentation](https://github.com/divamgupta/diffusionbee-stable-diffusion-ui/blob/master/docs/DOCUMENTATION.md). 102 | 103 | ## Requirements 104 | * Mac with Intel or M1/M2 CPU 105 | * For Intel : MacOS 12.3.1 or later 106 | * For M1/M2 : MacOS 11.0.0 or later 107 | 108 | License : Stable Diffusion is released under the CreativeML OpenRAIL M license : https://github.com/CompVis/stable-diffusion/blob/main/LICENSE 109 | Diffusion Bee is just a GUI wrapper on top of Stable Diffusion, so all the term of Stable Diffusion are applied on the outputs. 
110 | 111 | 112 | 113 | References 114 | 1) https://github.com/CompVis/stable-diffusion 115 | 2) https://github.com/madebyollin/maple-diffusion 116 | 3) https://github.com/divamgupta/stable-diffusion-tensorflow 117 | 4) https://github.com/liuliu/swift-diffusion (big thanks to Liu Liu) 118 | 5) https://github.com/huggingface/diffusers 119 | 120 | 121 | -------------------------------------------------------------------------------- /backends/.gitignore: -------------------------------------------------------------------------------- 1 | realesrgan_ncnn 2 | realesrgan_ncnn/ 3 | -------------------------------------------------------------------------------- /backends/model_converter/.gitignore: -------------------------------------------------------------------------------- 1 | build_test/ 2 | optimized_stable_diffusion/ 3 | 4 | convert_sd_model_bin 5 | ./convert_sd_model_bin 6 | convert_sd_model_bin/ 7 | ./convert_sd_model_bin/ 8 | 9 | HF_weights/ 10 | outputs/ 11 | 12 | # Byte-compiled / optimized / DLL files 13 | __pycache__/ 14 | *.py[cod] 15 | *$py.class 16 | 17 | # C extensions 18 | *.so 19 | 20 | # Distribution / packaging 21 | .Python 22 | build/ 23 | develop-eggs/ 24 | dist/ 25 | downloads/ 26 | eggs/ 27 | .eggs/ 28 | lib/ 29 | lib64/ 30 | parts/ 31 | sdist/ 32 | var/ 33 | wheels/ 34 | share/python-wheels/ 35 | *.egg-info/ 36 | .installed.cfg 37 | *.egg 38 | MANIFEST 39 | 40 | # PyInstaller 41 | # Usually these files are written by a python script from a template 42 | # before PyInstaller builds the exe, so as to inject date/other infos into it. 
43 | *.manifest 44 | *.spec 45 | 46 | # Installer logs 47 | pip-log.txt 48 | pip-delete-this-directory.txt 49 | 50 | # Unit test / coverage reports 51 | htmlcov/ 52 | .tox/ 53 | .nox/ 54 | .coverage 55 | .coverage.* 56 | .cache 57 | nosetests.xml 58 | coverage.xml 59 | *.cover 60 | *.py,cover 61 | .hypothesis/ 62 | .pytest_cache/ 63 | cover/ 64 | 65 | # Translations 66 | *.mo 67 | *.pot 68 | 69 | # Django stuff: 70 | *.log 71 | local_settings.py 72 | db.sqlite3 73 | db.sqlite3-journal 74 | 75 | # Flask stuff: 76 | instance/ 77 | .webassets-cache 78 | 79 | # Scrapy stuff: 80 | .scrapy 81 | 82 | # Sphinx documentation 83 | docs/_build/ 84 | 85 | # PyBuilder 86 | .pybuilder/ 87 | target/ 88 | 89 | # Jupyter Notebook 90 | .ipynb_checkpoints 91 | 92 | # IPython 93 | profile_default/ 94 | ipython_config.py 95 | 96 | # pyenv 97 | # For a library or package, you might want to ignore these files since the code is 98 | # intended to run in multiple environments; otherwise, check them in: 99 | # .python-version 100 | 101 | # pipenv 102 | # According to pypa/pipenv#598, it is recommended to include Pipfile.lock in version control. 103 | # However, in case of collaboration, if having platform-specific dependencies or dependencies 104 | # having no cross-platform support, pipenv may install dependencies that don't work, or not 105 | # install all needed dependencies. 106 | #Pipfile.lock 107 | 108 | # poetry 109 | # Similar to Pipfile.lock, it is generally recommended to include poetry.lock in version control. 110 | # This is especially recommended for binary packages to ensure reproducibility, and is more 111 | # commonly ignored for libraries. 112 | # https://python-poetry.org/docs/basic-usage/#commit-your-poetrylock-file-to-version-control 113 | #poetry.lock 114 | 115 | # PEP 582; used by e.g. 
github.com/David-OConnor/pyflow 116 | __pypackages__/ 117 | 118 | # Celery stuff 119 | celerybeat-schedule 120 | celerybeat.pid 121 | 122 | # SageMath parsed files 123 | *.sage.py 124 | 125 | # Environments 126 | .env 127 | .venv 128 | env/ 129 | venv/ 130 | ENV/ 131 | env.bak/ 132 | venv.bak/ 133 | 134 | # Spyder project settings 135 | .spyderproject 136 | .spyproject 137 | 138 | # Rope project settings 139 | .ropeproject 140 | 141 | # mkdocs documentation 142 | /site 143 | 144 | # mypy 145 | .mypy_cache/ 146 | .dmypy.json 147 | dmypy.json 148 | 149 | # Pyre type checker 150 | .pyre/ 151 | 152 | # pytype static type analyzer 153 | .pytype/ 154 | 155 | # Cython debug symbols 156 | cython_debug/ 157 | 158 | # PyCharm 159 | # JetBrains specific template is maintainted in a separate JetBrains.gitignore that can 160 | # be found at https://github.com/github/gitignore/blob/main/Global/JetBrains.gitignore 161 | # and can be added to the global gitignore or merged into this file. For a more nuclear 162 | # option (not recommended) you can uncomment the following to ignore the entire idea folder. 
from safetensors import safe_open


class SafetensorWrapper:
    """Dict-like, read-mostly view over a .safetensors file.

    Tensors are loaded lazily from disk as numpy arrays on CPU; items
    assigned at runtime are kept in an in-memory overlay that shadows
    the on-disk tensors on lookup.
    """

    def __init__(self, fname):
        # framework="np" makes get_tensor() return numpy arrays.
        self.file = safe_open(fname, framework="np", device="cpu")
        self.new_items = {}

    def keys(self):
        # On-disk keys first, then runtime overrides.
        return list(self.file.keys()) + list(self.new_items.keys())

    def __contains__(self, k):
        return k in self.file.keys() or k in self.new_items

    def __getitem__(self, k):
        # Runtime overrides take precedence over the file contents.
        if k in self.new_items:
            return self.new_items[k]
        return self.file.get_tensor(k)

    def __setitem__(self, key, item):
        # Writes never touch the file; they only populate the overlay.
        self.new_items[key] = item

    def __iter__(self):
        return iter(self.keys())
def add_aux_shapes(d):
    """Augment a shape table *d* in place with derived/auxiliary entries.

    Adds:
    - a ``.bias_by_weight`` alias shape for every norm bias,
    - half-size ``._split_1/2`` shapes for feed-forward weights,
    - third-size ``._split_1/2/3`` shapes for 3072-row attention in_proj
      weights (the fused QKV projection of the open-CLIP text encoder),
    - constant zero/one vectors for multiples of 320 and 128.
    """
    for k in list(d.keys()):
        if '.norm' in k and '.bias' in k:
            d[k.replace(".bias" , ".bias_by_weight")] = d[k]

        if ".ff." in k:
            # Feed-forward weight is split into two equal halves (GEGLU-style).
            sh = list(d[k])
            sh[0] = sh[0] // 2
            sh = tuple(sh)
            d[k + "._split_1"] = sh
            d[k + "._split_2"] = sh

        elif "attn.in_proj" in k and d[k][0] == 3072 :
            # Fused QKV projection: split first dim into three equal parts.
            sh = list(d[k])
            sh[0] = sh[0] // 3
            sh = tuple(sh)
            d[k + "._split_1"] = sh
            d[k + "._split_2"] = sh
            d[k + "._split_3"] = sh

    # Constant vectors used as fillers by the converted model.
    for i in range(1,21):
        nn = 320*i
        d["zeros_"+str(nn)] = (nn,)
        d["ones_"+str(nn)] = (nn,)

        nn = 128*i
        d["zeros_"+str(nn)] = (nn,)
        d["ones_"+str(nn)] = (nn,)


# SD 1.x = UNet + VAE encoder/decoder + CLIP text encoder + scalar params.
sd_1x_shapes = {}
sd_1x_shapes.update(shapes_unet)
sd_1x_shapes.update(shapes_encoder)
sd_1x_shapes.update(shapes_decoder)
sd_1x_shapes.update(shapes_text_encoder)
sd_1x_shapes.update(shapes_params)

# Inpainting variant differs only in the first UNet conv, which takes
# 9 input channels (4 latent + 4 masked-latent + 1 mask) instead of 4.
sd_1x_inpaint_shapes = copy.deepcopy(sd_1x_shapes)
sd_1x_inpaint_shapes['model.diffusion_model.input_blocks.0.0.weight'] = [320, 9, 3, 3]

add_aux_shapes(sd_1x_shapes)
add_aux_shapes(sd_1x_inpaint_shapes)


# SD 2.x uses the v2 UNet and the open-CLIP text encoder.
sd_2x_shapes = {}
sd_2x_shapes.update(shapes_unet_v2)
sd_2x_shapes.update(shapes_encoder)
sd_2x_shapes.update(shapes_decoder)
sd_2x_shapes.update(text_encoder_open_clip)
sd_2x_shapes.update(shapes_params)


add_aux_shapes(sd_2x_shapes)


# Model-type string -> expected shape table (float16/float32 share shapes).
possible_model_shapes = {"SD_1x_float32": sd_1x_shapes ,
                         "SD_1x_inpaint_float32": sd_1x_inpaint_shapes,
                         "SD_1x_float16": sd_1x_shapes ,

                         "SD_2x_float16": sd_2x_shapes ,
                         "SD_2x_float32": sd_2x_shapes ,

                         "SD_1x_inpaint_float16": sd_1x_inpaint_shapes}

# Model-type string -> numeric container-format id written into the tdict.
ctdict_ids = {"SD_1x_float32": 12 ,
              "SD_1x_inpaint_float32": 13,
              "SD_2x_float32": 15 ,
              "SD_1x_float16": 1012 ,
              "SD_1x_inpaint_float16": 1013 ,
              "SD_1x_just_controlnet_16" : 1014,
              "SD_2x_float16": 1015 ,
              "sdxl_base_unet_f8_rest_f16" : 3031,
              }


# Keys that may appear in a state dict but are not model weights, so they
# are exempt from shape/dtype checks.
extra_keys = ['temb_coefficients_fp32' , 'temb_coefficients_fp16' , 'causal_mask' , 'aux_output_conv.weight' , 'aux_output_conv.bias', 'alphas_cumprod']


def are_shapes_matching(state_dict , template_shapes , name=None):
    """Return True iff every key of *template_shapes* exists in *state_dict*
    with a matching shape. A stored shape that equals the template minus a
    trailing (1, 1) is also accepted (conv weights saved squeezed).
    *name* is currently unused.
    """
    for k in template_shapes:
        if k not in state_dict:
            print("key", k , "not found in state_dict" , state_dict.keys())
            return False
        if tuple(template_shapes[k]) != tuple(state_dict[k].shape):
            if tuple(template_shapes[k]) != tuple(state_dict[k].shape) + (1,1):
                print("shape mismatch", k , tuple(template_shapes[k]) ,tuple(state_dict[k].shape) )
                return False

    return True


def get_dtype(state_dict, template_shapes ):
    """Return the most common float dtype (as a string, e.g. "float32")
    among the template-known weight tensors of *state_dict*.
    """
    c = Counter()

    for k in state_dict:
        if k in extra_keys:
            continue
        if k not in template_shapes:
            continue

        if 'float' in str(state_dict[k].dtype):
            c[ str(state_dict[k].dtype)] += 1
    print(c.most_common())
    return c.most_common(1)[0][0]


def check_shapes_float(state_dict, template_shapes ):
    """Assert that every template-known weight tensor has a float dtype."""
    for k in state_dict:
        if k in extra_keys:
            continue
        if k not in template_shapes:
            continue

        assert 'float' in str(state_dict[k].dtype )


def get_model_type(state_dict):
    """Identify the model family + dtype of *state_dict*.

    Returns e.g. "SD_1x_float16", or None when the shapes match no known
    template. The SD 1.x template is tried before the inpaint variant; an
    inpaint checkpoint fails the 1.x check on the first-conv shape, so the
    ordering is safe.
    """

    if are_shapes_matching(state_dict , sd_1x_shapes) :
        shapes = sd_1x_shapes
        mname = "SD_1x"
    elif are_shapes_matching(state_dict , sd_2x_shapes) :
        shapes = sd_2x_shapes
        mname = "SD_2x"
    elif are_shapes_matching(state_dict , sd_1x_inpaint_shapes) :
        shapes = sd_1x_inpaint_shapes
        mname = "SD_1x_inpaint"
    else:
        return None

    check_shapes_float(state_dict , shapes)
    c_dtype = get_dtype(state_dict , shapes)
    if c_dtype not in ["float32" , "float16"]:
        raise ValueError("The weights should either be float32 or float16, but these are " + c_dtype)

    return mname + "_" + c_dtype
.python-version 97 | 98 | # pipenv 99 | # According to pypa/pipenv#598, it is recommended to include Pipfile.lock in version control. 100 | # However, in case of collaboration, if having platform-specific dependencies or dependencies 101 | # having no cross-platform support, pipenv may install dependencies that don't work, or not 102 | # install all needed dependencies. 103 | #Pipfile.lock 104 | 105 | # poetry 106 | # Similar to Pipfile.lock, it is generally recommended to include poetry.lock in version control. 107 | # This is especially recommended for binary packages to ensure reproducibility, and is more 108 | # commonly ignored for libraries. 109 | # https://python-poetry.org/docs/basic-usage/#commit-your-poetrylock-file-to-version-control 110 | #poetry.lock 111 | 112 | # pdm 113 | # Similar to Pipfile.lock, it is generally recommended to include pdm.lock in version control. 114 | #pdm.lock 115 | # pdm stores project-wide configurations in .pdm.toml, but it is recommended to not include it 116 | # in version control. 117 | # https://pdm.fming.dev/#use-with-ide 118 | .pdm.toml 119 | 120 | # PEP 582; used by e.g. 
github.com/David-OConnor/pyflow and github.com/pdm-project/pdm 121 | __pypackages__/ 122 | 123 | # Celery stuff 124 | celerybeat-schedule 125 | celerybeat.pid 126 | 127 | # SageMath parsed files 128 | *.sage.py 129 | 130 | # Environments 131 | .env 132 | .venv 133 | env/ 134 | venv/ 135 | ENV/ 136 | env.bak/ 137 | venv.bak/ 138 | 139 | # Spyder project settings 140 | .spyderproject 141 | .spyproject 142 | 143 | # Rope project settings 144 | .ropeproject 145 | 146 | # mkdocs documentation 147 | /site 148 | 149 | # mypy 150 | .mypy_cache/ 151 | .dmypy.json 152 | dmypy.json 153 | 154 | # Pyre type checker 155 | .pyre/ 156 | 157 | # pytype static type analyzer 158 | .pytype/ 159 | 160 | # Cython debug symbols 161 | cython_debug/ 162 | 163 | # PyCharm 164 | # JetBrains specific template is maintained in a separate JetBrains.gitignore that can 165 | # be found at https://github.com/github/gitignore/blob/main/Global/JetBrains.gitignore 166 | # and can be added to the global gitignore or merged into this file. For a more nuclear 167 | # option (not recommended) you can uncomment the following to ignore the entire idea folder. 168 | #.idea/ 169 | 170 | 171 | -------------------------------------------------------------------------------- /backends/stable_diffusion/WEIGHTS_LICENSE: -------------------------------------------------------------------------------- 1 | The Stable Diffusion weights (not included as part of this repository) are under the following license (CreativeML Open RAIL-M): 2 | 3 | https://raw.githubusercontent.com/CompVis/stable-diffusion/main/LICENSE 4 | 5 | The weights content has not been modified as part of this reimplementation (only reformatted). 
# ---- applets.py ----------------------------------------------------------
import json

def update_state_raw(key , val ):
    # IPC with the Electron frontend: state updates are emitted on stdout
    # as "utds <key>___U_P_D_A_T_E___<json-value>" lines.
    print( "utds " + key + "___U_P_D_A_T_E___" + json.dumps(val))


# Registry of applet instances, keyed by applet_name.
registered_applets = {}

def register_applet(model_container, applet_cls):
    """Instantiate *applet_cls*, announce it to the frontend, and register it."""
    applet_name = applet_cls.applet_name
    applet = applet_cls()
    applet.init_applet(model_container)
    registered_applets[applet_name] = applet

def run_applet(applet_name , params_dict):
    """Dispatch a run request to a previously registered applet."""
    registered_applets[applet_name].run(params_dict)


class AppletBase:
    """Base class for external applets shown in the app's 'misc' section.

    Subclasses set the class attributes below and override run() (required)
    and get_input_form() (optional).
    """

    applet_name = None          # unique id used in state keys
    applet_title = None         # human-readable title shown in the UI
    is_stop_avail = False       # whether the UI shows a Stop button
    applet_description = ""
    applet_icon = "file"        # icon name, used unless applet_icon_fname is set
    applet_icon_fname = None    # optional image-file icon

    def run(self, params):
        # Must be overridden by concrete applets.
        raise NotImplementedError("base cls")

    def get_input_form(self):
        # Override to describe the applet's input form (see form_utils.py).
        return []

    def update_output( self, key , val ):
        self.update_state( "outputs." + key , val)

    def update_state(self , key , val ):
        # All applet state lives under registered_ext_applets.<name>.
        key = "registered_ext_applets." + self.applet_name + "." + key
        update_state_raw(key , val)

    def init_applet(self, model_container):
        """Store the shared model container and push the applet's static
        metadata (title, icon, form, ...) to the frontend state tree."""

        self.model_container = model_container

        update_state_raw( "registered_ext_applets." + self.applet_name , {})
        update_state_raw( "registered_ext_applets." + self.applet_name + ".id" , self.applet_name)
        update_state_raw( "registered_ext_applets." + self.applet_name + ".title" , self.applet_title)
        update_state_raw( "registered_ext_applets." + self.applet_name + ".description" , self.applet_description)
        update_state_raw( "registered_ext_applets." + self.applet_name + ".icon" , self.applet_icon )
        if self.applet_icon_fname is not None:
            update_state_raw( "registered_ext_applets." + self.applet_name + ".img_icon" , self.applet_icon_fname )
        update_state_raw( "registered_ext_applets." + self.applet_name + ".home_category" , "misc")
        update_state_raw( "registered_ext_applets." + self.applet_name + ".inputs" , self.get_input_form() )
        update_state_raw( "registered_ext_applets." + self.applet_name + ".outputs" , [] )
        update_state_raw( "registered_ext_applets." + self.applet_name + ".is_stop_avail" , self.is_stop_avail )


# ---- form_utils.py -------------------------------------------------------
# Helpers that build JSON form descriptors consumed by the Vue frontend.
# Each helper returns a dict with a random "id" for the wrapper component
# and the caller-supplied *id* for the actual input element.
import random

def get_textbox(id, type="str" , default="" , title="" , description=""):
    """Single-line text/number input wrapped in an InputWithDesc row."""

    # Maps the declared output type to the HTML input type.
    types = {"str" : "text" , "int" : "number" , "float" : "number"}

    return {
        "id": str(random.randint(0,19989999)),
        "component": "InputWithDesc",
        "title": title,
        "description":description ,
        "children": [
            {
                "id": id,
                "component": "Textbox",
                "placeholder" : "",
                "default_value" : default ,
                "type" : types[type],
                "output_type" :type ,
                "is_persistant" : False
            }
        ]
    }

def get_output_text(text):
    """Read-only text output element."""
    return {
        "id": str(random.randint(0,19989999)),
        "component": "OutputText",
        "text" : text
    }

def get_output_img(img_path, save_ext='.png' , is_save=False):
    """Image output element; optionally offers saving with *save_ext*."""
    return {
        "id": str(random.randint(0,19989999)),
        "component": "OutputImage",
        "img_path" : img_path,
        "is_save" : is_save ,
        "save_ext" : save_ext
    }

def get_file_textbox(id , path_type="", title="" , description="" ):
    """File-path picker wrapped in an InputWithDesc row."""
    return {
        "id": str(random.randint(0,19989999)),
        "component": "InputWithDesc",
        "full_width": True,
        "title": title,
        "description": description,
        "children": [
            {
                "id": id ,
                "component": "FilePathTextBox",
                "placeholder" : "",
                "is_persistant" : False,
                "path_type" : path_type
            },
        ]
    }

def get_textarea(id , title="" , description="" ):
    """Multi-line text input wrapped in an InputWithDesc row."""
    return {
        "id": str(random.randint(0,19989999)),
        "component": "InputWithDesc",
        "full_width": True,
        "title": title,
        "description": description,
        "children": [
            {
                "id": id ,
                "component": "Textarea",
                "placeholder" : title ,
                "is_small" : True,
                "is_persistant" : False,
            },
        ]
    }


# ---- sample_applet.py ----------------------------------------------------
from .applets import AppletBase
import json

class MergeLora(AppletBase):
    # NOTE(review): class name says "MergeLora" but the applet id/title are
    # about prompt/seed interpolation — appears to be a placeholder/sample.

    applet_name = "prompt_seed_interpolate"
    applet_title = "Interpolate Prompts and Seeds"

    def get_input_form(self):
        return []

    def run(self , params ):
        # Sample applet: clears its outputs and does nothing else.
        self.update_state("outputs" , [] )
class ModelInterface:
    """Fake backend model interface used for UI development / testing.

    Mimics the real model interface's method signatures: each run_* method
    sleeps to simulate inference latency and returns an array of the shape
    the real backend would produce.
    """

    default_float_type = 'float32'
    avail_float_types = ['float32']
    avail_models = ["sd_1x" , "sd_2x" , "sd_1x_inpaint" , "sd_1x_controlnet"]

    def __init__(self, tdict, dtype='float16', model_name="sd_1x", second_tdict=None ):
        # Nothing to load for the fake backend.
        pass

    def run_unet(self, time_emb, text_emb, unet_inp, control_inp=None, control_weight=1):
        # Identity "denoiser": returns a copy of the latent input.
        time.sleep(1.4)
        return np.copy(unet_inp)

    def run_controlnet(self, time_emb, text_emb, unet_inp, hint_img ):
        time.sleep(0.4)
        return np.array([42])

    def run_dec(self, unet_out):
        # Fake decoder: upsamples the latent spatially by 8x (NHWC).
        time.sleep(1.4)
        return np.zeros((unet_out.shape[0] , unet_out.shape[1]*8 , unet_out.shape[2]*8 , unet_out.shape[3]))

    def run_text_enc(self, tokens, pos_ids):
        # Fake CLIP text encoder: (batch, 77, 768) zeros.
        time.sleep(1.4)
        return np.zeros((tokens.shape[0] , 77 , 768))

    def run_enc(self, inp):
        # Fake encoder: downsamples the image spatially by 8x (NHWC).
        # Bug fix: this previously referenced the undefined name `unet_out`
        # instead of the `inp` argument, raising NameError whenever called.
        time.sleep(1.4)
        return np.zeros((inp.shape[0] , inp.shape[1]//8 , inp.shape[2]//8 , inp.shape[3]))

    def destroy(self):
        pass

    def load_from_tdict(self, tdict_path):
        pass
def process_image_lineart(inp_fname, out_fname, model_path):
    """Extract a line-art (sketch) map from an image with an ONNX model.

    inp_fname:  path of the input image (read with OpenCV).
    out_fname:  path where the 3-channel line-art image is written.
    model_path: path of the line-art ONNX model.
    """

    im = cv2.imread(inp_fname)[... , ::-1 ]  # BGR -> RGB
    im_h = im.shape[0]
    im_w = im.shape[1]

    # Model is run at a fixed 512x512 resolution.
    im = cv2.resize(im , (512 , 512 )).astype("float32") #TODO change this
    im = np.rollaxis(im , 2, 0 )[None]/255.0  # HWC -> 1xCxHxW, scaled to [0, 1]

    ort_sess = ort.InferenceSession(model_path)

    dep = ort_sess.run(None, {'input': im })[0][0][0]

    # Invert the model output (1 - dep) so lines come out dark, then expand
    # the single channel to 3 channels for saving as an ordinary image.
    dep = ((1-dep)*255).clip(0, 255).astype('uint8')[... , None ]
    dep = np.repeat(dep , 3 , axis=2)

    # Restore the original resolution before writing to disk.
    dep = cv2.resize(dep , (im_w , im_h))
    cv2.imwrite(out_fname , dep )


if __name__ == "__main__":
    # Manual smoke test with hard-coded local paths.
    process_image_lineart("/Users/divamgupta/Downloads/2T6A5407R-scaled.jpg" , "/tmp/a.png" , "/tmp/linart.onnx")
    import os
    os.system("open /tmp/a.png")
def process_image_midas_depth(inp_fname, out_fname, model_path):
    """Estimate a depth map from an image with a MiDaS ONNX model and save it.

    inp_fname:  path of the input image (read with OpenCV).
    out_fname:  path where the 3-channel depth visualisation is written.
    model_path: path of the MiDaS monodepth ONNX model.
    """

    im = cv2.imread(inp_fname)[... , ::-1 ]  # BGR -> RGB
    im_h = im.shape[0]
    im_w = im.shape[1]

    # Model is run at a fixed 384x384 resolution, input scaled to [-1, 1].
    im = cv2.resize(im , (384 , 384 )).astype("float32")
    im = np.rollaxis(im , 2, 0 )[None]/127.5 - 1.0  # HWC -> 1xCxHxW


    ort_sess = ort.InferenceSession(model_path)

    dep = ort_sess.run(None, {'0': im })[0][0]
    # Normalise depth to [0, 1] before converting to an 8-bit image.
    dep = dep - dep.min()
    dep = dep / dep.max()

    dep = (dep*255).astype('uint8')[... , None ]
    dep = np.repeat(dep , 3 , axis=2)  # grayscale -> 3-channel

    # NOTE(review): is_normal is hard-coded False, so the surface-normal
    # branch below is dead code — presumably kept for future normal-map
    # support; confirm before removing.
    is_normal = False

    if is_normal:
        depth_np = dep[... , 0]
        a=np.pi * 2.0
        bg_th=0.1

        # Screen-space gradients of the depth map give the normal's x/y.
        x = cv2.Sobel(depth_np, cv2.CV_32F, 1, 0, ksize=3)
        y = cv2.Sobel(depth_np, cv2.CV_32F, 0, 1, ksize=3)
        z = np.ones_like(x) * a
        x[depth_np < bg_th] = 0
        y[depth_np < bg_th] = 0
        normal = np.stack([x, y, z], axis=2)
        normal /= np.sum(normal ** 2.0, axis=2, keepdims=True) ** 0.5
        normal_image = (normal * 127.5 + 127.5).clip(0, 255).astype(np.uint8)
        dep = normal_image


    # Restore the original resolution before writing to disk.
    dep = cv2.resize(dep , (im_w , im_h))
    cv2.imwrite(out_fname , dep )


if __name__ == "__main__":
    # Manual smoke test with hard-coded local paths.
    process_image_midas_depth("/Users/divamgupta/Downloads/2T6A5407R-scaled.jpg" , "/tmp/a.png" , "/Users/divamgupta/Downloads/midas_monodepth.onnx")
    import os
    os.system("open /tmp/a.png")
class MaskInpainting(BasePlugin):
    """Plugin that adds mask-based inpainting to img2img generation.

    Builds full-size and latent-size (1/8) masks from an explicit mask image
    and/or the input image's alpha channel, then re-applies the original
    (noised) latent outside the mask at every diffusion step and composites
    the original pixels back in after decoding.
    """


    def hook_pre_generate(self, sd_run):
        # Enable per-step latent masking when img2img has any mask source.
        if sd_run.mode == "img2img" :
            if sd_run.mask_image_path is not None and sd_run.mask_image_path != "" or sd_run.get_mask_from_image_alpha:
                sd_run.do_masking_diffusion = True


    def hook_post_process_inp_imgs(self, sd_run):
        # Build sd_run.processed_mask (full res) and
        # sd_run.processed_mask_downscaled (latent res) from the mask
        # image and/or the input image's alpha channel.
        if (sd_run.mask_image_path is not None and sd_run.mask_image_path != "") or (sd_run.get_mask_from_image_alpha):

            mask_image = None

            if sd_run.mask_image_path is not None and sd_run.mask_image_path != "":
                h , w , mask_image , _ = process_inp_img(sd_run.mask_image_path , image_size=sd_run.inp_image_resize_mode , new_w=sd_run.img_width , new_h=sd_run.img_height )

            if sd_run.get_mask_from_image_alpha:
                h , w , mask_image_alp , _ = process_inp_img(sd_run.input_image_path , only_read_alpha=True , image_size=sd_run.inp_image_resize_mode , new_w=sd_run.img_width , new_h=sd_run.img_height )

                # Union of the explicit mask and the alpha-derived mask.
                if mask_image is None:
                    mask_image = mask_image_alp
                else:
                    mask_image = np.maximum(mask_image , mask_image_alp)

            assert mask_image is not None
            assert h == sd_run.img_height , "Mask should be of the same size as inp image"
            assert w == sd_run.img_width , "Mask should be of the same size as inp image"
            # process_inp_img returns values in [-1, 1]; map to [0, 1].
            mask_image = (mask_image + 1 )/2

            # Alpha-derived masks get dilated and binarised.
            if sd_run.get_mask_from_image_alpha:
                mask_image = cv2.dilate(mask_image, np.ones((20, 20), np.uint8))
                mask_image[mask_image < 0.5] = 0
                mask_image[mask_image >= 0.5] = 1


            # Optionally soften the mask edge (dilate then blur).
            if sd_run.blur_mask:
                oo = (mask_image*255).astype('uint8')
                oo = cv2.dilate(oo, np.ones((10, 10), np.uint8))
                oo = cv2.blur( oo ,(10,10))
                mask_image = oo.astype('float32')/255

            mask_image = 1 - mask_image # repaint white keep black
            mask_image = np.copy(mask_image[... , :1 ])

            # NOTE(review): `or True` forces the resize path unconditionally —
            # looks like a debug override left in; confirm before cleaning up.
            if sd_run.do_masking_diffusion or True:
                mask_image_sm = cv2.resize(mask_image , (mask_image.shape[1]//8 , mask_image.shape[0]//8) )[: , : , None]
            else:
                mask_image_sm = np.copy(mask_image[::8 , ::8])

            # Add the batch dimension.
            mask_image = mask_image[None]
            mask_image_sm = mask_image_sm[None]

            # Without per-step masking, hard-binarise both masks.
            if not sd_run.do_masking_diffusion:
                mask_image[mask_image < 0.5] = 0
                mask_image[mask_image >= 0.5] = 1

                mask_image_sm[mask_image_sm < 0.5] = 0
                mask_image_sm[mask_image_sm >= 0.5] = 1

            sd_run.processed_mask_downscaled = mask_image_sm
            sd_run.processed_mask = mask_image

    def hook_post_get_next_latent(self, sd_run):
        # At every step, re-impose the original image (noised to the current
        # timestep) in the keep region of the latent.
        if sd_run.do_masking_diffusion:

            latent_proper = np.copy(sd_run.encoded_img_orig)

            noise = self.parent.get_modded_noise(sd_run.seed , latent_proper.shape , seed_type=sd_run.seed_type, small_mod_seed=sd_run.small_mod_seed )
            latent_proper = self.parent.scheduler.add_noise(latent_proper, noise, np.array([self.parent.t_to_i(sd_run.current_t)] * sd_run.batch_size, dtype=np.int64 ) )

            sd_run.next_latents = (latent_proper * sd_run.processed_mask_downscaled) + (sd_run.next_latents * (1 - sd_run.processed_mask_downscaled))

    def hook_mid_decode(self, sd_run):
        # After decoding, paste the original pixels back into the keep region.
        if sd_run.do_masking_diffusion:
            sd_run.decoded = (sd_run.input_image_processed * sd_run.processed_mask) + (sd_run.decoded * (1 - sd_run.processed_mask ))
class StableDiffusionPluginMixin:
    """Mixin giving the SD pipeline a simple named-hook plugin system.

    Hooks are keyed by "<fn_name>_<fn_stage>"; plugins expose methods named
    ``hook_<stage>_<fn_name>`` which get auto-registered by add_plugin().
    """

    def plugin_system_init(self):
        # Maps "<fn_name>_<fn_stage>" -> list of registered callbacks.
        self.hooks = {}



    def register_hook(self, fn_name , fn_stage, function):
        """Register one callback under the (fn_name, fn_stage) pair."""
        hook_ident = fn_name + "_" + fn_stage
        print("[SD] registered" , hook_ident)
        self.hooks.setdefault(hook_ident, []).append(function)

    def run_plugin_hook( self, fn_name , fn_stage, sd_run, *args , **kwargs ):
        """Invoke every callback registered for (fn_name, fn_stage)."""
        for callback in self.hooks.get(fn_name + "_" + fn_stage, []):
            callback(sd_run , *args , **kwargs)

    def add_plugin(self , plugin_class):
        """Instantiate a plugin and register all of its hook_* methods."""
        plugin_object = plugin_class(parent=self)
        hooks_list = [method for method in dir(plugin_object) if method.startswith('hook_')]

        print(hooks_list)

        # Method name convention: hook_<stage>_<fn_name...>
        for hook in hooks_list:
            _prefix, hook_stage, *name_parts = hook.split("_")
            self.register_hook("_".join(name_parts) , hook_stage , getattr(plugin_object , hook) )
class Sd15Inpainting(BasePlugin):
    """Plugin for the dedicated SD-1.5 inpainting model.

    Validates the mask/input-image inputs, encodes the masked image, and
    concatenates the inverted mask plus the encoded masked image as extra
    channels onto the UNet's latent input.
    """

    def hook_pre_generate(self, sd_run):
        # Validate inputs before generation starts.
        if sd_run.is_sd15_inpaint:
            assert (sd_run.input_image_path is not None and sd_run.input_image_path != "") , "No input image specified"
            # if sd_run.mode != "txt2img":
            #     raise ValueError("SD15 Inpaint can only run in txt2img mode")
            if (not sd_run.get_mask_from_image_alpha) and (sd_run.mask_image_path is None or sd_run.mask_image_path == ""):
                raise ValueError("With SD15, the mask should be present")




    def hook_post_prepare_init_latent(self, sd_run):
        if sd_run.is_sd15_inpaint:
            # Blank out the repaint region, then encode the result.
            masked_img = sd_run.input_image_processed * (sd_run.processed_mask > 0.5 )
            sd_run.encoded_masked_img = self.parent.get_encoded_img(sd_run , masked_img)

            assert sd_run.processed_mask_downscaled.shape[0] == 1, "batch_size > 1 not supported "

            # When per-step masking is active, anchor it to the masked encoding.
            if sd_run.do_masking_diffusion:
                sd_run.encoded_img_orig = np.copy(sd_run.encoded_masked_img)



    def hook_mid_get_unet_out(self, sd_run):
        if sd_run.is_sd15_inpaint:
            # Concatenate (inverted) latent-size mask and the encoded masked
            # image as extra channels on the UNet input.
            sd_run.latent_model_input = np.concatenate([
                sd_run.latent_model_input ,
                np.repeat( (1 - sd_run.processed_mask_downscaled ), sd_run.batch_size , axis=0) ,
                sd_run.encoded_masked_img
            ], axis=-1) #TODO verify if we have to concat before or after prescale
def get_scheduler(name):
    """Return a configured scheduler/sampler instance for `name`.

    Returns None for an unrecognised name.
    """
    # Common beta schedule shared by all diffusers-style schedulers below.
    common = dict(
        beta_start=0.00085,
        beta_end=0.012,
        beta_schedule="scaled_linear",
    )

    if name == "ddim":
        return DDIMScheduler(
            clip_sample= False,
            num_train_timesteps= 1000,
            set_alpha_to_one=False,
            # steps_offset= 1,
            trained_betas= None,
            tensor_format="np",
            **common,
        )

    if name == "ddim_v":
        # Same as ddim but with v-prediction parameterisation.
        return DDIMScheduler(
            clip_sample= False,
            num_train_timesteps= 1000,
            set_alpha_to_one=False,
            # steps_offset= 1,
            trained_betas= None,
            tensor_format="np",
            prediction_type="v_prediction",
            **common,
        )

    if name == "lmsd":
        return LMSDiscreteScheduler(tensor_format="np", **common)

    if name == "pndm":
        return PNDMScheduler(skip_prk_steps = True, tensor_format="np", **common)

    # k-diffusion style samplers take no constructor arguments.
    samplers = {
        "k_euler_ancestral": KEulerAncestralSampler,
        "k_euler": KEulerSampler,
        "karras": KarrasSampler,
    }
    if name in samplers:
        return samplers[name]()
class KEulerSampler():
    """Euler-method sampler (k-diffusion style) operating on numpy arrays."""

    def __init__(self, n_inference_steps=50, n_training_steps=1000):
        # NOTE(review): constructor arguments are ignored; set_timesteps()
        # performs the real initialisation.
        pass

    def set_timesteps(self, n_inference_steps, n_training_steps=1000): #dg
        # Inference timesteps, evenly spaced from highest noise down to 0.
        timesteps = np.linspace(n_training_steps - 1, 0, n_inference_steps)

        alphas_cumprod = get_alphas_cumprod(n_training_steps=n_training_steps)
        # Convert cumulative alphas to sigmas and interpolate them (in log
        # space) at the chosen inference timesteps.
        sigmas = ((1 - alphas_cumprod) / alphas_cumprod) ** 0.5
        log_sigmas = np.log(sigmas)
        log_sigmas = np.interp(timesteps, range(n_training_steps), log_sigmas)
        sigmas = np.exp(log_sigmas)
        sigmas = np.append(sigmas, 0)  # final sigma 0 -> fully denoised

        self.sigmas = sigmas
        self.initial_scale = sigmas.max()
        self.timesteps = timesteps
        self.n_inference_steps = n_inference_steps
        self.n_training_steps = n_training_steps
        self.step_count = 0


    def get_input_scale(self, step_count=None):
        # Scale applied to the latent before the model call: 1/sqrt(s^2 + 1).
        if step_count is None:
            step_count = self.step_count
        sigma = self.sigmas[step_count]
        return 1 / (sigma ** 2 + 1) ** 0.5

    def add_noise(self, latent, noise, idx ): #dg
        # All batch entries must share the same timestep index.
        for i in idx:
            assert idx[0] == i
        sc = self.sigmas[idx[0]]
        return latent + noise*sc


    def step(self, output , t , latents , seed=None ): #dg
        # One Euler step from sigma[t] to sigma[t+1].
        # NOTE: mutates `latents` in place and also returns it.

        sigma_from = self.sigmas[t]
        sigma_to = self.sigmas[t + 1]
        latents += output * (sigma_to - sigma_from)
        return {"prev_sample": latents } #latents
step_count=None): 34 | sigma = self.sigmas[step_count] 35 | return 1 / (sigma ** 2 + 1) ** 0.5 36 | 37 | # def set_strength(self, strength=1): 38 | # start_step = self.n_inference_steps - int(self.n_inference_steps * strength) 39 | # self.timesteps = np.linspace(self.n_training_steps - 1, 0, self.n_inference_steps) 40 | # self.timesteps = self.timesteps[start_step:] 41 | # self.initial_scale = self.sigmas[start_step] 42 | # self.step_count = start_step 43 | 44 | 45 | def add_noise(self, latent, noise, idx ): #dg 46 | for i in idx: 47 | assert idx[0] == i 48 | sc = self.sigmas[idx[0]] 49 | return latent + noise*sc 50 | 51 | def step(self, output , t , latents , seed ): #dg 52 | 53 | 54 | sigma_from = self.sigmas[t] 55 | sigma_to = self.sigmas[t + 1] 56 | sigma_up = sigma_to * (1 - (sigma_to ** 2 / sigma_from ** 2)) ** 0.5 57 | sigma_down = sigma_to ** 2 / sigma_from 58 | latents += output * (sigma_down - sigma_from) 59 | noise = np.random.RandomState(seed).normal(size=latents.shape).astype('float32') 60 | latents += noise * sigma_up 61 | return {"prev_sample": latents } #latents -------------------------------------------------------------------------------- /backends/stable_diffusion/stable_diffusion/schedulers/scheduler_mixin.py: -------------------------------------------------------------------------------- 1 | # source : https://github.com/huggingface/diffusers/ 2 | 3 | from typing import Union 4 | import numpy as np 5 | 6 | 7 | class SchedulerMixin: 8 | """ 9 | Mixin containing common functions for the schedulers. 
class SchedulerMixin:
    """
    Mixin containing common functions for the schedulers.
    """

    ignore_for_config = ["tensor_format"]

    def set_format(self, tensor_format="pt"):
        """Set the backing tensor format ("np" or "pt"), converting any
        existing numpy attributes to torch tensors for "pt"."""
        self.tensor_format = tensor_format
        if tensor_format == "pt":
            for attr_name, attr_value in vars(self).items():
                if isinstance(attr_value, np.ndarray):
                    setattr(self, attr_name, torch.from_numpy(attr_value))

        return self

    def clip(self, tensor, min_value=None, max_value=None):
        """Clamp `tensor` to [min_value, max_value] in the active format."""
        fmt = getattr(self, "tensor_format", "pt")

        if fmt == "np":
            return np.clip(tensor, min_value, max_value)
        if fmt == "pt":
            return torch.clamp(tensor, min_value, max_value)

        raise ValueError(f"`self.tensor_format`: {self.tensor_format} is not valid.")

    def log(self, tensor):
        """Elementwise natural log in the active format."""
        fmt = getattr(self, "tensor_format", "pt")

        if fmt == "np":
            return np.log(tensor)
        if fmt == "pt":
            return torch.log(tensor)

        raise ValueError(f"`self.tensor_format`: {self.tensor_format} is not valid.")

    def match_shape(self, values: Union[np.ndarray], broadcast_array: Union[np.ndarray]):
        """
        Turns a 1-D array into an array or tensor with len(broadcast_array.shape) dims.
        Args:
            values: an array or tensor of values to extract.
            broadcast_array: an array with a larger shape of K dimensions with the batch
                dimension equal to the length of timesteps.
        Returns:
            a tensor of shape [batch_size, 1, ...] where the shape has K dims.
        """

        fmt = getattr(self, "tensor_format", "pt")
        values = values.flatten()

        # Append trailing singleton dims until ranks match.
        while len(values.shape) < len(broadcast_array.shape):
            values = values[..., None]
        if fmt == "pt":
            values = values.to(broadcast_array.device)

        return values

    def norm(self, tensor):
        """Norm of `tensor`: full L2 norm for numpy, mean per-row L2 for torch."""
        fmt = getattr(self, "tensor_format", "pt")
        if fmt == "np":
            return np.linalg.norm(tensor)
        if fmt == "pt":
            return torch.norm(tensor.reshape(tensor.shape[0], -1), dim=-1).mean()

        raise ValueError(f"`self.tensor_format`: {self.tensor_format} is not valid.")

    def randn_like(self, tensor, generator=None):
        """Standard-normal sample with the same shape as `tensor`."""
        fmt = getattr(self, "tensor_format", "pt")
        if fmt == "np":
            return np.random.randn(*np.shape(tensor))
        if fmt == "pt":
            # return torch.randn_like(tensor)
            return torch.randn(tensor.shape, layout=tensor.layout, generator=generator).to(tensor.device)

        raise ValueError(f"`self.tensor_format`: {self.tensor_format} is not valid.")

    def zeros_like(self, tensor):
        """All-zeros array/tensor with the same shape as `tensor`."""
        fmt = getattr(self, "tensor_format", "pt")
        if fmt == "np":
            return np.zeros_like(tensor)
        if fmt == "pt":
            return torch.zeros_like(tensor)

        raise ValueError(f"`self.tensor_format`: {self.tensor_format} is not valid.")
@dataclass
class SDRun():
    """All parameters and state for one Stable Diffusion generation run."""

    prompt: str

    is_sd2:bool = False  # whether the weights are an SD 2.x model

    mode:str="txt2img"  # generation mode, e.g. "txt2img" or "img2img"
    tdict_path:str=None  # path of the main weights tdict
    dtype:str= "float16"

    starting_img_given:bool = False
    do_masking_diffusion:bool = False # if you wanna mask the latent at every sd step
    img_height:int = None
    img_width:int = None

    negative_prompt:str=""

    batch_size:int =1
    num_steps:int =25  # number of diffusion steps
    guidance_scale:float=7.5  # classifier-free guidance strength
    seed:int=None
    seed_type:str="np"
    small_mod_seed:int=None
    img_id:int = 0
    scheduler:str = "__default_for_model__"  # scheduler name; sentinel = model default

    combine_unet_run:bool = False # if it should do the cond + uncond in single batch

    input_image_path:str =None
    inp_image_resize_mode:str = "legacy_auto"

    mask_image_path:str =None
    get_mask_from_image_alpha:bool = False  # derive the mask from the input image's alpha channel
    get_binary_mask_from_colored_mask:bool = True #if the mask_image_path is a colored mask, and this option will extract binary mask from it
    blur_mask:bool = False  # soften the mask edge before use
    infill_alpha:bool = False #infill the image first
    infill_mask:bool = False #not used, infill the mask

    force_use_given_size:bool = False # this will set inp_image_resize_mode to none
    input_image_strength:float=0.5  # img2img strength (how much the input image is kept)
    second_tdict_path:str = None
    lora_tdict_paths:tuple = ()  # paths of LoRA weight tdicts to merge in

    # ControlNet options
    controlnet_model:str = None
    do_controlnet_preprocess:bool = False
    controlnet_input_image_path:str = None
    controlnet_inp_img_preprocesser:str = None # name of the preprocess fn
    is_control_net:bool = False
    controlnet_inp_img_preprocesser_model_path:str = None
    controlnet_tdict_path:str = None
    control_weight:float = 1.0
    control_weight_current_cond:float = 1.0
    control_weight_current_uncond:float = 1.0
    controlnet_guess_mode:bool = False

    is_sd15_inpaint:bool = False  # use the dedicated SD-1.5 inpainting model path
    do_v_prediction:bool = False  # v-prediction parameterisation
    
    is_clip_skip_2:bool = False  # stop the CLIP text encoder one layer early
def add_lora_w(weight ,up_weight , down_weight , scale , ratio):
    """Return `weight` with a LoRA delta (up @ down) applied.

    Supports 2-D linear weights and 1x1-kernel conv2d weights; the result
    is `weight + ratio * delta * scale`, computed in fp32 and cast back
    to fp16.
    """

    # for some reason numpy operatrions are slow with fp16
    weight = weight.astype('float32')
    up_weight = up_weight.astype('float32')
    down_weight = down_weight.astype('float32')

    if weight.ndim == 2:
        # linear
        delta = up_weight @ down_weight
    elif down_weight.shape[2:4] == (1, 1):
        # conv2d 1x1: contract over the rank dim, restore the 1x1 kernel dims
        delta = (up_weight[: , : , 0 , 0 ] @ down_weight[: , : , 0 , 0 ])[: , : , None , None ]
    else:
        raise ValueError("Unsupported LoRA W")

    weight = weight + ratio * delta * scale
    return weight.astype( 'float16' )

def add_lora_weights(src_tdict , state_dict , lora_tdict , ratio ):
    """Apply every LoRA addition found in `lora_tdict` onto `state_dict`.

    Source weights are pulled lazily from `src_tdict` for keys not yet in
    `state_dict`; each key expects _lora_up/_lora_down/_lora_scale entries.
    """
    lora_tdict.init_read()
    root_keys = set()
    for key in lora_tdict.keys():
        for suffix in ("_lora_up" , "_lora_down" , "_lora_scale"):
            key = key.replace(suffix , "")
        root_keys.add(key)

    for k in tqdm(list(root_keys)):
        if k not in state_dict:
            state_dict[k] = src_tdict.read_key(k).copy()
        up_weight = lora_tdict.read_key(k + "_lora_up")
        down_weight = lora_tdict.read_key(k + "_lora_down")
        scale = lora_tdict.read_key(k + "_lora_scale")

        state_dict[k] = add_lora_w( state_dict[k] , up_weight=up_weight , down_weight=down_weight , scale=scale , ratio=ratio )
def add_lora_ti_weights(src_tdict , weight_additions_list):
    """Build a state dict of source weights patched with LoRA additions.

    weight_additions_list: iterable of (add_model_fn, tdict_path, power)
    tuples; only entries with add_model_fn == "lora" are applied here.
    Returns the dict of patched weights (untouched keys are absent).
    """
    src_tdict.init_read()
    state_dict = {}
    for add_model_fn , tdict_path , power in weight_additions_list:
        if add_model_fn == "lora":
            m_tdict = TDict(tdict_path)
            add_lora_weights(src_tdict , state_dict, m_tdict , power )

    return state_dict

def clip_skip_2_patch_weights(src_tdict, weight_additions_list , current_weight_additions):
    """Implement "clip skip 2" by zeroing the last CLIP encoder layer.

    If any entry in weight_additions_list requests "clip_skip_2", the final
    (layer 11) mlp.fc2 and self_attn.out_proj weights/biases are replaced
    with zeros in current_weight_additions, which is returned.
    """
    is_skip = False
    for add_model_fn , _ , _ in weight_additions_list:
        if add_model_fn == "clip_skip_2":
            is_skip = True

    if is_skip:
        src_tdict.init_read()
        for k in src_tdict.keys():
            if k in [
                "cond_stage_model.transformer.text_model.encoder.layers.11.mlp.fc2.weight",
                "cond_stage_model.transformer.text_model.encoder.layers.11.mlp.fc2.bias",
                "cond_stage_model.transformer.text_model.encoder.layers.11.self_attn.out_proj.weight",
                "cond_stage_model.transformer.text_model.encoder.layers.11.self_attn.out_proj.bias"
            ]:
                # Zero tensor of the same shape, so the layer becomes a no-op.
                current_weight_additions[k] = (src_tdict.read_key(k).copy() * 0).astype('float16')

    return current_weight_additions
def post_process_mask(im , dilate=None, erode=None, blur=None):
    """Morphologically adjust a mask given in [-1, 1].

    im:     mask array in [-1, 1].
    dilate: odd kernel size for dilation (PIL MaxFilter), or None.
    erode:  odd kernel size for erosion (PIL MinFilter), or None.
    blur:   NOTE(review): accepted but currently unused — confirm intent.
    Returns the adjusted mask, back in [-1, 1].
    """
    im = Image.fromarray((( im + 1 )*255/2).astype('uint8') )
    if dilate is not None:
        im = im.filter(ImageFilter.MaxFilter(dilate))
    if erode is not None:
        # Bug fix: previously passed `dilate` as the kernel size here, so
        # erode-only calls crashed (dilate=None) and combined calls eroded
        # with the wrong radius.
        im = im.filter(ImageFilter.MinFilter(erode))
    return (np.array(im).astype("float32")/ 255.0)*2 - 1
def load_weights_model(model_container, weights_config):
    """Load (or re-load) the weights described by ``weights_config`` into the container's model.

    ``weights_config`` is a 3-tuple ``(tdict_path, second_tdict_path, weight_additions)``.
    The base tdicts are only re-read when they differ from what is currently
    loaded; LoRA / clip-skip additions are then applied on top.
    """
    # Only plain Stable Diffusion containers are handled by this loader.
    assert model_container.model_config[0] == "SD_normal"
    assert model_container.model_type == "SD_normal"

    # Already loaded with exactly this configuration -> nothing to do.
    if model_container.weights_config == weights_config:
        return

    tdict_path, second_tdict_path, weight_additions = weights_config
    cur_tdict_path, cur_second_tdict_path, cur_weight_additions = model_container.weights_config

    tdict_1 = None  # opened lazily; reused by the additions step below when set

    if tdict_path == cur_tdict_path and second_tdict_path == cur_second_tdict_path and cur_weight_additions == ():
        # the main tdicts are already loaded, just load the additional weights
        pass
    else:
        if second_tdict_path is not None:
            tdict2 = TDict(second_tdict_path)
        else:
            tdict2 = None
        tdict_1 = TDict(tdict_path)
        print("[SD] Loading weights")
        model_container.model.load_from_tdict(tdict_1, tdict2 )
        # Record that the base weights (with no additions yet) are now loaded.
        model_container.weights_config = tdict_path, second_tdict_path, ()

    if weight_additions is not None and weight_additions != ():
        if tdict_1 is None:
            tdict_1 = TDict(tdict_path)

        print("[SD] Loading LoRA weights")
        extra_weights = add_lora_ti_weights(tdict_1 , weight_additions )
        extra_weights = clip_skip_2_patch_weights(tdict_1 , weight_additions, extra_weights )


        model_container.model.load_from_state_dict(extra_weights )
        # NOTE(review): this stores the computed state dict rather than the original
        # ``weight_additions`` tuple, so the equality check at the top will never
        # match a repeated identical ``weights_config`` and the additions will be
        # recomputed on every call — confirm whether this is intended.
        model_container.weights_config = tdict_path, second_tdict_path, extra_weights
"SD_normal" 47 | 48 | if model_container.model_config != model_config: 49 | model_container.reset() 50 | print("[SD] Creating model interface") 51 | assert tdict_path is not None 52 | 53 | if second_tdict_path is not None: 54 | tdict2 = TDict(second_tdict_path) 55 | else: 56 | tdict2 = None 57 | 58 | model_container.model = ModelInterfaceClass(TDict(tdict_path ) , dtype=dtype, model_name=model_name , second_tdict=tdict2) 59 | model_container.model_type = "SD_normal" 60 | model_container.model_config = model_type, model_name, dtype 61 | model_container.weights_config = tdict_path, second_tdict_path, () 62 | 63 | load_weights_model(model_container , weights_config) 64 | 65 | 66 | -------------------------------------------------------------------------------- /backends/stable_diffusion/stable_diffusion/utils/stdin_input.py: -------------------------------------------------------------------------------- 1 | import threading 2 | import queue 3 | import sys 4 | import platform 5 | import select 6 | 7 | 8 | if platform.system() == "Windows": 9 | 10 | # Due to limitations of windows, we spawn a new thread for stdin. The thread collects the stdin inputs and appends them in list. 11 | 12 | input_queue = queue.Queue() 13 | evt = threading.Event() 14 | print("starting the input thread!") 15 | 16 | def input_collector_worker(input_queue): 17 | while True: 18 | input_queue.put(input()) 19 | evt.set() 20 | 21 | input_thread = threading.Thread( 22 | target=input_collector_worker, args=( 23 | input_queue,)) 24 | input_thread.daemon = True 25 | input_thread.start() 26 | 27 | def is_avail(): 28 | """To check in a non blocking way if there is some input available. 29 | Returns: 30 | bool: If input is avaialble 31 | """ 32 | return not input_queue.empty() 33 | 34 | def get_input(): 35 | """To get the input in a blocking way. 
def get_sd_run_from_dict(d):
    """Build an ``SDRun`` from a loosely-specified options dict.

    Accepts legacy key aliases (``input_img``, ``mask_image``,
    ``model_tdict_path``), fills defaults for ``batch_size`` / ``num_imgs``,
    keeps only keys that are declared ``SDRun`` fields, and derives the run
    mode (img2img vs txt2img) unless ``sd_mode_override`` is given.

    Args:
        d (dict): Raw run options, e.g. parsed from a frontend request.

    Returns:
        SDRun: The populated run description.
    """
    d = dict(d)  # work on a copy so the caller's dict is not mutated

    # Legacy aliases used by older frontends.
    if 'input_img' in d:
        d['input_image_path'] = d['input_img']
    if 'mask_image' in d:
        d['mask_image_path'] = d['mask_image']
    if 'model_tdict_path' in d:
        d['tdict_path'] = d['model_tdict_path']

    d.setdefault('batch_size', 1)
    d.setdefault('num_imgs', 1)

    # Keep only keys that are actual SDRun dataclass fields; set lookup is O(1)
    # per key. (Also drops the stray debug print the original had here.)
    sd_keys = {ff.name for ff in fields(SDRun)}
    d2 = {k: v for k, v in d.items() if k in sd_keys}

    sd_run = SDRun(**d2)

    # An input image implies img2img, unless this is an SD-1.5 inpainting run.
    if sd_run.input_image_path is not None and sd_run.input_image_path != "" and (not sd_run.is_sd15_inpaint):
        sd_run.mode = "img2img"
    else:
        sd_run.mode = "txt2img"

    if "sd_mode_override" in d:
        sd_run.mode = d["sd_mode_override"]

    return sd_run
39 | *.manifest 40 | *.spec 41 | 42 | # Installer logs 43 | pip-log.txt 44 | pip-delete-this-directory.txt 45 | 46 | # Unit test / coverage reports 47 | htmlcov/ 48 | .tox/ 49 | .nox/ 50 | .coverage 51 | .coverage.* 52 | .cache 53 | nosetests.xml 54 | coverage.xml 55 | *.cover 56 | *.py,cover 57 | .hypothesis/ 58 | .pytest_cache/ 59 | cover/ 60 | 61 | # Translations 62 | *.mo 63 | *.pot 64 | 65 | # Django stuff: 66 | *.log 67 | local_settings.py 68 | db.sqlite3 69 | db.sqlite3-journal 70 | 71 | # Flask stuff: 72 | instance/ 73 | .webassets-cache 74 | 75 | # Scrapy stuff: 76 | .scrapy 77 | 78 | # Sphinx documentation 79 | docs/_build/ 80 | 81 | # PyBuilder 82 | .pybuilder/ 83 | target/ 84 | 85 | # Jupyter Notebook 86 | .ipynb_checkpoints 87 | 88 | # IPython 89 | profile_default/ 90 | ipython_config.py 91 | 92 | # pyenv 93 | # For a library or package, you might want to ignore these files since the code is 94 | # intended to run in multiple environments; otherwise, check them in: 95 | # .python-version 96 | 97 | # pipenv 98 | # According to pypa/pipenv#598, it is recommended to include Pipfile.lock in version control. 99 | # However, in case of collaboration, if having platform-specific dependencies or dependencies 100 | # having no cross-platform support, pipenv may install dependencies that don't work, or not 101 | # install all needed dependencies. 102 | #Pipfile.lock 103 | 104 | # poetry 105 | # Similar to Pipfile.lock, it is generally recommended to include poetry.lock in version control. 106 | # This is especially recommended for binary packages to ensure reproducibility, and is more 107 | # commonly ignored for libraries. 108 | # https://python-poetry.org/docs/basic-usage/#commit-your-poetrylock-file-to-version-control 109 | #poetry.lock 110 | 111 | # PEP 582; used by e.g. 
github.com/David-OConnor/pyflow 112 | __pypackages__/ 113 | 114 | # Celery stuff 115 | celerybeat-schedule 116 | celerybeat.pid 117 | 118 | # SageMath parsed files 119 | *.sage.py 120 | 121 | # Environments 122 | .env 123 | .venv 124 | env/ 125 | venv/ 126 | ENV/ 127 | env.bak/ 128 | venv.bak/ 129 | 130 | # Spyder project settings 131 | .spyderproject 132 | .spyproject 133 | 134 | # Rope project settings 135 | .ropeproject 136 | 137 | # mkdocs documentation 138 | /site 139 | 140 | # mypy 141 | .mypy_cache/ 142 | .dmypy.json 143 | dmypy.json 144 | 145 | # Pyre type checker 146 | .pyre/ 147 | 148 | # pytype static type analyzer 149 | .pytype/ 150 | 151 | # Cython debug symbols 152 | cython_debug/ 153 | 154 | # PyCharm 155 | # JetBrains specific template is maintainted in a separate JetBrains.gitignore that can 156 | # be found at https://github.com/github/gitignore/blob/main/Global/JetBrains.gitignore 157 | # and can be added to the global gitignore or merged into this file. For a more nuclear 158 | # option (not recommended) you can uncomment the following to ignore the entire idea folder. 
159 | #.idea/ 160 | 161 | 162 | # General 163 | .DS_Store 164 | .AppleDouble 165 | .LSOverride 166 | 167 | # Icon must end with two \r 168 | Icon 169 | 170 | # Thumbnails 171 | ._* 172 | 173 | # Files that might appear in the root of a volume 174 | .DocumentRevisions-V100 175 | .fseventsd 176 | .Spotlight-V100 177 | .TemporaryItems 178 | .Trashes 179 | .VolumeIcon.icns 180 | .com.apple.timemachine.donotpresent 181 | 182 | # Directories potentially created on remote AFP share 183 | .AppleDB 184 | .AppleDesktop 185 | Network Trash Folder 186 | Temporary Items 187 | .apdisk 188 | 189 | 190 | 191 | pretrained_weights/ 192 | -------------------------------------------------------------------------------- /backends/stable_diffusion_tf_models/WEIGHTS_LICENSE: -------------------------------------------------------------------------------- 1 | The Stable Diffusion weights (not included as part of this repository) are under the following license (CreativeML Open RAIL-M): 2 | 3 | https://raw.githubusercontent.com/CompVis/stable-diffusion/main/LICENSE 4 | 5 | The weights content has not been modified as part of this reimplementation (only reformatted). 
class AttentionBlock(tf.keras.layers.Layer):
    """Single-head self-attention over spatial positions, as used in the VAE.

    Queries, keys, and values are produced by 1x1 convolutions; attention is
    computed over the flattened h*w spatial positions and the result is added
    back to the input (residual connection).
    """

    def __init__(self, channels):
        super().__init__()
        self.norm = GroupNormalization(epsilon=1e-5)
        # 1x1 convolutions act as per-position linear projections.
        self.q = PaddedConv2D(channels, 1)
        self.k = PaddedConv2D(channels, 1)
        self.v = PaddedConv2D(channels, 1)
        self.proj_out = PaddedConv2D(channels, 1)

    def call(self, x):
        h_ = self.norm(x)
        q, k, v = self.q(h_), self.k(h_), self.v(h_)

        # Compute attention scores between all spatial positions.
        b, h, w, c = q.shape
        q = tf.reshape(q, (-1, h * w, c))  # b, hw, c
        k = tf.keras.layers.Permute((3, 1, 2))(k)
        k = tf.reshape(k, (-1, c, h * w))  # b, c, hw
        w_ = q @ k
        w_ = w_ * (c ** (-0.5))  # scale by 1/sqrt(channels)
        w_ = tf.keras.activations.softmax(w_)

        # Attend to values: weighted sum over positions, then restore b,h,w,c.
        v = tf.keras.layers.Permute((3, 1, 2))(v)
        v = tf.reshape(v, (-1, c, h * w))
        w_ = tf.keras.layers.Permute((2, 1))(w_)
        h_ = v @ w_
        h_ = tf.keras.layers.Permute((2, 1))(h_)
        h_ = tf.reshape(h_, (-1, h, w, c))
        # Residual connection around the attended output.
        return x + self.proj_out(h_)
class Decoder(tf.keras.Sequential):
    """VAE decoder: maps 4-channel latents to a 3-channel RGB image.

    Three UpSampling2D stages give an overall 8x spatial upscale, with channel
    width stepping 512 -> 256 -> 128 on the way up.
    """

    def __init__(self):
        super().__init__(
            [
                # Identity lambda kept as the first layer (preserves original graph).
                tf.keras.layers.Lambda(lambda x: 1 * x),
                PaddedConv2D(4, 1),
                PaddedConv2D(512, 3, padding=1),
                # Mid block: resnet -> attention -> resnets at 512 channels.
                ResnetBlock(512, 512),
                AttentionBlock(512),
                ResnetBlock(512, 512),
                ResnetBlock(512, 512),
                ResnetBlock(512, 512),
                ResnetBlock(512, 512),
                # Upscale stage 1 (512 channels).
                tf.keras.layers.UpSampling2D(size=(2, 2)),
                PaddedConv2D(512, 3, padding=1),
                ResnetBlock(512, 512),
                ResnetBlock(512, 512),
                ResnetBlock(512, 512),
                # Upscale stage 2 (512 -> 256 channels).
                tf.keras.layers.UpSampling2D(size=(2, 2)),
                PaddedConv2D(512, 3, padding=1),
                ResnetBlock(512, 256),
                ResnetBlock(256, 256),
                ResnetBlock(256, 256),
                # Upscale stage 3 (256 -> 128 channels).
                tf.keras.layers.UpSampling2D(size=(2, 2)),
                PaddedConv2D(256, 3, padding=1),
                ResnetBlock(256, 128),
                ResnetBlock(128, 128),
                ResnetBlock(128, 128),
                # Final norm/activation and projection to RGB.
                GroupNormalization(epsilon=1e-5),
                tf.keras.layers.Activation("swish"),
                PaddedConv2D(3, 3, padding=1),
            ]
        )
class CLIPAttention(tf.keras.layers.Layer):
    """Multi-head self-attention for the CLIP text encoder (768 dims, 12 heads)."""

    def __init__(self):
        super().__init__()
        self.embed_dim = 768
        self.num_heads = 12
        self.head_dim = self.embed_dim // self.num_heads  # 64
        self.scale = self.head_dim**-0.5  # 1/sqrt(head_dim) query scaling
        self.q_proj = tf.keras.layers.Dense(self.embed_dim)
        self.k_proj = tf.keras.layers.Dense(self.embed_dim)
        self.v_proj = tf.keras.layers.Dense(self.embed_dim)
        self.out_proj = tf.keras.layers.Dense(self.embed_dim)

    def _shape(self, tensor, seq_len: int, bsz: int):
        # (bsz, seq_len, embed) -> (bsz, n_head, seq_len, head_dim)
        a = tf.reshape(tensor, (bsz, seq_len, self.num_heads, self.head_dim))
        return tf.keras.layers.Permute((2, 1, 3))(a)  # bs , n_head , seq_len , head_dim

    def call(self, inputs):
        """inputs is ``[hidden_states, causal_attention_mask]``; returns attended states."""
        hidden_states, causal_attention_mask = inputs
        bsz, tgt_len, embed_dim = hidden_states.shape
        # Scale queries up front instead of scaling the score matrix.
        query_states = self.q_proj(hidden_states) * self.scale
        key_states = self._shape(self.k_proj(hidden_states), tgt_len, -1)
        value_states = self._shape(self.v_proj(hidden_states), tgt_len, -1)

        # Fold batch and head dims together for the batched matmuls below.
        proj_shape = (-1, tgt_len, self.head_dim)
        query_states = self._shape(query_states, tgt_len, -1)
        query_states = tf.reshape(query_states, proj_shape)
        key_states = tf.reshape(key_states, proj_shape)

        src_len = tgt_len
        value_states = tf.reshape(value_states, proj_shape)
        attn_weights = query_states @ tf.keras.layers.Permute((2, 1))(key_states)

        # Un-fold heads to add the causal mask (broadcast over batch), then re-fold.
        attn_weights = tf.reshape(attn_weights, (-1, self.num_heads, tgt_len, src_len))
        attn_weights = attn_weights + causal_attention_mask
        attn_weights = tf.reshape(attn_weights, (-1, tgt_len, src_len))

        attn_weights = tf.nn.softmax(attn_weights)
        attn_output = attn_weights @ value_states

        # Merge heads back into a single embed_dim axis.
        attn_output = tf.reshape(
            attn_output, (-1, self.num_heads, tgt_len, self.head_dim)
        )
        attn_output = tf.keras.layers.Permute((2, 1, 3))(attn_output)
        attn_output = tf.reshape(attn_output, (-1, tgt_len, embed_dim))

        return self.out_proj(attn_output)
class CLIPTextTransformer(tf.keras.models.Model):
    """CLIP text encoder: embeddings -> 12-layer transformer -> final layer norm."""

    def __init__(self, n_words=77):
        super().__init__()
        self.embeddings = CLIPTextEmbeddings(n_words=n_words)
        self.encoder = CLIPEncoder()
        self.final_layer_norm = tf.keras.layers.LayerNormalization(epsilon=1e-5)
        # Upper-triangular -inf mask so each token only attends to itself and
        # earlier positions.
        # NOTE(review): the mask is hard-coded to 77x77 while the embeddings use
        # the configurable ``n_words`` — confirm callers never pass n_words != 77.
        self.causal_attention_mask = tf.constant(
            np.triu(np.ones((1, 1, 77, 77), dtype="float32") * -np.inf, k=1)
        )

    def call(self, inputs):
        """inputs is ``[input_ids, position_ids]``; returns normalized token states."""
        input_ids, position_ids = inputs
        x = self.embeddings([input_ids, position_ids])
        x = self.encoder([x, self.causal_attention_mask])
        return self.final_layer_norm(x)
class ControlNet(tf.keras.models.Model):
    """ControlNet: a copy of the UNet encoder + middle block whose 13 outputs
    (one per saved skip connection) are passed through zero-initialized 1x1
    convolutions before being added to the main diffusion model.
    """

    def __init__(self):
        super().__init__()
        # Timestep embedding MLP (matches the diffusion model's).
        self.time_embed = [
            tf.keras.layers.Dense(1280),
            tf.keras.activations.swish,
            tf.keras.layers.Dense(1280),
        ]
        # Mirror of the UNet encoder: 12 groups, widening 320 -> 640 -> 1280.
        self.input_blocks = [
            [PaddedConv2D(320, kernel_size=3, padding=1)],
            [ResBlock(320, 320), SpatialTransformer(320, 8, 40)],
            [ResBlock(320, 320), SpatialTransformer(320, 8, 40)],
            [Downsample(320)],
            [ResBlock(320, 640), SpatialTransformer(640, 8, 80)],
            [ResBlock(640, 640), SpatialTransformer(640, 8, 80)],
            [Downsample(640)],
            [ResBlock(640, 1280), SpatialTransformer(1280, 8, 160)],
            [ResBlock(1280, 1280), SpatialTransformer(1280, 8, 160)],
            [Downsample(1280)],
            [ResBlock(1280, 1280)],
            [ResBlock(1280, 1280)],
        ]
        self.middle_block = [
            ResBlock(1280, 1280),
            SpatialTransformer(1280, 8, 160),
            ResBlock(1280, 1280),
        ]

        # One 1x1 projection per saved output (12 encoder + 1 middle = 13).
        self.zero_convs = [ PaddedConv2D(320, kernel_size=1, padding=0),
                            PaddedConv2D(320, kernel_size=1, padding=0),
                            PaddedConv2D(320, kernel_size=1, padding=0),
                            PaddedConv2D(320, kernel_size=1, padding=0),
                            PaddedConv2D(640, kernel_size=1, padding=0),
                            PaddedConv2D(640, kernel_size=1, padding=0),
                            PaddedConv2D(640, kernel_size=1, padding=0),
                            PaddedConv2D(1280, kernel_size=1, padding=0),
                            PaddedConv2D(1280, kernel_size=1, padding=0),
                            PaddedConv2D(1280, kernel_size=1, padding=0),
                            PaddedConv2D(1280, kernel_size=1, padding=0),
                            PaddedConv2D(1280, kernel_size=1, padding=0),
                            PaddedConv2D(1280, kernel_size=1, padding=0)
                            ]


    def call(self, inputs):
        """inputs is ``[latent, t_emb, context, hint_out]``; returns 13 control tensors."""
        x, t_emb, context, hint_out = inputs
        emb = apply_seq(t_emb, self.time_embed)

        def apply(x, layer):
            # Dispatch on layer type: ResBlocks need the time embedding,
            # SpatialTransformers need the text context.
            if isinstance(layer, ResBlock):
                x = layer([x, emb])
            elif isinstance(layer, SpatialTransformer):
                x = layer([x, context])
            else:
                x = layer(x)
            return x

        saved_inputs = []
        for i,b in enumerate(self.input_blocks):
            for layer in b:
                x = apply(x, layer)
            # The processed hint image is injected after the first conv block.
            if i == 0:
                x = x + hint_out
            saved_inputs.append(x)

        for layer in self.middle_block:
            x = apply(x, layer)
        saved_inputs.append(x)

        assert len(saved_inputs) == 13

        # Project every saved tensor through its zero conv.
        outs = []
        for i,x in enumerate(saved_inputs):
            outs.append( self.zero_convs[i](x) )

        return outs
def td_dot(a, b):
    """Batched matrix product over 4-D tensors.

    Flattens the first two axes of ``a`` and ``b``, multiplies the resulting
    batches of matrices, and restores the leading head axis from ``a``.
    Asserts the output stays under a fixed element-count budget.
    """
    flat_a = tf.reshape(a, (-1, a.shape[2], a.shape[3]))
    flat_b = tf.reshape(b, (-1, b.shape[2], b.shape[3]))
    prod = tf.keras.backend.batch_dot(flat_a, flat_b)
    out = tf.reshape(prod, (-1, a.shape[1], prod.shape[1], prod.shape[2]))
    # Guard against shapes too large for the downstream attention kernels.
    assert out.shape[1] * out.shape[2] * out.shape[3] < 8*4096*4096 - 1000 , "Shape too large"
    return out
27 | 28 | #Electron-builder output 29 | /dist_electron 30 | -------------------------------------------------------------------------------- /electron_app/README.md: -------------------------------------------------------------------------------- 1 | # Diffusion Bee Electron App 2 | 3 | ## Project setup 4 | ``` 5 | npm install 6 | 7 | ``` 8 | 9 | ### Compiles and hot-reloads for development 10 | ``` 11 | npm run electron:serve # run via electron 12 | ``` 13 | 14 | ### Compiles and minifies for production 15 | ``` 16 | npm run electron:build 17 | ``` 18 | 19 | for building for production set env `APPLE_ID` and `APPLE_ID_PASSWORD` 20 | 21 | -------------------------------------------------------------------------------- /electron_app/babel.config.js: -------------------------------------------------------------------------------- 1 | module.exports = { 2 | presets: [ 3 | '@vue/cli-plugin-babel/preset' 4 | ] 5 | } 6 | -------------------------------------------------------------------------------- /electron_app/build/Icon-1024.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/divamgupta/diffusionbee-stable-diffusion-ui/daf82f80e37b5a8c3e7b6caccedae09a3bf16dc5/electron_app/build/Icon-1024.png -------------------------------------------------------------------------------- /electron_app/build/Icon-128.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/divamgupta/diffusionbee-stable-diffusion-ui/daf82f80e37b5a8c3e7b6caccedae09a3bf16dc5/electron_app/build/Icon-128.png -------------------------------------------------------------------------------- /electron_app/build/Icon-256.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/divamgupta/diffusionbee-stable-diffusion-ui/daf82f80e37b5a8c3e7b6caccedae09a3bf16dc5/electron_app/build/Icon-256.png 
-------------------------------------------------------------------------------- /electron_app/build/Icon-32.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/divamgupta/diffusionbee-stable-diffusion-ui/daf82f80e37b5a8c3e7b6caccedae09a3bf16dc5/electron_app/build/Icon-32.png -------------------------------------------------------------------------------- /electron_app/build/Icon-512.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/divamgupta/diffusionbee-stable-diffusion-ui/daf82f80e37b5a8c3e7b6caccedae09a3bf16dc5/electron_app/build/Icon-512.png -------------------------------------------------------------------------------- /electron_app/build/Icon-64.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/divamgupta/diffusionbee-stable-diffusion-ui/daf82f80e37b5a8c3e7b6caccedae09a3bf16dc5/electron_app/build/Icon-64.png -------------------------------------------------------------------------------- /electron_app/build/entitlements.mac.plist: -------------------------------------------------------------------------------- 1 | 2 | 3 | 4 | 5 | com.apple.security.cs.allow-unsigned-executable-memory 6 | 7 | 8 | 9 | -------------------------------------------------------------------------------- /electron_app/download.data: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/divamgupta/diffusionbee-stable-diffusion-ui/daf82f80e37b5a8c3e7b6caccedae09a3bf16dc5/electron_app/download.data -------------------------------------------------------------------------------- /electron_app/package.json: -------------------------------------------------------------------------------- 1 | { 2 | "name": "DiffusionBee", 3 | "version": "2.3.0", 4 | "build_number": "0029", 5 | "website": "https://diffusionbee.com", 6 | 
"description": "Diffusion Bee - Stable Diffusion App.", 7 | "is_dev": false, 8 | "private": true, 9 | "scripts": { 10 | "serve": "vue-cli-service serve", 11 | "serve:ui": "vue-cli-service serve ./src/main_demoui.js", 12 | "build:ui": "vue-cli-service build ./src/main_demoui.js", 13 | "build": "vue-cli-service build", 14 | "lint": "vue-cli-service lint", 15 | "electron:build": "vue-cli-service electron:build", 16 | "electron:serve": "vue-cli-service electron:serve", 17 | "postinstall": "electron-builder install-app-deps", 18 | "postuninstall": "electron-builder install-app-deps" 19 | }, 20 | "main": "background.js", 21 | "dependencies": { 22 | "@codekraft-studio/vue-record": "^0.0.3", 23 | "@fortawesome/free-solid-svg-icons": "^5.15.4", 24 | "@fortawesome/vue-fontawesome": "^2.0.6", 25 | "apexcharts": "^3.33.0", 26 | "bootstrap": "^5.1.3", 27 | "bootstrap-vue": "^2.21.2", 28 | "chart.js": "^2.9.4", 29 | "core-js": "^3.8.3", 30 | "electron-context-menu": "^3.5.0", 31 | "electron-localshortcut": "^3.2.1", 32 | "electron-notarize": "^1.2.1", 33 | "electron-settings": "^4.0.2", 34 | "fuse.js": "^6.6.2", 35 | "javascript-time-ago": "^2.3.13", 36 | "js-yaml": "^4.1.0", 37 | "konva": "^8.3.13", 38 | "nsfwjs": "^2.4.2", 39 | "path": "^0.12.7", 40 | "stream": "^0.0.2", 41 | "v-click-outside": "^3.1.2", 42 | "verte": "^0.0.12", 43 | "vue": "^2.6.14", 44 | "vue-apexcharts": "^1.6.2", 45 | "vue-chartjs": "^3.5.1", 46 | "vue-click-outside": "^1.1.0", 47 | "vue-inline-svg": "^2.1.0", 48 | "vue-spinner": "^1.0.4", 49 | "vue-toast-notification": "^0.6.3", 50 | "vue-unicons": "^3.3.1", 51 | "vue-web-cam": "^1.9.0" 52 | }, 53 | "devDependencies": { 54 | "@babel/core": "^7.12.16", 55 | "@babel/eslint-parser": "^7.12.16", 56 | "@vue/cli-plugin-babel": "~5.0.0", 57 | "@vue/cli-plugin-eslint": "~5.0.0", 58 | "@vue/cli-service": "~5.0.0", 59 | "babel-eslint": "^10.1.0", 60 | "electron": "^13.6.9", 61 | "electron-builder": "^23.3.3", 62 | "electron-devtools-installer": "^3.1.0", 63 | 
"eslint": "^7.32.0", 64 | "eslint-plugin-vue": "^8.0.3", 65 | "vue-cli-plugin-electron-builder": "^2.1.1", 66 | "vue-template-compiler": "^2.6.14" 67 | }, 68 | "eslintConfig": { 69 | "root": true, 70 | "env": { 71 | "node": true 72 | }, 73 | "extends": [ 74 | "plugin:vue/essential", 75 | "eslint:recommended" 76 | ], 77 | "parserOptions": { 78 | "parser": "babel-eslint" 79 | }, 80 | "rules": { 81 | "vue/multi-word-component-names": "off", 82 | "vue/no-mutating-props": "off" 83 | } 84 | }, 85 | "browserslist": [ 86 | "> 1%", 87 | "last 2 versions", 88 | "not dead" 89 | ] 90 | } 91 | -------------------------------------------------------------------------------- /electron_app/public/favicon.ico: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/divamgupta/diffusionbee-stable-diffusion-ui/daf82f80e37b5a8c3e7b6caccedae09a3bf16dc5/electron_app/public/favicon.ico -------------------------------------------------------------------------------- /electron_app/public/index.html: -------------------------------------------------------------------------------- 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 | <%= htmlWebpackPlugin.options.title %> 9 | 10 | 11 | 14 |
15 | 16 | 17 | 18 | -------------------------------------------------------------------------------- /electron_app/src/assets/blank_project.svg: -------------------------------------------------------------------------------- 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 | 9 | 10 | 11 | 12 | 13 | 14 | 15 | 16 | 17 | 18 | 19 | 20 | 21 | 22 | 23 | 24 | 25 | 26 | 27 | 28 | 29 | 30 | 31 | 32 | 33 | 34 | 35 | 36 | 37 | 38 | 39 | -------------------------------------------------------------------------------- /electron_app/src/assets/imgs/blank_illus4.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/divamgupta/diffusionbee-stable-diffusion-ui/daf82f80e37b5a8c3e7b6caccedae09a3bf16dc5/electron_app/src/assets/imgs/blank_illus4.png -------------------------------------------------------------------------------- /electron_app/src/assets/imgs/blank_illus4_dark.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/divamgupta/diffusionbee-stable-diffusion-ui/daf82f80e37b5a8c3e7b6caccedae09a3bf16dc5/electron_app/src/assets/imgs/blank_illus4_dark.png -------------------------------------------------------------------------------- /electron_app/src/assets/imgs/page_icon_imgs/default.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/divamgupta/diffusionbee-stable-diffusion-ui/daf82f80e37b5a8c3e7b6caccedae09a3bf16dc5/electron_app/src/assets/imgs/page_icon_imgs/default.png -------------------------------------------------------------------------------- /electron_app/src/assets/imgs/page_icon_imgs/default1.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/divamgupta/diffusionbee-stable-diffusion-ui/daf82f80e37b5a8c3e7b6caccedae09a3bf16dc5/electron_app/src/assets/imgs/page_icon_imgs/default1.png 
-------------------------------------------------------------------------------- /electron_app/src/assets/imgs/page_icon_imgs/deforum.gif: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/divamgupta/diffusionbee-stable-diffusion-ui/daf82f80e37b5a8c3e7b6caccedae09a3bf16dc5/electron_app/src/assets/imgs/page_icon_imgs/deforum.gif -------------------------------------------------------------------------------- /electron_app/src/assets/imgs/page_icon_imgs/history.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/divamgupta/diffusionbee-stable-diffusion-ui/daf82f80e37b5a8c3e7b6caccedae09a3bf16dc5/electron_app/src/assets/imgs/page_icon_imgs/history.png -------------------------------------------------------------------------------- /electron_app/src/assets/imgs/page_icon_imgs/img2img.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/divamgupta/diffusionbee-stable-diffusion-ui/daf82f80e37b5a8c3e7b6caccedae09a3bf16dc5/electron_app/src/assets/imgs/page_icon_imgs/img2img.png -------------------------------------------------------------------------------- /electron_app/src/assets/imgs/page_icon_imgs/inpainting.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/divamgupta/diffusionbee-stable-diffusion-ui/daf82f80e37b5a8c3e7b6caccedae09a3bf16dc5/electron_app/src/assets/imgs/page_icon_imgs/inpainting.png -------------------------------------------------------------------------------- /electron_app/src/assets/imgs/page_icon_imgs/inpainting_1.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/divamgupta/diffusionbee-stable-diffusion-ui/daf82f80e37b5a8c3e7b6caccedae09a3bf16dc5/electron_app/src/assets/imgs/page_icon_imgs/inpainting_1.png 
-------------------------------------------------------------------------------- /electron_app/src/assets/imgs/page_icon_imgs/interpolate.gif: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/divamgupta/diffusionbee-stable-diffusion-ui/daf82f80e37b5a8c3e7b6caccedae09a3bf16dc5/electron_app/src/assets/imgs/page_icon_imgs/interpolate.gif -------------------------------------------------------------------------------- /electron_app/src/assets/imgs/page_icon_imgs/models.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/divamgupta/diffusionbee-stable-diffusion-ui/daf82f80e37b5a8c3e7b6caccedae09a3bf16dc5/electron_app/src/assets/imgs/page_icon_imgs/models.png -------------------------------------------------------------------------------- /electron_app/src/assets/imgs/page_icon_imgs/models0.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/divamgupta/diffusionbee-stable-diffusion-ui/daf82f80e37b5a8c3e7b6caccedae09a3bf16dc5/electron_app/src/assets/imgs/page_icon_imgs/models0.png -------------------------------------------------------------------------------- /electron_app/src/assets/imgs/page_icon_imgs/outpainting.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/divamgupta/diffusionbee-stable-diffusion-ui/daf82f80e37b5a8c3e7b6caccedae09a3bf16dc5/electron_app/src/assets/imgs/page_icon_imgs/outpainting.png -------------------------------------------------------------------------------- /electron_app/src/assets/imgs/page_icon_imgs/settings.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/divamgupta/diffusionbee-stable-diffusion-ui/daf82f80e37b5a8c3e7b6caccedae09a3bf16dc5/electron_app/src/assets/imgs/page_icon_imgs/settings.png 
-------------------------------------------------------------------------------- /electron_app/src/assets/imgs/page_icon_imgs/training.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/divamgupta/diffusionbee-stable-diffusion-ui/daf82f80e37b5a8c3e7b6caccedae09a3bf16dc5/electron_app/src/assets/imgs/page_icon_imgs/training.png -------------------------------------------------------------------------------- /electron_app/src/assets/imgs/page_icon_imgs/txt2img.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/divamgupta/diffusionbee-stable-diffusion-ui/daf82f80e37b5a8c3e7b6caccedae09a3bf16dc5/electron_app/src/assets/imgs/page_icon_imgs/txt2img.png -------------------------------------------------------------------------------- /electron_app/src/assets/imgs/page_icon_imgs/upscale.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/divamgupta/diffusionbee-stable-diffusion-ui/daf82f80e37b5a8c3e7b6caccedae09a3bf16dc5/electron_app/src/assets/imgs/page_icon_imgs/upscale.png -------------------------------------------------------------------------------- /electron_app/src/assets/logo_icon_raw.svg: -------------------------------------------------------------------------------- 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 | 9 | 10 | 11 | 12 | 13 | 14 | 15 | 16 | 17 | 18 | 19 | 20 | 21 | 22 | 23 | 24 | 25 | 26 | 27 | 28 | 29 | 30 | 31 | 32 | 33 | 34 | 35 | -------------------------------------------------------------------------------- /electron_app/src/assets/logo_splash.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/divamgupta/diffusionbee-stable-diffusion-ui/daf82f80e37b5a8c3e7b6caccedae09a3bf16dc5/electron_app/src/assets/logo_splash.png 
-------------------------------------------------------------------------------- /electron_app/src/assets/logo_splash_dark.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/divamgupta/diffusionbee-stable-diffusion-ui/daf82f80e37b5a8c3e7b6caccedae09a3bf16dc5/electron_app/src/assets/logo_splash_dark.png -------------------------------------------------------------------------------- /electron_app/src/assets/notification.mp3: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/divamgupta/diffusionbee-stable-diffusion-ui/daf82f80e37b5a8c3e7b6caccedae09a3bf16dc5/electron_app/src/assets/notification.mp3 -------------------------------------------------------------------------------- /electron_app/src/assets/nsfw.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/divamgupta/diffusionbee-stable-diffusion-ui/daf82f80e37b5a8c3e7b6caccedae09a3bf16dc5/electron_app/src/assets/nsfw.png -------------------------------------------------------------------------------- /electron_app/src/assets/sample.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/divamgupta/diffusionbee-stable-diffusion-ui/daf82f80e37b5a8c3e7b6caccedae09a3bf16dc5/electron_app/src/assets/sample.png -------------------------------------------------------------------------------- /electron_app/src/bridge.js: -------------------------------------------------------------------------------- 1 | import { ipcMain, dialog, app } from 'electron' 2 | 3 | var win; 4 | var python; 5 | 6 | var py_buffer = ""; 7 | var is_app_closing = false; 8 | 9 | var last_few_err = "" 10 | 11 | let RESTART_BACKEND_ON_CLOSE = false 12 | 13 | const path = require('path'); 14 | 15 | function start_bridge() { 16 | 17 | console.log("starting bridge") 18 | const fs = require('fs') 
19 | 20 | let script_path = process.env.PY_SCRIPT || "../backends/stable_diffusion/diffusionbee_backend.py"; 21 | let bin_path = process.env.BIN_PATH; 22 | if(bin_path && (fs.existsSync(script_path))){ 23 | python = require('child_process').spawn( bin_path ); 24 | } 25 | else if (fs.existsSync(script_path)) { 26 | python = require('child_process').spawn('python', [script_path]); 27 | } 28 | else{ 29 | const path = require('path'); 30 | let backend_path = path.join(path.dirname(__dirname), 'core' , 'diffusionbee_backend' ); 31 | python = require('child_process').spawn( backend_path ); 32 | } 33 | 34 | 35 | python.stdin.setEncoding('utf-8'); 36 | 37 | python.stdout.on('data', function(data) { 38 | console.log("Python response: ", data.toString('utf8')); 39 | 40 | 41 | if(! data.toString().includes("sdbk ")){ 42 | if(win && !is_app_closing ) 43 | win.webContents.send('to_renderer', 'adlg ' + data.toString('utf8')); 44 | } 45 | 46 | 47 | 48 | if (win) { 49 | 50 | py_buffer += data.toString('utf8'); 51 | 52 | let splitted = py_buffer.split("\n") 53 | 54 | if( splitted.length > 1 ){ 55 | for (var i = 0; i < splitted.length -1 ; i++) { 56 | if (splitted[i].length > 0) 57 | if(win && !is_app_closing ) 58 | win.webContents.send('to_renderer', 'py2b ' + splitted[i]); 59 | } 60 | } 61 | 62 | py_buffer = splitted[ splitted.length - 1 ]; 63 | 64 | } else { 65 | console.log("window not binded yet, got from py : " + data.toString('utf8')) 66 | } 67 | 68 | }); 69 | 70 | python.stderr.on('data', (data) => { 71 | console.error(`stderr: ${data}`); 72 | last_few_err = last_few_err + data.toString(); 73 | last_few_err = last_few_err.slice(-300); 74 | if(win && !is_app_closing ) 75 | win.webContents.send('to_renderer', 'adlg ' + data.toString('utf8') ); 76 | }); 77 | 78 | python.on('close', (code) => { 79 | // if( code != 0 ) 80 | // { 81 | // dialog.showMessageBox("Backend quit unexpectedly") 82 | // } 83 | 84 | if(is_app_closing){ 85 | if (win){ 86 | app.exit(1); 87 | } 88 | return; 
89 | } 90 | 91 | 92 | 93 | 94 | if(RESTART_BACKEND_ON_CLOSE){ 95 | // dialog.showMessageBox( win , { message: "Error in backend : " + last_few_err }); // this is non blocking 96 | // if(!(last_few_err.includes("leaked semaphore objects to clean up at shutdown"))){ 97 | // // this leaked semaphore issue just happens sometimes. so for now lets just silently restart 98 | // dialog.showMessageBox( { message: "Error in backend : " + last_few_err }); 99 | // } 100 | dialog.showMessageBox( { message: "Error in backend : " + last_few_err }); 101 | return start_bridge() 102 | } 103 | else{ 104 | 105 | dialog.showMessageBox({ message: "Backend quit unexpectedly. " + last_few_err }); 106 | 107 | if (win) 108 | { 109 | is_app_closing = true; 110 | app.exit(1); 111 | } 112 | } 113 | 114 | 115 | 116 | }); 117 | 118 | } 119 | 120 | 121 | ipcMain.on('to_python_sync', (event, arg) => { 122 | if (python) { 123 | event.returnValue = "ok"; 124 | // console("sending to py from main " + arg ) 125 | python.stdin.write("b2py " + arg.toString() + "\n") 126 | 127 | } else { 128 | console.log("Python not binded yet!"); 129 | event.returnValue = "not_ok"; 130 | } 131 | }) 132 | 133 | 134 | ipcMain.on('to_python_async', (event, arg) => { 135 | if (python) { 136 | python.stdin.write("b2py " + arg.toString() + "\n") 137 | } 138 | }) 139 | 140 | 141 | 142 | 143 | 144 | 145 | 146 | app.on('window-all-closed', () => { 147 | if(python){ 148 | is_app_closing = true; 149 | python.kill(); 150 | } 151 | 152 | }) 153 | 154 | 155 | 156 | function bind_window_bridge(w) { 157 | console.log("browser object binded") 158 | win = w; 159 | } 160 | 161 | 162 | export { start_bridge, bind_window_bridge } -------------------------------------------------------------------------------- /electron_app/src/components/AppletPage.vue: -------------------------------------------------------------------------------- 1 | 47 | 123 | 125 | -------------------------------------------------------------------------------- 
/electron_app/src/components/DownloadButton.vue: -------------------------------------------------------------------------------- 1 | 24 | 68 | 70 | -------------------------------------------------------------------------------- /electron_app/src/components/_REM_ImageItem.vue: -------------------------------------------------------------------------------- 1 | 27 | 105 | 107 | -------------------------------------------------------------------------------- /electron_app/src/components/image_menu_functions.js: -------------------------------------------------------------------------------- 1 | 2 | let image_manu_functions = {} 3 | 4 | image_manu_functions['save_image'] = function (app, image_item_data){ 5 | app; 6 | image_item_data; 7 | 8 | if(!image_item_data.image_url) 9 | return; 10 | let im_path = image_item_data.image_url.split("?")[0]; 11 | 12 | let seed = (image_item_data.params || {}).seed || "" 13 | 14 | let suggested_fname = (image_item_data.description || "Image").substring(0, 100) + "_" + seed 15 | let out_path = window.ipcRenderer.sendSync('save_dialog', suggested_fname); 16 | if(!out_path) 17 | return 18 | let org_path = im_path.replaceAll("file://" , "") 19 | window.ipcRenderer.sendSync('save_file', org_path+"||" +out_path); 20 | 21 | } 22 | image_manu_functions['save_image'] .text = "Save Image" 23 | 24 | image_manu_functions['send_img_2_img'] = function (app, image_item_data){ 25 | app.functions.send_to_img2img( image_item_data.image_url ) 26 | 27 | } 28 | image_manu_functions['send_img_2_img'].text = "Send to Img2Img" 29 | 30 | image_manu_functions['send_outpaint'] = function (app, image_item_data){ 31 | 32 | app.functions.send_to_outpaint( image_item_data.image_url ) 33 | 34 | } 35 | image_manu_functions['send_outpaint'].text = "Send to AI Canvas" 36 | 37 | 38 | 39 | image_manu_functions['send_inpaint'] = function (app, image_item_data){ 40 | 41 | app.functions.send_to_inpaint( image_item_data.image_url ) 42 | 43 | } 44 | 
image_manu_functions['send_inpaint'].text = "Send to Inpainting" 45 | 46 | 47 | 48 | image_manu_functions['send_img_2_img_with_params'] = function (app, image_item_data){ 49 | 50 | let image_params = JSON.parse(JSON.stringify(image_item_data.params)) 51 | app.functions.send_to_img2img( image_item_data.image_url , image_params ) 52 | 53 | } 54 | image_manu_functions['send_img_2_img_with_params'].text = "Send to Img2Img with params" 55 | 56 | 57 | 58 | image_manu_functions['use_params_current_page'] = function (app, image_item_data){ 59 | 60 | let image_params = JSON.parse(JSON.stringify(image_item_data.params)) 61 | 62 | let router = app.$refs.router 63 | let cur_page_id = router.current_open_page_id 64 | 65 | router.$refs[ cur_page_id ][0].$refs.sd_applet.load_options(image_params) 66 | 67 | } 68 | image_manu_functions['use_params_current_page'].text = "Use parameters" 69 | 70 | 71 | 72 | image_manu_functions['copy_params'] = function (app, image_item_data){ 73 | app; 74 | image_item_data; 75 | const remove_keys = ['generated_img', 'done_percentage', 'prompt_tokens' , 76 | 'job_state', 'job_id', "raw_form_options" , 'negative_prompt_tokens' ,"input_image_with_mask" , "model_tdict_path" , 77 | "controlnet_tdict_path" , "controlnet_inp_img_preprocesser_model_path" , "aux_output_img" ] 78 | let image_params = JSON.parse(JSON.stringify(image_item_data.params)) 79 | for(let k of remove_keys) 80 | image_params[k] = undefined; 81 | window.ipcRenderer.sendSync('copy_to_clipboard' , JSON.stringify(image_params , null, 4)) 82 | 83 | } 84 | image_manu_functions['copy_params'].text = "Copy all parameters" 85 | 86 | 87 | 88 | image_manu_functions['send_to_postprocess'] = function (app, image_item_data){ 89 | 90 | app.functions.send_to_postprocess( image_item_data.image_url ) 91 | 92 | } 93 | image_manu_functions['send_to_postprocess'].text = "Send to Upscaler" 94 | 95 | image_manu_functions['generate_similar_images'] = function (app, image_item_data){ 96 | 97 | if( 
(!image_item_data.params) || image_item_data.params.applet_name != "txt2img"){ 98 | app.show_toast("Only available for images generated using TextToImage") 99 | return 100 | } 101 | 102 | let router = app.$refs.router 103 | 104 | if(router.$refs[ "Txt2Img" ][0].$refs.sd_applet.generate_similar_images( image_item_data.params)) 105 | app.functions.switch_page("Txt2Img") 106 | // app.functions.generate_similar_images( image_item_data.params ) 107 | 108 | } 109 | image_manu_functions['generate_similar_images'].text = "Generate similar images" 110 | 111 | 112 | 113 | export {image_manu_functions} -------------------------------------------------------------------------------- /electron_app/src/components_bare/CircleProgress.vue: -------------------------------------------------------------------------------- 1 | 15 | 51 | 53 | -------------------------------------------------------------------------------- /electron_app/src/components_bare/GalleryImage.vue: -------------------------------------------------------------------------------- 1 | 54 | 87 | 94 | 95 | -------------------------------------------------------------------------------- /electron_app/src/components_bare/LoaderModal.vue: -------------------------------------------------------------------------------- 1 | @import '../assets/css/theme.css'; 2 | 23 | 42 | 54 | 73 | -------------------------------------------------------------------------------- /electron_app/src/components_bare/SplashScreen.vue: -------------------------------------------------------------------------------- 1 | @import '../assets/css/theme.css'; 2 | 9 | 27 | 40 | -------------------------------------------------------------------------------- /electron_app/src/components_bare/TwoColAppletLayout.vue: -------------------------------------------------------------------------------- 1 | 21 | 42 | 44 | -------------------------------------------------------------------------------- /electron_app/src/components_bare/empty_component.vue: 
-------------------------------------------------------------------------------- 1 | 3 | 23 | 25 | -------------------------------------------------------------------------------- /electron_app/src/components_bare/inputform/Accordian.vue: -------------------------------------------------------------------------------- 1 | 24 | 48 | 50 | -------------------------------------------------------------------------------- /electron_app/src/components_bare/inputform/BetterSelector.vue: -------------------------------------------------------------------------------- 1 | 22 | 84 | 86 | -------------------------------------------------------------------------------- /electron_app/src/components_bare/inputform/Checkbox.vue: -------------------------------------------------------------------------------- 1 | 13 | 91 | 93 | -------------------------------------------------------------------------------- /electron_app/src/components_bare/inputform/Dropdown.vue: -------------------------------------------------------------------------------- 1 | 13 | 69 | 71 | -------------------------------------------------------------------------------- /electron_app/src/components_bare/inputform/FilePathTextBox.vue: -------------------------------------------------------------------------------- 1 | 16 | 53 | 55 | -------------------------------------------------------------------------------- /electron_app/src/components_bare/inputform/FormInputMixin.vue: -------------------------------------------------------------------------------- 1 | -------------------------------------------------------------------------------- /electron_app/src/components_bare/inputform/ImageInput.vue: -------------------------------------------------------------------------------- 1 | 28 | 122 | 124 | -------------------------------------------------------------------------------- /electron_app/src/components_bare/inputform/InputWithDesc.vue: 
-------------------------------------------------------------------------------- 1 | 19 | 43 | 45 | -------------------------------------------------------------------------------- /electron_app/src/components_bare/inputform/OutputImage.vue: -------------------------------------------------------------------------------- 1 | 13 | 58 | 60 | -------------------------------------------------------------------------------- /electron_app/src/components_bare/inputform/OutputText.vue: -------------------------------------------------------------------------------- 1 | 11 | 38 | 40 | -------------------------------------------------------------------------------- /electron_app/src/components_bare/inputform/ResolveInputComponent.vue: -------------------------------------------------------------------------------- 1 | 8 | 46 | 146 | -------------------------------------------------------------------------------- /electron_app/src/components_bare/inputform/Slider.vue: -------------------------------------------------------------------------------- 1 | 23 | 93 | 95 | -------------------------------------------------------------------------------- /electron_app/src/components_bare/inputform/Textarea.vue: -------------------------------------------------------------------------------- 1 | 16 | 45 | 47 | -------------------------------------------------------------------------------- /electron_app/src/components_bare/inputform/Textbox.vue: -------------------------------------------------------------------------------- 1 | 13 | 43 | 45 | -------------------------------------------------------------------------------- /electron_app/src/fake_backend.py: -------------------------------------------------------------------------------- 1 | import time 2 | import sys 3 | import json 4 | import copy 5 | import random 6 | import os 7 | import cv2 8 | 9 | class Unbuffered(object): 10 | def __init__(self, stream): 11 | self.stream = stream 12 | 13 | def write(self, data): 14 | 
self.stream.write(data) 15 | self.stream.flush() 16 | 17 | def writelines(self, datas): 18 | self.stream.writelines(datas) 19 | self.stream.flush() 20 | 21 | def __getattr__(self, attr): 22 | return getattr(self.stream, attr) 23 | 24 | 25 | sys.stdout = Unbuffered(sys.stdout) 26 | 27 | time.sleep(1) 28 | 29 | 30 | if len(sys.argv) > 1 and sys.argv[1] == "convert_model": 31 | time.sleep(4) 32 | exit() 33 | 34 | 35 | 36 | sample_path = os.path.join( os.path.dirname(os.path.abspath(__file__)), "assets", "sample.png" ) 37 | 38 | print("sdbk mltl downloading model") 39 | for i in range(100): 40 | time.sleep(0.02) 41 | 42 | print("sdbk mlpr %d"%i ) # model loading percentage 43 | print("sdbk mlms done %s of 100.0"%i) 44 | 45 | print("sdbk mlpr %d"%(-1) ) 46 | time.sleep(2) 47 | 48 | print("sdbk mdld") # model loaded 49 | 50 | 51 | def process_opt(opts): 52 | 53 | 54 | if 'num_imgs' not in opts: 55 | opts['num_imgs'] = 1 56 | 57 | for nn in range(opts['num_imgs']): 58 | 59 | 60 | if opts['seed'] < 20: 61 | print("sdbk errr just a random error lol") 62 | return 63 | 64 | print("sdbk dnpr "+str(-1) ) 65 | time.sleep(0.8) 66 | for i in range(0,100,5): 67 | print("sdbk dnpr "+str(i) ) # done percentage 68 | time.sleep(0.1) 69 | # if opts['seed'] > 2: 70 | # time.sleep(100.1) 71 | print("sdbk dnpr "+str(-1) ) 72 | time.sleep(0.8) 73 | impath = sample_path 74 | im = cv2.imread(impath) 75 | im = cv2.resize(im , ( opts['img_width'] , opts['img_height'] ) ) 76 | new_p = "/tmp/%d_%d.png"%( opts['img_width'] , opts['img_height']) 77 | cv2.imwrite( new_p , im ) 78 | 79 | # if 'input_image' in opts: 80 | # impath = opts['input_image'] 81 | ret_dict = {"generated_img_path" : (new_p) } 82 | 83 | print("sdbk nwim %s"%(json.dumps(ret_dict)) ) # new image generated 84 | 85 | while True: 86 | print("sdbk inrd") # input ready 87 | 88 | inp_str = input() 89 | print("got " , inp_str ) 90 | if inp_str.strip() == "": 91 | continue 92 | else: 93 | print("sbdk errr The string is blank") 94 | 95 | 
if not "b2py t2im" in inp_str: 96 | continue 97 | inp_str = inp_str.replace("b2py t2im" , "").strip() 98 | try: 99 | d = json.loads(inp_str) 100 | 101 | print("sdbk inwk") # working on the input 102 | process_opt(d) 103 | except Exception as e: 104 | print("sbdk errr %s"%(str(e))) 105 | -------------------------------------------------------------------------------- /electron_app/src/forms/sample_ext_applet.json: -------------------------------------------------------------------------------- 1 | { 2 | "id" : "sample" , 3 | "title" : "hiofh ", 4 | "icon" : "star" , 5 | "home_category" : "misc", 6 | "inputs" : [ 7 | { 8 | "id" : "prompt", 9 | "component": "Textarea", 10 | "placeholder" : "custom applet " 11 | }, 12 | { 13 | "id": "7657836", 14 | "component": "InputWithDesc", 15 | "title": "Seed", 16 | "description": "Starting point for iterations (any random number will do; DB will pick one if left blank).", 17 | "children": [ 18 | { 19 | "id": "seed", 20 | "component": "Textbox", 21 | "icon": "seed", 22 | "placeholder" : "-1", 23 | "type" : "number", 24 | "is_persistant" : true 25 | } 26 | ] 27 | } 28 | ] , 29 | "outputs": [ 30 | 31 | ] 32 | 33 | } -------------------------------------------------------------------------------- /electron_app/src/init_vue_libs.js: -------------------------------------------------------------------------------- 1 | import Vue from 'vue' 2 | 3 | 4 | 5 | 6 | // add the fond awesome stuff 7 | import { library } from '@fortawesome/fontawesome-svg-core' 8 | import { faChevronRight, faStopCircle, faPlayCircle , faPlus , faChevronLeft , 9 | faFileImage , faFileAudio , faFile , faBars , faAngleDown , faTrash, faChevronDown , 10 | faGlobe, faFolder, faCamera, faKeyboard, 11 | faMusic , faMicrophone , faTimes , faCheck , faHandPaper , faExpandArrowsAlt, faEraser , faUndo , faRedo, faImage, faMicrochip , faCube , 12 | faMagic, faSave, faHistory, faCubes, faImages, faHome , faPaintBrush, faCircle , faMask, faTools, faThList, faEllipsisV, 
faInbox} from '@fortawesome/free-solid-svg-icons' 13 | import { FontAwesomeIcon } from '@fortawesome/vue-fontawesome' 14 | 15 | library.add(faBars) 16 | 17 | library.add(faChevronRight) 18 | library.add(faChevronLeft) 19 | library.add(faGlobe) 20 | library.add(faFolder) 21 | library.add(faStopCircle) 22 | library.add(faPlayCircle) 23 | library.add(faFileImage) 24 | library.add(faFileAudio) 25 | library.add(faKeyboard) 26 | library.add(faMicrophone) 27 | library.add(faPlus) 28 | library.add(faFile) 29 | library.add(faExpandArrowsAlt) 30 | library.add(faEraser) 31 | library.add(faHandPaper) 32 | library.add(faImage) 33 | library.add(faMicrochip) 34 | library.add(faCube) 35 | library.add(faMagic) 36 | library.add(faSave) 37 | library.add(faUndo) 38 | library.add(faRedo) 39 | 40 | library.add(faAngleDown) 41 | library.add(faTrash) 42 | library.add(faChevronDown) 43 | library.add(faMusic) 44 | library.add(faCamera) 45 | library.add(faTimes) 46 | library.add(faCheck) 47 | library.add(faHistory) 48 | library.add(faCubes) 49 | library.add(faImages) 50 | library.add(faHome) 51 | library.add(faPaintBrush) 52 | library.add(faCircle) 53 | library.add(faMask) 54 | library.add(faTools) 55 | library.add(faThList) 56 | library.add(faEllipsisV) 57 | library.add(faInbox) 58 | 59 | Vue.component('font-awesome-icon', FontAwesomeIcon) 60 | Vue.config.productionTip = false 61 | 62 | 63 | // vue bootstrap 64 | import { BootstrapVue } from 'bootstrap-vue' 65 | 66 | // Import Bootstrap an BootstrapVue CSS files (order is important) 67 | import 'bootstrap/dist/css/bootstrap.css' 68 | import 'bootstrap-vue/dist/bootstrap-vue.css' 69 | 70 | Vue.use(BootstrapVue) 71 | 72 | 73 | 74 | // vue outside 75 | import vClickOutside from 'v-click-outside' 76 | Vue.use(vClickOutside) 77 | 78 | 79 | import VueToast from 'vue-toast-notification'; 80 | import 'vue-toast-notification/dist/theme-sugar.css'; 81 | 82 | Vue.use(VueToast); 83 | 84 | 85 | 86 | export {} 
// Template for the application's native menu bar; consumed by
// Menu.buildFromTemplate() in the Electron main process.
//
// Fix: `isMac` was hard-coded to `true`, which gave Windows/Linux builds a
// macOS-style menu (app submenu, mac-only edit roles, `close` instead of
// `quit`). Detect the real platform instead; on macOS the resulting
// template is byte-for-byte what it was before.
const isMac = process.platform === 'darwin';

const menu_template = [
  // { role: 'appMenu' } — macOS-only application menu (About / Services / Quit)
  ...(isMac ? [{
    label: require('../package.json').name,
    submenu: [
      { role: 'about' },
      { type: 'separator' },
      { role: 'services' },
      { type: 'separator' },
      { role: 'hide' },
      { role: 'hideOthers' },
      { role: 'unhide' },
      { type: 'separator' },
      { role: 'quit' }
    ]
  }] : []),
  // { role: 'fileMenu' }
  {
    label: 'File',
    submenu: [
      // mac convention: Cmd-W closes the window; elsewhere File > Quit
      isMac ? { role: 'close' } : { role: 'quit' }
    ]
  },
  // { role: 'editMenu' }
  {
    label: 'Edit',
    submenu: [
      { role: 'undo' },
      { role: 'redo' },
      { type: 'separator' },
      { role: 'cut' },
      { role: 'copy' },
      { role: 'paste' },
      ...(isMac ? [
        { role: 'pasteAndMatchStyle' },
        { role: 'delete' },
        { role: 'selectAll' },
        { type: 'separator' },
        {
          label: 'Speech',
          submenu: [
            { role: 'startSpeaking' },
            { role: 'stopSpeaking' }
          ]
        }
      ] : [
        { role: 'delete' },
        { type: 'separator' },
        { role: 'selectAll' }
      ])
    ]
  },
  {
    role: 'help',
    submenu: [
      {
        label: 'Learn More',
        click: async () => {
          const { shell } = require('electron')
          // NOTE(review): '__domain__' looks like a build-time placeholder —
          // confirm it is substituted during packaging.
          await shell.openExternal('__domain__')
        }
      }
    ]
  }
]

export { menu_template }
-------------------------------------------------------------------------------- 1 | 31 | 77 | 79 | -------------------------------------------------------------------------------- /electron_app/src/pages/Img2Img.vue: -------------------------------------------------------------------------------- 1 | 8 | 78 | 80 | -------------------------------------------------------------------------------- /electron_app/src/pages/Logs.vue: -------------------------------------------------------------------------------- 1 | 10 | 36 | 38 | -------------------------------------------------------------------------------- /electron_app/src/pages/PostProcessImage.vue: -------------------------------------------------------------------------------- 1 | 23 | 143 | 145 | -------------------------------------------------------------------------------- /electron_app/src/pages/Settings.vue: -------------------------------------------------------------------------------- 1 | 26 | 52 | 54 | -------------------------------------------------------------------------------- /electron_app/src/pages/Training.vue: -------------------------------------------------------------------------------- 1 | 7 | 8 | 34 | 36 | -------------------------------------------------------------------------------- /electron_app/src/pages/Txt2Img.vue: -------------------------------------------------------------------------------- 1 | 4 | 42 | 44 | -------------------------------------------------------------------------------- /electron_app/src/preload.js: -------------------------------------------------------------------------------- 1 | // src/preload.js 2 | 3 | import { contextBridge, ipcRenderer } from 'electron' 4 | 5 | contextBridge.exposeInMainWorld('ipcRenderer', ipcRenderer) 6 | contextBridge.exposeInMainWorld('ipcRenderer_on', ipcRenderer.on) 7 | 8 | var bind_ipc_renderer_on_fn = undefined; 9 | var bind_ipc_download_on_fns = {} 10 | 11 | function bind_ipc_renderer_on(fn) { 12 | bind_ipc_renderer_on_fn = 
import Vue from 'vue'

import { native_alert } from "./native_functions_vue_bridge.js"

// The root Vue component that owns `app_state` and `stable_diffusion`.
// Set exactly once via bind_app_component().
var app_component_object;

// Protocol note: every message from python starts with a 4-character opcode
// ("utds", "sdbk", "alrt", "py2b", "adlg") followed by one separator
// character, so the payload always begins at index 5.

// Apply a state update pushed from python. `msg` has the form
// "<dot.separated.key.path>___U_P_D_A_T_E___<json value>"; the JSON value is
// written reactively into app_component_object.app_state at that key path.
function update_state(msg) {

    if (app_component_object) {
        let keys = msg.split("___U_P_D_A_T_E___")[0];
        let value = msg.split("___U_P_D_A_T_E___")[1];

        keys = keys.split('.');

        // Walk down the key path to the object that holds the final key.
        let object_to_update = app_component_object.app_state;

        for (let i = 0; i < keys.length - 1; i++) {
            object_to_update = object_to_update[keys[i]];
        }

        let final_key = keys[keys.length - 1];
        // Vue.set so keys added after mount stay reactive (Vue 2 requirement).
        Vue.set(object_to_update, final_key, JSON.parse(value))

    } else {
        alert("Err : app stage object not set yet.")
    }

}


// Called by the main Vue component to bind its app-state object to this
// bridge. (A stray no-op statement `app_component_object;` was removed.)
function bind_app_component(app_component) {
    app_component_object = app_component;
}

// Dispatch a decoded python message by its 4-char opcode.
// The opcodes are mutually exclusive, so a single else-if chain is
// equivalent to the original mixed if / else-if structure.
function on_msg_from_py(msg) {

    if (msg.substring(0, 4) == "utds") // update the app state
    {
        update_state(msg.substring(5));
    }
    else if (msg.substring(0, 4) == "sdbk") // stable-diffusion backend status
    {
        if (app_component_object)
            app_component_object.stable_diffusion.state_msg(msg.substring(5))
    } else if (msg.substring(0, 4) == "alrt") // just alert
    {
        native_alert(msg.substring(5));
    }

}


// Append one line to app_state.logs, capping the buffer at ~10 MB by
// keeping only the most recent ~5 MB when the limit is exceeded.
function add_log(msg) {
    if (app_component_object) {
        let out = app_component_object.app_state.logs + "\n" + msg
        if (out.length > 10000000) {
            out = out.slice(-1 * 5000000)
        }
        Vue.set(app_component_object.app_state, 'logs', out);
    }

}


// Entry point for raw messages arriving from the python process:
// "py2b" wraps a bridge message, "adlg" appends a log line.
function on_msg_recieve(msg) {

    if (msg.substring(0, 4) == "py2b") {
        on_msg_from_py(msg.substring(5))
    }
    else if (msg.substring(0, 4) == "adlg") {
        add_log(msg.substring(5))
    } else {
        alert("recieved unk message " + msg.toString())
    }

}

// Register with the preload-exposed IPC hook (see preload.js).
window.bind_ipc_renderer_on(on_msg_recieve)


// Send a message to the python process, blocking until acknowledged.
function send_to_py(msg) {
    window.ipcRenderer.sendSync('to_python_sync', msg)
}

// NOTE(review): unimplemented placeholder kept for interface compatibility.
function send_to_py_async() {


}


export { send_to_py, bind_app_component, send_to_py_async }
import Vue from 'vue';

// Renderer-side model of the python stable-diffusion backend's state,
// updated via 4-character status opcodes forwarded by py_vue_bridge.
class StableDiffusion {

    constructor() {
        // Vue.set keeps the flag reactive once this object is attached to a
        // Vue 2 component's state.
        Vue.set(this, 'is_backned_loaded', true);
        this.is_input_ready = false;
    }

    // Handle one status message from the backend.
    // "mdld" flips is_backned_loaded to false; everything else is ignored.
    state_msg(msg) {
        if (msg.substring(0, 4) != "mdld")
            return;

        Vue.set(this, 'is_backned_loaded', false);
        // NOTE(review): debug popup apparently left in — consider removing.
        alert(this.is_backned_loaded)
    }

}


export { StableDiffusion }
to_download.push( { 23 | "id": "just_control_v11p_sd15_inpaint_fp16", 24 | "filename": "just_control_v11p_sd15_inpaint_fp16.tdict", 25 | "md5": "08a0a43ff75a4a4941fbb885c50d7874", 26 | is_stock_model : true, 27 | "url": "https://huggingface.co/divamgupta/controlnet_tensorflow/resolve/main/just_control_v11p_sd15_inpaint_fp16.tdict", 28 | "title": "ControlNet Inpaint", 29 | "description": "ControlNet Inpaint model.", 30 | model_meta_data : {"type" : "controlnet_model" , "sd_type" : "SD_1x", "float_type" : "float16" } 31 | }) 32 | } 33 | } 34 | 35 | return to_download; 36 | } 37 | 38 | function prep_sd_optins(self , sd_options_object, mode , img_mask_url){ 39 | if(!sd_options_object.num_imgs) 40 | sd_options_object.num_imgs = 1 41 | 42 | if( mode == "Text To Image") { 43 | sd_options_object.sd_mode_override = "txt2img" 44 | } else { 45 | 46 | if(mode == "Generative Fill"){ 47 | 48 | let is_control_inpaint = false 49 | 50 | 51 | if((self.app.assets_manager.all_avail_assets[self.sd_options.selected_sd_model].model_meta_data||{}).type == "sd_model" ){ 52 | is_control_inpaint = true 53 | 54 | } 55 | 56 | if(is_control_inpaint){ 57 | sd_options_object.do_masking_diffusion = true 58 | sd_options_object.controlnet_model ="Inpaint" 59 | sd_options_object.controlnet_inp_img_preprocesser="Inpaint" 60 | sd_options_object.controlnet_input_image_path="NULL" 61 | sd_options_object.is_control_net=true 62 | sd_options_object.controlnet_guess_mode=true 63 | sd_options_object.guidance_scale = ( (sd_options_object.guidance_scale || 7.5) - 3.5 ) 64 | if(sd_options_object.guidance_scale <= 1) 65 | sd_options_object.guidance_scale = 1 66 | sd_options_object.sd_mode_override = "txt2img" 67 | sd_options_object.controlnet_tdict_path= self.app.assets_manager.get_downloaded_asset_path("just_control_v11p_sd15_inpaint_fp16") 68 | 69 | } else { 70 | // SD1.5 inpaint 71 | sd_options_object.is_sd15_inpaint = true 72 | sd_options_object.sd_mode_override = "txt2img" 73 | } 74 | } else { 75 | // Image 
to Image case 76 | sd_options_object.infill_alpha = true 77 | } 78 | 79 | sd_options_object.get_mask_from_image_alpha = true; 80 | 81 | if(self.sd_options.inp_only_update_masked){ 82 | sd_options_object.blur_mask = true 83 | sd_options_object.do_masking_diffusion = true 84 | 85 | } 86 | sd_options_object.mask_image_path = img_mask_url 87 | } 88 | } 89 | 90 | export { prep_sd_optins , inpaint_assets } -------------------------------------------------------------------------------- /electron_app/vue.config.js: -------------------------------------------------------------------------------- 1 | try { 2 | var build_config = require('./build_config.json'); 3 | console.log(build_config + "\n\n\n\n\n") 4 | } catch (err) { 5 | var build_config = {} 6 | } 7 | 8 | 9 | module.exports = { 10 | 11 | pluginOptions: { 12 | electronBuilder: { 13 | preload: './src/preload.js', 14 | 15 | // Or, for multiple preload files: 16 | // preload: { preload: 'src/preload.js', otherPreload: 'src/preload2.js' } 17 | builderOptions: { 18 | appId: 'com.diffusionbee.diffusionbee', 19 | artifactName: "DiffusionBee"+(build_config.build_name||"")+"-${version}.${ext}", 20 | 21 | afterSign: "./afterSignHook.js", 22 | "extraResources": [{ 23 | "from": process.env.BACKEND_BUILD_PATH , 24 | "to": "core", 25 | "filter": [ 26 | "**/*" 27 | ] 28 | }], // access via path.join(path.dirname(__dirname), 'liner_core' ); 29 | 30 | "mac": { 31 | "icon" : "build/Icon-1024.png" , 32 | "hardenedRuntime": true, 33 | "entitlements": "build/entitlements.mac.plist", 34 | "entitlementsInherit": "build/entitlements.mac.plist", 35 | "minimumSystemVersion": build_config.min_os_version || "12.6.0", 36 | "extendInfo": { 37 | "LSMinimumSystemVersion": build_config.min_os_version || "12.6.0" 38 | } , 39 | 40 | "target": { 41 | "target": "dmg", 42 | "arch": [ 43 | process.env.BUILD_ARCH //'arm64' , 'x64' 44 | ] 45 | } 46 | }, 47 | 48 | "win": { 49 | "icon" : "build/Icon-1024.png" , 50 | "target": { 51 | "target": "NSIS", 52 | 
"arch": [ 53 | process.arch 54 | ] 55 | } 56 | } 57 | } 58 | 59 | 60 | } 61 | } 62 | } -------------------------------------------------------------------------------- /package-lock.json: -------------------------------------------------------------------------------- 1 | { 2 | "name": "diffusion_bee_public", 3 | "lockfileVersion": 2, 4 | "requires": true, 5 | "packages": {} 6 | } 7 | --------------------------------------------------------------------------------