├── .gitattributes ├── .gitignore ├── .python_dependencies └── .gitignore ├── .vs ├── ProjectSettings.json ├── VSWorkspaceState.json ├── dream_textures │ └── v16 │ │ └── .suo └── slnx.sqlite ├── CODE_OF_CONDUCT.md ├── CONTRIBUTING.md ├── LICENSE ├── README.md ├── __init__.py ├── absolute_path.py ├── builtin_presets ├── Debug.py ├── Final.py └── Preview.py ├── classes.py ├── docs ├── IMAGE_GENERATION.md ├── TEXTURE_PROJECTION.md └── assets │ ├── banner.png │ ├── history │ ├── history-export.png │ └── history-import.png │ ├── image_generation.png │ ├── image_generation │ └── opening-ui.png │ ├── inpaint_outpaint.png │ ├── readme-toggle-console.png │ ├── texture_projection.png │ └── texture_projection │ ├── edit_mode.png │ └── projection.gif ├── generator_process ├── __init__.py ├── actions │ ├── SuperResolution.py │ ├── async_pipeline.py │ ├── controlnet_depth_to_image.py │ ├── controlnet_depth_to_image_int8.py │ ├── depth_to_image.py │ ├── huggingface_hub.py │ ├── ocio_transform.py │ ├── p2i_blender.png │ ├── prompt_to_image.py │ ├── prompt_to_image_int8.py │ └── test1_blender.png ├── actor.py ├── block_in_use.py ├── directml_patches.py └── models │ ├── __init__.py │ ├── fix_it_error.py │ └── pipeline.py ├── install.bat ├── model_download.py ├── operators ├── dream_texture.py ├── notify_result.py ├── open_latest_version.py ├── project.py └── view_history.py ├── pil_to_image.py ├── prompt_engineering.py ├── property_groups ├── dream_prompt.py └── dream_prompt_validation.py ├── realtime_viewport.py ├── render_pass.py ├── requirements └── win-openvino.txt ├── scripts └── train_detect_seamless.py ├── security.md ├── ui ├── panels │ ├── dream_texture.py │ ├── history.py │ ├── render_properties.py │ └── upscaling.py ├── presets.py └── space_types.py └── version.py /.gitattributes: -------------------------------------------------------------------------------- 1 | /stable-diffusion-2-1-openvino-models/Stable_Diffusion_2_1_base/text_encoder.bin filter=lfs diff=lfs merge=lfs -text 2 | /stable-diffusion-2-1-openvino-models/Stable_Diffusion_2_1_base/unet.bin filter=lfs diff=lfs merge=lfs -text 3 | /stable-diffusion-2-1-openvino-models/Stable_Diffusion_2_1_base/vae_decoder.bin filter=lfs diff=lfs merge=lfs -text 4 | /stable-diffusion-2-1-openvino-models/Stable_Diffusion_2_1_depth/cache/16527851708327011881.blob filter=lfs diff=lfs merge=lfs -text 5 | /stable-diffusion-2-1-openvino-models/Stable_Diffusion_2_1_depth/cache/18030390726059041906.blob filter=lfs diff=lfs merge=lfs -text 6 | /stable-diffusion-2-1-openvino-models/Stable_Diffusion_2_1_depth/cache/4023803310294881527.blob filter=lfs diff=lfs merge=lfs -text 7 | /stable-diffusion-2-1-openvino-models/Stable_Diffusion_2_1_depth/cache/531081115737342272.blob filter=lfs diff=lfs merge=lfs -text 8 | /stable-diffusion-2-1-openvino-models/Stable_Diffusion_2_1_depth/text_encoder.bin filter=lfs diff=lfs merge=lfs -text 9 | /stable-diffusion-2-1-openvino-models/Stable_Diffusion_2_1_depth/unet.bin filter=lfs diff=lfs merge=lfs -text 10 | /stable-diffusion-2-1-openvino-models/Stable_Diffusion_2_1_depth/vae_decoder.bin filter=lfs diff=lfs merge=lfs -text 11 | -------------------------------------------------------------------------------- /.gitignore: -------------------------------------------------------------------------------- 1 | .DS_Store 2 | 3 | # Byte-compiled / optimized / DLL files 4 | __pycache__/ 5 | *.py[cod] 6 | *$py.class 7 | 8 | # C extensions 9 | *.so 10 | 11 | # Distribution / packaging 12 | .Python 13 | build/ 14 | develop-eggs/ 15 | dist/ 
16 | downloads/ 17 | eggs/ 18 | .eggs/ 19 | lib/ 20 | lib64/ 21 | parts/ 22 | sdist/ 23 | var/ 24 | wheels/ 25 | share/python-wheels/ 26 | *.egg-info/ 27 | .installed.cfg 28 | *.egg 29 | MANIFEST 30 | 31 | # PyInstaller 32 | # Usually these files are written by a python script from a template 33 | # before PyInstaller builds the exe, so as to inject date/other infos into it. 34 | *.manifest 35 | *.spec 36 | 37 | # Installer logs 38 | pip-log.txt 39 | pip-delete-this-directory.txt 40 | 41 | # Unit test / coverage reports 42 | htmlcov/ 43 | .tox/ 44 | .nox/ 45 | .coverage 46 | .coverage.* 47 | .cache 48 | nosetests.xml 49 | coverage.xml 50 | *.cover 51 | *.py,cover 52 | .hypothesis/ 53 | .pytest_cache/ 54 | cover/ 55 | 56 | # Translations 57 | *.mo 58 | *.pot 59 | 60 | # Django stuff: 61 | *.log 62 | local_settings.py 63 | db.sqlite3 64 | db.sqlite3-journal 65 | 66 | # Flask stuff: 67 | instance/ 68 | .webassets-cache 69 | 70 | # Scrapy stuff: 71 | .scrapy 72 | 73 | # Sphinx documentation 74 | docs/_build/ 75 | 76 | # PyBuilder 77 | .pybuilder/ 78 | target/ 79 | 80 | # Jupyter Notebook 81 | .ipynb_checkpoints 82 | 83 | # IPython 84 | profile_default/ 85 | ipython_config.py 86 | 87 | # pyenv 88 | # For a library or package, you might want to ignore these files since the code is 89 | # intended to run in multiple environments; otherwise, check them in: 90 | # .python-version 91 | 92 | # pipenv 93 | # According to pypa/pipenv#598, it is recommended to include Pipfile.lock in version control. 94 | # However, in case of collaboration, if having platform-specific dependencies or dependencies 95 | # having no cross-platform support, pipenv may install dependencies that don't work, or not 96 | # install all needed dependencies. 97 | #Pipfile.lock 98 | 99 | # poetry 100 | # Similar to Pipfile.lock, it is generally recommended to include poetry.lock in version control. 101 | # This is especially recommended for binary packages to ensure reproducibility, and is more 102 | # commonly ignored for libraries. 103 | # https://python-poetry.org/docs/basic-usage/#commit-your-poetrylock-file-to-version-control 104 | #poetry.lock 105 | 106 | # pdm 107 | # Similar to Pipfile.lock, it is generally recommended to include pdm.lock in version control. 108 | #pdm.lock 109 | # pdm stores project-wide configurations in .pdm.toml, but it is recommended to not include it 110 | # in version control. 111 | # https://pdm.fming.dev/#use-with-ide 112 | .pdm.toml 113 | 114 | # PEP 582; used by e.g. github.com/David-OConnor/pyflow and github.com/pdm-project/pdm 115 | __pypackages__/ 116 | 117 | # Celery stuff 118 | celerybeat-schedule 119 | celerybeat.pid 120 | 121 | # SageMath parsed files 122 | *.sage.py 123 | 124 | # Environments 125 | .env 126 | .venv 127 | env/ 128 | venv/ 129 | ENV/ 130 | env.bak/ 131 | venv.bak/ 132 | 133 | # Spyder project settings 134 | .spyderproject 135 | .spyproject 136 | 137 | # Rope project settings 138 | .ropeproject 139 | 140 | # mkdocs documentation 141 | /site 142 | 143 | # mypy 144 | .mypy_cache/ 145 | .dmypy.json 146 | dmypy.json 147 | 148 | # Pyre type checker 149 | .pyre/ 150 | 151 | # pytype static type analyzer 152 | .pytype/ 153 | 154 | # Cython debug symbols 155 | cython_debug/ 156 | 157 | # PyCharm 158 | # JetBrains specific template is maintained in a separate JetBrains.gitignore that can 159 | # be found at https://github.com/github/gitignore/blob/main/Global/JetBrains.gitignore 160 | # and can be added to the global gitignore or merged into this file. 
For a more nuclear 161 | # option (not recommended) you can uncomment the following to ignore the entire idea folder. 162 | #.idea/ 163 | -------------------------------------------------------------------------------- /.python_dependencies/.gitignore: -------------------------------------------------------------------------------- 1 | * 2 | !.gitignore -------------------------------------------------------------------------------- /.vs/ProjectSettings.json: -------------------------------------------------------------------------------- 1 | { 2 | "CurrentProjectSetting": null 3 | } -------------------------------------------------------------------------------- /.vs/VSWorkspaceState.json: -------------------------------------------------------------------------------- 1 | { 2 | "ExpandedNodes": [ 3 | "" 4 | ], 5 | "PreviewInSolutionExplorer": false 6 | } -------------------------------------------------------------------------------- /.vs/dream_textures/v16/.suo: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/intel/dream-textures-openvino/a337917b7903c257eefbfc361831ba7b273cfa4b/.vs/dream_textures/v16/.suo -------------------------------------------------------------------------------- /.vs/slnx.sqlite: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/intel/dream-textures-openvino/a337917b7903c257eefbfc361831ba7b273cfa4b/.vs/slnx.sqlite -------------------------------------------------------------------------------- /CODE_OF_CONDUCT.md: -------------------------------------------------------------------------------- 1 | # Contributor Covenant Code of Conduct 2 | 3 | ## Our Pledge 4 | 5 | We as members, contributors, and leaders pledge to make participation in our 6 | community a harassment-free experience for everyone, regardless of age, body 7 | size, visible or invisible disability, ethnicity, sex characteristics, gender 8 | identity and expression, level of experience, education, socio-economic status, 9 | nationality, personal appearance, race, caste, color, religion, or sexual 10 | identity and orientation. 11 | 12 | We pledge to act and interact in ways that contribute to an open, welcoming, 13 | diverse, inclusive, and healthy community. 
14 | 15 | ## Our Standards 16 | 17 | Examples of behavior that contributes to a positive environment for our 18 | community include: 19 | 20 | * Demonstrating empathy and kindness toward other people 21 | * Being respectful of differing opinions, viewpoints, and experiences 22 | * Giving and gracefully accepting constructive feedback 23 | * Accepting responsibility and apologizing to those affected by our mistakes, 24 | and learning from the experience 25 | * Focusing on what is best not just for us as individuals, but for the overall 26 | community 27 | 28 | Examples of unacceptable behavior include: 29 | 30 | * The use of sexualized language or imagery, and sexual attention or advances of 31 | any kind 32 | * Trolling, insulting or derogatory comments, and personal or political attacks 33 | * Public or private harassment 34 | * Publishing others' private information, such as a physical or email address, 35 | without their explicit permission 36 | * Other conduct which could reasonably be considered inappropriate in a 37 | professional setting 38 | 39 | ## Enforcement Responsibilities 40 | 41 | Community leaders are responsible for clarifying and enforcing our standards of 42 | acceptable behavior and will take appropriate and fair corrective action in 43 | response to any behavior that they deem inappropriate, threatening, offensive, 44 | or harmful. 45 | 46 | Community leaders have the right and responsibility to remove, edit, or reject 47 | comments, commits, code, wiki edits, issues, and other contributions that are 48 | not aligned to this Code of Conduct, and will communicate reasons for moderation 49 | decisions when appropriate. 50 | 51 | ## Scope 52 | 53 | This Code of Conduct applies within all community spaces, and also applies when 54 | an individual is officially representing the community in public spaces. 55 | Examples of representing our community include using an official e-mail address, 56 | posting via an official social media account, or acting as an appointed 57 | representative at an online or offline event. 58 | 59 | ## Enforcement 60 | 61 | Instances of abusive, harassing, or otherwise unacceptable behavior may be 62 | reported to the community leaders responsible for enforcement at 63 | CommunityCodeOfConduct AT intel DOT com. 64 | All complaints will be reviewed and investigated promptly and fairly. 65 | 66 | All community leaders are obligated to respect the privacy and security of the 67 | reporter of any incident. 68 | 69 | ## Enforcement Guidelines 70 | 71 | Community leaders will follow these Community Impact Guidelines in determining 72 | the consequences for any action they deem in violation of this Code of Conduct: 73 | 74 | ### 1. Correction 75 | 76 | **Community Impact**: Use of inappropriate language or other behavior deemed 77 | unprofessional or unwelcome in the community. 78 | 79 | **Consequence**: A private, written warning from community leaders, providing 80 | clarity around the nature of the violation and an explanation of why the 81 | behavior was inappropriate. A public apology may be requested. 82 | 83 | ### 2. Warning 84 | 85 | **Community Impact**: A violation through a single incident or series of 86 | actions. 87 | 88 | **Consequence**: A warning with consequences for continued behavior. No 89 | interaction with the people involved, including unsolicited interaction with 90 | those enforcing the Code of Conduct, for a specified period of time. 
This 91 | includes avoiding interactions in community spaces as well as external channels 92 | like social media. Violating these terms may lead to a temporary or permanent 93 | ban. 94 | 95 | ### 3. Temporary Ban 96 | 97 | **Community Impact**: A serious violation of community standards, including 98 | sustained inappropriate behavior. 99 | 100 | **Consequence**: A temporary ban from any sort of interaction or public 101 | communication with the community for a specified period of time. No public or 102 | private interaction with the people involved, including unsolicited interaction 103 | with those enforcing the Code of Conduct, is allowed during this period. 104 | Violating these terms may lead to a permanent ban. 105 | 106 | ### 4. Permanent Ban 107 | 108 | **Community Impact**: Demonstrating a pattern of violation of community 109 | standards, including sustained inappropriate behavior, harassment of an 110 | individual, or aggression toward or disparagement of classes of individuals. 111 | 112 | **Consequence**: A permanent ban from any sort of public interaction within the 113 | community. 114 | 115 | ## Attribution 116 | 117 | This Code of Conduct is adapted from the [Contributor Covenant][homepage], 118 | version 2.1, available at 119 | [https://www.contributor-covenant.org/version/2/1/code_of_conduct.html][v2.1]. 120 | 121 | Community Impact Guidelines were inspired by 122 | [Mozilla's code of conduct enforcement ladder][Mozilla CoC]. 123 | 124 | For answers to common questions about this code of conduct, see the FAQ at 125 | [https://www.contributor-covenant.org/faq][FAQ]. Translations are available at 126 | [https://www.contributor-covenant.org/translations][translations]. 127 | 128 | [homepage]: https://www.contributor-covenant.org 129 | [v2.1]: https://www.contributor-covenant.org/version/2/1/code_of_conduct.html 130 | [Mozilla CoC]: https://github.com/mozilla/diversity 131 | [FAQ]: https://www.contributor-covenant.org/faq 132 | -------------------------------------------------------------------------------- /CONTRIBUTING.md: -------------------------------------------------------------------------------- 1 | # Contributing 2 | 3 | ### License 4 | 5 | is licensed under the terms in [LICENSE]. By contributing to the project, you agree to the license and copyright terms therein and release your contribution under these terms. 6 | 7 | ### Sign your work 8 | 9 | Please use the sign-off line at the end of the patch. Your signature certifies that you wrote the patch or otherwise have the right to pass it on as an open-source patch. The rules are pretty simple: if you can certify 10 | the below (from [developercertificate.org](http://developercertificate.org/)): 11 | 12 | ``` 13 | Developer Certificate of Origin 14 | Version 1.1 15 | 16 | Copyright (C) 2004, 2006 The Linux Foundation and its contributors. 17 | 660 York Street, Suite 102, 18 | San Francisco, CA 94110 USA 19 | 20 | Everyone is permitted to copy and distribute verbatim copies of this 21 | license document, but changing it is not allowed. 
22 | 23 | Developer's Certificate of Origin 1.1 24 | 25 | By making a contribution to this project, I certify that: 26 | 27 | (a) The contribution was created in whole or in part by me and I 28 | have the right to submit it under the open source license 29 | indicated in the file; or 30 | 31 | (b) The contribution is based upon previous work that, to the best 32 | of my knowledge, is covered under an appropriate open source 33 | license and I have the right under that license to submit that 34 | work with modifications, whether created in whole or in part 35 | by me, under the same open source license (unless I am 36 | permitted to submit under a different license), as indicated 37 | in the file; or 38 | 39 | (c) The contribution was provided directly to me by some other 40 | person who certified (a), (b) or (c) and I have not modified 41 | it. 42 | 43 | (d) I understand and agree that this project and the contribution 44 | are public and that a record of the contribution (including all 45 | personal information I submit with it, including my sign-off) is 46 | maintained indefinitely and may be redistributed consistent with 47 | this project or the open source license(s) involved. 48 | ``` 49 | 50 | Then you just add a line to every git commit message: 51 | 52 | Signed-off-by: Joe Smith 53 | 54 | Use your real name (sorry, no pseudonyms or anonymous contributions.) 55 | 56 | If you set your `user.name` and `user.email` git configs, you can sign your 57 | commit automatically with `git commit -s`. 58 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | # OpenVINO™ support for Dream Textures addon in Blender 2 | 3 | * Create textures, concept art, background assets, and more with a simple text prompt 4 | * Texture entire scenes with 'Project Dream Texture' and depth to image 5 | * Run the models locally on your Intel system, with the ability to choose CPU, iGPU, dGPU, and/or NPU to offload the models. 6 | 7 | 8 | # Installation 9 | 10 | ### A. Install OpenVINO™ 11 | - Download and install [OpenVINO™](https://github.com/openvinotoolkit/openvino/releases) for your operating system. Please follow the instructions to download and extract OpenVINO™ from the archive. Do not use PyPI for installation. 12 | - Note that this addon has been tested with OpenVINO™ 2023.1.0 13 | 14 | 15 | ### B. Dream-Texture-Openvino Install 16 | Skip steps 1 and 2 if you already have Python 3 and Git installed on Windows 17 | 18 | #### 1. Install Python 19 | - Download a Python installer from python.org. Choose Python 3.10 and make sure to pick a 64-bit version. For example, this 3.10.11 installer: https://www.python.org/ftp/python/3.10.11/python-3.10.11-amd64.exe
20 | - Double-click the installer to run it and follow the steps. Check the boxes to add Python to your PATH and to install the py launcher. At the end of the installer, there is an option to disable the PATH length limit; it is recommended to select this.
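To confirm the installation, you can open a new Command Prompt and check the reported version (a quick sanity check; which command works depends on the installer options you chose):
```
python --version
py -3.10 --version
```
Both should print a Python 3.10.x version.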
21 | 22 | #### 2. Install Git 23 | - Download and install [GIT](https://git-scm.com/) 24 | 25 | #### 3. Install the MSFT Visual C++ Runtime 26 | - Download and install [the latest supported redist](https://learn.microsoft.com/en-us/cpp/windows/latest-supported-vc-redist). 27 | 28 | #### 4. Install Blender 29 | - Install [Blender-3.4](https://mirrors.dotsrc.org/blender/release/Blender3.4/blender-3.4.0-windows-x64.msi) 30 | 31 | #### 5. Install Dream-texture-openvino addon 32 | 33 | 1. Clone this repository:
34 | ``` 35 | git clone https://github.com/intel/dream-textures-openvino.git dream_textures 36 | ``` 37 | 2. Copy the cloned "dream_textures" folder into your Blender addons folder - ```C:\Program Files\Blender Foundation\Blender 3.4\3.4\scripts\addons```. Copying into `Program Files` usually requires Administrator rights; see the example below.
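For example, from a Command Prompt opened as Administrator (a sketch that assumes you are still in the directory where you ran `git clone`):
```
robocopy dream_textures "C:\Program Files\Blender Foundation\Blender 3.4\3.4\scripts\addons\dream_textures" /E
```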
38 | 3. If you can't find the add-ons folder, expand any third-party add-on you already have installed under Edit > Preferences > Add-ons and check its "File" path to see where add-ons are located.
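You can also print the folders Blender searches from its built-in Python Console (purely a convenience check, assuming Blender 3.4's standard `bpy.utils` API):
```
import bpy
# Each entry is a scripts directory; installed add-ons live in its "addons" subfolder
print(bpy.utils.script_paths())
```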
39 | 4. Navigate to the dream_textures folder:
40 | ``` 41 | cd C:\Program Files\Blender Foundation\Blender 3.4\3.4\scripts\addons\dream_textures 42 | ``` 43 | 5. Install all the required Python packages and download the models from Hugging Face.
44 | All of the packages are installed to ```dream_textures\.python_dependencies```.
45 | The following commands assume they are being run from inside the dream_textures folder. If you get a permission-denied error when running the step below, open a new terminal as Administrator and try again.
46 | ``` 47 | install.bat 48 | ``` 49 | 6. Replace the "openvino" folder in ```C:\Program Files\Blender Foundation\Blender 3.4\3.4\scripts\addons\dream_textures\.python_dependencies``` with the "openvino" folder found at openvino_2023.1\python\openvino in your OpenVINO™ install (from section A). One way to do this is shown below.
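A sketch of that replacement (adjust the OpenVINO™ path to wherever you extracted it in section A), run from an Administrator Command Prompt inside the dream_textures folder:
```
rmdir /S /Q .python_dependencies\openvino
xcopy "C:\Path\to\openvino_2023.1\python\openvino" .python_dependencies\openvino /E /I
```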
50 | 51 | 7. If you only want to download the models, run:
52 | ``` 53 | "C:\Program Files\Blender Foundation\Blender 3.4\3.4\python\bin\python.exe" model_download.py 54 | ``` 55 | 8. Open a new terminal and set up the OpenVINO™ environment.
56 | Note that you will need to do these steps every time you start Blender. 57 | ``` 58 | C:\Path\to\where\you\installed\OpenVINO\setupvars.bat 59 | ``` 60 | 9. Start the Blender application from the same terminal as step 8.
61 | ``` 62 | "C:\Program Files\Blender Foundation\Blender 3.4\blender.exe" 63 | ``` 64 | Once Blender application starts go to Edit->Preferences->Add-on and search for dream texture and enable it. 65 | 66 | 67 | 68 | # Usage 69 | 70 | Here's a few quick guides: 71 | 72 | 73 | ## [Image Generation](docs/IMAGE_GENERATION.md) 74 | Create textures, concept art, and more with text prompts. Learn how to use the various configuration options to get exactly what you're looking for. 75 | 76 | 77 | 78 | ## [Texture Projection](docs/TEXTURE_PROJECTION.md) 79 | Texture entire models and scenes with depth to image. 80 | 81 | 82 | 83 | ## [History](docs/HISTORY.md) 84 | Recall, export, and import history entries for later use. 85 | 86 | 87 | 88 | # Acknowledgements 89 | * Plugin architecture heavily inspired from dream-textures project by carson-katri - https://github.com/carson-katri/dream-textures 90 | 91 | 92 | # Disclaimer 93 | Stable Diffusion’s data model is governed by the Creative ML Open Rail M license, which is not an open source license. 94 | https://github.com/CompVis/stable-diffusion. Users are responsible for their own assessment whether their proposed use of the project code and model would be governed by and permissible under this license. 95 | -------------------------------------------------------------------------------- /__init__.py: -------------------------------------------------------------------------------- 1 | # This program is free software; you can redistribute it and/or modify 2 | # it under the terms of the GNU General Public License as published by 3 | # the Free Software Foundation; either version 3 of the License, or 4 | # (at your option) any later version. 5 | # 6 | # This program is distributed in the hope that it will be useful, but 7 | # WITHOUT ANY WARRANTY; without even the implied warranty of 8 | # MERCHANTIBILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU 9 | # General Public License for more details. 10 | # 11 | # You should have received a copy of the GNU General Public License 12 | # along with this program. If not, see . 
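# bl_info is the standard Blender add-on metadata block; Blender reads it to list and enable the add-on under Edit > Preferences > Add-ons.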
13 | 14 | bl_info = { 15 | "name": "Dream Textures", 16 | "author": "Dream Textures contributors", 17 | "description": "Use Stable Diffusion to generate unique textures straight from the shader editor.", 18 | "blender": (3, 1, 0), 19 | "version": (0, 1, 0), 20 | "location": "Image Editor -> Sidebar -> Dream", 21 | "category": "Paint" 22 | } 23 | 24 | from multiprocessing import current_process 25 | 26 | if current_process().name != "__actor__": 27 | import bpy 28 | from bpy.props import IntProperty, PointerProperty, EnumProperty, BoolProperty, CollectionProperty, FloatProperty 29 | import sys 30 | import os 31 | 32 | module_name = os.path.basename(os.path.dirname(__file__)) 33 | def clear_modules(): 34 | for name in list(sys.modules.keys()): 35 | if name.startswith(module_name) and name != module_name: 36 | del sys.modules[name] 37 | clear_modules() # keep before all addon imports 38 | 39 | #from .render_pass import register_render_pass, unregister_render_pass, pass_inputs 40 | from .prompt_engineering import * 41 | from .operators.open_latest_version import check_for_updates 42 | from .operators.project import framebuffer_arguments 43 | from .classes import CLASSES, PREFERENCE_CLASSES 44 | #from .tools import TOOLS 45 | from .operators.dream_texture import DreamTexture, kill_generator 46 | from .property_groups.dream_prompt import DreamPrompt 47 | #from .property_groups.seamless_result import SeamlessResult 48 | 49 | from .ui.presets import register_default_presets 50 | 51 | requirements_path_items = ( 52 | ('requirements/win-openvino.txt', 'Windows', 'Windows with Intel CPU, iGPU, dGPU'), 53 | ) 54 | 55 | def register(): 56 | dt_op = bpy.ops 57 | for name in DreamTexture.bl_idname.split("."): 58 | dt_op = getattr(dt_op, name) 59 | if hasattr(bpy.types, dt_op.idname()): # objects under bpy.ops are created on the fly, have to check that it actually exists a little differently 60 | raise RuntimeError("Another instance of Dream Textures is already running.") 61 | 62 | 63 | 64 | for cls in PREFERENCE_CLASSES: 65 | bpy.utils.register_class(cls) 66 | 67 | bpy.types.Scene.dream_textures_history = CollectionProperty(type=DreamPrompt) 68 | 69 | check_for_updates() 70 | 71 | bpy.types.Scene.dream_textures_prompt = PointerProperty(type=DreamPrompt) 72 | bpy.types.Scene.dream_textures_prompt_file = PointerProperty(type=bpy.types.Text) 73 | bpy.types.Scene.init_img = PointerProperty(name="Init Image", type=bpy.types.Image) 74 | bpy.types.Scene.init_mask = PointerProperty(name="Init Mask", type=bpy.types.Image) 75 | bpy.types.Scene.init_depth = PointerProperty(name="Init Depth", type=bpy.types.Image, description="Use an existing depth map. 
Leave blank to generate one from the init image") 76 | #bpy.types.Scene.seamless_result = PointerProperty(type=SeamlessResult) 77 | def get_selection_preview(self): 78 | history = bpy.context.scene.dream_textures_history 79 | if self.dream_textures_history_selection > 0 and self.dream_textures_history_selection < len(history): 80 | return history[self.dream_textures_history_selection].generate_prompt() 81 | return "" 82 | bpy.types.Scene.dream_textures_history_selection = IntProperty(default=1) 83 | bpy.types.Scene.dream_textures_history_selection_preview = bpy.props.StringProperty(name="", default="", get=get_selection_preview, set=lambda _, __: None) 84 | bpy.types.Scene.dream_textures_progress = bpy.props.IntProperty(name="", default=0, min=0, max=0) 85 | bpy.types.Scene.dream_textures_info = bpy.props.StringProperty(name="Info") 86 | 87 | bpy.types.Scene.dream_textures_viewport_enabled = BoolProperty(name="Viewport Enabled", default=False) 88 | #bpy.types.Scene.dream_textures_render_properties_enabled = BoolProperty(default=False) 89 | #bpy.types.Scene.dream_textures_render_properties_prompt = PointerProperty(type=DreamPrompt) 90 | #bpy.types.Scene.dream_textures_render_properties_pass_inputs = EnumProperty(name="Pass Inputs", items=pass_inputs) 91 | 92 | bpy.types.Scene.dream_textures_upscale_prompt = PointerProperty(type=DreamPrompt) 93 | bpy.types.Scene.dream_textures_upscale_tile_size = IntProperty(name="Scale Factor", default=4, step=1, min=1, max=4) 94 | bpy.types.Scene.dream_textures_upscale_blend = IntProperty(name="Blend", default=32, step=8, min=0, max=512) 95 | #bpy.types.Scene.dream_textures_upscale_seamless_result = PointerProperty(type=SeamlessResult) 96 | 97 | bpy.types.Scene.dream_textures_project_prompt = PointerProperty(type=DreamPrompt) 98 | bpy.types.Scene.dream_textures_project_framebuffer_arguments = EnumProperty(name="Inputs", items=framebuffer_arguments) 99 | bpy.types.Scene.dream_textures_project_bake = BoolProperty(name="Bake", default=False, description="Re-maps the generated texture onto the specified UV map") 100 | 101 | for cls in CLASSES: 102 | bpy.utils.register_class(cls) 103 | 104 | #for tool in TOOLS: 105 | # bpy.utils.register_tool(tool) 106 | 107 | # Monkey patch cycles render passes 108 | #register_render_pass() 109 | 110 | register_default_presets() 111 | 112 | def unregister(): 113 | for cls in PREFERENCE_CLASSES: 114 | bpy.utils.unregister_class(cls) 115 | 116 | for cls in CLASSES: 117 | bpy.utils.unregister_class(cls) 118 | #for tool in TOOLS: 119 | # bpy.utils.unregister_tool(tool) 120 | 121 | #unregister_render_pass() 122 | 123 | kill_generator() -------------------------------------------------------------------------------- /absolute_path.py: -------------------------------------------------------------------------------- 1 | import os 2 | 3 | def absolute_path(component: str): 4 | """ 5 | Returns the absolute path to a file in the addon directory. 6 | 7 | Alternative to `os.abspath` that works the same on macOS and Windows. 
8 | """ 9 | return os.path.join(os.path.dirname(os.path.realpath(__file__)), component) 10 | 11 | REAL_ESRGAN_WEIGHTS_PATH = absolute_path("weights/realesrgan/realesr-general-x4v3.pth") 12 | CLIPSEG_WEIGHTS_PATH = absolute_path("weights/clipseg/rd64-uni.pth") -------------------------------------------------------------------------------- /builtin_presets/Debug.py: -------------------------------------------------------------------------------- 1 | import bpy 2 | prompt = bpy.context.scene.dream_textures_prompt 3 | 4 | prompt.steps = 20 5 | prompt.cfg_scale = 7.5 6 | prompt.scheduler = 'DPM Solver Multistep' 7 | prompt.step_preview_mode = 'Accurate' 8 | prompt.optimizations_attention_slicing = True 9 | prompt.optimizations_attention_slice_size_src = 'auto' 10 | prompt.optimizations_attention_slice_size = 1 11 | prompt.optimizations_cudnn_benchmark = False 12 | prompt.optimizations_tf32 = False 13 | prompt.optimizations_amp = False 14 | prompt.optimizations_half_precision = True 15 | prompt.optimizations_sequential_cpu_offload = False 16 | prompt.optimizations_channels_last_memory_format = False 17 | prompt.optimizations_batch_size = 1 18 | prompt.optimizations_vae_slicing = True 19 | prompt.optimizations_cpu_only = False 20 | -------------------------------------------------------------------------------- /builtin_presets/Final.py: -------------------------------------------------------------------------------- 1 | import bpy 2 | prompt = bpy.context.scene.dream_textures_prompt 3 | 4 | prompt.steps = 50 5 | prompt.cfg_scale = 7.5 6 | prompt.scheduler = 'DPM Solver Multistep' 7 | prompt.step_preview_mode = 'Fast' 8 | prompt.optimizations_attention_slicing = True 9 | prompt.optimizations_attention_slice_size_src = 'auto' 10 | prompt.optimizations_attention_slice_size = 1 11 | prompt.optimizations_cudnn_benchmark = False 12 | prompt.optimizations_tf32 = False 13 | prompt.optimizations_amp = False 14 | prompt.optimizations_half_precision = True 15 | prompt.optimizations_sequential_cpu_offload = False 16 | prompt.optimizations_channels_last_memory_format = False 17 | prompt.optimizations_batch_size = 1 18 | prompt.optimizations_vae_slicing = True 19 | prompt.optimizations_cpu_only = False 20 | -------------------------------------------------------------------------------- /builtin_presets/Preview.py: -------------------------------------------------------------------------------- 1 | import bpy 2 | prompt = bpy.context.scene.dream_textures_prompt 3 | 4 | prompt.steps = 20 5 | prompt.cfg_scale = 7.5 6 | prompt.scheduler = 'DPM Solver Multistep' 7 | prompt.step_preview_mode = 'Fast' 8 | prompt.optimizations_attention_slicing = True 9 | prompt.optimizations_attention_slice_size_src = 'auto' 10 | prompt.optimizations_attention_slice_size = 1 11 | prompt.optimizations_cudnn_benchmark = False 12 | prompt.optimizations_tf32 = False 13 | prompt.optimizations_amp = False 14 | prompt.optimizations_half_precision = True 15 | prompt.optimizations_sequential_cpu_offload = False 16 | prompt.optimizations_channels_last_memory_format = False 17 | prompt.optimizations_batch_size = 1 18 | prompt.optimizations_vae_slicing = True 19 | prompt.optimizations_cpu_only = False 20 | -------------------------------------------------------------------------------- /classes.py: -------------------------------------------------------------------------------- 1 | 2 | from .operators.open_latest_version import OpenLatestVersion 3 | from .operators.dream_texture import DreamTexture, ReleaseGenerator, CancelGenerator, 
LoadModel, ProjectLoadModel 4 | from .operators.view_history import SCENE_UL_HistoryList, RecallHistoryEntry, ClearHistory, RemoveHistorySelection, ExportHistorySelection, ImportPromptFile 5 | 6 | from .operators.project import ProjectDreamTexture, dream_texture_projection_panels 7 | from .operators.notify_result import NotifyResult 8 | from .property_groups.dream_prompt import DreamPrompt 9 | 10 | from .ui.panels import dream_texture, history 11 | 12 | from .ui.presets import DREAM_PT_AdvancedPresets, DREAM_MT_AdvancedPresets, AddAdvancedPreset, RestoreDefaultPresets 13 | 14 | CLASSES = ( 15 | 16 | 17 | DreamTexture, 18 | ReleaseGenerator, 19 | 20 | CancelGenerator, 21 | OpenLatestVersion, 22 | SCENE_UL_HistoryList, 23 | RecallHistoryEntry, 24 | ClearHistory, 25 | RemoveHistorySelection, 26 | ExportHistorySelection, 27 | ImportPromptFile, 28 | 29 | ProjectDreamTexture, 30 | LoadModel, 31 | ProjectLoadModel, 32 | 33 | DREAM_PT_AdvancedPresets, 34 | DREAM_MT_AdvancedPresets, 35 | AddAdvancedPreset, 36 | 37 | NotifyResult, 38 | 39 | # The order these are registered in matters 40 | *dream_texture.dream_texture_panels(), 41 | 42 | *history.history_panels(), 43 | *dream_texture_projection_panels(), 44 | ) 45 | 46 | PREFERENCE_CLASSES = ( 47 | DreamPrompt, 48 | RestoreDefaultPresets) -------------------------------------------------------------------------------- /docs/IMAGE_GENERATION.md: -------------------------------------------------------------------------------- 1 | # Image Generation 2 | 1. To open Dream Textures, go to an Image Editor or Shader Editor 3 | 2. Ensure the sidebar is visible by pressing *N* or checking *View* > *Sidebar* 4 | 3. Select the *Dream* panel to open the interface 5 | 6 | ![A screenshot showing the 'Dream' panel in an Image Editor space](assets/image_generation/opening-ui.png) 7 | 8 | 4. Select Stable_Diffusion_1_5 from OpenVino Model and the device you want the model to run on. 9 | 5. Click on *Load Models* to load and compile the model on the device. This is a one-time step, but if you need to choose a different model or device, please click *Load Models* again. 10 | 6. Enter a prompt and/or negative prompt and other parameters 11 | 7. Click on *Generate*. Wait for all inference steps to complete. 12 | 13 | ### Prompt 14 | 15 | A few presets are available to help you create great prompts. They work by asking you to fill in a few simple fields, then generate a full prompt string that is passed to Stable Diffusion. 16 | 17 | The default preset is *Texture*. It asks for a subject, and adds the word `texture` to the end. So if you enter `brick wall`, it will use the prompt `brick wall texture`. 18 | 19 | ### Negative 20 | Enabling negative prompts gives you finer control over your image. For example, if you asked for a `cloud city`, but you wanted to remove the buildings it added, you could enter the negative prompt `building`. This would tell Stable Diffusion to avoid drawing buildings. You can add as much content as you want to the negative prompt, and it will avoid everything entered. 21 | 22 | ### Size 23 | 24 | > Stable Diffusion-1.5 was trained on 512x512 images 25 | 26 | ### Source Image 27 | Choose an image from a specific *File* or use *Open Image*. 28 | 29 | Three actions are available that work on a source image. 30 | 31 | #### Modify 32 | Mixes the image with noise at the ratio specified by the *Noise Strength*. This will make Stable Diffusion match the style, composition, etc. of the source image. 
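As a rough sketch of how the *Noise Strength* value affects the sampler, here is the common diffusers-style img2img timestep convention (an illustration under that assumption, not necessarily this add-on's exact code):
```
# Illustration only: assumes the usual diffusers img2img timestep convention.
num_inference_steps = 10   # steps requested in the UI
strength = 0.5             # Noise Strength

init_timestep = min(int(num_inference_steps * strength), num_inference_steps)
t_start = num_inference_steps - init_timestep
steps_run = num_inference_steps - t_start
print(steps_run)  # 5 -- at strength 0.5 only half of the requested steps are executed
```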
33 | 34 | Strength specifies how much latent noise to mix with the image. A higher strength means more latent noise, and more deviation from the init image. If you want it to stick to the image more, decrease the strength. 35 | 36 | > Depending on the strength value, some steps will be skipped. For example, if you specified `10` steps and set strength to `0.5`, only `5` steps would be used. 37 | 38 | Fit to width/height will ensure the image is contained within the configured size. 39 | 40 | The *Image Type* option has a few options: 41 | 1. Color - Mixes the image with noise -- Not yet enabled 42 | 43 | > The following options require a depth model to be selected, `Stable_Diffusion_1_5_controlnet_depth`. 44 | 45 | 2. Color and Generated Depth - Uses MiDaS to infer the depth of the initial image and includes it in the conditioning. Can give results that more closely match the composition of the source image. 46 | 3. Color and Depth Map - Specify a secondary image to use as the depth map, instead of generating one with MiDaS. 47 | 4. Depth - Treats the intial image as a depth map, and ignores any color. The generated image will match the composition but not colors of the original. 48 | 49 | ### Advanced 50 | You can have more control over the generation by trying different values for these parameters: 51 | 52 | * Random Seed - When enabled, a seed will be selected for you 53 | * Seed - The value used to seed RNG, if text is input instead of a number its hash will be used 54 | * Steps - Number of sampler steps, higher steps will give the sampler more time to converge and clear up artifacts 55 | * CFG Scale - How strongly the prompt influences the output 56 | * Scheduler - Some schedulers take fewer steps to produce a good result than others. Try each one and see what you prefer. 57 | * Step Preview - Whether to show each step in the image editor. Defaults to 'Fast', which samples the latents without using the VAE. 58 | 59 | 60 | ### Iterations - Use 1 for now 61 | How many images to generate. This is only particularly useful when *Random Seed* is enabled. -------------------------------------------------------------------------------- /docs/TEXTURE_PROJECTION.md: -------------------------------------------------------------------------------- 1 | # Texture Projection 2 | 3 | Using depth to image, Dream Textures is able to texture entire scenes automatically with a simple prompt. 4 | 5 | It's sort of like [Ian Hubert's method](https://www.youtube.com/watch?v=v_ikG-u_6r0) in reverse. Instead of starting with an image and building geometry around that, we start with the geometry and generate an image that projects perfectly onto it. 6 | 7 | > Make sure you use only a depth model such as ``Stable_Diffusion_1_5_controlnet_depth``. 8 | Follow the steps below to project a texture onto your mesh. 9 | 10 | ## Selecting a Target 11 | In the 3D Viewport, select the objects you want to project onto. Then in edit mode, select all of the faces to target. Only the selected faces will be given the new texture. 12 | 13 | Every object in the viewport will be factored into the depth map. To only use the selected objects in the depth map, enter local view by pressing */* or selecting *View* > *Local View* > *Toggle Local View* in the viewport menu bar. 14 | 15 | > Tip: Large unbroken faces do not always project well. Try subdividing your mesh if the projection is warping. 16 | 17 | ![](assets/texture_projection/edit_mode.png) 18 | 19 | ## Prompting 20 | 21 | In the sidebar, select the "Dream" panel. 
Choose a depth model and devices from the dropdown, and click on *Load Models*. Once the model is loaded, the *Project Dream Texture* button will be enabled. Enter a prompt and any desired parameters. All of the options available for image generation are available here as well. 22 | 23 | 24 | Please keep the size 512x512. Note that the depth data is in the same aspect ratio as the 3D Viewport window. If your Blender window is landscape, you may want to adjust the size to be 512x512 or something similar. You could also shrink your window to make it square. 25 | 26 | ## Project Dream Texture 27 | After configuring everything, click the *Project Dream Texture* button. This will begin the depth to image process. 28 | You are free to move the viewport around as it generates. Switch to *Viewport Shading* mode to see each step as it generates live. 29 | 30 | A new material will be added named with the seed of the generated image. The UVs for each selected face are also updated to be projected from the angle the material was generated at. 31 | 32 | > Tip: In the sidebar under *View* you can adjust the *Focal Length* of the viewport. 33 | 34 | -------------------------------------------------------------------------------- /docs/assets/banner.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/intel/dream-textures-openvino/a337917b7903c257eefbfc361831ba7b273cfa4b/docs/assets/banner.png -------------------------------------------------------------------------------- /docs/assets/history/history-export.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/intel/dream-textures-openvino/a337917b7903c257eefbfc361831ba7b273cfa4b/docs/assets/history/history-export.png -------------------------------------------------------------------------------- /docs/assets/history/history-import.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/intel/dream-textures-openvino/a337917b7903c257eefbfc361831ba7b273cfa4b/docs/assets/history/history-import.png -------------------------------------------------------------------------------- /docs/assets/image_generation.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/intel/dream-textures-openvino/a337917b7903c257eefbfc361831ba7b273cfa4b/docs/assets/image_generation.png -------------------------------------------------------------------------------- /docs/assets/image_generation/opening-ui.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/intel/dream-textures-openvino/a337917b7903c257eefbfc361831ba7b273cfa4b/docs/assets/image_generation/opening-ui.png -------------------------------------------------------------------------------- /docs/assets/inpaint_outpaint.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/intel/dream-textures-openvino/a337917b7903c257eefbfc361831ba7b273cfa4b/docs/assets/inpaint_outpaint.png -------------------------------------------------------------------------------- /docs/assets/readme-toggle-console.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/intel/dream-textures-openvino/a337917b7903c257eefbfc361831ba7b273cfa4b/docs/assets/readme-toggle-console.png 
-------------------------------------------------------------------------------- /docs/assets/texture_projection.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/intel/dream-textures-openvino/a337917b7903c257eefbfc361831ba7b273cfa4b/docs/assets/texture_projection.png -------------------------------------------------------------------------------- /docs/assets/texture_projection/edit_mode.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/intel/dream-textures-openvino/a337917b7903c257eefbfc361831ba7b273cfa4b/docs/assets/texture_projection/edit_mode.png -------------------------------------------------------------------------------- /docs/assets/texture_projection/projection.gif: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/intel/dream-textures-openvino/a337917b7903c257eefbfc361831ba7b273cfa4b/docs/assets/texture_projection/projection.gif -------------------------------------------------------------------------------- /generator_process/__init__.py: -------------------------------------------------------------------------------- 1 | from .actor import Actor 2 | 3 | class Generator(Actor): 4 | """ 5 | The actor used for all background processes. 6 | """ 7 | 8 | from .actions.prompt_to_image import prompt_to_image, choose_device, load_models 9 | from .actions.prompt_to_image_int8 import prompt_to_image_int8, load_models_int8 10 | #from .actions.image_to_image import image_to_image 11 | #from .actions.inpaint import inpaint 12 | #from .actions.outpaint import outpaint 13 | #from .actions.upscale import upscale 14 | #from .actions.depth_to_image import depth_to_image,load_models_depth 15 | from .actions.controlnet_depth_to_image import depth_to_image,load_models_depth 16 | from .actions.controlnet_depth_to_image_int8 import depth_to_image_int8,load_models_depth_int8 17 | from .actions.huggingface_hub import hf_snapshot_download, hf_list_models, hf_list_installed_models 18 | from .actions.ocio_transform import ocio_transform 19 | #from .actions.convert_original_stable_diffusion_to_diffusers import convert_original_stable_diffusion_to_diffusers 20 | #from .actions.detect_seamless import detect_seamless 21 | -------------------------------------------------------------------------------- /generator_process/actions/SuperResolution.py: -------------------------------------------------------------------------------- 1 | """ 2 | Copyright (c) 2021 Intel Corporation 3 | Licensed under the Apache License, Version 2.0 (the "License"); 4 | you may not use this file except in compliance with the License. 5 | You may obtain a copy of the License at 6 | http://www.apache.org/licenses/LICENSE-2.0 7 | Unless required by applicable law or agreed to in writing, software 8 | distributed under the License is distributed on an "AS IS" BASIS, 9 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 10 | See the License for the specific language governing permissions and 11 | limitations under the License. 
12 | """ 13 | 14 | 15 | import site 16 | 17 | from ...absolute_path import absolute_path 18 | site.addsitedir(absolute_path(".python_dependencies")) 19 | 20 | import cv2 21 | import math 22 | import numpy as np 23 | 24 | from openvino.runtime import Model 25 | 26 | class SuperResolution(Model): 27 | def __init__(self, ie, model_path, input_image_shape, model_name): 28 | super().__init__(ie, model_path) 29 | self.model_name = model_name 30 | self.reshape(input_image_shape) 31 | if self.model_name == "esrgan" or self.model_name == "edsr": 32 | self.input_blob_name = self.prepare_inputs() 33 | else: 34 | self.input_blob_name , self.bicinput_blob_name = self.prepare_inputs() #, self.bicinput_blob_name 35 | self.output_blob_name = self.prepare_outputs() 36 | 37 | def reshape(self, base_shape): 38 | print("base shape:", base_shape) 39 | if self.model_name == "edsr": 40 | h, w = base_shape 41 | else: 42 | h, w, _ = base_shape 43 | 44 | 45 | input_iter = iter(self.net.input_info) 46 | input_layer = next(input_iter) 47 | input_shape = self.net.input_info[input_layer].input_data.shape 48 | input_num = len(self.net.input_info) 49 | print("self.net.input_info:",self.net.input_info) 50 | 51 | if input_num == 2: 52 | output_num = len(self.net.outputs) 53 | output_blob_name = next(iter(self.net.outputs)) 54 | output_blob = self.net.outputs[output_blob_name] 55 | print("output_blob :", output_blob.shape[2]) 56 | coeff = output_blob.shape[2] / input_shape[2] 57 | 58 | bicinput_blob_name = next(input_iter) 59 | bicinput_blob = self.net.input_info[bicinput_blob_name] 60 | bicinput_size = bicinput_blob.input_data.shape 61 | bicinput_size[0] = 1 62 | bicinput_size[2] = coeff * h 63 | bicinput_size[3] = coeff * w 64 | 65 | input_shape[2]=h 66 | input_shape[3]=w 67 | 68 | if self.model_name == "esrgan" or self.model_name == "edsr": 69 | self.net.reshape({input_layer: input_shape}) 70 | else: 71 | self.net.reshape({input_layer: input_shape, bicinput_blob_name: bicinput_size}) 72 | 73 | def prepare_inputs(self): 74 | input_num = len(self.net.input_info) 75 | if input_num != 1 and input_num != 2: 76 | raise RuntimeError("The demo supports topologies with 1 or 2 inputs only") 77 | 78 | iter_blob = iter(self.net.input_info) 79 | input_blob_name = next(iter_blob) 80 | input_blob = self.net.input_info[input_blob_name] 81 | input_blob.precision = "FP32" 82 | 83 | input_size = input_blob.input_data.shape 84 | if len(input_size) != 4 and input_size[1] != 1 and input_size[1] != 3: 85 | raise RuntimeError("one or 3-channel 4-dimensional model's input is expected") 86 | else: 87 | self.n, self.c, self.h, self.w = input_size 88 | 89 | print("iter1",input_blob_name) 90 | 91 | if input_num == 2: 92 | bicinput_blob_name = next(iter_blob) 93 | #print("iter2",bicinput_blob_name) 94 | bicinput_blob = self.net.input_info[bicinput_blob_name] 95 | bicinput_blob.precision = "FP32" 96 | temp = 0 97 | #print("input_blob :", input_blob.input_data.shape) 98 | bicinput_size = bicinput_blob.input_data.shape 99 | #print("bic :", bicinput_blob.input_data.shape) 100 | if len(bicinput_size) != 4: 101 | raise RuntimeError("Number of dimensions for both inputs must be 4") 102 | if input_size[2] >= bicinput_size[2] and input_size[3] >= bicinput_size[3]: 103 | print("add later") 104 | input_blob_name = temp 105 | input_blob_name = bicinput_blob_name 106 | bicinput_blob_name = temp 107 | 108 | #print("input_blob_name in pre inputs", input_blob_name) 109 | 110 | if self.model_name == "esrgan" or self.model_name == "edsr": 111 | return input_blob_name 
112 | else: 113 | return input_blob_name,bicinput_blob_name 114 | 115 | def prepare_outputs(self): 116 | output_num = len(self.net.outputs) 117 | if output_num != 1: 118 | raise RuntimeError("Demo supports topologies only with 1 output") 119 | 120 | output_blob_name = next(iter(self.net.outputs)) 121 | output_blob = self.net.outputs[output_blob_name] 122 | output_blob.precision = "FP32" 123 | 124 | output_size = output_blob.shape 125 | if len(output_size) != 4: 126 | raise Exception("Unexpected output blob shape {}. Only 4D output blob is supported".format(output_size)) 127 | 128 | return output_blob_name 129 | 130 | def preprocess(self, inputs): 131 | image = inputs 132 | input_num = len(self.net.input_info) 133 | 134 | if self.model_name == "edsr": 135 | image = np.expand_dims(image, axis=-1) 136 | 137 | if image.shape[0] != self.h or image.shape[1] != self.w: 138 | 139 | self.logger.warn("Chosen model aspect ratio doesn't match image aspect ratio") 140 | resized_image = cv2.resize(image, (self.w, self.h)) 141 | else: 142 | resized_image = image 143 | 144 | if input_num == 2: 145 | 146 | bicinput_blob = self.net.input_info[self.bicinput_blob_name] 147 | bicinput_size = bicinput_blob.input_data.shape 148 | width = bicinput_size[3] 149 | ht = bicinput_size[2] 150 | resized_image_bic = cv2.resize(image, (width, ht)) 151 | resized_image_bic = resized_image_bic.transpose((2, 0, 1)) 152 | resized_image_bic = np.expand_dims(resized_image_bic, 0) 153 | 154 | 155 | resized_image= resized_image.astype(np.float32) 156 | #print("resssimage------",resized_image[0][0]) 157 | if self.model_name == "esrgan": 158 | resized_image = resized_image / 255.0 # for esrgan 159 | 160 | resized_image = resized_image.transpose((2, 0, 1)) 161 | resized_image = np.expand_dims(resized_image, 0) 162 | #print("resized_image",resized_image.shape) 163 | 164 | if self.model_name == "esrgan" or self.model_name == "edsr": 165 | dict_inputs = {self.input_blob_name: resized_image} 166 | else: 167 | dict_inputs = {self.input_blob_name: resized_image , self.bicinput_blob_name: resized_image_bic.astype(np.float32)} 168 | 169 | return dict_inputs, image.shape[1::-1] 170 | 171 | def postprocess(self, outputs, dsize): 172 | print("outputs", outputs[self.output_blob_name].shape) 173 | if self.model_name == "edsr" : 174 | prediction = outputs[self.output_blob_name][0] #.squeeze() 175 | print("outputs", prediction.shape) 176 | prediction = prediction.transpose((1, 2, 0)) 177 | else: 178 | prediction = outputs[self.output_blob_name].squeeze() 179 | print("outputs", prediction.shape) 180 | prediction = prediction.transpose((1, 2, 0)) 181 | prediction *= 255 182 | 183 | prediction = np.clip(prediction, 0, 255) #if not done then we get artifacts due to pixel overflow 184 | 185 | return prediction.astype(np.uint8) 186 | -------------------------------------------------------------------------------- /generator_process/actions/async_pipeline.py: -------------------------------------------------------------------------------- 1 | """ 2 | Copyright (C) 2020 Intel Corporation 3 | 4 | Licensed under the Apache License, Version 2.0 (the "License"); 5 | you may not use this file except in compliance with the License. 6 | You may obtain a copy of the License at 7 | 8 | http://www.apache.org/licenses/LICENSE-2.0 9 | 10 | Unless required by applicable law or agreed to in writing, software 11 | distributed under the License is distributed on an "AS IS" BASIS, 12 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
13 | See the License for the specific language governing permissions and 14 | limitations under the License. 15 | """ 16 | 17 | import logging 18 | import threading 19 | from collections import deque 20 | from typing import Dict, Set 21 | 22 | 23 | def parse_devices(device_string): 24 | colon_position = device_string.find(':') 25 | if colon_position != -1: 26 | device_type = device_string[:colon_position] 27 | if device_type == 'HETERO' or device_type == 'MULTI': 28 | comma_separated_devices = device_string[colon_position + 1:] 29 | devices = comma_separated_devices.split(',') 30 | for device in devices: 31 | parenthesis_position = device.find(':') 32 | if parenthesis_position != -1: 33 | device = device[:parenthesis_position] 34 | return devices 35 | return (device_string,) 36 | 37 | 38 | def parse_value_per_device(devices: Set[str], values_string: str)-> Dict[str, int]: 39 | """Format: :,: or just """ 40 | values_string_upper = values_string.upper() 41 | result = {} 42 | device_value_strings = values_string_upper.split(',') 43 | for device_value_string in device_value_strings: 44 | device_value_list = device_value_string.split(':') 45 | if len(device_value_list) == 2: 46 | if device_value_list[0] in devices: 47 | result[device_value_list[0]] = int(device_value_list[1]) 48 | elif len(device_value_list) == 1 and device_value_list[0] != '': 49 | for device in devices: 50 | result[device] = int(device_value_list[0]) 51 | elif device_value_list[0] != '': 52 | raise RuntimeError(f'Unknown string format: {values_string}') 53 | return result 54 | 55 | 56 | def get_user_config(flags_d: str, flags_nstreams: str, flags_nthreads: int)-> Dict[str, str]: 57 | config = {} 58 | 59 | devices = set(parse_devices(flags_d)) 60 | 61 | device_nstreams = parse_value_per_device(devices, flags_nstreams) 62 | for device in devices: 63 | if device == 'CPU': # CPU supports a few special performance-oriented keys 64 | # limit threading for CPU portion of inference 65 | if flags_nthreads: 66 | config['CPU_THREADS_NUM'] = str(flags_nthreads) 67 | 68 | config['CPU_BIND_THREAD'] = 'NO' 69 | 70 | # for CPU execution, more throughput-oriented execution via streams 71 | config['CPU_THROUGHPUT_STREAMS'] = str(device_nstreams[device]) \ 72 | if device in device_nstreams else 'CPU_THROUGHPUT_AUTO' 73 | elif device == 'GPU': 74 | config['GPU_THROUGHPUT_STREAMS'] = str(device_nstreams[device]) \ 75 | if device in device_nstreams else 'GPU_THROUGHPUT_AUTO' 76 | if 'MULTI' in flags_d and 'CPU' in devices: 77 | # multi-device execution with the CPU + GPU performs best with GPU throttling hint, 78 | # which releases another CPU thread (that is otherwise used by the GPU driver for active polling) 79 | config['GPU_PLUGIN_THROTTLE'] = '1' 80 | return config 81 | 82 | 83 | class AsyncPipeline: 84 | def __init__(self, ie, model, plugin_config, device='CPU', max_num_requests=1): 85 | self.model = model 86 | self.logger = logging.getLogger() 87 | 88 | self.logger.info('Loading network to {} plugin...'.format(device)) 89 | self.exec_net = ie.load_network(network=self.model.net, device_name=device, 90 | config=plugin_config, num_requests=max_num_requests) 91 | if max_num_requests == 0: 92 | # ExecutableNetwork doesn't allow creation of additional InferRequests. 
Reload ExecutableNetwork 93 | # +1 to use it as a buffer of the pipeline 94 | self.exec_net = ie.load_network(network=self.model.net, device_name=device, 95 | config=plugin_config, num_requests=len(self.exec_net.requests) + 1) 96 | 97 | self.empty_requests = deque(self.exec_net.requests) 98 | self.completed_request_results = {} 99 | self.callback_exceptions = {} 100 | self.event = threading.Event() 101 | 102 | def inference_completion_callback(self, status, callback_args): 103 | try: 104 | request, id, meta, preprocessing_meta = callback_args 105 | if status != 0: 106 | raise RuntimeError('Infer Request has returned status code {}'.format(status)) 107 | raw_outputs = {key: blob.buffer for key, blob in request.output_blobs.items()} 108 | self.completed_request_results[id] = (raw_outputs, meta, preprocessing_meta) 109 | self.empty_requests.append(request) 110 | except Exception as e: 111 | self.callback_exceptions.append(e) 112 | self.event.set() 113 | 114 | def submit_data(self, inputs, id, meta): 115 | request = self.empty_requests.popleft() 116 | if len(self.empty_requests) == 0: 117 | self.event.clear() 118 | inputs, preprocessing_meta = self.model.preprocess(inputs) 119 | request.set_completion_callback(py_callback=self.inference_completion_callback, 120 | py_data=(request, id, meta, preprocessing_meta)) 121 | request.async_infer(inputs=inputs) 122 | 123 | def get_raw_result(self, id): 124 | if id in self.completed_request_results: 125 | return self.completed_request_results.pop(id) 126 | return None 127 | 128 | def get_result(self, id): 129 | result = self.get_raw_result(id) 130 | if result: 131 | raw_result, meta, preprocess_meta = result 132 | return self.model.postprocess(raw_result, preprocess_meta), meta 133 | return None 134 | 135 | def is_ready(self): 136 | return len(self.empty_requests) != 0 137 | 138 | def has_completed_request(self): 139 | return len(self.completed_request_results) != 0 140 | 141 | def await_all(self): 142 | for request in self.exec_net.requests: 143 | request.wait() 144 | 145 | def await_any(self): 146 | if len(self.empty_requests) == 0: 147 | self.event.wait() 148 | -------------------------------------------------------------------------------- /generator_process/actions/depth_to_image.py: -------------------------------------------------------------------------------- 1 | from typing import Union, Generator, Callable, List, Optional 2 | import os 3 | from contextlib import nullcontext 4 | import inspect 5 | 6 | from numpy.typing import NDArray 7 | import numpy as np 8 | import random 9 | from .prompt_to_image import Scheduler, device_name_enum, model_size_enum, model_name_enum, Optimizations, StepPreviewMode, ImageGenerationResult 10 | from ..models import Pipeline 11 | 12 | #from diffusers import StableDiffusionDepth2ImgPipeline 13 | # tokenizer 14 | 15 | from pathlib import Path 16 | 17 | pipeline = Pipeline.STABLE_DIFFUSION 18 | 19 | import diffusers 20 | from diffusers import StableDiffusionDepth2ImgPipeline 21 | from diffusers.schedulers import DDIMScheduler, LMSDiscreteScheduler, PNDMScheduler, EulerDiscreteScheduler, DPMSolverMultistepScheduler 22 | from openvino.runtime import Model, Core 23 | from transformers import CLIPTokenizer 24 | import torch 25 | import open_clip 26 | import PIL.Image 27 | import PIL.ImageOps 28 | 29 | 30 | 31 | class StableDiffusionEngineDepth(diffusers.DiffusionPipeline): 32 | def __init__( 33 | self, 34 | model="bes-dev/stable-diffusion-v1-4-openvino", 35 | device="CPU" 36 | 37 | ): 38 | self.tokenizer = 
open_clip.get_tokenizer('ViT-H-14') 39 | print("Starting Depth Model load") 40 | 41 | self.core = Core() 42 | self.core.set_property({'CACHE_DIR': os.path.join(model, 'cache')}) #adding caching to reduce init time 43 | self.text_encoder = self.core.compile_model(os.path.join(model, "text_encoder.xml"), device) 44 | self._text_encoder_output = self.text_encoder.output(0) 45 | 46 | self.unet = self.core.compile_model(os.path.join(model, "unet.xml"), device) 47 | self._unet_output = self.unet.output(0) 48 | self.latent_shape = tuple(self.unet.inputs[0].shape)[1:] 49 | self.vae_decoder = self.core.compile_model(os.path.join(model, "vae_decoder.xml"), device) 50 | self.vae_encoder = self.core.compile_model(os.path.join(model, "vae_encoder.xml"), device) 51 | 52 | self.init_image_shape = tuple(self.vae_encoder.inputs[0].shape)[2:] 53 | 54 | self._vae_d_output = self.vae_decoder.output(0) 55 | self._vae_e_output = self.vae_encoder.output(0) if self.vae_encoder is not None else None 56 | 57 | self.height = self.unet.input(0).shape[2] * 8 58 | self.width = self.unet.input(0).shape[3] * 8 59 | 60 | super().__init__() 61 | 62 | self.vae_scale_factor = 8 63 | self.scaling_factor = 0.18215 64 | 65 | 66 | def prepare_depth(self, depth, image, dtype, device): 67 | device = torch.device('cpu') # if device.type == 'mps' else device.type) 68 | 69 | if depth is None: 70 | print("Depth is None so Depth setting up the depth model") 71 | from transformers import DPTForDepthEstimation, DPTImageProcessor 72 | import contextlib 73 | 74 | 75 | feature_extractor = DPTImageProcessor.from_pretrained("Intel/dpt-hybrid-midas") 76 | depth_estimator = DPTForDepthEstimation.from_pretrained("Intel/dpt-hybrid-midas") 77 | depth_estimator = depth_estimator.to(device) 78 | 79 | pixel_values = feature_extractor(images=image, return_tensors="pt").pixel_values 80 | 81 | pixel_values = pixel_values.to(device=device) 82 | # The DPT-Hybrid model uses batch-norm layers which are not compatible with fp16. 83 | # So we use `torch.autocast` here for half precision inference. 
84 | #context_manger = torch.autocast("cuda", dtype=dtype) if device.type == "cuda" else contextlib.nullcontext() 85 | context_manger = contextlib.nullcontext() 86 | with context_manger: 87 | depth_map = depth_estimator(pixel_values).predicted_depth 88 | depth_map = torch.nn.functional.interpolate( 89 | depth_map.unsqueeze(1), 90 | size=(self.height // self.vae_scale_factor, self.width // self.vae_scale_factor), 91 | mode="bicubic", 92 | align_corners=False, 93 | ) 94 | 95 | depth_min = torch.amin(depth_map, dim=[1, 2, 3], keepdim=True) 96 | depth_max = torch.amax(depth_map, dim=[1, 2, 3], keepdim=True) 97 | depth_map = 2.0 * (depth_map - depth_min) / (depth_max - depth_min) - 1.0 98 | depth_map = depth_map.to("cpu",dtype) #depth_map.to(device) 99 | return depth_map 100 | else: 101 | if isinstance(depth, PIL.Image.Image): 102 | depth = np.array(depth.convert("L")) 103 | depth = depth.astype(np.float32) / 255.0 104 | depth = depth[None, None] 105 | depth = torch.from_numpy(depth) 106 | return depth 107 | 108 | def prepare_depth_latents( 109 | self, depth, batch_size, dtype, device, generator, do_classifier_free_guidance 110 | ): 111 | # resize the mask to latents shape as we concatenate the mask to the latents 112 | # we do that before converting to dtype to avoid breaking in case we're using cpu_offload 113 | # and half precision 114 | depth = torch.nn.functional.interpolate( 115 | depth, size=(self.height // self.vae_scale_factor, self.width // self.vae_scale_factor) 116 | ) 117 | depth = depth.to(device=device,dtype=torch.float) 118 | #depth = depth.to(device=device, dtype=dtype) 119 | 120 | # duplicate mask and masked_image_latents for each generation per prompt, using mps friendly method 121 | depth = depth.repeat(batch_size, 1, 1, 1) 122 | depth = torch.cat([depth] * 2) if do_classifier_free_guidance else depth 123 | return depth 124 | 125 | def prepare_img2img_latents(self, batch_size, num_channels_latents, dtype, device, generator, latents=None, image=None, timestep=None,scheduler=LMSDiscreteScheduler): 126 | shape = (batch_size, num_channels_latents, self.height // self.vae_scale_factor, self.width // self.vae_scale_factor) 127 | if isinstance(generator, list) and len(generator) != batch_size: 128 | raise ValueError( 129 | f"You have passed a list of generators of length {len(generator)}, but requested an effective batch" 130 | f" size of {batch_size}. Make sure the batch size matches the length of the generators." 
131 | ) 132 | 133 | if latents is None: 134 | rand_device = "cpu" #if device.type == "mps" else device 135 | 136 | if isinstance(generator, list): 137 | shape = (1,) + shape[1:] 138 | latents = [ 139 | torch.randn(shape, generator=generator[i], device=rand_device, dtype=dtype) 140 | for i in range(batch_size) 141 | ] 142 | latents = torch.cat(latents, dim=0).to(device) 143 | else: 144 | latents = torch.randn(shape, generator=generator, device=rand_device, dtype=dtype).to(device) 145 | else: 146 | if latents.shape != shape: 147 | raise ValueError(f"Unexpected latents shape, got {latents.shape}, expected {shape}") 148 | latents = latents.to("cpu") 149 | 150 | # scale the initial noise by the standard deviation required by the scheduler 151 | latents = latents * scheduler.init_noise_sigma 152 | 153 | if image is not None: 154 | image = image.to(device="cpu", dtype=dtype) 155 | if isinstance(generator, list): 156 | image_latents = [ 157 | self.vae.encode(image[0:1]).latent_dist.sample(generator[i]) for i in range(batch_size) 158 | ] 159 | image_latents = torch.cat(image_latents, dim=0) 160 | else: 161 | image_latents = self.vae.encode(image).latent_dist.sample(generator) 162 | image_latents = torch.nn.functional.interpolate( 163 | image_latents, size=(self.height // self.vae_scale_factor, self.width // self.vae_scale_factor) 164 | ) 165 | image_latents = 0.18215 * image_latents 166 | rand_device = "cpu" #if device.type == "mps" else device 167 | shape = image_latents.shape 168 | if isinstance(generator, list): 169 | shape = (1,) + shape[1:] 170 | noise = [ 171 | torch.randn(shape, generator=generator[i], device=rand_device, dtype=dtype) for i in 172 | range(batch_size) 173 | ] 174 | noise = torch.cat(noise, dim=0).to(device) 175 | else: 176 | noise = torch.randn(shape, generator=generator, device=rand_device, dtype=dtype).to(device) 177 | latents = scheduler.add_noise(image_latents, noise, timestep) 178 | 179 | return latents 180 | def prepare_latents(self, image = None, scheduler=LMSDiscreteScheduler, latent_timestep:torch.Tensor = None): 181 | """ 182 | Function for getting initial latents for starting generation 183 | 184 | Parameters: 185 | image (PIL.Image.Image, *optional*, None): 186 | Input image for generation, if not provided randon noise will be used as starting point 187 | latent_timestep (torch.Tensor, *optional*, None): 188 | Predicted by scheduler initial step for image generation, required for latent image mixing with nosie 189 | Returns: 190 | latents (np.ndarray): 191 | Image encoded in latent space 192 | """ 193 | latents_shape = (1, 4, self.height // 8, self.width // 8) 194 | noise = np.random.randn(*latents_shape).astype(np.float32) 195 | if image is None: 196 | # if we use LMSDiscreteScheduler, let's make sure latents are mulitplied by sigmas 197 | if isinstance(scheduler, LMSDiscreteScheduler): 198 | noise = noise * scheduler.sigmas[0].numpy() 199 | return noise 200 | elif isinstance(scheduler, EulerDiscreteScheduler): 201 | noise = noise * scheduler.sigmas.max().numpy() 202 | return noise 203 | else: 204 | return noise 205 | 206 | input_image = image 207 | moments = self.vae_encoder(input_image)[self._vae_e_output] 208 | mean, logvar = np.split(moments, 2, axis=1) 209 | std = np.exp(logvar * 0.5) 210 | latents = (mean + std * np.random.randn(*mean.shape)) * 0.18215 211 | latents = scheduler.add_noise(torch.from_numpy(latents), torch.from_numpy(noise), latent_timestep).numpy() 212 | return latents 213 | 214 | def prepare_extra_step_kwargs(self, generator, eta,scheduler): 
215 | # prepare extra kwargs for the scheduler step, since not all schedulers have the same signature 216 | # eta (η) is only used with the DDIMScheduler, it will be ignored for other schedulers. 217 | # eta corresponds to η in DDIM paper: https://arxiv.org/abs/2010.02502 218 | # and should be between [0, 1] 219 | 220 | accepts_eta = "eta" in set(inspect.signature(scheduler.step).parameters.keys()) 221 | extra_step_kwargs = {} 222 | if accepts_eta: 223 | extra_step_kwargs["eta"] = eta 224 | 225 | # check if the scheduler accepts generator 226 | accepts_generator = "generator" in set(inspect.signature(scheduler.step).parameters.keys()) 227 | if accepts_generator: 228 | extra_step_kwargs["generator"] = generator 229 | return extra_step_kwargs 230 | 231 | 232 | def get_timesteps(self, num_inference_steps, strength, scheduler): 233 | # get the original timestep using init_timestep 234 | offset = scheduler.config.get("steps_offset", 0) 235 | init_timestep = int(num_inference_steps * strength) + offset 236 | init_timestep = min(init_timestep, num_inference_steps) 237 | 238 | t_start = max(num_inference_steps - init_timestep + offset , 0) 239 | timesteps = scheduler.timesteps[t_start:] 240 | 241 | return timesteps, num_inference_steps - t_start 242 | 243 | @torch.no_grad() 244 | def __call__( 245 | self, 246 | prompt: Union[str, List[str]], 247 | depth_image: Union[torch.FloatTensor, PIL.Image.Image], 248 | image: Optional[Union[torch.FloatTensor, PIL.Image.Image]] = None, 249 | scheduler=None, 250 | strength: float = 0.8, 251 | num_inference_steps: int = 50, 252 | guidance_scale: float = 7.5, 253 | negative_prompt: Optional[Union[str, List[str]]] = None, 254 | num_images_per_prompt: Optional[int] = 1, 255 | eta: float = 0.0, 256 | generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None, 257 | latents: Optional[torch.FloatTensor] = None, 258 | output_type: Optional[str] = "pil", 259 | return_dict: bool = True, 260 | callback: Optional[Callable[[int, int, torch.FloatTensor], None]] = None, 261 | callback_steps: Optional[int] = 1, 262 | **kwargs, 263 | ): 264 | 265 | # 0. Default height and width to unet 266 | #height = height or 768 #self.unet.config.sample_size * self.vae_scale_factor 267 | #width = width or 768 #self.unet.config.sample_size * self.vae_scale_factor 268 | 269 | # 1. Check inputs 270 | diffusers.StableDiffusionInpaintPipeline.check_inputs(self,prompt, self.height, self.width, strength, callback_steps) #self.check_inputs(prompt, height, width, callback_steps) 271 | 272 | # 2. Define call parameters 273 | batch_size = 1 if isinstance(prompt, str) else len(prompt) 274 | device = "cpu" #self._execution_device 275 | # here `guidance_scale` is defined analog to the guidance weight `w` of equation (2) 276 | # of the Imagen paper: https://arxiv.org/pdf/2205.11487.pdf . `guidance_scale = 1` 277 | # corresponds to doing no classifier free guidance. 278 | do_classifier_free_guidance = guidance_scale > 1.0 279 | 280 | # 3. 
Encode input prompt 281 | #text_embeddings = self._encode_prompt( 282 | # prompt, device, num_images_per_prompt, do_classifier_free_guidance, negative_prompt 283 | #) 284 | if isinstance(prompt, str): 285 | batch_size = 1 286 | elif isinstance(prompt, list): 287 | batch_size = len(prompt) 288 | else: 289 | raise ValueError(f"`prompt` has to be of type `str` or `list` but is {type(prompt)}") 290 | img_buffer = [] 291 | 292 | text_input = self.tokenizer(prompt) 293 | text_embeddings = self.text_encoder(text_input)[self._text_encoder_output] 294 | 295 | if do_classifier_free_guidance: 296 | if negative_prompt is None: 297 | uncond_tokens = [""] * batch_size 298 | 299 | elif isinstance(negative_prompt, str): 300 | uncond_tokens = [negative_prompt] 301 | else: 302 | uncond_tokens = negative_prompt 303 | uncond_input = self.tokenizer(uncond_tokens) 304 | uncond_embeddings = self.text_encoder(uncond_input)[self._text_encoder_output] 305 | text_embeddings = np.concatenate([uncond_embeddings, text_embeddings]) 306 | 307 | # 4. Prepare the depth image 308 | depth = self.prepare_depth(depth_image, image, torch.float, device) 309 | 310 | if image is not None and isinstance(image, PIL.Image.Image): 311 | image = diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion_img2img.preprocess(image) 312 | 313 | # 5. set timesteps 314 | scheduler.set_timesteps(num_inference_steps, device=device) 315 | timesteps = scheduler.timesteps 316 | if image is not None: 317 | timesteps, num_inference_steps = self.get_timesteps(num_inference_steps, strength,scheduler) 318 | 319 | # 6. Prepare latent variables 320 | num_channels_latents = 4 #self.vae.config.latent_channels 321 | latent_timestep = timesteps[:1].repeat(batch_size * num_images_per_prompt) 322 | 323 | latents = self.prepare_latents(image, scheduler, latent_timestep) 324 | 325 | 326 | # 7. Prepare mask latent variables 327 | depth = self.prepare_depth_latents( 328 | depth, 329 | batch_size * num_images_per_prompt, 330 | text_embeddings.dtype, 331 | device, 332 | generator, 333 | do_classifier_free_guidance, 334 | ) 335 | 336 | # 8. Check that sizes of mask, masked image and latents match 337 | num_channels_depth = depth.shape[1] 338 | if num_channels_latents + num_channels_depth != 5: #self.unet.config.in_channels: 339 | raise ValueError( 340 | f"Select a depth model, such as 'stabilityai/stable-diffusion-2-depth'" 341 | ) 342 | 343 | # 9. Prepare extra step kwargs. TODO: Logic should ideally just be moved out of the pipeline 344 | extra_step_kwargs = self.prepare_extra_step_kwargs(generator, eta,scheduler) 345 | 346 | # 10. 
Denoising loop 347 | num_warmup_steps = len(timesteps) - num_inference_steps * scheduler.order 348 | with self.progress_bar(total=num_inference_steps) as progress_bar: 349 | for i, t in enumerate(timesteps): 350 | # expand the latents if we are doing classifier free guidance 351 | latent_model_input = np.concatenate([latents] * 2) if do_classifier_free_guidance else latents #torch.cat([latents] * 2) if do_classifier_free_guidance else latents 352 | 353 | # concat latents, mask, masked_image_latents in the channel dimension 354 | latent_model_input = scheduler.scale_model_input(latent_model_input, t) 355 | latent_model_input = torch.cat([latent_model_input, depth], dim=1) 356 | 357 | # predict the noise residual 358 | noise_pred = self.unet([latent_model_input, t, text_embeddings])[self._unet_output] 359 | 360 | # perform guidance 361 | if do_classifier_free_guidance: 362 | noise_pred = noise_pred[0] + guidance_scale * (noise_pred[1] - noise_pred[0]) 363 | 364 | 365 | # compute the previous noisy sample x_t -> x_t-1 366 | latents = scheduler.step(torch.from_numpy(noise_pred), t, torch.from_numpy(latents), **extra_step_kwargs).prev_sample.numpy() 367 | 368 | if i == len(timesteps) - 1 or ((i + 1) > num_warmup_steps and (i + 1) % scheduler.order == 0): 369 | progress_bar.update() 370 | # NOTE: Modified to yield the latents instead of calling a callback. 371 | yield ImageGenerationResult.step_preview(self, kwargs['step_preview_mode'], self.width, self.height, latents, generator, i) 372 | 373 | #self.vae.config.scaling_factor = 8 374 | # 11. Post-processing 375 | 376 | #latents = 1 / self.scaling_factor * latents 377 | print("-------------before vae_decoder----------------") 378 | image = self.vae_decoder(latents)[self._vae_d_output] 379 | print("-------------After vae_decoder-----------------") 380 | image = np.clip(image / 2 + 0.5, 0, 1) #(image / 2 + 0.5).clamp(0, 1) 381 | image = np.transpose(image, (0, 2, 3, 1)) #image.cpu().permute(0, 2, 3, 1).float().numpy() 382 | 383 | filePath = 'test1_blender.png' 384 | if os.path.exists(filePath): 385 | os.remove(filePath) 386 | 387 | pil_image = self.numpy_to_pil(image) 388 | pil_image[0].save(filePath) 389 | #image = self.decode_latents(latents) 390 | 391 | # TODO: Add UI to enable this. 392 | # 12. Run safety checker 393 | # image, has_nsfw_concept = self.run_safety_checker(image, device, text_embeddings.dtype) 394 | 395 | # NOTE: Modified to yield the decoded image as a numpy array. 396 | yield ImageGenerationResult( 397 | [np.asarray(PIL.ImageOps.flip(image).convert('RGBA'), dtype=np.float32) / 255. 
398 | for i, image in enumerate(self.numpy_to_pil(image))], 399 | [gen.initial_seed() for gen in generator] if isinstance(generator, list) else [generator.initial_seed()], 400 | num_inference_steps, 401 | True 402 | ) 403 | 404 | 405 | 406 | ov_pipe_depth = None 407 | 408 | def load_models_depth(self,model_path,infer_device): 409 | global ov_pipe_depth 410 | try: 411 | ov_pipe_depth = StableDiffusionEngineDepth( 412 | model = model_path, 413 | device = infer_device) 414 | except: 415 | return False 416 | 417 | #print("PROCESS ID in print function", os.getpid()) 418 | return True 419 | 420 | def depth_to_image( 421 | self, 422 | pipeline: Pipeline, 423 | 424 | infer_model: model_name_enum, 425 | 426 | scheduler: Scheduler, 427 | 428 | optimizations: Optimizations, 429 | 430 | depth: NDArray | None, 431 | image: NDArray | str | None, 432 | strength: float, 433 | prompt: str | list[str], 434 | steps: int, 435 | seed: int, 436 | 437 | 438 | infer_model_size: model_size_enum, 439 | infer_device: device_name_enum, 440 | 441 | cfg_scale: float, 442 | use_negative_prompt: bool, 443 | negative_prompt: str, 444 | 445 | 446 | 447 | step_preview_mode: StepPreviewMode, 448 | 449 | **kwargs 450 | ) -> Generator[NDArray, None, None]: 451 | match pipeline: 452 | case Pipeline.STABLE_DIFFUSION: 453 | 454 | 455 | 456 | 457 | if optimizations.cpu_only: 458 | device = "cpu" 459 | else: 460 | device = self.choose_device() 461 | 462 | 463 | prediction_type_ = 'epsilon' 464 | if scheduler == Scheduler.LMS_DISCRETE: 465 | select_scheduler = LMSDiscreteScheduler 466 | 467 | elif scheduler == Scheduler.EULER_DISCRETE: 468 | select_scheduler = EulerDiscreteScheduler 469 | 470 | elif scheduler == Scheduler.DPM_SOLVER_MULTISTEP: 471 | select_scheduler = DPMSolverMultistepScheduler 472 | prediction_type_ = 'v_prediction' 473 | else: 474 | select_scheduler = EulerDiscreteScheduler 475 | 476 | 477 | 478 | sch = select_scheduler( 479 | beta_start=0.00085, 480 | beta_end=0.012, 481 | beta_schedule="scaled_linear") 482 | 483 | 484 | 485 | # RNG 486 | batch_size = len(prompt) if isinstance(prompt, list) else 1 487 | generator = [] 488 | for _ in range(batch_size): 489 | gen = torch.Generator(device="cpu" if device in ("mps", "privateuseone") else device) # MPS and DML do not support the `Generator` API 490 | generator.append(gen.manual_seed(random.randrange(0, np.iinfo(np.uint32).max) if seed is None else seed)) 491 | if batch_size == 1: 492 | # Some schedulers don't handle a list of generators: https://github.com/huggingface/diffusers/issues/1909 493 | generator = generator[0] 494 | 495 | # Init Image 496 | # FIXME: The `unet.config.sample_size` of the depth model is `32`, not `64`. For now, this will be hardcoded to `512`. 
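            # Note (added for clarity): whichever resolution is selected here is rounded down to a
            # multiple of 8 just below (`rounded_size`), because Stable Diffusion's VAE maps 8x8 pixel
            # blocks to a single latent cell, e.g. 512x512 pixels -> a 64x64 latent grid.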
497 | if infer_model_size.name == "model_size_512": 498 | height = 512 499 | width = 512 500 | else: 501 | height = 768 502 | width = 768 503 | 504 | 505 | rounded_size = ( 506 | int(8 * (width // 8)), 507 | int(8 * (height // 8)), 508 | ) 509 | depth_image = PIL.ImageOps.flip(PIL.Image.fromarray(np.uint8(depth * 255)).convert('L')).resize(rounded_size) if depth is not None else None 510 | init_image = None if image is None else (PIL.Image.open(image) if isinstance(image, str) else PIL.Image.fromarray(image.astype(np.uint8))).convert('RGB').resize(rounded_size) 511 | if depth_image is None: 512 | print("DEPTH IS NONE") 513 | else: 514 | print("DEPTH IS NOT NONE..its pre generated") 515 | 516 | # Inference 517 | #with (torch.inference_mode() if device not in ('mps', "privateuseone") else nullcontext()), \ 518 | #(torch.autocast(device) if optimizations.can_use("amp", device) else nullcontext()): 519 | yield from ov_pipe_depth( 520 | prompt=prompt, 521 | depth_image=depth_image, 522 | image=init_image, 523 | strength=strength, 524 | scheduler = sch, 525 | num_inference_steps=steps, 526 | guidance_scale=cfg_scale, 527 | negative_prompt=negative_prompt if use_negative_prompt else None, 528 | num_images_per_prompt=1, 529 | eta=0.0, 530 | generator=generator, 531 | latents=None, 532 | output_type="pil", 533 | return_dict=True, 534 | callback=None, 535 | callback_steps=1, 536 | step_preview_mode=step_preview_mode 537 | ) 538 | 539 | 540 | case Pipeline.STABILITY_SDK: 541 | import stability_sdk 542 | raise NotImplementedError() 543 | case _: 544 | raise Exception(f"Unsupported pipeline {pipeline}.") -------------------------------------------------------------------------------- /generator_process/actions/ocio_transform.py: -------------------------------------------------------------------------------- 1 | import sys 2 | from numpy.typing import NDArray 3 | import math 4 | 5 | def ocio_transform( 6 | self, 7 | input_image: NDArray, 8 | config_path: str, 9 | exposure: float, 10 | gamma: float, 11 | view_transform: str, 12 | display_device: str, 13 | look: str, 14 | inverse: bool 15 | ): 16 | import PyOpenColorIO as OCIO 17 | 18 | ocio_config = OCIO.Config.CreateFromFile(config_path) 19 | 20 | # A reimplementation of `OCIOImpl::createDisplayProcessor` from the Blender source. 21 | # https://github.com/dfelinto/blender/blob/87a0770bb969ce37d9a41a04c1658ea09c63933a/intern/opencolorio/ocio_impl.cc#L643 22 | def create_display_processor( 23 | config, 24 | input_colorspace, 25 | view, 26 | display, 27 | look, 28 | scale, # Exposure 29 | exponent, # Gamma 30 | inverse=False 31 | ): 32 | group = OCIO.GroupTransform() 33 | 34 | # Exposure 35 | if scale != 1: 36 | # Always apply exposure in scene linear. 
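            # (Added note: the caller passes `scale` as 2**exposure, see `math.pow(2, exposure)`
            # further down, so this branch is skipped entirely when the exposure is 0.)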
37 |             color_space_transform = OCIO.ColorSpaceTransform()
38 |             color_space_transform.setSrc(input_colorspace)
39 |             color_space_transform.setDst(OCIO.ROLE_SCENE_LINEAR)
40 |             group.appendTransform(color_space_transform)
41 | 
42 |             # Make further transforms aware of the color space change
43 |             input_colorspace = OCIO.ROLE_SCENE_LINEAR
44 | 
45 |             # Apply scale
46 |             matrix_transform = OCIO.MatrixTransform([scale, 0.0, 0.0, 0.0, 0.0, scale, 0.0, 0.0, 0.0, 0.0, scale, 0.0, 0.0, 0.0, 0.0, 1.0])
47 |             group.appendTransform(matrix_transform)
48 | 
49 |         # Add look transform
50 |         use_look = look is not None and len(look) > 0
51 |         if use_look:
52 |             look_output = config.getLook(look).getProcessSpace()
53 |             if look_output is not None and len(look_output) > 0:
54 |                 look_transform = OCIO.LookTransform()
55 |                 look_transform.setSrc(input_colorspace)
56 |                 look_transform.setDst(look_output)
57 |                 look_transform.setLooks(look)
58 |                 group.appendTransform(look_transform)
59 |                 # Make further transforms aware of the color space change.
60 |                 input_colorspace = look_output
61 |             else:
62 |                 # For empty looks, no output color space is returned.
63 |                 use_look = False
64 | 
65 |         # Add view and display transform
66 |         display_view_transform = OCIO.DisplayViewTransform()
67 |         display_view_transform.setSrc(input_colorspace)
68 |         display_view_transform.setLooksBypass(True)
69 |         display_view_transform.setView(view)
70 |         display_view_transform.setDisplay(display)
71 |         group.appendTransform(display_view_transform)
72 | 
73 |         # Gamma
74 |         if exponent != 1:
75 |             exponent_transform = OCIO.ExponentTransform([exponent, exponent, exponent, 1.0])
76 |             group.appendTransform(exponent_transform)
77 | 
78 |         # Create processor from transform. This is the moment where OCIO validates
79 |         # the entire transform, no need to check for the validity of inputs above.
80 | try: 81 | if inverse: 82 | group.setDirection(OCIO.TransformDirection.TRANSFORM_DIR_INVERSE) 83 | processor = config.getProcessor(group) 84 | if processor is not None: 85 | return processor 86 | except Exception as e: 87 | self.send_exception(True, msg=str(e), trace="") 88 | 89 | return None 90 | 91 | # Exposure and gamma transformations derived from Blender source: 92 | # https://github.com/dfelinto/blender/blob/87a0770bb969ce37d9a41a04c1658ea09c63933a/source/blender/imbuf/intern/colormanagement.c#L825 93 | scale = math.pow(2, exposure) 94 | exponent = 1 if gamma == 1 else (1 / (gamma if gamma > sys.float_info.epsilon else sys.float_info.epsilon)) 95 | processor = create_display_processor(ocio_config, OCIO.ROLE_SCENE_LINEAR, view_transform, display_device, look if look != 'None' else None, scale, exponent, inverse) 96 | 97 | processor.getDefaultCPUProcessor().applyRGBA(input_image) 98 | return input_image -------------------------------------------------------------------------------- /generator_process/actions/p2i_blender.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/intel/dream-textures-openvino/a337917b7903c257eefbfc361831ba7b273cfa4b/generator_process/actions/p2i_blender.png -------------------------------------------------------------------------------- /generator_process/actions/test1_blender.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/intel/dream-textures-openvino/a337917b7903c257eefbfc361831ba7b273cfa4b/generator_process/actions/test1_blender.png -------------------------------------------------------------------------------- /generator_process/actor.py: -------------------------------------------------------------------------------- 1 | from multiprocessing import Queue, Process, Lock, current_process, get_context 2 | import multiprocessing.synchronize 3 | import enum 4 | import traceback 5 | import threading 6 | from typing import Type, TypeVar, Callable, Any, MutableSet, Generator 7 | import site 8 | import sys 9 | from ..absolute_path import absolute_path 10 | 11 | def _load_dependencies(): 12 | site.addsitedir(absolute_path(".python_dependencies")) 13 | deps = sys.path.pop(-1) 14 | sys.path.insert(0, deps) 15 | if current_process().name == "__actor__": 16 | _load_dependencies() 17 | 18 | class Future: 19 | """ 20 | Object that represents a value that has not completed processing, but will in the future. 21 | 22 | Add callbacks to be notified when values become available, or use `.result()` and `.exception()` to wait for the value. 23 | """ 24 | _response_callbacks: MutableSet[Callable[['Future', Any], None]] = set() 25 | _exception_callbacks: MutableSet[Callable[['Future', BaseException], None]] = set() 26 | _done_callbacks: MutableSet[Callable[['Future'], None]] = set() 27 | _responses: list = [] 28 | _exception: BaseException | None = None 29 | _done_event: threading.Event 30 | done: bool = False 31 | cancelled: bool = False 32 | call_done_on_exception: bool = True 33 | 34 | def __init__(self): 35 | self._response_callbacks = set() 36 | self._exception_callbacks = set() 37 | self._done_callbacks = set() 38 | self._responses = [] 39 | self._exception = None 40 | self._done_event = threading.Event() 41 | self.done = False 42 | self.cancelled = False 43 | self.call_done_on_exception = True 44 | 45 | def result(self, last_only=False): 46 | """ 47 | Get the result value (blocking). 
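
        A minimal sketch of the intended call pattern (the `ocio_transform` call mirrors
        how `render_pass.py` uses it; the argument dict here is illustrative):

            future = Generator.shared().ocio_transform(pixels, **ocio_args)
            value = future.result()  # blocks until the backend marks the future as done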
48 | """ 49 | def _response(): 50 | match len(self._responses): 51 | case 0: 52 | return None 53 | case 1: 54 | return self._responses[0] 55 | case _: 56 | return self._responses[-1] if last_only else self._responses 57 | if self._exception is not None: 58 | raise self._exception 59 | if self.done: 60 | return _response() 61 | else: 62 | self._done_event.wait() 63 | if self._exception is not None: 64 | raise self._exception 65 | return _response() 66 | 67 | def exception(self): 68 | if self.done: 69 | return self._exception 70 | else: 71 | self._done_event.wait() 72 | return self._exception 73 | 74 | def cancel(self): 75 | self.cancelled = True 76 | 77 | def _run_on_main_thread(self, func): 78 | import bpy 79 | bpy.app.timers.register(func) 80 | 81 | def add_response(self, response): 82 | """ 83 | Add a response value and notify all consumers. 84 | """ 85 | self._responses.append(response) 86 | def run_callbacks(): 87 | for response_callback in self._response_callbacks: 88 | response_callback(self, response) 89 | self._run_on_main_thread(run_callbacks) 90 | 91 | def set_exception(self, exception: BaseException): 92 | """ 93 | Set the exception. 94 | """ 95 | self._exception = exception 96 | def run_callbacks(): 97 | for exception_callback in self._exception_callbacks: 98 | exception_callback(self, exception) 99 | self._run_on_main_thread(run_callbacks) 100 | 101 | def set_done(self): 102 | """ 103 | Mark the future as done. 104 | """ 105 | assert not self.done 106 | self.done = True 107 | self._done_event.set() 108 | if self._exception is None or self.call_done_on_exception: 109 | def run_callbacks(): 110 | for done_callback in self._done_callbacks: 111 | done_callback(self) 112 | self._run_on_main_thread(run_callbacks) 113 | 114 | def add_response_callback(self, callback: Callable[['Future', Any], None]): 115 | """ 116 | Add a callback to run whenever a response is received. 117 | Will be called multiple times by generator functions. 118 | """ 119 | self._response_callbacks.add(callback) 120 | 121 | def add_exception_callback(self, callback: Callable[['Future', BaseException], None]): 122 | """ 123 | Add a callback to run when the future errors. 124 | Will only be called once at the first exception. 125 | """ 126 | self._exception_callbacks.add(callback) 127 | 128 | def add_done_callback(self, callback: Callable[['Future'], None]): 129 | """ 130 | Add a callback to run when the future is marked as done. 131 | Will only be called once. 132 | """ 133 | self._done_callbacks.add(callback) 134 | 135 | class ActorContext(enum.IntEnum): 136 | """ 137 | The context of an `Actor` object. 138 | 139 | One `Actor` instance is the `FRONTEND`, while the other instance is the backend, which runs in a separate process. 140 | The `FRONTEND` sends messages to the `BACKEND`, which does work and returns a result. 141 | """ 142 | FRONTEND = 0 143 | BACKEND = 1 144 | 145 | class Message: 146 | """ 147 | Represents a function signature with a method name, positonal arguments, and keyword arguments. 148 | 149 | Note: All arguments must be picklable. 
150 | """ 151 | 152 | def __init__(self, method_name, args, kwargs): 153 | self.method_name = method_name 154 | self.args = args 155 | self.kwargs = kwargs 156 | 157 | CANCEL = "__cancel__" 158 | END = "__end__" 159 | 160 | def _start_backend(cls, message_queue, response_queue): 161 | cls( 162 | ActorContext.BACKEND, 163 | message_queue=message_queue, 164 | response_queue=response_queue 165 | ).start() 166 | 167 | class TracedError(BaseException): 168 | def __init__(self, base: BaseException, trace: str): 169 | self.base = base 170 | self.trace = trace 171 | 172 | T = TypeVar('T', bound='Actor') 173 | 174 | class Actor: 175 | """ 176 | Base class for specialized actors. 177 | 178 | Uses queues to send actions to a background process and receive a response. 179 | Calls to any method declared by the frontend are automatically dispatched to the backend. 180 | 181 | All function arguments must be picklable. 182 | """ 183 | 184 | _message_queue: Queue 185 | _response_queue: Queue 186 | _lock: multiprocessing.synchronize.Lock 187 | 188 | _shared_instance = None 189 | 190 | # Methods that are not used for message passing, and should not be overridden in `_setup`. 191 | _protected_methods = { 192 | "start", 193 | "close", 194 | "is_alive", 195 | "can_use", 196 | "shared" 197 | } 198 | 199 | def __init__(self, context: ActorContext, message_queue: Queue = None, response_queue: Queue = None): 200 | self.context = context 201 | self._message_queue = message_queue if message_queue is not None else get_context('spawn').Queue(maxsize=1) 202 | self._response_queue = response_queue if response_queue is not None else get_context('spawn').Queue(maxsize=1) 203 | self._setup() 204 | self.__class__._shared_instance = self 205 | 206 | def _setup(self): 207 | """ 208 | Setup the Actor after initialization. 209 | """ 210 | match self.context: 211 | case ActorContext.FRONTEND: 212 | self._lock = Lock() 213 | for name in filter(lambda name: callable(getattr(self, name)) and not name.startswith("_") and name not in self._protected_methods, dir(self)): 214 | setattr(self, name, self._send(name)) 215 | case ActorContext.BACKEND: 216 | pass 217 | 218 | @classmethod 219 | def shared(cls: Type[T]) -> T: 220 | return cls._shared_instance or cls(ActorContext.FRONTEND).start() 221 | 222 | def start(self: T) -> T: 223 | """ 224 | Start the actor process. 225 | """ 226 | match self.context: 227 | case ActorContext.FRONTEND: 228 | self.process = get_context('spawn').Process(target=_start_backend, args=(self.__class__, self._message_queue, self._response_queue), name="__actor__", daemon=True) 229 | self.process.start() 230 | case ActorContext.BACKEND: 231 | self._backend_loop() 232 | return self 233 | 234 | def close(self): 235 | """ 236 | Stop the actor process. 
237 | """ 238 | match self.context: 239 | case ActorContext.FRONTEND: 240 | self.process.terminate() 241 | self._message_queue.close() 242 | self._response_queue.close() 243 | case ActorContext.BACKEND: 244 | pass 245 | 246 | @classmethod 247 | def shared_close(cls: Type[T]): 248 | if cls._shared_instance is None: 249 | return 250 | cls._shared_instance.close() 251 | cls._shared_instance = None 252 | 253 | def is_alive(self): 254 | match self.context: 255 | case ActorContext.FRONTEND: 256 | return self.process.is_alive() 257 | case ActorContext.BACKEND: 258 | return True 259 | 260 | def can_use(self): 261 | if result := self._lock.acquire(block=False): 262 | self._lock.release() 263 | return result 264 | 265 | def _backend_loop(self): 266 | while True: 267 | self._receive(self._message_queue.get()) 268 | 269 | def _receive(self, message: Message): 270 | try: 271 | response = getattr(self, message.method_name)(*message.args, **message.kwargs) 272 | if isinstance(response, Generator): 273 | for res in iter(response): 274 | extra_message = None 275 | try: 276 | extra_message = self._message_queue.get(block=False) 277 | except: 278 | pass 279 | if extra_message == Message.CANCEL: 280 | break 281 | self._response_queue.put(res) 282 | else: 283 | self._response_queue.put(response) 284 | except Exception as e: 285 | trace = traceback.format_exc() 286 | try: 287 | if sys.modules[e.__module__].__file__.startswith(absolute_path(".python_dependencies")): 288 | e = RuntimeError(repr(e)) 289 | # might be more suitable to have specific substitute exceptions for cases 290 | # like torch.cuda.OutOfMemoryError for frontend handling in the future 291 | except (AttributeError, KeyError): 292 | pass 293 | self._response_queue.put(TracedError(e, trace)) 294 | self._response_queue.put(Message.END) 295 | 296 | def _send(self, name): 297 | def _send(*args, _block=False, **kwargs): 298 | future = Future() 299 | def _send_thread(future: Future): 300 | self._lock.acquire() 301 | self._message_queue.put(Message(name, args, kwargs)) 302 | 303 | while not future.done: 304 | if future.cancelled: 305 | self._message_queue.put(Message.CANCEL) 306 | response = self._response_queue.get() 307 | if response == Message.END: 308 | future.set_done() 309 | elif isinstance(response, TracedError): 310 | response.base.__cause__ = Exception(response.trace) 311 | future.set_exception(response.base) 312 | elif isinstance(response, Exception): 313 | future.set_exception(response) 314 | else: 315 | future.add_response(response) 316 | 317 | self._lock.release() 318 | if _block: 319 | _send_thread(future) 320 | else: 321 | thread = threading.Thread(target=_send_thread, args=(future,), daemon=True) 322 | thread.start() 323 | return future 324 | return _send 325 | 326 | def __del__(self): 327 | self.close() -------------------------------------------------------------------------------- /generator_process/block_in_use.py: -------------------------------------------------------------------------------- 1 | def block_in_use(func): 2 | def block(self, *args, **kwargs): 3 | if self.in_use: 4 | raise RuntimeError(f"Can't call {func.__qualname__} while process is in use") 5 | self.in_use = True 6 | 7 | # generator function is separate so in_use gets set immediately rather than waiting for first next() call 8 | def sub(): 9 | try: 10 | yield from func(self, *args, **kwargs) 11 | finally: 12 | self.in_use = False 13 | return sub() 14 | 15 | # Pass the name through so we can use it in `setattr` on `GeneratorProcess`. 
16 | block.__name__ = func.__name__ 17 | return block -------------------------------------------------------------------------------- /generator_process/directml_patches.py: -------------------------------------------------------------------------------- 1 | import functools 2 | 3 | import torch 4 | from torch import Tensor 5 | 6 | active_dml_patches: list | None = None 7 | 8 | 9 | def tensor_ensure_device(self, other, *, pre_patch): 10 | """Fix for operations where one tensor is DML and the other is CPU.""" 11 | if isinstance(other, Tensor) and self.device != other.device: 12 | if self.device.type != "cpu": 13 | other = other.to(self.device) 14 | else: 15 | self = self.to(other.device) 16 | return pre_patch(self, other) 17 | 18 | 19 | def baddbmm(input, batch1, batch2, *, beta=1, alpha=1, out=None, pre_patch): 20 | if input.device.type == "privateuseone" and beta == 0: 21 | if out is not None: 22 | torch.bmm(batch1, batch2, out=out) 23 | out *= alpha 24 | return out 25 | return alpha * (batch1 @ batch2) 26 | return pre_patch(input, batch1, batch2, beta=beta, alpha=alpha, out=out) 27 | 28 | 29 | def pad(input, pad, mode="constant", value=None, *, pre_patch): 30 | if input.device.type == "privateuseone" and mode == "constant": 31 | pad_dims = torch.tensor(pad, dtype=torch.int32).view(-1, 2).flip(0) 32 | both_ends = False 33 | for pre, post in pad_dims: 34 | if pre != 0 and post != 0: 35 | both_ends = True 36 | break 37 | if both_ends: 38 | if value is None: 39 | value = 0 40 | if pad_dims.size(0) < input.ndim: 41 | pad_dims = pre_patch(pad_dims, (0, 0, input.ndim-pad_dims.size(0), 0)) 42 | ret = torch.full(torch.Size(torch.tensor(input.size(), dtype=pad_dims.dtype) + pad_dims.sum(dim=1)), 43 | fill_value=value, dtype=input.dtype, device=input.device) 44 | assign_slices = [slice(max(0, int(pre)), None if post <= 0 else -max(0, int(post))) for pre, post in pad_dims] 45 | index_slices = [slice(max(0, -int(pre)), None if post >= 0 else -max(0, -int(post))) for pre, post in pad_dims] 46 | ret[assign_slices] = input[index_slices] 47 | return ret 48 | return pre_patch(input, pad, mode=mode, value=value) 49 | 50 | 51 | def getitem(self, key, *, pre_patch): 52 | if isinstance(key, Tensor) and "privateuseone" in [self.device.type, key.device.type] and key.numel() == 1: 53 | return pre_patch(self, int(key)) 54 | return pre_patch(self, key) 55 | 56 | 57 | def enable(pipe): 58 | global active_dml_patches 59 | if active_dml_patches is not None: 60 | return 61 | active_dml_patches = [] 62 | 63 | def dml_patch(object, name, patched): 64 | original = getattr(object, name) 65 | setattr(object, name, functools.partial(patched, pre_patch=original)) 66 | active_dml_patches.append({"object": object, "name": name, "original": original}) 67 | 68 | def dml_patch_method(object, name, patched): 69 | original = getattr(object, name) 70 | setattr(object, name, functools.partialmethod(patched, pre_patch=original)) 71 | active_dml_patches.append({"object": object, "name": name, "original": original}) 72 | 73 | # Not all places where the patches have an effect are necessarily listed. 
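    # (Added note: after `dml_patch(torch, "baddbmm", baddbmm)` below, calls to `torch.baddbmm`
    # reach the wrapper defined above, which receives the original function as `pre_patch` and
    # falls back to it whenever the DML-specific handling does not apply.)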
74 | 75 | # PNDMScheduler.step() 76 | dml_patch_method(Tensor, "__mul__", tensor_ensure_device) 77 | # PNDMScheduler.step() 78 | dml_patch_method(Tensor, "__sub__", tensor_ensure_device) 79 | # DDIMScheduler.step() last timestep in image_to_image 80 | dml_patch_method(Tensor, "__truediv__", tensor_ensure_device) 81 | 82 | # CrossAttention.get_attention_scores() 83 | # AttentionBlock.forward() 84 | # Diffusers implementation gives torch.empty() tensors with beta=0 to baddbmm(), which may contain NaNs. 85 | # DML implementation doesn't properly ignore input argument with beta=0 and causes NaN propagation. 86 | dml_patch(torch, "baddbmm", baddbmm) 87 | 88 | dml_patch(torch.nn.functional, "pad", pad) 89 | # DDIMScheduler.step(), PNDMScheduler.step(), No error messages or crashes, just may randomly freeze. 90 | dml_patch_method(Tensor, "__getitem__", getitem) 91 | 92 | def decorate_forward(name, module): 93 | """Helper function to better find which modules DML fails in as it often does 94 | not raise an exception and immediately crashes the python interpreter.""" 95 | original = module.forward 96 | 97 | def func(self, *args, **kwargs): 98 | print(f"{name} in module {type(self)}") 99 | 100 | def nan_check(key, x): 101 | if isinstance(x, Tensor) and x.dtype in [torch.float16, torch.float32] and x.isnan().any(): 102 | raise RuntimeError(f"{key} got NaN!") 103 | 104 | for i, v in enumerate(args): 105 | nan_check(i, v) 106 | for k, v in kwargs.items(): 107 | nan_check(k, v) 108 | return original(*args, **kwargs) 109 | module.forward = func.__get__(module) 110 | 111 | # only enable when testing 112 | # for name, model in [("text_encoder", pipe.text_encoder), ("unet", pipe.unet), ("vae", pipe.vae)]: 113 | # for module in model.modules(): 114 | # decorate_forward(name, module) 115 | 116 | 117 | def disable(pipe): 118 | global active_dml_patches 119 | if active_dml_patches is None: 120 | return 121 | for patch in active_dml_patches: 122 | setattr(patch["object"], patch["name"], patch["original"]) 123 | active_dml_patches = None 124 | -------------------------------------------------------------------------------- /generator_process/models/__init__.py: -------------------------------------------------------------------------------- 1 | from .pipeline import * 2 | from .fix_it_error import * -------------------------------------------------------------------------------- /generator_process/models/fix_it_error.py: -------------------------------------------------------------------------------- 1 | from typing import Callable, Any 2 | 3 | class FixItError(Exception): 4 | """An exception with a solution. 5 | 6 | Call the `draw` method to render the UI elements responsible for resolving this error. 
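
    A sketch of how the add-on's validation code raises it (pattern taken from
    `dream_prompt_validation.py`; the message text here is illustrative):

        raise FixItError(
            "The selected pipeline does not support depth tasks.\n"
            "Select a different pipeline below.",
            lambda _, layout: layout.prop(self, "pipeline")
        )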
7 | """ 8 | def __init__(self, message, fix_it: Callable[[Any, Any], None]): 9 | super().__init__(message) 10 | 11 | self._fix_it = fix_it 12 | 13 | def draw(self, context, layout): 14 | self._fix_it(context, layout) -------------------------------------------------------------------------------- /generator_process/models/pipeline.py: -------------------------------------------------------------------------------- 1 | import enum 2 | import os 3 | 4 | class Pipeline(enum.IntEnum): 5 | STABLE_DIFFUSION = 0 6 | 7 | STABILITY_SDK = 1 8 | 9 | @staticmethod 10 | def local_available(): 11 | from ...absolute_path import absolute_path 12 | return os.path.exists(absolute_path(".python_dependencies/diffusers")) 13 | 14 | @staticmethod 15 | def directml_available(): 16 | from ...absolute_path import absolute_path 17 | return os.path.exists(absolute_path(".python_dependencies/torch_directml")) 18 | 19 | def __str__(self): 20 | return self.name 21 | 22 | def model(self): 23 | return True 24 | 25 | def init_img_actions(self): 26 | match self: 27 | case Pipeline.STABLE_DIFFUSION: 28 | return ['modify', 'inpaint', 'outpaint'] 29 | case Pipeline.STABILITY_SDK: 30 | return ['modify', 'inpaint'] 31 | 32 | def inpaint_mask_sources(self): 33 | match self: 34 | case Pipeline.STABLE_DIFFUSION: 35 | return ['alpha', 'prompt'] 36 | case Pipeline.STABILITY_SDK: 37 | return ['alpha'] 38 | 39 | def color_correction(self): 40 | match self: 41 | case Pipeline.STABLE_DIFFUSION: 42 | return True 43 | case Pipeline.STABILITY_SDK: 44 | return False 45 | 46 | def negative_prompts(self): 47 | match self: 48 | case Pipeline.STABLE_DIFFUSION: 49 | return True 50 | case Pipeline.STABILITY_SDK: 51 | return False 52 | 53 | def seamless(self): 54 | match self: 55 | case Pipeline.STABLE_DIFFUSION: 56 | return True 57 | case Pipeline.STABILITY_SDK: 58 | return False 59 | 60 | def upscaling(self): 61 | match self: 62 | case Pipeline.STABLE_DIFFUSION: 63 | return True 64 | case Pipeline.STABILITY_SDK: 65 | return False 66 | 67 | def depth(self): 68 | match self: 69 | case Pipeline.STABLE_DIFFUSION: 70 | return True 71 | case Pipeline.STABILITY_SDK: 72 | return False -------------------------------------------------------------------------------- /install.bat: -------------------------------------------------------------------------------- 1 | :<> 32) # 64 bit hash down to 32 bits 227 | 228 | def get_optimizations(self: DreamPrompt): 229 | optimizations = Optimizations() 230 | for prop in dir(self): 231 | split_name = prop.replace('optimizations_', '') 232 | if prop.startswith('optimizations_') and hasattr(optimizations, split_name): 233 | setattr(optimizations, split_name, getattr(self, prop)) 234 | if self.optimizations_attention_slice_size_src == 'auto': 235 | optimizations.attention_slice_size = 'auto' 236 | return optimizations 237 | 238 | def generate_args(self): 239 | args = { key: getattr(self, key) for key in DreamPrompt.__annotations__ } 240 | if not args['use_negative_prompt']: 241 | args['negative_prompt'] = None 242 | args['prompt'] = self.generate_prompt() 243 | args['seed'] = self.get_seed() 244 | args['optimizations'] = self.get_optimizations() 245 | args['infer_device_text'] = device_name_enum(args['infer_device_text']) 246 | args['infer_device_unet'] = device_name_enum(args['infer_device_unet']) 247 | try: 248 | args['infer_device_unet_pos'] = device_name_enum_npu(args['infer_device_unet_pos']) 249 | args['infer_device_unet_neg'] = device_name_enum_npu(args['infer_device_unet_neg']) 250 | except: 251 | print("unet 
pos, neg device not set--check if you have NPU and correct openvino env") 252 | args['infer_device_vae'] = device_name_enum(args['infer_device_vae']) 253 | args['infer_model'] = model_name_enum(args['infer_model']) 254 | args['infer_model_size'] = model_size_enum(args['infer_model_size']) 255 | args['scheduler'] = Scheduler(args['scheduler']) 256 | args['step_preview_mode'] = StepPreviewMode(args['step_preview_mode']) 257 | args['pipeline'] = Pipeline[args['pipeline']] 258 | # args['outpaint_origin'] = (args['outpaint_origin'][0], args['outpaint_origin'][1]) 259 | 260 | if not args['Tiling']: 261 | args['Tiling'] = None 262 | args['width'] = args['width'] if args['use_size'] else None 263 | args['height'] = args['height'] if args['use_size'] else None 264 | return args 265 | 266 | 267 | 268 | 269 | DreamPrompt.generate_prompt = generate_prompt 270 | DreamPrompt.get_prompt_subject = get_prompt_subject 271 | DreamPrompt.get_seed = get_seed 272 | DreamPrompt.get_optimizations = get_optimizations 273 | DreamPrompt.generate_args = generate_args 274 | weight_path = os.path.join(os.path.expanduser("~"), "Blender_SD_models") 275 | DreamPrompt.weight_path = weight_path 276 | 277 | DreamPrompt.validate = validate -------------------------------------------------------------------------------- /property_groups/dream_prompt_validation.py: -------------------------------------------------------------------------------- 1 | 2 | 3 | from ..generator_process.models import Pipeline, FixItError 4 | from ..generator_process.actions.huggingface_hub import ModelType 5 | #from ..preferences import OpenURL 6 | 7 | def validate(self, context, task: ModelType | None = None) -> bool: 8 | 9 | 10 | 11 | if task is None: 12 | scene = context.scene 13 | 14 | generated_args = scene.dream_textures_prompt.generate_args() 15 | infer_model = generated_args['infer_model'].name 16 | if self.use_init_img: 17 | match self.init_img_action: 18 | case 'modify': 19 | match self.modify_action_source_type: 20 | case 'color': 21 | if infer_model == "Stable_Diffusion_1_5_int8": 22 | task = ModelType.PROMPT_TO_IMAGE_INT8 23 | else: 24 | task = ModelType.PROMPT_TO_IMAGE 25 | case 'depth_generated' | 'depth_map' | 'depth': 26 | if infer_model == "Stable_Diffusion_1_5_controlnet_depth_int8": 27 | task = ModelType.DEPTH_INT8 28 | else: 29 | task = ModelType.DEPTH 30 | if task is None: 31 | if infer_model == "Stable_Diffusion_1_5_int8": 32 | task = ModelType.PROMPT_TO_IMAGE_INT8 33 | else: 34 | task = ModelType.PROMPT_TO_IMAGE 35 | 36 | # Check if the pipeline supports the task. 37 | pipeline = Pipeline.STABLE_DIFFUSION #Pipeline[self.pipeline] 38 | match task: 39 | case ModelType.DEPTH: 40 | if not pipeline.depth(): 41 | raise FixItError( 42 | f"""The selected pipeline does not support {task.name.replace('_', ' ').lower()} tasks. 43 | Select a different pipeline below.""", 44 | lambda _, layout: layout.prop(self, "pipeline") 45 | ) 46 | case ModelType.DEPTH_INT8: 47 | if not pipeline.depth(): 48 | raise FixItError( 49 | f"""The selected pipeline does not support {task.name.replace('_', ' ').lower()} tasks. 50 | Select a different pipeline below.""", 51 | lambda _, layout: layout.prop(self, "pipeline") 52 | ) 53 | 54 | # Pipeline-specific checks 55 | match pipeline: 56 | case Pipeline.STABLE_DIFFUSION: 57 | if not Pipeline.local_available(): 58 | raise FixItError( 59 | "Local generation is not available for the variant of the add-on you have installed. 
Choose a different Pipeline such as 'DreamStudio'", 60 | lambda _, layout: layout.prop(self, "pipeline") 61 | ) 62 | 63 | if self.infer_model != task.recommended_model(): 64 | raise FixItError( 65 | f"""Incorrect model type selected for {task.name.replace('_', ' ').lower()} tasks. 66 | Select {task.recommended_model()} for the task from below.""", 67 | lambda _, layout: layout.prop(self, "infer_model") 68 | 69 | ) 70 | 71 | 72 | init_image = None 73 | if self.use_init_img: 74 | match self.init_img_src: 75 | case 'file': 76 | init_image = context.scene.init_img 77 | case 'open_editor': 78 | for area in context.screen.areas: 79 | if area.type == 'IMAGE_EDITOR': 80 | if area.spaces.active.image is not None: 81 | init_image = area.spaces.active.image 82 | if init_image is not None and init_image.type == 'RENDER_RESULT': 83 | def fix_init_img(ctx, layout): 84 | layout.prop(self, "init_img_src", expand=True) 85 | if self.init_img_src == 'file': 86 | layout.template_ID(context.scene, "init_img", open="image.open") 87 | layout.label(text="Or, enable the render pass to generate after each render.") 88 | #layout.operator(OpenURL.bl_idname, text="Learn More", icon="QUESTION").url = "https://github.com/carson-katri/dream-textures/blob/main/docs/RENDER_PASS.md" 89 | raise FixItError("""'Render Result' cannot be used as a source image. 90 | Save the image then open the file to use it as a source image.""", 91 | fix_init_img 92 | ) 93 | 94 | return True -------------------------------------------------------------------------------- /realtime_viewport.py: -------------------------------------------------------------------------------- 1 | # Realtime Viewport is still under development, and is not currently used. 2 | import bpy 3 | import cycles 4 | import time 5 | import threading 6 | import gpu 7 | from gpu_extras.batch import batch_for_shader 8 | import numpy as np 9 | from multiprocessing.shared_memory import SharedMemory 10 | from .operators.dream_texture import dream_texture 11 | 12 | view_update_original = cycles.CyclesRender.view_update 13 | view_draw_original = cycles.CyclesRender.view_draw 14 | 15 | def debounce(wait_time): 16 | """ 17 | Decorator that will debounce a function so that it is called after wait_time seconds 18 | If it is called multiple times, will wait for the last call to be debounced and run only this one. 
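
    A minimal sketch of the intended use (the decorated function and wait time are illustrative):

        @debounce(0.5)
        def on_viewport_settled():
            print("no viewport updates for 0.5 seconds")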
19 | """ 20 | 21 | def decorator(function): 22 | def debounced(*args, **kwargs): 23 | def call_function(): 24 | debounced._timer = None 25 | return function(*args, **kwargs) 26 | # if we already have a call to the function currently waiting to be executed, reset the timer 27 | if debounced._timer is not None: 28 | debounced._timer.cancel() 29 | 30 | # after wait_time, call the function provided to the decorator with its arguments 31 | debounced._timer = threading.Timer(wait_time, call_function) 32 | debounced._timer.start() 33 | 34 | debounced._timer = None 35 | return debounced 36 | 37 | return decorator 38 | 39 | def DREAMTEXTURES_HT_viewport_enabled(self, context): 40 | self.layout.prop(context.scene, "dream_textures_viewport_enabled", text="", icon="OUTLINER_OB_VOLUME" if context.scene.dream_textures_viewport_enabled else "VOLUME_DATA", toggle=True) 41 | 42 | is_rendering_viewport = False 43 | last_viewport_update = time.time() 44 | last_viewport_pixel_buffer_update = time.time() 45 | dream_viewport = None 46 | is_rendering_dream = False 47 | render_dream_flag = False 48 | viewport_pixel_buffer = None 49 | viewport_size = (0, 0) 50 | ignore_next = 0 51 | def create_image(): 52 | print("Create image") 53 | global dream_viewport 54 | dream_viewport = bpy.data.images.new('Dream Viewport', width=32, height=32) 55 | 56 | def register_realtime_viewport(): 57 | bpy.app.timers.register(create_image) 58 | 59 | def view_update_decorator(original): 60 | def view_update(self, context, depsgraph): 61 | result = original(self, context, depsgraph) 62 | global last_viewport_update 63 | global ignore_next 64 | if ignore_next <= 0: 65 | last_viewport_update = time.time() 66 | print("View Update") 67 | ignore_next -= 1 68 | return result 69 | return view_update 70 | cycles.CyclesRender.view_update = view_update_decorator(cycles.CyclesRender.view_update) 71 | 72 | def updates_stopped(): 73 | global last_viewport_update 74 | global is_rendering_viewport 75 | global is_rendering_dream 76 | threshold_reached = (time.time() - last_viewport_update) < 0.5 77 | if threshold_reached != is_rendering_viewport: 78 | is_rendering_viewport = threshold_reached 79 | global viewport_pixel_buffer 80 | if not is_rendering_viewport and not is_rendering_dream and viewport_pixel_buffer is not None: 81 | print("Stopped rendering viewport") 82 | is_rendering_dream = True 83 | array = np.flipud((np.array(viewport_pixel_buffer) * 255).astype(np.int8)) 84 | pixels_memory = SharedMemory(create=True, size=array.nbytes) 85 | pixels_memory_array = np.ndarray(array.shape, dtype=array.dtype, buffer=pixels_memory.buf) 86 | pixels_memory_array[:] = array[:] 87 | 88 | def image_callback(shared_memory_name, seed, width, height, upscaled=False): 89 | if not upscaled: 90 | shared_memory = SharedMemory(shared_memory_name) 91 | pixels = np.frombuffer(shared_memory.buf, dtype=np.float32).copy() 92 | 93 | global ignore_next 94 | ignore_next = 5 95 | global dream_viewport 96 | dream_viewport.scale(width, height) 97 | dream_viewport.pixels[:] = pixels 98 | 99 | shared_memory.close() 100 | pixels_memory.close() 101 | 102 | print("Done") 103 | global is_rendering_dream 104 | is_rendering_dream = False 105 | # for area in bpy.context.screen.areas: 106 | # if area.type == 'VIEW_3D': 107 | # area.tag_redraw() 108 | 109 | def step_callback(step, width=None, height=None, shared_memory_name=None): 110 | pass 111 | 112 | dream_texture(bpy.context.scene.dream_textures_render_properties_prompt, step_callback, image_callback, 
init_img_shared_memory=pixels_memory.name, init_img_shared_memory_width=viewport_size[0], init_img_shared_memory_height=viewport_size[1]) 113 | return 0.5 114 | bpy.app.timers.register(updates_stopped) 115 | 116 | def draw(): 117 | global last_viewport_pixel_buffer_update 118 | if not bpy.context.scene.dream_textures_viewport_enabled: 119 | return 120 | if (time.time() - last_viewport_pixel_buffer_update) < 0.5: 121 | return 122 | last_viewport_pixel_buffer_update = time.time() 123 | # get currently bound framebuffer 124 | framebuffer = gpu.state.active_framebuffer_get() 125 | 126 | # get information on current viewport 127 | viewport_info = gpu.state.viewport_get() 128 | width = viewport_info[2] 129 | height = viewport_info[3] 130 | 131 | global viewport_pixel_buffer 132 | global viewport_size 133 | viewport_pixel_buffer = framebuffer.read_color(0, 0, width, height, 4, 0, 'FLOAT').to_list() 134 | viewport_size = (width, height) 135 | 136 | bpy.types.SpaceView3D.draw_handler_add(draw, (), 'WINDOW', 'PRE_VIEW') 137 | def draw_dream(): 138 | global is_rendering_dream 139 | global is_rendering_viewport 140 | global dream_viewport 141 | if not bpy.context.scene.dream_textures_viewport_enabled or is_rendering_viewport: 142 | return 143 | texture = gpu.texture.from_image(dream_viewport) 144 | viewport_info = gpu.state.viewport_get() 145 | width = viewport_info[2] 146 | height = viewport_info[3] 147 | shader = gpu.shader.from_builtin("2D_IMAGE") 148 | shader.bind() 149 | shader.uniform_sampler("image", texture) 150 | batch = batch_for_shader(shader, 'TRI_FAN', { 151 | 'pos': ((0, 0), (width, 0), (width, height), (0, height)), 152 | 'texCoord': ((0, 0), (1, 0), (1, 1), (0, 1)), 153 | }) 154 | batch.draw(shader) 155 | bpy.types.SpaceView3D.draw_handler_add(draw_dream, (), 'WINDOW', 'POST_PIXEL') 156 | 157 | bpy.types.VIEW3D_HT_header.append(DREAMTEXTURES_HT_viewport_enabled) 158 | 159 | def unregister_realtime_viewport(): 160 | global view_update_original 161 | cycles.CyclesRender.view_update = view_update_original 162 | global view_draw_original 163 | cycles.CyclesRender.view_draw = view_draw_original 164 | 165 | bpy.types.VIEW3D_HT_header.remove(DREAMTEXTURES_HT_viewport_enabled) -------------------------------------------------------------------------------- /render_pass.py: -------------------------------------------------------------------------------- 1 | import bpy 2 | import cycles 3 | import numpy as np 4 | import os 5 | from .generator_process.actions.prompt_to_image import ImageGenerationResult 6 | from .generator_process import Generator 7 | import threading 8 | 9 | print("TESTING RENDER PASS IMPORTS") 10 | 11 | pass_inputs = [ 12 | ('color', 'Color', 'Provide the scene color as input'), 13 | ('depth', 'Depth', 'Provide the Z pass as depth input'), 14 | ('color_depth', 'Color and Depth', 'Provide the scene color and depth as input'), 15 | ] 16 | 17 | update_render_passes_original = cycles.CyclesRender.update_render_passes 18 | render_original = cycles.CyclesRender.render 19 | # del_original = cycles.CyclesRender.__del__ 20 | 21 | def register_render_pass(): 22 | def update_render_passes_decorator(original): 23 | def update_render_passes(self, scene=None, renderlayer=None): 24 | result = original(self, scene, renderlayer) 25 | self.register_pass(scene, renderlayer, "Dream Textures", 4, "RGBA", 'COLOR') 26 | return result 27 | return update_render_passes 28 | cycles.CyclesRender.update_render_passes = update_render_passes_decorator(cycles.CyclesRender.update_render_passes) 29 | def 
render_decorator(original): 30 | def render(self, depsgraph): 31 | scene = depsgraph.scene if hasattr(depsgraph, "scene") else depsgraph 32 | if not scene.dream_textures_render_properties_enabled: 33 | return original(self, depsgraph) 34 | result = original(self, depsgraph) 35 | try: 36 | original_result = self.get_result() 37 | self.add_pass("Dream Textures", 4, "RGBA") 38 | scale = scene.render.resolution_percentage / 100.0 39 | size_x = int(scene.render.resolution_x * scale) 40 | size_y = int(scene.render.resolution_y * scale) 41 | if size_x % 64 != 0 or size_y % 64 != 0: 42 | self.report({"ERROR"}, f"Image dimensions must be multiples of 64 (e.x. 512x512, 512x768, ...) closest is {round(size_x/64)*64}x{round(size_y/64)*64}") 43 | return result 44 | render_result = self.begin_result(0, 0, size_x, size_y) 45 | for layer in render_result.layers: 46 | for render_pass in layer.passes: 47 | if render_pass.name == "Dream Textures": 48 | try: 49 | self._render_dream_textures_pass(layer, (size_x, size_y), scene, render_pass, render_result) 50 | except Exception as e: 51 | self.error_set(str(e)) 52 | else: 53 | source_pass = None 54 | for original_layer in original_result.layers: 55 | if layer.name == original_layer.name: 56 | for original_pass in original_layer.passes: 57 | if original_pass.name == render_pass.name: 58 | source_pass = original_pass 59 | pixels = np.empty((len(source_pass.rect), len(source_pass.rect[0])), dtype=np.float32) 60 | source_pass.rect.foreach_get(pixels) 61 | render_pass.rect[:] = pixels 62 | self.end_result(render_result) 63 | except Exception as e: 64 | print(e) 65 | return result 66 | return render 67 | cycles.CyclesRender.render = render_decorator(cycles.CyclesRender.render) 68 | cycles.CyclesRender._render_dream_textures_pass = _render_dream_textures_pass 69 | 70 | # def del_decorator(original): 71 | # def del_patch(self): 72 | # result = original(self) 73 | # kill_generator() 74 | # return result 75 | # return del_patch 76 | # cycles.CyclesRender.__del__ = del_decorator(cycles.CyclesRender.__del__) 77 | 78 | def unregister_render_pass(): 79 | global update_render_passes_original 80 | cycles.CyclesRender.update_render_passes = update_render_passes_original 81 | global render_original 82 | cycles.CyclesRender.render = render_original 83 | del cycles.CyclesRender._render_dream_textures_pass 84 | # global del_original 85 | # cycles.CyclesRender.__del__ = del_original 86 | 87 | def _render_dream_textures_pass(self, layer, size, scene, render_pass, render_result): 88 | self.update_stats("Dream Textures", "Starting") 89 | 90 | rect = layer.passes["Combined"].rect 91 | 92 | match scene.dream_textures_render_properties_pass_inputs: 93 | case 'color': pass 94 | case 'depth' | 'color_depth': 95 | depth = np.empty((size[0] * size[1], 1), dtype=np.float32) 96 | layer.passes["Depth"].rect.foreach_get(depth) 97 | depth = (1 - np.interp(depth, [0, np.ma.masked_equal(depth, depth.max(), copy=False).max()], [0, 1])).reshape((size[1], size[0])) 98 | 99 | combined_pixels = np.empty((size[0] * size[1], 4), dtype=np.float32) 100 | rect.foreach_get(combined_pixels) 101 | 102 | gen = Generator.shared() 103 | self.update_stats("Dream Textures", "Applying color management transforms") 104 | combined_pixels = gen.ocio_transform( 105 | combined_pixels, 106 | config_path=os.path.join(bpy.utils.resource_path('LOCAL'), 'datafiles/colormanagement/config.ocio'), 107 | exposure=scene.view_settings.exposure, 108 | gamma=scene.view_settings.gamma, 109 | 
view_transform=scene.view_settings.view_transform, 110 | display_device=scene.display_settings.display_device, 111 | look=scene.view_settings.look, 112 | inverse=False 113 | ).result() 114 | 115 | self.update_stats("Dream Textures", "Generating...") 116 | print("DOES THIS COME HERE IN RENDER") 117 | generated_args = scene.dream_textures_render_properties_prompt.generate_args() 118 | generated_args['width'] = size[0] 119 | generated_args['height'] = size[1] 120 | match scene.dream_textures_render_properties_pass_inputs: 121 | case 'color': 122 | f = gen.prompt_to_image( 123 | image=np.flipud(combined_pixels.reshape((size[1], size[0], 4)) * 255).astype(np.uint8), 124 | **generated_args 125 | ) 126 | case 'depth': 127 | f = gen.depth_to_image( 128 | depth=depth, 129 | image=None, 130 | **generated_args 131 | ) 132 | case 'color_depth': 133 | f = gen.depth_to_image( 134 | depth=depth, 135 | image=np.flipud(combined_pixels.reshape((size[1], size[0], 4)) * 255).astype(np.uint8), 136 | **generated_args 137 | ) 138 | event = threading.Event() 139 | def on_step(_, step: ImageGenerationResult): 140 | if step.final: 141 | return 142 | self.update_progress(step.step / generated_args['steps']) 143 | if len(step.images) > 0: 144 | combined_pixels = step.images[0] 145 | render_pass.rect.foreach_set(combined_pixels.reshape((size[0] * size[1], 4))) 146 | self.update_result(render_result) # This does not seem to have an effect. 147 | def on_done(future): 148 | nonlocal combined_pixels 149 | result = future.result(last_only=True) 150 | combined_pixels = result.images[0] 151 | event.set() 152 | f.add_response_callback(on_step) 153 | f.add_done_callback(on_done) 154 | event.wait() 155 | 156 | # Perform an inverse transform so when Blender applies its transform everything looks correct. 
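# The forward ocio_transform above handed the generator display-referred pixels; writing the
# generated image back unchanged would get the view transform applied a second time, so the
# result is converted back to scene-referred values before it is written into the render pass.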
157 | self.update_stats("Dream Textures", "Applying inverse color management transforms") 158 | combined_pixels = gen.ocio_transform( 159 | combined_pixels.reshape((size[0] * size[1], 4)), 160 | config_path=os.path.join(bpy.utils.resource_path('LOCAL'), 'datafiles/colormanagement/config.ocio'), 161 | exposure=scene.view_settings.exposure, 162 | gamma=scene.view_settings.gamma, 163 | view_transform=scene.view_settings.view_transform, 164 | display_device=scene.display_settings.display_device, 165 | look=scene.view_settings.look, 166 | inverse=True 167 | ).result() 168 | 169 | combined_pixels = combined_pixels.reshape((size[0] * size[1], 4)) 170 | render_pass.rect.foreach_set(combined_pixels) 171 | 172 | self.update_stats("Dream Textures", "Finished") -------------------------------------------------------------------------------- /requirements/win-openvino.txt: -------------------------------------------------------------------------------- 1 | # openvino and its dependencies 2 | openvino-dev==2023.1.0 3 | openvino-telemetry==2023.1.0 4 | nncf==2.5.0 5 | 6 | 7 | # deep learning frameworks 8 | onnx>=1.11.0 9 | 10 | 11 | tensorflow>=2.12.1 12 | 13 | #tensorflow-datasets==4.2.0 14 | 15 | --find-links https://download.pytorch.org/whl/torch_stable.html 16 | #torch==1.13.1; sys_platform == 'darwin' 17 | #torch==1.13.1+cpu; sys_platform == 'linux' or platform_system == 'Windows' 18 | #torchvision==0.14.1; sys_platform == 'darwin' 19 | #torchvision==0.14.1+cpu; sys_platform == 'linux' or platform_system == 'Windows' 20 | #torchmetrics>=0.11.0 21 | pytorch-lightning 22 | torch>=2.1 23 | 24 | 25 | accelerate 26 | huggingface_hub 27 | 28 | transformers>=4.25.1 29 | diffusers==0.24.0 30 | monai>=0.9.1 31 | open_clip_torch 32 | 33 | # others 34 | numpy>=1.21.0 35 | opencv-python 36 | Pillow>=8.3.2 37 | matplotlib>=3.4,<3.5.3 38 | scipy 39 | 40 | # The packages below are not directly required. They are dependencies of 41 | # other dependencies that are pinned to a specific version to avoid 42 | # compatibility issues or vulnerabilities 43 | 44 | scikit-image>=0.19.2 45 | setuptools>=56.0.0 46 | 47 | -------------------------------------------------------------------------------- /scripts/train_detect_seamless.py: -------------------------------------------------------------------------------- 1 | """ 2 | It's recommended to copy this script to its own project folder to 3 | keep it with your own image samples and trained models. 4 | 5 | Each dataset should have images of the same square dimensions for batched training and validation. 6 | You can train with multiple datasets in the same session. 
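Each sub-folder name states which axes of its samples tile seamlessly:
imagesNone (neither axis), imagesX (X only), imagesY (Y only), imagesXY (both axes).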
7 | 8 | dataset_layout/ 9 | imagesNone/ 10 | [sample_images] 11 | imagesX/ 12 | [sample_images] 13 | imagesY/ 14 | [sample_images] 15 | imagesXY/ 16 | [sample_images] 17 | """ 18 | 19 | # if torch, numpy, and cv2 are not installed to site-packages 20 | # import site 21 | # site.addsitedir(r"path/to/dream_textures/.python_dependencies") 22 | 23 | import itertools 24 | import os 25 | from datetime import datetime 26 | 27 | import cv2 28 | import numpy as np 29 | import torch 30 | from numpy._typing import NDArray 31 | from torch import nn 32 | from torch.utils.data import Dataset, DataLoader 33 | 34 | EDGE_SLICE = 8 35 | if torch.cuda.is_available(): 36 | DEVICE = 'cuda' 37 | elif torch.backends.mps.is_available(): 38 | DEVICE = 'mps' 39 | else: 40 | DEVICE = 'cpu' 41 | 42 | 43 | class SeamlessModel(nn.Module): 44 | def __init__(self): 45 | super(SeamlessModel, self).__init__() 46 | self.conv = nn.Sequential( 47 | nn.Conv2d(3, 64, kernel_size=3, stride=1, padding=1), 48 | nn.Dropout(.2), 49 | nn.PReLU(64), 50 | nn.Conv2d(64, 16, kernel_size=3, stride=1, padding=1), 51 | nn.Dropout(.2), 52 | nn.PReLU(16), 53 | nn.Conv2d(16, 64, kernel_size=8, stride=4, padding=0), 54 | nn.Dropout(.2), 55 | nn.PReLU(64), 56 | nn.Conv2d(64, 64, kernel_size=(1, 3), stride=1, padding=0), 57 | nn.Dropout(.2), 58 | ) 59 | self.gru = nn.GRU(64, 32, batch_first=True) 60 | self.fc = nn.Linear(32, 1) 61 | 62 | def forward(self, x: torch.Tensor): 63 | if len(x.size()) == 3: 64 | x = x.unsqueeze(0) 65 | # x[batch, channels, height, EDGE_SLICE*2] 66 | x = self.conv(x) 67 | # x[batch, features, height/4, 1] 68 | h = torch.zeros(self.gru.num_layers, x.size()[0], self.gru.hidden_size, 69 | dtype=x.dtype, device=x.device) 70 | x = x.squeeze(3).transpose(2, 1) 71 | # x[batch, height/4, features] 72 | x, h = self.gru(x, h) 73 | return torch.tanh(self.fc(x[:, -1])) 74 | 75 | 76 | def image_edges(path): 77 | image: NDArray = cv2.imread(path) 78 | # Pretty sure loading images is a bottleneck and makes the first epoch incredibly slow until fully in RAM. 
79 | # Might be worth caching the edges in an easier to deserialize format with np.savez() 80 | 81 | edge_x = np.zeros((image.shape[0], EDGE_SLICE * 2, 3), dtype=np.float32) 82 | edge_x[:, :EDGE_SLICE] = image[:, -EDGE_SLICE:] 83 | edge_x[:, EDGE_SLICE:] = image[:, :EDGE_SLICE] 84 | 85 | edge_y = np.zeros((EDGE_SLICE * 2, image.shape[1], 3), dtype=np.float32) 86 | edge_y[:EDGE_SLICE] = image[-EDGE_SLICE:] 87 | edge_y[EDGE_SLICE:] = image[:EDGE_SLICE] 88 | 89 | return edge_x, edge_y 90 | 91 | 92 | def prepare_edge(edge: NDArray, axis: str) -> torch.Tensor: 93 | edge = (edge * 2 / 255 - 1) 94 | if axis == 'x': 95 | edge = edge.transpose(2, 0, 1) 96 | elif axis == 'y': 97 | edge = edge.transpose(2, 1, 0) 98 | else: 99 | raise ValueError('axis should be "x" or "y"') 100 | return torch.as_tensor(edge, dtype=torch.float32) 101 | 102 | 103 | def prepare_edges(edge_x: NDArray, edge_y: NDArray) -> tuple[torch.Tensor, torch.Tensor]: 104 | edge_x = edge_x * 2 / 255 - 1 105 | edge_y = edge_y * 2 / 255 - 1 106 | edge_x = edge_x.transpose(2, 0, 1) 107 | edge_y = edge_y.transpose(2, 1, 0) 108 | return torch.as_tensor(edge_x, dtype=torch.float32), torch.as_tensor(edge_y, dtype=torch.float32) 109 | 110 | 111 | def seamless_tensor(seamless): 112 | return torch.tensor([1 if seamless else -1], dtype=torch.float32) 113 | 114 | 115 | class EdgeDataset(Dataset): 116 | def __init__(self, path): 117 | self.data = [] 118 | self._load_dir(os.path.join(path, 'imagesNone'), (False, False)) 119 | self._load_dir(os.path.join(path, 'imagesX'), (True, False)) 120 | self._load_dir(os.path.join(path, 'imagesY'), (False, True)) 121 | self._load_dir(os.path.join(path, 'imagesXY'), (True, True)) 122 | print(f'dataset loaded {path} contains {len(self)}') 123 | 124 | def _load_dir(self, imdir, seamless): 125 | if not os.path.exists(imdir): 126 | print(f'skipping {imdir}, does not exist') 127 | return 128 | if not os.path.isdir(imdir): 129 | print(f'skipping {imdir}, not a directory') 130 | return 131 | print(f'loading {imdir}') 132 | 133 | for image in sorted(os.listdir(imdir)): 134 | path = os.path.join(imdir, image) 135 | if not os.path.isfile(path): 136 | continue 137 | self.data.append((seamless_tensor(seamless[0]), None, 'x', path)) 138 | self.data.append((seamless_tensor(seamless[1]), None, 'y', path)) 139 | 140 | def __len__(self): 141 | return len(self.data) 142 | 143 | def __getitem__(self, idx) -> tuple[torch.Tensor, torch.Tensor, str, str]: 144 | ret = self.data[idx] 145 | if ret[1] is not None: 146 | return ret 147 | path = ret[3] 148 | edge_x, edge_y = prepare_edges(*image_edges(path)) 149 | # Edges will be cached in cpu when first requested. Might not be desirable with a large enough dataset. 
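# Entries are created in x/y pairs per image (even index = x edge, odd index = y edge),
# so when either one is requested both edges are computed and cached together.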
150 | if idx % 2 == 0: 151 | ret = (ret[0], edge_x, 'x', path) 152 | self.data[idx] = ret 153 | self.data[idx + 1] = (self.data[idx + 1][0], edge_y, 'y', path) 154 | else: 155 | self.data[idx - 1] = (self.data[idx - 1][0], edge_x, 'x', path) 156 | ret = (ret[0], edge_y, 'y', path) 157 | self.data[idx] = ret 158 | return ret 159 | 160 | 161 | CHANNEL_PERMUTATIONS = [*itertools.permutations((0, 1, 2))] 162 | 163 | 164 | class PermutedEdgeDataset(Dataset): 165 | """Permutes the channels to better generalize color data.""" 166 | 167 | def __init__(self, dataset: EdgeDataset | str): 168 | if isinstance(dataset, str): 169 | dataset = EdgeDataset(dataset) 170 | self.base_dataset = dataset 171 | 172 | def __len__(self): 173 | return len(self.base_dataset) * len(CHANNEL_PERMUTATIONS) 174 | 175 | def __getitem__(self, idx): 176 | perm = CHANNEL_PERMUTATIONS[idx % len(CHANNEL_PERMUTATIONS)] 177 | result, edge, edge_type, path = self.base_dataset[idx // len(CHANNEL_PERMUTATIONS)] 178 | edge_perm = torch.zeros(edge.size(), dtype=edge.dtype) 179 | edge_perm[0] = edge[perm[0]] 180 | edge_perm[1] = edge[perm[1]] 181 | edge_perm[2] = edge[perm[2]] 182 | return result, edge_perm, edge_type, path, perm 183 | 184 | 185 | def mix_iter(*iterables): 186 | """Iterates through multiple objects while attempting to balance 187 | by yielding from which one has the highest of remaining/length""" 188 | iterables = [x for x in iterables if len(x) > 0] 189 | lengths = [len(x) for x in iterables] 190 | counts = lengths.copy() 191 | ratios = [1.] * len(iterables) 192 | iters = [x.__iter__() for x in iterables] 193 | while True: 194 | idx = -1 195 | max_ratio = 0 196 | for i, ratio in enumerate(ratios): 197 | if ratio > max_ratio: 198 | idx = i 199 | max_ratio = ratio 200 | if idx == -1: 201 | return 202 | c = counts[idx] - 1 203 | counts[idx] = c 204 | ratios[idx] = c / lengths[idx] 205 | yield next(iters[idx]) 206 | 207 | 208 | def train(model: nn.Module, train_datasets, valid_datasets, epochs=1000, training_rate=0.0001, batch=50): 209 | train_loaders = [DataLoader(PermutedEdgeDataset(ds), batch_size=batch, shuffle=True, num_workers=0, pin_memory=True) 210 | for ds in train_datasets] 211 | valid_loaders = [DataLoader(ds, batch_size=batch, num_workers=0, pin_memory=True) 212 | for ds in valid_datasets] 213 | 214 | criterion = nn.MSELoss() 215 | criterion.to(DEVICE) 216 | optimizer = torch.optim.SGD(model.parameters(), training_rate, .9) 217 | 218 | def train_one_epoch(): 219 | running_loss = 0. 220 | print_rate = 5000 221 | print_after = print_rate 222 | for i, data in enumerate(mix_iter(*train_loaders)): 223 | seamless = data[0].to(DEVICE) 224 | edge = data[1].to(DEVICE) 225 | 226 | optimizer.zero_grad() 227 | 228 | output = model(edge) 229 | 230 | loss: torch.Tensor = criterion(output, seamless) 231 | loss.backward() 232 | 233 | torch.nn.utils.clip_grad_norm_(model.parameters(), 5) 234 | 235 | optimizer.step() 236 | 237 | running_loss += loss.item() 238 | 239 | if i * batch > print_after: 240 | print_after += print_rate 241 | print(f"LOSS train {running_loss / (i + 1)}") 242 | 243 | return running_loss / (i + 1) 244 | 245 | timestamp = datetime.now().strftime('%Y%m%d_%H%M%S') 246 | 247 | best_vloss = 1_000_000. 
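# Each epoch: train on batches interleaved across all datasets via mix_iter, then evaluate on the
# validation loaders and write a checkpoint to model/ (named with timestamp, epoch and loss)
# whenever the average validation loss improves. Note that the model/ directory needs to exist
# beforehand; torch.save() will not create it.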
248 | 249 | for epoch in range(epochs): 250 | print(f'EPOCH {epoch}:') 251 | 252 | model.train(True) 253 | avg_loss = train_one_epoch() 254 | 255 | model.train(False) 256 | 257 | running_vloss = 0.0 258 | with torch.no_grad(): 259 | for i, vdata in enumerate(mix_iter(valid_loaders)): 260 | expected_results = vdata[0].to(DEVICE) 261 | inputs = vdata[1].to(DEVICE) 262 | outputs = model(inputs) 263 | vloss = criterion(outputs, expected_results) 264 | running_vloss += vloss 265 | 266 | avg_vloss = running_vloss / (i + 1) 267 | print(f'LOSS train {avg_loss} valid {avg_vloss}') 268 | 269 | # Track best performance, and save the model's state 270 | if avg_vloss < best_vloss: 271 | best_vloss = avg_vloss 272 | model_path = f'model/model_{timestamp}_{epoch}_{int(avg_vloss * 1000)}.pt' 273 | torch.save(model.state_dict(), model_path) 274 | 275 | 276 | @torch.no_grad() 277 | def validate(model, datasets): 278 | # datasets here do not need images of equal sizes or to be square as there is no batching 279 | passes = 0 280 | fails = 0 281 | print_limit = 100 282 | print_count = 0 283 | 284 | def do_print(result, path, axis): 285 | nonlocal print_count 286 | if print_count < print_limit: 287 | print(f'{path} {axis} {result}') 288 | print_count += 1 289 | 290 | for valid_dataset in datasets: 291 | for data in valid_dataset: 292 | expected_result = data[0] 293 | tensor = data[1] 294 | axis = data[2] 295 | path = data[3] 296 | result = model(tensor.to(DEVICE))[0].item() 297 | if expected_result.item() == 1: 298 | if result >= 0: 299 | passes += 1 300 | else: 301 | fails += 1 302 | do_print(result, path, axis) 303 | elif expected_result.item() == -1: 304 | if result < 0: 305 | passes += 1 306 | else: 307 | fails += 1 308 | do_print(result, path, axis) 309 | else: 310 | raise RuntimeError(f'Unexpected result target {expected_result.item()}') 311 | if print_count > print_limit: 312 | print(f"{print_count - print_limit} more") 313 | total = passes + fails 314 | print(f"Passed: {passes} | {passes / total * 100:.2f}%") # edge accuracy 315 | print(f"Failed: {fails} | {fails / total * 100:.2f}%") 316 | print(f"Passed²: {(passes / total) ** 2 * 100:.2f}%") # image accuracy 317 | 318 | 319 | # I prefer to not perpetuate the public distribution of torch.save() pickled files. 320 | def save_npz(path, state_dict): 321 | np.savez(path, **state_dict) 322 | 323 | 324 | def load_npz(path): 325 | state_dict_np: dict[str, NDArray] = np.load(path, allow_pickle=False) 326 | state_dict_torch = dict() 327 | for name, arr in state_dict_np.items(): 328 | state_dict_torch[name] = torch.from_numpy(arr) 329 | return state_dict_torch 330 | 331 | 332 | def main(): 333 | model = SeamlessModel() 334 | 335 | # resume training or validate a saved model 336 | # model.load_state_dict(load_npz("model.npz")) 337 | # model.load_state_dict(torch.load("model/model_20221203_162623_26_10.pt")) 338 | 339 | model.to(DEVICE) 340 | 341 | datasets = [ 342 | (EdgeDataset('train/samples'), EdgeDataset('valid/samples')), 343 | (EdgeDataset('train/samples2x'), EdgeDataset('valid/samples2x')), 344 | (EdgeDataset('train/samples4x'), EdgeDataset('valid/samples4x')) 345 | ] 346 | 347 | # Though it's possible to keep training and validation samples in the same dataset, you really shouldn't. 348 | # If you add new samples to a dataset that's being used like this DO NOT resume a previously trained model. 349 | # Training and validation samples will get reshuffled and your validation samples will likely be overfit. 
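# The commented-out alternative below instead splits each folder 80/20 with a fixed seed: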
350 | # gen = torch.Generator().manual_seed(132) 351 | # datasets = [ 352 | # torch.utils.data.random_split(EdgeDataset('samples'), [.8, .2], gen), 353 | # torch.utils.data.random_split(EdgeDataset('samples2x'), [.8, .2], gen), 354 | # torch.utils.data.random_split(EdgeDataset('samples4x'), [.8, .2], gen) 355 | # ] 356 | 357 | # If you're generating new samples it can be useful to modify generator_process/actions/prompt_to_image.py 358 | # to automatically save images to the dataset. It's best to keep them separate at first and run a previously 359 | # trained model on them to help find bad samples. Stable diffusion can at times add solid colored borders to 360 | # the edges of images that are not meant to be seamless. I recommend deleting all samples where an edge 361 | # appears seamless with scrutiny but was not generated to be, don't move it to another folder in the dataset. 362 | # datasets = [ 363 | # (None, EdgeDataset('tmp')) 364 | # ] 365 | 366 | # If you only want to validate a saved model. 367 | # datasets = [(None, valid) for _, valid in datasets] 368 | 369 | train_datasets = [] 370 | valid_datasets = [] 371 | for t, v in datasets: 372 | if t is not None: 373 | train_datasets.append(t) 374 | if v is not None: 375 | valid_datasets.append(v) 376 | 377 | try: 378 | if len(train_datasets) > 0: 379 | train(model, train_datasets, valid_datasets, epochs=50, training_rate=0.001) 380 | # It should easily converge in under 50 epochs. 381 | # Training rate is a little high, but I've never managed better 382 | # results with a lower rate and several times more epochs. 383 | except KeyboardInterrupt: 384 | pass 385 | 386 | # As long as your images have meaningful names you can get feedback on 387 | # what kind of images aren't detecting well to add similar samples. 388 | model.train(False) 389 | validate(model, valid_datasets) 390 | 391 | 392 | if __name__ == '__main__': 393 | main() 394 | -------------------------------------------------------------------------------- /security.md: -------------------------------------------------------------------------------- 1 | # Security Policy 2 | Intel is committed to rapidly addressing security vulnerabilities affecting our customers and providing clear guidance on the solution, impact, severity and mitigation. 3 | 4 | ## Reporting a Vulnerability 5 | Please report any security vulnerabilities in this project utilizing the guidelines [here](https://www.intel.com/content/www/us/en/security-center/vulnerability-handling-guidelines.html). 
6 | -------------------------------------------------------------------------------- /ui/panels/dream_texture.py: -------------------------------------------------------------------------------- 1 | from bpy.types import Panel 2 | from bpy_extras.io_utils import ImportHelper 3 | 4 | import webbrowser 5 | import os 6 | import shutil 7 | 8 | from ...absolute_path import CLIPSEG_WEIGHTS_PATH 9 | from ..presets import DREAM_PT_AdvancedPresets 10 | from ...pil_to_image import * 11 | from ...prompt_engineering import * 12 | from ...operators.dream_texture import DreamTexture, ReleaseGenerator, CancelGenerator, LoadModel 13 | from ...operators.open_latest_version import OpenLatestVersion 14 | from ...operators.view_history import ImportPromptFile 15 | from ..space_types import SPACE_TYPES 16 | from ...property_groups.dream_prompt import DreamPrompt 17 | 18 | from ...generator_process.models import Pipeline, FixItError 19 | 20 | def dream_texture_panels(): 21 | for space_type in SPACE_TYPES: 22 | class DreamTexturePanel(Panel): 23 | """Creates a Panel in the scene context of the properties editor""" 24 | bl_label = "Dream Texture" 25 | bl_idname = f"DREAM_PT_dream_panel_{space_type}" 26 | bl_category = "Dream" 27 | bl_space_type = space_type 28 | bl_region_type = 'UI' 29 | 30 | @classmethod 31 | def poll(cls, context): 32 | if cls.bl_space_type == 'NODE_EDITOR': 33 | return context.area.ui_type == "ShaderNodeTree" or context.area.ui_type == "CompositorNodeTree" 34 | else: 35 | return True 36 | 37 | def draw_header_preset(self, context): 38 | layout = self.layout 39 | layout.operator(ImportPromptFile.bl_idname, text="", icon="IMPORT") 40 | layout.separator() 41 | 42 | def draw(self, context): 43 | layout = self.layout 44 | layout.use_property_split = True 45 | layout.use_property_decorate = False 46 | 47 | #if is_force_show_download(): 48 | # layout.operator(OpenLatestVersion.bl_idname, icon="IMPORT", text="Download Latest Release") 49 | #elif new_version_available(): 50 | # layout.operator(OpenLatestVersion.bl_idname, icon="IMPORT") 51 | 52 | #layout.prop(context.scene.dream_textures_prompt, "pipeline") 53 | #layout.prop(get_prompt(context), "infer_device") 54 | #if Pipeline[context.scene.dream_textures_prompt.pipeline].model(): 55 | #layout.prop(context.scene.dream_textures_prompt, "model") 56 | layout.prop(context.scene.dream_textures_prompt, "infer_model") 57 | layout.prop(context.scene.dream_textures_prompt, "infer_model_size") 58 | #row = layout.row() 59 | #row.operator(LoadModel.bl_idname, icon="PLAY", text="Load Models") 60 | 61 | DreamTexturePanel.__name__ = f"DREAM_PT_dream_panel_{space_type}" 62 | yield DreamTexturePanel 63 | 64 | def get_prompt(context): 65 | return context.scene.dream_textures_prompt 66 | 67 | #def get_seamless_result(context, prompt): 68 | # init_image = None 69 | # if prompt.use_init_img and prompt.init_img_action in ['modify', 'inpaint']: 70 | # match prompt.init_img_src: 71 | # case 'file': 72 | # init_image = context.scene.init_img 73 | # case 'open_editor': 74 | # for area in context.screen.areas: 75 | # if area.type == 'IMAGE_EDITOR': 76 | # if area.spaces.active.image is not None: 77 | # init_image = area.spaces.active.image 78 | # context.scene.seamless_result.check(init_image) 79 | # return context.scene.seamless_result 80 | 81 | yield from create_panel(space_type, 'UI', DreamTexturePanel.bl_idname, prompt_panel, get_prompt) 82 | #yield create_panel(space_type, 'UI', DreamTexturePanel.bl_idname, size_panel, get_prompt) 83 | yield from create_panel(space_type, 
'UI', DreamTexturePanel.bl_idname, init_image_panels, get_prompt) 84 | yield from create_panel(space_type, 'UI', DreamTexturePanel.bl_idname, advanced_panel, get_prompt) 85 | yield create_panel(space_type, 'UI', DreamTexturePanel.bl_idname, actions_panel, get_prompt) 86 | 87 | def create_panel(space_type, region_type, parent_id, ctor, get_prompt, use_property_decorate=False, **kwargs): 88 | class BasePanel(bpy.types.Panel): 89 | bl_category = "Dream" 90 | bl_space_type = space_type 91 | bl_region_type = region_type 92 | 93 | class SubPanel(BasePanel): 94 | bl_category = "Dream" 95 | bl_space_type = space_type 96 | bl_region_type = region_type 97 | bl_parent_id = parent_id 98 | 99 | def draw(self, context): 100 | self.layout.use_property_decorate = use_property_decorate 101 | 102 | return ctor(SubPanel, space_type, get_prompt, **kwargs) 103 | 104 | def prompt_panel(sub_panel, space_type, get_prompt): #, get_seamless_result=None): 105 | class PromptPanel(sub_panel): 106 | """Create a subpanel for prompt input""" 107 | bl_label = "Prompt" 108 | bl_idname = f"DREAM_PT_dream_panel_prompt_{space_type}" 109 | 110 | def draw_header_preset(self, context): 111 | layout = self.layout 112 | layout.prop(get_prompt(context), "prompt_structure", text="") 113 | 114 | def draw(self, context): 115 | super().draw(context) 116 | layout = self.layout 117 | layout.use_property_split = True 118 | prompt = get_prompt(context) 119 | 120 | structure = next(x for x in prompt_structures if x.id == prompt.prompt_structure) 121 | for segment in structure.structure: 122 | segment_row = layout.row() 123 | enum_prop = 'prompt_structure_token_' + segment.id + '_enum' 124 | is_custom = getattr(prompt, enum_prop) == 'custom' 125 | if is_custom: 126 | segment_row.prop(prompt, 'prompt_structure_token_' + segment.id) 127 | enum_cases = DreamPrompt.__annotations__[enum_prop].keywords['items'] 128 | if len(enum_cases) != 1 or enum_cases[0][0] != 'custom': 129 | segment_row.prop(prompt, enum_prop, icon_only=is_custom) 130 | if prompt.prompt_structure == file_batch_structure.id: 131 | layout.template_ID(context.scene, "dream_textures_prompt_file", open="text.open") 132 | ''' if Pipeline[prompt.pipeline].seamless(): 133 | layout.prop(prompt, "seamless_axes") 134 | if prompt.seamless_axes == SeamlessAxes.AUTO and get_seamless_result is not None: 135 | auto_row = self.layout.row() 136 | auto_row.enabled = False 137 | auto_row.prop(get_seamless_result(context, prompt), "result") 138 | ''' 139 | 140 | yield PromptPanel 141 | 142 | class NegativePromptPanel(sub_panel): 143 | """Create a subpanel for negative prompt input""" 144 | bl_idname = f"DREAM_PT_dream_panel_negative_prompt_{space_type}" 145 | bl_label = "Negative" 146 | bl_parent_id = PromptPanel.bl_idname 147 | 148 | @classmethod 149 | def poll(cls, context): 150 | return get_prompt(context).prompt_structure != file_batch_structure.id and Pipeline[get_prompt(context).pipeline].negative_prompts() 151 | 152 | def draw_header(self, context): 153 | layout = self.layout 154 | layout.prop(get_prompt(context), "use_negative_prompt", text="") 155 | 156 | def draw(self, context): 157 | super().draw(context) 158 | layout = self.layout 159 | layout.use_property_split = True 160 | layout.enabled = layout.enabled and get_prompt(context).use_negative_prompt 161 | scene = context.scene 162 | 163 | layout.prop(get_prompt(context), "negative_prompt") 164 | yield NegativePromptPanel 165 | 166 | def size_panel(sub_panel, space_type, get_prompt): 167 | class SizePanel(sub_panel): 168 | """Create a 
subpanel for size options""" 169 | bl_idname = f"DREAM_PT_dream_panel_size_{space_type}" 170 | bl_label = "Size" 171 | bl_options = {'DEFAULT_CLOSED'} 172 | 173 | def draw_header(self, context): 174 | self.layout.prop(get_prompt(context), "use_size", text="") 175 | 176 | def draw(self, context): 177 | super().draw(context) 178 | layout = self.layout 179 | layout.use_property_split = True 180 | layout.enabled = layout.enabled and get_prompt(context).use_size 181 | 182 | layout.prop(get_prompt(context), "width") 183 | layout.prop(get_prompt(context), "height") 184 | return SizePanel 185 | 186 | def init_image_panels(sub_panel, space_type, get_prompt): 187 | class InitImagePanel(sub_panel): 188 | """Create a subpanel for init image options""" 189 | bl_idname = f"DREAM_PT_dream_panel_init_image_{space_type}" 190 | bl_label = "Source Image" 191 | bl_options = {'DEFAULT_CLOSED'} 192 | 193 | def draw_header(self, context): 194 | self.layout.prop(get_prompt(context), "use_init_img", text="") 195 | 196 | def draw(self, context): 197 | super().draw(context) 198 | layout = self.layout 199 | prompt = get_prompt(context) 200 | layout.enabled = prompt.use_init_img 201 | 202 | layout.prop(prompt, "init_img_src", expand=True) 203 | if prompt.init_img_src == 'file': 204 | layout.template_ID(context.scene, "init_img", open="image.open") 205 | layout.prop(prompt, "init_img_action", expand=True) 206 | 207 | layout.use_property_split = True 208 | 209 | #if prompt.init_img_action == 'modify': 210 | # layout.prop(prompt, "fit") 211 | layout.prop(prompt, "strength") 212 | #if Pipeline[prompt.pipeline].color_correction(): 213 | # layout.prop(prompt, "use_init_img_color") 214 | if prompt.init_img_action == 'modify': 215 | layout.prop(prompt, "modify_action_source_type") 216 | if prompt.modify_action_source_type == 'depth_map': 217 | layout.template_ID(context.scene, "init_depth", open="image.open") 218 | yield InitImagePanel 219 | 220 | def advanced_panel(sub_panel, space_type, get_prompt): 221 | class AdvancedPanel(sub_panel): 222 | """Create a subpanel for advanced options""" 223 | bl_idname = f"DREAM_PT_dream_panel_advanced_{space_type}" 224 | bl_label = "Advanced" 225 | bl_options = {'DEFAULT_CLOSED'} 226 | 227 | def draw_header_preset(self, context): 228 | DREAM_PT_AdvancedPresets.draw_panel_header(self.layout) 229 | 230 | def draw(self, context): 231 | super().draw(context) 232 | layout = self.layout 233 | layout.use_property_split = True 234 | 235 | scene = context.scene 236 | 237 | generated_args = scene.dream_textures_prompt.generate_args() 238 | infer_model = generated_args['infer_model'].name 239 | 240 | 241 | 242 | if infer_model == "Stable_Diffusion_1_5": 243 | layout.prop(get_prompt(context), "Tiling") 244 | 245 | 246 | layout.prop(get_prompt(context), "infer_device_text") 247 | if infer_model == "Stable_Diffusion_1_5_int8" or infer_model == "Stable_Diffusion_1_5_controlnet_depth_int8": 248 | layout.prop(get_prompt(context), "infer_device_unet_pos") 249 | layout.prop(get_prompt(context), "infer_device_unet_neg") 250 | else: 251 | layout.prop(get_prompt(context), "infer_device_unet") 252 | 253 | 254 | 255 | layout.prop(get_prompt(context), "infer_device_vae") 256 | layout.prop(get_prompt(context), "random_seed") 257 | if not get_prompt(context).random_seed: 258 | layout.prop(get_prompt(context), "seed") 259 | # advanced_box.prop(self, "iterations") # Disabled until supported by the addon. 
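# Sampling settings forwarded to the generator process: denoising step count, classifier-free
# guidance scale (cfg_scale), scheduler, and how intermediate step previews are displayed.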
260 | layout.prop(get_prompt(context), "steps") 261 | layout.prop(get_prompt(context), "cfg_scale") 262 | layout.prop(get_prompt(context), "scheduler") 263 | layout.prop(get_prompt(context), "step_preview_mode") 264 | 265 | yield AdvancedPanel 266 | 267 | 268 | 269 | 270 | def advanced_panel_project(sub_panel, space_type, get_prompt): 271 | class AdvancedPanelProject(sub_panel): 272 | """Create a subpanel for advanced options in Dream Project""" 273 | bl_idname = f"DREAM_PT_dream_panel_advanced_project_{space_type}" 274 | bl_label = "AdvancedProject" 275 | bl_options = {'DEFAULT_CLOSED'} 276 | 277 | def draw_header_preset(self, context): 278 | DREAM_PT_AdvancedPresets.draw_panel_header(self.layout) 279 | 280 | def draw(self, context): 281 | super().draw(context) 282 | layout = self.layout 283 | layout.use_property_split = True 284 | 285 | scene = context.scene 286 | 287 | 288 | generated_args_project = context.scene.dream_textures_project_prompt.generate_args() 289 | infer_model_project = generated_args_project['infer_model'].name 290 | 291 | layout.prop(get_prompt(context), "infer_device_text") 292 | 293 | 294 | if infer_model_project == "Stable_Diffusion_1_5_controlnet_depth_int8": 295 | layout.prop(get_prompt(context), "infer_device_unet_pos") 296 | layout.prop(get_prompt(context), "infer_device_unet_neg") 297 | else: 298 | layout.prop(get_prompt(context), "infer_device_unet") 299 | 300 | 301 | layout.prop(get_prompt(context), "infer_device_vae") 302 | layout.prop(get_prompt(context), "random_seed") 303 | if not get_prompt(context).random_seed: 304 | layout.prop(get_prompt(context), "seed") 305 | # advanced_box.prop(self, "iterations") # Disabled until supported by the addon. 306 | layout.prop(get_prompt(context), "steps") 307 | layout.prop(get_prompt(context), "cfg_scale") 308 | layout.prop(get_prompt(context), "scheduler") 309 | layout.prop(get_prompt(context), "step_preview_mode") 310 | 311 | yield AdvancedPanelProject 312 | 313 | 314 | def actions_panel(sub_panel, space_type, get_prompt): 315 | class ActionsPanel(sub_panel): 316 | """Create a subpanel for actions""" 317 | bl_idname = f"DREAM_PT_dream_panel_actions_{space_type}" 318 | bl_label = "Advanced" 319 | bl_options = {'HIDE_HEADER'} 320 | 321 | def draw(self, context): 322 | super().draw(context) 323 | layout = self.layout 324 | layout.use_property_split = True 325 | 326 | prompt = get_prompt(context) 327 | 328 | iterations_row = layout.row() 329 | iterations_row.enabled = prompt.prompt_structure != file_batch_structure.id 330 | iterations_row.prop(prompt, "iterations") 331 | 332 | col = layout.column(align=True) 333 | row = col.row(align=True) 334 | #row = layout.row() 335 | row.scale_y = 1.5 336 | if context.scene.dream_textures_progress <= 0: 337 | if context.scene.dream_textures_info != "": 338 | row.label(text=context.scene.dream_textures_info, icon="INFO") 339 | else: 340 | row.operator(LoadModel.bl_idname, icon="PLAY", text="Load Models") 341 | row.operator(ReleaseGenerator.bl_idname, icon="X", text="") 342 | row.operator(DreamTexture.bl_idname, icon="PLAY", text="Generate") 343 | else: 344 | disabled_row = row.row() 345 | disabled_row.use_property_split = True 346 | disabled_row.prop(context.scene, 'dream_textures_progress', slider=True) 347 | disabled_row.enabled = False 348 | if CancelGenerator.poll(context): 349 | row.operator(CancelGenerator.bl_idname, icon="CANCEL", text="") 350 | row.operator(ReleaseGenerator.bl_idname, icon="X", text="") 351 | 352 | # Validation 353 | try: 354 | prompt.validate(context) 355 | 
except FixItError as e: 356 | error_box = layout.box() 357 | error_box.use_property_split = False 358 | for i, line in enumerate(e.args[0].split('\n')): 359 | error_box.label(text=line, icon="ERROR" if i == 0 else "NONE") 360 | e.draw(context, error_box) 361 | except Exception as e: 362 | print(e) 363 | return ActionsPanel 364 | -------------------------------------------------------------------------------- /ui/panels/history.py: -------------------------------------------------------------------------------- 1 | import bpy 2 | from bpy.types import Panel 3 | from ...pil_to_image import * 4 | from ...prompt_engineering import * 5 | from ...operators.dream_texture import DreamTexture, ReleaseGenerator 6 | from ...operators.view_history import ExportHistorySelection, ImportPromptFile, RecallHistoryEntry, ClearHistory, RemoveHistorySelection 7 | #from ...operators.open_latest_version import OpenLatestVersion, is_force_show_download, new_version_available 8 | #from ...preferences import StableDiffusionPreferences 9 | from ..space_types import SPACE_TYPES 10 | 11 | def history_panels(): 12 | for space_type in SPACE_TYPES: 13 | class HistoryPanel(Panel): 14 | """Panel for Dream Textures History""" 15 | bl_label = "History" 16 | bl_category = "Dream" 17 | bl_idname = f"DREAM_PT_dream_history_panel_{space_type}" 18 | bl_space_type = space_type 19 | bl_region_type = 'UI' 20 | 21 | @classmethod 22 | def poll(cls, context): 23 | if cls.bl_space_type == 'NODE_EDITOR': 24 | return context.area.ui_type == "ShaderNodeTree" or context.area.ui_type == "CompositorNodeTree" 25 | else: 26 | return True 27 | 28 | def draw(self, context): 29 | self.layout.template_list("SCENE_UL_HistoryList", "", context.scene, "dream_textures_history", context.scene, "dream_textures_history_selection") 30 | 31 | row = self.layout.row() 32 | row.prop(context.scene, "dream_textures_history_selection_preview") 33 | row.operator(RemoveHistorySelection.bl_idname, text="", icon="X") 34 | row.operator(ExportHistorySelection.bl_idname, text="", icon="EXPORT") 35 | 36 | self.layout.operator(RecallHistoryEntry.bl_idname) 37 | self.layout.operator(ClearHistory.bl_idname) 38 | HistoryPanel.__name__ = f"DREAM_PT_dream_history_panel_{space_type}" 39 | yield HistoryPanel -------------------------------------------------------------------------------- /ui/panels/render_properties.py: -------------------------------------------------------------------------------- 1 | import bpy 2 | from .dream_texture import create_panel, prompt_panel, advanced_panel 3 | from ...property_groups.dream_prompt import pipeline_options 4 | from ...generator_process.actions.prompt_to_image import Pipeline 5 | from ...generator_process.actions.huggingface_hub import ModelType 6 | from ...preferences import StableDiffusionPreferences 7 | 8 | class RenderPropertiesPanel(bpy.types.Panel): 9 | """Panel for Dream Textures render properties""" 10 | bl_label = "Dream Textures" 11 | bl_idname = "DREAM_PT_dream_render_properties_panel" 12 | bl_space_type = 'PROPERTIES' 13 | bl_region_type = 'WINDOW' 14 | bl_context = 'render' 15 | bl_options = {'DEFAULT_CLOSED'} 16 | 17 | @classmethod 18 | def poll(self, context): 19 | return context.scene.render.engine == 'CYCLES' 20 | 21 | def draw_header(self, context): 22 | self.layout.prop(context.scene, "dream_textures_render_properties_enabled", text="") 23 | 24 | def draw(self, context): 25 | layout = self.layout 26 | layout.use_property_split = True 27 | layout.use_property_decorate = False 28 | layout.active = 
context.scene.dream_textures_render_properties_enabled 29 | 30 | #if len(pipeline_options(self, context)) > 1: 31 | # layout.prop(context.scene.dream_textures_render_properties_prompt, "pipeline") 32 | #if Pipeline[context.scene.dream_textures_render_properties_prompt.pipeline].model(): 33 | #layout.prop(context.scene.dream_textures_render_properties_prompt, 'model') 34 | layout.prop(context.scene.dream_textures_render_properties_prompt, "infer_model") 35 | row = layout.row() 36 | row.operator(LoadModel.bl_idname, icon="PLAY", text="Load Models") 37 | 38 | layout.prop(context.scene.dream_textures_render_properties_prompt, "strength") 39 | layout.prop(context.scene, "dream_textures_render_properties_pass_inputs") 40 | if context.scene.dream_textures_render_properties_pass_inputs != 'color': 41 | if not bpy.context.view_layer.use_pass_z: 42 | box = layout.box() 43 | box.label(text="Z Pass Disabled", icon="ERROR") 44 | box.label(text="Enable the Z pass to use depth pass inputs") 45 | box.use_property_split = False 46 | box.prop(context.view_layer, "use_pass_z") 47 | 48 | if not Pipeline[context.scene.dream_textures_render_properties_prompt.pipeline].depth(): 49 | box = layout.box() 50 | box.label(text="Unsupported pipeline", icon="ERROR") 51 | box.label(text="The selected pipeline does not support depth to image.") 52 | 53 | models = list(filter( 54 | lambda m: m.model_base == context.scene.dream_textures_render_properties_prompt.model, 55 | context.preferences.addons[StableDiffusionPreferences.bl_idname].preferences.installed_models 56 | )) 57 | if len(models) > 0 and ModelType[models[0].model_type] != ModelType.DEPTH: 58 | box = layout.box() 59 | box.label(text="Unsupported model", icon="ERROR") 60 | box.label(text="Select a depth model, such as 'stabilityai/stable-diffusion-2-depth'") 61 | 62 | def render_properties_panels(): 63 | yield RenderPropertiesPanel 64 | def get_prompt(context): 65 | return context.scene.dream_textures_render_properties_prompt 66 | space_type = RenderPropertiesPanel.bl_space_type 67 | region_type = RenderPropertiesPanel.bl_region_type 68 | panels = [ 69 | *create_panel(space_type, region_type, RenderPropertiesPanel.bl_idname, prompt_panel, get_prompt, True), 70 | *create_panel(space_type, region_type, RenderPropertiesPanel.bl_idname, advanced_panel, get_prompt, True), 71 | ] 72 | for panel in panels: 73 | def draw_decorator(original): 74 | def draw(self, context): 75 | self.layout.enabled = context.scene.dream_textures_render_properties_enabled 76 | return original(self, context) 77 | return draw 78 | panel.draw = draw_decorator(panel.draw) 79 | if hasattr(panel, 'draw_header_preset'): 80 | panel.draw_header_preset = draw_decorator(panel.draw_header_preset) 81 | if hasattr(panel, 'draw_header'): 82 | panel.draw_header = draw_decorator(panel.draw_header) 83 | yield panel -------------------------------------------------------------------------------- /ui/panels/upscaling.py: -------------------------------------------------------------------------------- 1 | from bpy.types import Panel 2 | from ...pil_to_image import * 3 | from ...prompt_engineering import * 4 | from ...operators.upscale import Upscale 5 | from ...operators.dream_texture import CancelGenerator, ReleaseGenerator 6 | 7 | from ...generator_process.actions.prompt_to_image import Pipeline 8 | from .dream_texture import create_panel, advanced_panel 9 | from ..space_types import SPACE_TYPES 10 | 11 | def upscaling_panels(): 12 | for space_type in SPACE_TYPES: 13 | class UpscalingPanel(Panel): 14 | 
"""Panel for AI Upscaling""" 15 | bl_label = "AI Upscaling" 16 | bl_category = "Dream" 17 | bl_idname = f"DREAM_PT_dream_upscaling_panel_{space_type}" 18 | bl_space_type = space_type 19 | bl_region_type = 'UI' 20 | bl_options = {'DEFAULT_CLOSED'} 21 | 22 | @classmethod 23 | def poll(cls, context): 24 | if not Pipeline[context.scene.dream_textures_prompt.pipeline].upscaling(): 25 | return False 26 | if cls.bl_space_type == 'NODE_EDITOR': 27 | return context.area.ui_type == "ShaderNodeTree" or context.area.ui_type == "CompositorNodeTree" 28 | else: 29 | return True 30 | 31 | def draw(self, context): 32 | layout = self.layout 33 | layout.use_property_split = True 34 | layout.use_property_decorate = False 35 | 36 | prompt = context.scene.dream_textures_upscale_prompt 37 | 38 | #layout.prop(prompt, "prompt_structure_token_subject") 39 | layout.prop(context.scene, "dream_textures_upscale_tile_size") 40 | layout.prop(context.scene.dream_textures_upscale_prompt, "infer_device") 41 | #layout.prop(context.scene, "dream_textures_upscale_blend") 42 | 43 | #layout.prop(prompt, "seamless_axes") 44 | 45 | 46 | 47 | UpscalingPanel.__name__ = UpscalingPanel.bl_idname 48 | class ActionsPanel(Panel): 49 | """Panel for AI Upscaling Actions""" 50 | bl_category = "Dream" 51 | bl_label = "Actions" 52 | bl_idname = f"DREAM_PT_dream_upscaling_actions_panel_{space_type}" 53 | bl_space_type = space_type 54 | bl_region_type = 'UI' 55 | bl_parent_id = UpscalingPanel.bl_idname 56 | bl_options = {'HIDE_HEADER'} 57 | 58 | @classmethod 59 | def poll(cls, context): 60 | if not Pipeline[context.scene.dream_textures_prompt.pipeline].upscaling(): 61 | return False 62 | if cls.bl_space_type == 'NODE_EDITOR': 63 | return context.area.ui_type == "ShaderNodeTree" or context.area.ui_type == "CompositorNodeTree" 64 | else: 65 | return True 66 | 67 | def draw(self, context): 68 | layout = self.layout 69 | layout.use_property_split = True 70 | layout.use_property_decorate = False 71 | 72 | image = None 73 | for area in context.screen.areas: 74 | if area.type == 'IMAGE_EDITOR': 75 | image = area.spaces.active.image 76 | row = layout.row() 77 | row.scale_y = 1.5 78 | if context.scene.dream_textures_progress <= 0: 79 | if context.scene.dream_textures_info != "": 80 | row.label(text=context.scene.dream_textures_info, icon="INFO") 81 | else: 82 | row.operator( 83 | Upscale.bl_idname, 84 | text=f"Upscale Image", #to {image.size[0] * 4}x{image.size[1] * 4}" if image is not None else "Upscale", 85 | icon="FULLSCREEN_ENTER" 86 | ) 87 | else: 88 | disabled_row = row.row() 89 | disabled_row.use_property_split = True 90 | disabled_row.prop(context.scene, 'dream_textures_progress', slider=True) 91 | disabled_row.enabled = False 92 | if CancelGenerator.poll(context): 93 | row.operator(CancelGenerator.bl_idname, icon="CANCEL", text="") 94 | row.operator(ReleaseGenerator.bl_idname, icon="X", text="") 95 | yield UpscalingPanel 96 | advanced_panels = [*create_panel(space_type, 'UI', UpscalingPanel.bl_idname, advanced_panel, lambda context: context.scene.dream_textures_upscale_prompt)] 97 | #outer_panel = advanced_panels[0] 98 | #outer_original_idname = outer_panel.bl_idname 99 | #outer_panel.bl_idname += "_upscaling" 100 | #for panel in advanced_panels: 101 | #panel.bl_idname += "_upscaling" 102 | #if panel.bl_parent_id == outer_original_idname: 103 | #panel.bl_parent_id = outer_panel.bl_idname 104 | #yield panel 105 | yield ActionsPanel -------------------------------------------------------------------------------- /ui/presets.py: 
-------------------------------------------------------------------------------- 1 | import bpy 2 | from bpy.types import Panel, Operator, Menu 3 | from bl_operators.presets import AddPresetBase 4 | from bl_ui.utils import PresetPanel 5 | from typing import _AnnotatedAlias 6 | import os 7 | import shutil 8 | from ..absolute_path import absolute_path 9 | from ..generator_process.actions.prompt_to_image import Optimizations 10 | 11 | class DreamTexturesPresetPanel(PresetPanel, Panel): 12 | preset_operator = "script.execute_preset" 13 | 14 | class DREAM_PT_AdvancedPresets(DreamTexturesPresetPanel): 15 | bl_label = "Advanced Presets" 16 | preset_subdir = "dream_textures/advanced" 17 | preset_add_operator = "dream_textures.advanced_preset_add" 18 | 19 | class DREAM_MT_AdvancedPresets(Menu): 20 | bl_label = 'Advanced Presets' 21 | preset_subdir = 'dream_textures/advanced' 22 | preset_operator = 'script.execute_preset' 23 | draw = Menu.draw_preset 24 | 25 | class AddAdvancedPreset(AddPresetBase, Operator): 26 | bl_idname = 'dream_textures.advanced_preset_add' 27 | bl_label = 'Add Advanced Preset' 28 | preset_menu = 'DREAM_MT_AdvancedPresets' 29 | 30 | preset_subdir = 'dream_textures/advanced' 31 | 32 | preset_defines = ['prompt = bpy.context.scene.dream_textures_prompt'] 33 | preset_values = [ 34 | "prompt.steps", 35 | "prompt.cfg_scale", 36 | "prompt.scheduler", 37 | "prompt.step_preview_mode", 38 | 39 | "prompt.optimizations_attention_slicing", 40 | "prompt.optimizations_attention_slice_size_src", 41 | "prompt.optimizations_attention_slice_size", 42 | "prompt.optimizations_cudnn_benchmark", 43 | "prompt.optimizations_tf32", 44 | "prompt.optimizations_amp", 45 | "prompt.optimizations_half_precision", 46 | "prompt.optimizations_sequential_cpu_offload", 47 | "prompt.optimizations_channels_last_memory_format", 48 | "prompt.optimizations_batch_size", 49 | "prompt.optimizations_vae_slicing", 50 | "prompt.optimizations_cpu_only", 51 | ] 52 | 53 | class RestoreDefaultPresets(Operator): 54 | bl_idname = "dream_textures.restore_default_presets" 55 | bl_label = "Restore Default Presets" 56 | bl_description = ("Restores all default presets provided by the addon.") 57 | bl_options = {"REGISTER", "INTERNAL"} 58 | 59 | def execute(self, context): 60 | register_default_presets(force=True) 61 | return {"FINISHED"} 62 | 63 | PRESETS_PATH = os.path.join(bpy.utils.user_resource('SCRIPTS'), 'presets/dream_textures/advanced') 64 | DEFAULT_PRESETS_PATH = absolute_path('builtin_presets') 65 | def register_default_presets(force=False): 66 | presets_path_exists = os.path.isdir(PRESETS_PATH) 67 | if not presets_path_exists or force: 68 | if not presets_path_exists: 69 | os.makedirs(PRESETS_PATH) 70 | for default_preset in os.listdir(DEFAULT_PRESETS_PATH): 71 | if not os.path.exists(os.path.join(PRESETS_PATH, default_preset)): 72 | shutil.copy(os.path.join(DEFAULT_PRESETS_PATH, default_preset), PRESETS_PATH) 73 | 74 | def default_presets_missing(): 75 | if not os.path.isdir(PRESETS_PATH): 76 | return True 77 | for default_preset in os.listdir(DEFAULT_PRESETS_PATH): 78 | if not os.path.exists(os.path.join(PRESETS_PATH, default_preset)): 79 | return True -------------------------------------------------------------------------------- /ui/space_types.py: -------------------------------------------------------------------------------- 1 | SPACE_TYPES = {'IMAGE_EDITOR', 'NODE_EDITOR'} -------------------------------------------------------------------------------- /version.py: 
-------------------------------------------------------------------------------- 1 | VERSION = (0, 1, 0) 2 | def version_tag(version): 3 | return f"{version[0]}.{version[1]}.{version[2]}" 4 | 5 | def version_tuple(tag): 6 | return tuple(int(x) for x in tag.split('.')) 7 | --------------------------------------------------------------------------------