├── environment.yml
├── LICENSE
├── README.md
├── .gitignore
├── start_sd_server.py
├── __init__.py
└── operators.py

--------------------------------------------------------------------------------
/environment.yml:
--------------------------------------------------------------------------------
name: SDTG4B_CONDA
channels:
  - conda-forge
  - nvidia
  - pytorch
dependencies:
  - python=3.10
  - pytorch-cuda=11.7
  - diffusers
  - transformers
  - pillow
  - accelerate
  - numpy
  - opencv

--------------------------------------------------------------------------------
/LICENSE:
--------------------------------------------------------------------------------
MIT License

Copyright (c) 2023 Paweł

Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:

The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.

THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.

--------------------------------------------------------------------------------
/README.md:
--------------------------------------------------------------------------------
# SDTG4B
Stable Diffusion Texture Generator For Blender

![obraz](https://user-images.githubusercontent.com/3722204/215628215-339e9609-86ac-40f7-8478-77077ffeec4e.png)

https://user-images.githubusercontent.com/3722204/214519268-2d9e45e6-711e-45fd-b461-fff06c17f1d1.mp4

https://user-images.githubusercontent.com/3722204/213944732-2a86cb32-83b0-43c3-b83b-8fa765138b24.mp4

https://user-images.githubusercontent.com/3722204/215635067-d8b07c4f-06b8-4388-8777-6bb3262934c2.mp4

# How to install
- Download the zip file of the current release from https://github.com/p4vv37/SDTG4B/releases/tag/release and install it as a Blender add-on:
![obraz](https://user-images.githubusercontent.com/3722204/215626359-36699423-6668-4382-b617-48a0df0e29e1.png)

- Create a new Conda environment described in the [environment.yml](https://github.com/p4vv37/SDTG4B/blob/main/environment.yml) file by using the command:

      conda env update --file environment.yml --prune

# How to generate a texture

- Run the [start_sd_server.py](https://github.com/p4vv37/SDTG4B/blob/main/start_sd_server.py) file in the Conda environment with the required packages, or use the SD server settings > Run SD server button in the plugin UI (a quick way to confirm the server is up is shown at the end of this section).

- The plugin UI is visible in the 3D Viewport while in Object Mode.

- Set a value for the Target object. **The target object needs to have correct UVs.**

- Press the Generate textures button in the plugin UI.

- Creating a simple starting texture and unchecking the "Start with empty texture" checkbox greatly influences the quality of the result.
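To confirm the server is up before generating (loading the model can take a while), here is a minimal check, assuming the default host and port hard-coded in this repository:

    import requests
    # /status answers with HTTP 200 once start_sd_server.py is ready
    print(requests.get("http://127.0.0.1:5000/status", timeout=5).status_code)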

--------------------------------------------------------------------------------
/.gitignore:
--------------------------------------------------------------------------------
# Byte-compiled / optimized / DLL files
__pycache__/
*.py[cod]
*$py.class

# C extensions
*.so

# Distribution / packaging
.Python
build/
develop-eggs/
dist/
downloads/
eggs/
.eggs/
lib/
lib64/
parts/
sdist/
var/
wheels/
pip-wheel-metadata/
share/python-wheels/
*.egg-info/
.installed.cfg
*.egg
MANIFEST

# PyInstaller
#  Usually these files are written by a python script from a template
#  before PyInstaller builds the exe, so as to inject date/other infos into it.
*.manifest
*.spec

# Installer logs
pip-log.txt
pip-delete-this-directory.txt

# Unit test / coverage reports
htmlcov/
.tox/
.nox/
.coverage
.coverage.*
.cache
nosetests.xml
coverage.xml
*.cover
*.py,cover
.hypothesis/
.pytest_cache/

# Translations
*.mo
*.pot

# Django stuff:
*.log
local_settings.py
db.sqlite3
db.sqlite3-journal

# Flask stuff:
instance/
.webassets-cache

# Scrapy stuff:
.scrapy

# Sphinx documentation
docs/_build/

# PyBuilder
target/

# Jupyter Notebook
.ipynb_checkpoints

# IPython
profile_default/
ipython_config.py

# pyenv
.python-version

# pipenv
#   According to pypa/pipenv#598, it is recommended to include Pipfile.lock in version control.
#   However, in case of collaboration, if having platform-specific dependencies or dependencies
#   having no cross-platform support, pipenv may install dependencies that don't work, or not
#   install all needed dependencies.
#Pipfile.lock

# PEP 582; used by e.g. github.com/David-OConnor/pyflow
__pypackages__/

# Celery stuff
celerybeat-schedule
celerybeat.pid

# SageMath parsed files
*.sage.py

# Environments
.env
.venv
env/
venv/
ENV/
env.bak/
venv.bak/

# Spyder project settings
.spyderproject
.spyproject

# Rope project settings
.ropeproject

# mkdocs documentation
/site

# mypy
.mypy_cache/
.dmypy.json
dmypy.json

# Pyre type checker
.pyre/

--------------------------------------------------------------------------------
/start_sd_server.py:
--------------------------------------------------------------------------------
import pathlib
import time
from http.server import BaseHTTPRequestHandler, HTTPServer
import json
import os

# OpenCV's OpenEXR support is disabled by default and the flag is read when
# cv2 is imported, so it has to be set before the import for cv2.imread to
# load the UV .exr renders.
os.environ["OPENCV_IO_ENABLE_OPENEXR"] = "1"

import torch
from diffusers import StableDiffusionDepth2ImgPipeline
from PIL import Image, ImageChops
import numpy as np
import cv2


def finish_texture(out_img_arr, partial=False):
    """Fill unpainted (black) texels by averaging their painted neighbours.

    The first pass averages the horizontal neighbours, the second the
    vertical ones. With partial=True a texel is only filled when both of its
    neighbours are painted.
    """
    for x in range(out_img_arr.shape[0]):
        for y in range(out_img_arr.shape[1]):
            color = out_img_arr[x][y]
            if sum(color) < 0.00001:
                number_of_colors = 0
                out_color = np.array([0, 0, 0])
                for x1, y1 in [[x, y - 1], [x, y + 1]]:
                    if x1 >= out_img_arr.shape[0] or y1 >= out_img_arr.shape[1]:
                        continue
                    c = out_img_arr[x1][y1]
                    if sum(c) > 0.00001:
                        out_color += c
                        number_of_colors += 1
                if number_of_colors == 0 or (partial and number_of_colors < 2):
                    continue
                out_color = out_color / float(number_of_colors)
                out_img_arr[x, y] = out_color

    for x in range(out_img_arr.shape[0]):
        for y in range(out_img_arr.shape[1]):
            color = out_img_arr[x][y]
            if sum(color) < 0.00001:
                number_of_colors = 0
                out_color = np.array([0, 0, 0])
                for x1, y1 in [[x - 1, y], [x + 1, y]]:
                    if x1 >= out_img_arr.shape[0] or y1 >= out_img_arr.shape[1]:
                        continue
                    c = out_img_arr[x1][y1]
                    if sum(c) > 0.00001:
                        out_color += c
                        number_of_colors += 1
                if number_of_colors == 0 or (partial and number_of_colors < 2):
                    continue
                out_color = out_color / float(number_of_colors)
                out_img_arr[x, y] = out_color
    return out_img_arr
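
# The nested Python loops above are easy to follow but slow on large textures.
# A vectorized NumPy equivalent of the same neighbour-averaging idea is
# sketched below for reference (an assumption, not used by the add-on; note
# two differences: np.roll wraps around the image border, and fills do not
# cascade within a pass the way the sequential in-place updates above do):
def finish_texture_vectorized(out_img_arr, partial=False):
    img = out_img_arr.astype(np.float32)
    for axis in (1, 0):  # horizontal neighbours first, then vertical
        painted = img.sum(axis=-1) >= 0.00001
        acc = np.zeros_like(img)
        cnt = np.zeros(img.shape[:2], dtype=np.float32)
        for shift in (-1, 1):
            neighbour = np.roll(img, shift, axis=axis)
            neighbour_painted = np.roll(painted, shift, axis=axis)
            acc += np.where(neighbour_painted[..., None], neighbour, 0)
            cnt += neighbour_painted
        fill = ~painted & (cnt >= (2 if partial else 1))
        img[fill] = acc[fill] / cnt[fill][:, None]
    return img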


class Handler(BaseHTTPRequestHandler):
    depth2img_pipe = StableDiffusionDepth2ImgPipeline.from_pretrained(
        "stabilityai/stable-diffusion-2-depth",
        torch_dtype=torch.float16,
    ).to("cuda")

    # noinspection PyPep8Naming
    def do_GET(self):
        if self.path == "/status":
            self.send_response(200)
            self.send_header('Content-Type', 'text/html')
            self.end_headers()
            return

        length = int(self.headers.get('content-length'))
        field_data = self.rfile.read(length)
        data = json.loads(str(field_data, "UTF-8"))

        prompt = data.get("prompt")
        n_prompt = data.get("n_prompt", "")

        num_inference_steps = data.get("steps")
        depth_path = data.get("depth")
        src_path = data.get("render")
        uv_path = data.get("uv")
        alpha_path = data.get("alpha")
        out_txt_path = data.get("out_txt")
        diffuse_path = data.get("diffuse")
        strength = float(data.get("strength", 0.8))
        guidance_scale = float(data.get("guidance_scale", 9))
        depth_based_mixing = int(data.get("depth_based_mixing", False))

        seed = data.get("seed", 1024)
        generator = torch.Generator(device="cuda").manual_seed(seed)

        if prompt is None or out_txt_path is None:
            self.send_response(400, "Incorrect payload")
            self.end_headers()
            return

        self.send_response(200)
        self.send_header('Content-Type', 'text/html')
        self.end_headers()

        if self.path == "/depth2img_step":
            init_img = Image.open(src_path)
            original_alpha_img = Image.open(alpha_path).convert("RGB")
            diffuse_img = Image.open(diffuse_path)
            # Wash the diffuse render out towards gray so the diffusion model
            # can repaint it, and mask out everything outside the object.
            gray = Image.new('RGB', diffuse_img.size, (128, 128, 128))
            diffuse_img = ImageChops.blend(diffuse_img, gray, 0.5 * strength)
            diffuse_img = ImageChops.multiply(diffuse_img, original_alpha_img)
            # Use float32 so the scaling below does not overflow uint8.
            depth_arr = np.array(Image.open(depth_path).convert("L"), dtype=np.float32)
            depth_arr *= 1000
            depth_arr += 1000
            depth_arr = np.expand_dims(depth_arr, axis=0)
            depth_arr = torch.from_numpy(depth_arr)

            # NOTE: the diffusion strength is fixed here; the payload
            # "strength" only controls the gray blend above.
            img = self.depth2img_pipe(prompt=prompt, image=diffuse_img, depth_map=depth_arr, negative_prompt=n_prompt,
                                      guidance_scale=guidance_scale, strength=0.8, generator=generator,
                                      num_inference_steps=num_inference_steps, num_images_per_prompt=1).images[0]
            img.save(pathlib.Path(src_path).parent / "prev.png")

            scaled_img_size = tuple(x * 2 for x in init_img.size)

            # Scale for UV interpolation
            img = np.array(img.resize(scaled_img_size, Image.Resampling.BICUBIC))
            depth_arr = np.array(Image.open(depth_path).resize(scaled_img_size, Image.Resampling.BICUBIC))
            uv_img = cv2.imread(uv_path, cv2.IMREAD_UNCHANGED)
            uv_img = cv2.cvtColor(uv_img, cv2.COLOR_BGR2RGB)
            uv_img = cv2.resize(uv_img, scaled_img_size, interpolation=cv2.INTER_CUBIC)
            alpha_img = Image.open(alpha_path).resize(scaled_img_size, Image.Resampling.BICUBIC)

            uv_img_arr = np.asarray(uv_img)
            uv_img_arr = np.clip(uv_img_arr, 0, 1.0)
            img_arr = np.asarray(img)

            out_img = Image.open(out_txt_path)
            out_img_arr = np.array(out_img)
            wip_out_img_arr = out_img_arr.copy()
            src_alpha_arr = np.array(alpha_img)

            # Project every rendered pixel into the texture through the UV pass.
            for x in range(uv_img_arr.shape[0]):
                for y in range(uv_img_arr.shape[1]):
                    u, v, w = uv_img_arr[x][y]
                    a = src_alpha_arr[x][y]
                    try:
                        if a > 244 and sum([u, v, w]) > 0.00000001:
                            # Map UV to texel coordinates; V is flipped.
                            u2 = int(out_img_arr.shape[1] - 1) - int(out_img_arr.shape[1] * v) - 1
                            v2 = int(out_img_arr.shape[0] * u) - 1

                            if depth_based_mixing and sum(out_img_arr[u2, v2]) > 0:
                                # The closer the pixel, the more it overwrites
                                # what is already in the texture.
                                depth = (np.clip(depth_arr[x][y][0] / 255, 0, 0.5) * 2) ** 2
                                wip_out_img_arr[u2, v2] = img_arr[x][y] * (1 - depth) + out_img_arr[u2, v2] * depth
                            else:
                                wip_out_img_arr[u2, v2] = img_arr[x][y]
                    except Exception:
                        # Skip texels whose UVs fall outside the texture.
                        pass
            for x in range(out_img_arr.shape[0]):
                for y in range(out_img_arr.shape[1]):
                    if depth_based_mixing:
                        out_img_arr[x, y] = wip_out_img_arr[x][y]
                    elif sum(out_img_arr[x, y]) == 0:
                        out_img_arr[x, y] = wip_out_img_arr[x][y]

            out_img_arr = finish_texture(out_img_arr, partial=True)
            out = Image.fromarray(out_img_arr.astype('uint8'), 'RGB')
            out.save(out_txt_path)

        if self.path == "/finish_texture":
            out_img = Image.open(out_txt_path)
            out_img_arr = np.array(out_img)
            out_img_arr = finish_texture(out_img_arr)
            out = Image.fromarray(out_img_arr.astype('uint8'), 'RGB')
            out.save(out_txt_path)

        message = F"Request {self.path} processed"
        print(message)
        self.wfile.write(bytes(message, "utf8"))
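
# The Blender add-on (WM_OT_GenerateTxt.generate_data in operators.py) drives
# this server with JSON-over-GET requests. A minimal client sketch with
# placeholder paths (field names taken from do_GET above):
#
#   import requests
#   payload = {
#       "prompt": "a pirate treasure chest", "n_prompt": "dark", "steps": 30,
#       "depth": "tmp/depth0001.bmp", "uv": "tmp/uv0001.exr",
#       "render": "tmp/Image0001.png", "alpha": "tmp/alpha0001.png",
#       "diffuse": "tmp/diffuse0001.bmp", "out_txt": "result/txt.png",
#       "depth_based_mixing": 1, "seed": 1024,
#   }
#   requests.get("http://127.0.0.1:5000/depth2img_step", json=payload)
#   requests.get("http://127.0.0.1:5000/finish_texture", json=payload)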


def start_server(port):
    with HTTPServer(('127.0.0.1', port), Handler) as server:
        server.serve_forever()


if __name__ == "__main__":
    start_server(5000)

--------------------------------------------------------------------------------
/__init__.py:
--------------------------------------------------------------------------------
import os
import bpy
from bpy.props import (StringProperty,
                       BoolProperty,
                       IntProperty,
                       FloatProperty,
                       PointerProperty,
                       )
from bpy.types import (Panel,
                       Menu,
                       Operator,
                       PropertyGroup,
                       )

from .operators import (WM_OT_RunSD,
                        WM_OT_GenerateTxt,
                        WM_OT_PreviewCameraPath
                        )

bl_info = {
    "name": "Stable Diffusion textures generator",
    "description": "",
    "author": "Pawel Kowalski",
    "version": (0, 0, 1),
    "blender": (2, 80, 0),
    "location": "3D View > Tools",
    "warning": "",
    "wiki_url": "",
    "tracker_url": "",
    "category": "Material"
}


class SDProperties(PropertyGroup):
    prompt: StringProperty(
        name="Prompt",
        description="Description of the desired image",
        default="An oil painting of a pirate treasure chest, gold, coins, highly detailed, trending on artstation, "
                "concept art, Professional, gold coins on ground, wooden box with wooden cover",
        maxlen=256,
        subtype='NONE'
    )

    negative_prompt: StringProperty(
        name="Negative prompt",
        description="Description of how the image should not look",
        default="blue topping, dark, shadows, bright spots, glossy, only gold",
        maxlen=256,
        subtype='NONE'
    )

    out_dir: StringProperty(
        name="Output directory",
        description="Directory for output and temporary files.",
        default="cwd",
        maxlen=256,
        subtype='DIR_PATH'
    )

    out_txt: StringProperty(
        name="Output texture",
        description="Path of the output texture file.",
        default="cwd" + os.sep + "txt.png",
        maxlen=256,
        subtype='FILE_PATH'
    )

    host: StringProperty(
        name="SD server host address",
        description="Stable Diffusion server address",
        default="127.0.0.1",
        maxlen=256,
        subtype='NONE'
    )

    num_inference_steps: IntProperty(
        name="Number of inference steps",
        description="A number of denoising steps",
        default=30,
        min=1,
        max=100
    )

    guidance_scale: FloatProperty(
        name="Guidance scale",
        description="Scale for classifier-free guidance",
        default=7.5,
        min=1.0,
        max=20.0
    )

    seed: IntProperty(
        name="Seed",
        description="Random seed",
        default=1234567,
        min=0
    )

    views_num: IntProperty(
        name="Number of views",
        description="Number of views that will be rendered around the object for texture generation",
        default=4,
        min=0,
        max=8
    )

    camera_r: FloatProperty(
        name="Camera radius",
        description="Radius of the camera path",
        default=6.0,
        min=1.0,
        max=20.0
    )

    camera_z: FloatProperty(
        name="Camera Z position",
        description="Z position of the camera path",
        default=2,
        min=1.0,
        max=20.0
    )

    port: IntProperty(
        name="Server port",
        description="Port of the Stable Diffusion server",
        default=5000,
        min=0
    )

    target: PointerProperty(
        name="Target object",
        description="Object that will be used for the texture generation",
        type=bpy.types.Object
    )

    depth_based_blending: BoolProperty(
        name="Depth-based blending",
        description="Enable/disable depth-based blending of renders for the texture",
        default=True
    )

    clear_txt: BoolProperty(
        name="Start with empty texture",
        description="If enabled, the output texture will be cleared before generation.\n"
                    "If disabled, the output texture will be used as a starting point for generation.\n"
                    "This might improve control over the result and its quality.",
        default=True
    )

    resolution_x: IntProperty(
        name="Resolution X",
        description="Resolution X",
        default=768,
        min=0
    )

    resolution_y: IntProperty(
        name="Resolution Y",
        description="Resolution Y",
        default=512,
        min=0
    )

    # my_enum: EnumProperty(
    #     name="Dropdown:",
    #     description="Apply Data to attribute.",
    #     items=[('OP1', "Option 1", ""),
    #            ('OP2', "Option 2", ""),
    #            ('OP3', "Option 3", ""),
    #            ]
    # )
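
# register() below attaches SDProperties to the scene, so once the add-on is
# enabled every setting above is also scriptable, e.g. (a sketch):
#
#   sd_tool = bpy.context.scene.sd_txt_tool
#   sd_tool.prompt = "a wooden barrel, highly detailed"
#   sd_tool.views_num = 6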
description="Port of stable diffusion server", 126 | default=5000, 127 | min=0 128 | ) 129 | 130 | target: PointerProperty( 131 | name="Target object", 132 | description="Object that will be used for the texture generation", 133 | type=bpy.types.Object 134 | ) 135 | 136 | depth_based_blending: BoolProperty( 137 | name="Depth-based blending", 138 | description="Enable/disable depth-based renders blending for texture", 139 | default=True 140 | ) 141 | 142 | clear_txt: BoolProperty( 143 | name="Start with empty texture", 144 | description="If enabled, the output texture will be cleared before generation.\n" 145 | "If disabled, the output texture will be used as a starting point for generation\n" 146 | "This might improve control over the result and improve quality of the result.", 147 | default=True 148 | ) 149 | 150 | resolution_x: IntProperty( 151 | name="Resolution X", 152 | description="Resolution X", 153 | default=768, 154 | min=0 155 | ) 156 | 157 | resolution_y: IntProperty( 158 | name="Resolution Y", 159 | description="Resolution X", 160 | default=512, 161 | min=0 162 | ) 163 | 164 | # my_enum: EnumProperty( 165 | # name="Dropdown:", 166 | # description="Apply Data to attribute.", 167 | # items=[('OP1', "Option 1", ""), 168 | # ('OP2', "Option 2", ""), 169 | # ('OP3', "Option 3", ""), 170 | # ] 171 | # ) 172 | 173 | 174 | # ------------------------------------------------------------------------ 175 | # Menus 176 | # ------------------------------------------------------------------------ 177 | 178 | 179 | class OBJECT_MT_CustomMenu(Menu): 180 | bl_label = "Select" 181 | bl_idname = "OBJECT_MT_custom_menu" 182 | 183 | def draw(self, context): 184 | layout = self.layout 185 | 186 | # Built-in operators 187 | layout.operator("object.select_all", text="Select/Deselect All").action = 'TOGGLE' 188 | layout.operator("object.select_all", text="Inverse").action = 'INVERT' 189 | layout.operator("object.select_random", text="Random") 190 | 191 | 192 | # ------------------------------------------------------------------------ 193 | # Panel in Object Mode 194 | # ------------------------------------------------------------------------ 195 | 196 | 197 | class OBJECT_PT_MainSDPanel(Panel): 198 | bl_label = "Stable Diffusion Texture Generator" 199 | bl_idname = "OBJECT_PT_main_sd_panel" 200 | bl_space_type = "VIEW_3D" 201 | bl_region_type = "UI" 202 | bl_category = "Tools" 203 | bl_context = "objectmode" 204 | 205 | @classmethod 206 | def poll(self, context): 207 | return context.object is not None 208 | 209 | def draw(self, context): 210 | layout = self.layout 211 | scene = context.scene 212 | sd_tool = scene.sd_txt_tool 213 | 214 | layout.label(text="SD generation settings") 215 | layout.prop(sd_tool, "prompt") 216 | layout.prop(sd_tool, "negative_prompt") 217 | layout.prop(sd_tool, "target") 218 | layout.prop(sd_tool, "out_dir") 219 | layout.prop(sd_tool, "out_txt") 220 | layout.prop(sd_tool, "clear_txt") 221 | 222 | 223 | class OBJECT_PT_SDPanel(Panel): 224 | bl_label = "Stable Diffusion settings" 225 | bl_idname = "OBJECT_PT_sd_panel" 226 | bl_space_type = "VIEW_3D" 227 | bl_region_type = "UI" 228 | bl_category = "Tools" 229 | bl_context = "objectmode" 230 | bl_parent_id = "OBJECT_PT_main_sd_panel" 231 | bl_options = {"DEFAULT_CLOSED"} 232 | 233 | @classmethod 234 | def poll(self, context): 235 | return context.object is not None 236 | 237 | def draw(self, context): 238 | layout = self.layout 239 | scene = context.scene 240 | sd_tool = scene.sd_txt_tool 241 | 242 | layout.prop(sd_tool, 
"depth_based_blending") 243 | layout.prop(sd_tool, "num_inference_steps") 244 | layout.prop(sd_tool, "guidance_scale") 245 | layout.prop(sd_tool, "seed") 246 | layout.prop(sd_tool, "resolution_x") 247 | layout.prop(sd_tool, "resolution_y") 248 | 249 | 250 | class OBJECT_PT_ScenePanel(Panel): 251 | bl_label = "Scene settings" 252 | bl_idname = "OBJECT_PT_scene_panel" 253 | bl_space_type = "VIEW_3D" 254 | bl_region_type = "UI" 255 | bl_category = "Tools" 256 | bl_context = "objectmode" 257 | bl_parent_id = "OBJECT_PT_main_sd_panel" 258 | bl_options = {"DEFAULT_CLOSED"} 259 | 260 | @classmethod 261 | def poll(self, context): 262 | return context.object is not None 263 | 264 | def draw(self, context): 265 | layout = self.layout 266 | scene = context.scene 267 | sd_tool = scene.sd_txt_tool 268 | 269 | layout.prop(sd_tool, "views_num") 270 | layout.prop(sd_tool, "camera_r") 271 | layout.prop(sd_tool, "camera_z") 272 | # layout.operator("wm.preview_camera") 273 | 274 | 275 | class OBJECT_PT_ServerPanel(Panel): 276 | bl_label = "SD server settings" 277 | bl_idname = "OBJECT_PT_server_panel" 278 | bl_space_type = "VIEW_3D" 279 | bl_region_type = "UI" 280 | bl_category = "Tools" 281 | bl_context = "objectmode" 282 | bl_parent_id = "OBJECT_PT_main_sd_panel" 283 | bl_options = {"DEFAULT_CLOSED"} 284 | 285 | @classmethod 286 | def poll(self, context): 287 | return context.object is not None 288 | 289 | def draw(self, context): 290 | layout = self.layout 291 | scene = context.scene 292 | sd_tool = scene.sd_txt_tool 293 | 294 | layout.prop(sd_tool, "host") 295 | layout.prop(sd_tool, "port") 296 | layout.operator("wm.start_sd_server") 297 | 298 | 299 | class OBJECT_PT_ActionsPanel(Panel): 300 | bl_label = "Execute" 301 | bl_idname = "OBJECT_PT_actions_panel" 302 | bl_space_type = "VIEW_3D" 303 | bl_region_type = "UI" 304 | bl_category = "Tools" 305 | bl_context = "objectmode" 306 | bl_parent_id = "OBJECT_PT_main_sd_panel" 307 | 308 | @classmethod 309 | def poll(self, context): 310 | return context.object is not None 311 | 312 | def draw(self, context): 313 | layout = self.layout 314 | scene = context.scene 315 | sd_tool = scene.sd_txt_tool 316 | 317 | layout.operator("wm.generate_txt") 318 | # ------------------------------------------------------------------------ 319 | # Registration 320 | # ------------------------------------------------------------------------ 321 | 322 | classes = ( 323 | SDProperties, 324 | WM_OT_RunSD, 325 | WM_OT_GenerateTxt, 326 | WM_OT_PreviewCameraPath, 327 | OBJECT_PT_MainSDPanel, 328 | OBJECT_PT_SDPanel, 329 | OBJECT_PT_ScenePanel, 330 | OBJECT_PT_ServerPanel, 331 | OBJECT_PT_ActionsPanel 332 | ) 333 | 334 | 335 | def register(): 336 | from bpy.utils import register_class 337 | for cls in classes: 338 | register_class(cls) 339 | 340 | bpy.types.Scene.sd_txt_tool = PointerProperty(type=SDProperties) 341 | 342 | 343 | def unregister(): 344 | from bpy.utils import unregister_class 345 | for cls in reversed(classes): 346 | unregister_class(cls) 347 | del bpy.types.Scene.sd_txt_tool 348 | 349 | 350 | if __name__ == "__main__": 351 | register() 352 | -------------------------------------------------------------------------------- /operators.py: -------------------------------------------------------------------------------- 1 | import pathlib 2 | import socket 3 | import threading 4 | import time 5 | 6 | import requests 7 | import math 8 | import mathutils 9 | import os 10 | import bpy 11 | from bpy.types import Operator 12 | 13 | 14 | class SDProcessor(threading.Thread): 15 | 

--------------------------------------------------------------------------------
/operators.py:
--------------------------------------------------------------------------------
import pathlib
import socket
import threading
import time

import requests
import math
import mathutils
import os
import bpy
from bpy.types import Operator


class SDProcessor(threading.Thread):
    """Worker thread that drives the SD server.

    Rendering itself must happen on Blender's main thread, so this thread only
    requests renders through the waiting_for_render flag and waits for the
    modal operator to perform them.
    """
    waiting_for_render = False
    waiting_for_refresh = False
    camera_location = (0, 0, 0)
    stop = False

    def __init__(self,
                 data, api_url, resolution_x, resolution_y, camera_r, camera_z, wm, views_num, camera):
        self.data = data
        self.api_url = api_url
        self.resolution_x = resolution_x
        self.resolution_y = resolution_y
        self.camera_r = camera_r
        self.camera_z = camera_z
        self.views_num = views_num
        self.camera = camera
        self.iteration = 0
        threading.Thread.__init__(self)

    def finish_texture(self):
        data = self.data.copy()
        response = requests.get(self.api_url + "/finish_texture", json=data)
        print(response.status_code, response.text)

    def depth2img(self, **kwargs):
        data = self.data.copy()
        data.update(kwargs)
        # Fire and forget: the request triggers a long generation step on the
        # server, so the short timeout is expected to expire.
        try:
            requests.get(self.api_url + "/depth2img_step", json=data, timeout=1)
        except (requests.exceptions.ConnectionError,
                requests.exceptions.Timeout,
                requests.exceptions.ReadTimeout):
            pass

        # Poll /status until the server is responsive again.
        while True:
            try:
                requests.get(self.api_url + "/status", json={}, timeout=1)
            except (requests.exceptions.ConnectionError,
                    requests.exceptions.Timeout,
                    requests.exceptions.ReadTimeout):
                time.sleep(1)
                continue
            break

    def render_view(self, angle, z_offset=None, radius=None):
        print(F"Rendering: {angle} in thread.")

        # Fall back to the UI camera settings unless an override is given
        # (the extra top view uses a smaller radius and a higher Z).
        radius = self.camera_r if radius is None else radius
        z_offset = self.camera_z if z_offset is None else z_offset

        angle_radians = 2 * math.pi * angle / 360

        # Place the camera on a circle around the object
        new_camera_pos = mathutils.Vector((radius * math.cos(angle_radians),
                                           radius * math.sin(angle_radians),
                                           z_offset))
        self.camera_location = new_camera_pos

        # Ask the modal operator to render UVs, depth etc. and wait for it
        self.waiting_for_render = True
        while self.waiting_for_render:
            time.sleep(1)
        self.iteration += 1

    def run(self):

        number_of_renders = self.views_num
        for num in range(number_of_renders):
            angle = 360 * num / number_of_renders
            self.render_view(angle)
            if self.stop:
                return
            self.depth2img()
            bpy.context.window_manager.progress_update(70 * num // number_of_renders)
        if self.stop:
            return
        # Extra view from above, to cover the top part of the object
        self.render_view(0, z_offset=3, radius=3)
        if self.stop:
            return
        self.depth2img(strength=0.5)
        bpy.context.window_manager.progress_update(80)

        self.render_view(0)
        if self.stop:
            return
        self.depth2img(strength=0.5)
        bpy.context.window_manager.progress_update(90)

        self.finish_texture()

        bpy.context.window_manager.progress_update(100)
        bpy.context.window_manager.progress_end()
        self.waiting_for_refresh = True
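
# bpy render calls are not safe from a worker thread, so SDProcessor and the
# WM_OT_GenerateTxt modal operator below cooperate through a flag handshake:
#
#   SDProcessor.render_view()            WM_OT_GenerateTxt.modal() (main thread)
#   -------------------------            -----------------------------------
#   camera_location = ...
#   waiting_for_render = True   ------>  sees the flag on a timer tick,
#   sleep until flag cleared    <------  renders, clears waiting_for_render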


def create_material(txt_path):
    output_image = bpy.data.images.new(str(txt_path), width=768, height=768)
    output_image.file_format = 'PNG'
    output_image.filepath = str(txt_path)
    output_image.save()

    mat = bpy.data.materials.get("SDResultMaterial")
    if mat is None:
        mat = bpy.data.materials.new(name="SDResultMaterial")
    mat.use_nodes = True
    principled = mat.node_tree.nodes.get('Principled BSDF')

    # Feed the generated texture into the Base Color of the Principled BSDF.
    tex_node = mat.node_tree.nodes.new('ShaderNodeTexImage')
    tex_node.image = output_image
    mat.node_tree.links.new(tex_node.outputs[0], principled.inputs[0])

    # Expose the mesh UVs as an AOV so the compositor can write them to an EXR.
    uv_node = mat.node_tree.nodes.new("ShaderNodeTexCoord")
    aov_out_node = mat.node_tree.nodes.new("ShaderNodeOutputAOV")
    aov_out_node.name = "UV"
    mat.node_tree.links.new(uv_node.outputs[2], aov_out_node.inputs[0])
    return mat


# ------------------------------------------------------------------------
#   Operators
# ------------------------------------------------------------------------

PROCESS_CACHE = None


class WM_OT_RunSD(Operator):
    bl_label = "Run SD server"
    bl_idname = "wm.start_sd_server"

    def execute(self, context):
        scene = context.scene
        sd_tool = scene.sd_txt_tool

        sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        result = sock.connect_ex((sd_tool.host, sd_tool.port))
        sock.close()
        if result == 0:
            self.report({"INFO"}, "Port is already taken or the server is already running.")
            return {'FINISHED'}
        import subprocess

        if "SDTG4B_CONDA" not in str(subprocess.check_output(["conda", "info", "--envs"])):
            self.report({"ERROR"}, F"Conda environment SDTG4B_CONDA not found in the system. "
                                   F"Please make sure that the proper environment exists.")
            return {'FINISHED'}
        global PROCESS_CACHE
        script_path = os.path.dirname(__file__) + os.sep + "start_sd_server.py"
        PROCESS_CACHE = subprocess.Popen(["conda", "run", "-n", "SDTG4B_CONDA", "python", script_path])
        return {'FINISHED'}


class WM_OT_PreviewCameraPath(Operator):
    bl_label = "Generate camera path preview"
    bl_idname = "wm.preview_camera"

    def execute(self, context):
        # Not implemented yet; the corresponding button is commented out in
        # OBJECT_PT_ScenePanel.
        return {'FINISHED'}


class WM_OT_CreateMaterial(Operator):
    bl_label = "Create material that will be used by the tool"
    bl_idname = "wm.create_sd_material"

    def execute(self, context):
        scene = context.scene
        sd_tool = scene.sd_txt_tool

        # Mirror the path logic of WM_OT_GenerateTxt.invoke: fall back to the
        # result directory next to the .blend file when the default is kept.
        if sd_tool.out_txt == "cwd" + os.sep + "txt.png" or not sd_tool.out_txt:
            txt_path = str(pathlib.Path(bpy.path.abspath("//")) / "result" / "txt.png")
        else:
            txt_path = sd_tool.out_txt
        create_material(txt_path)

        return {'FINISHED'}


class WM_OT_GenerateTxt(Operator):
    bl_label = "Generate textures"
    bl_idname = "wm.generate_txt"
    output_img_path = ""
    api_url = ""
    sd_tool = None
    out_path = pathlib.Path()
    tmp_path = pathlib.Path()
    result_path = pathlib.Path()
    txt_path = pathlib.Path()
    wm = None
    t = None
    progress = 0.0
    _timer = None
    t_cache = list()

    def generate_data(self):
        # Frame number padded to four digits, matching the names produced by
        # the compositor File Output nodes (e.g. depth0001.bmp).
        frame_code = "{0:0>4}".format(bpy.context.scene.frame_current)

        return {
            "prompt": self.sd_tool.prompt,
            "n_prompt": self.sd_tool.negative_prompt,
            "depth": str(self.tmp_path / F"depth{frame_code}.bmp"),
            "uv": str(self.tmp_path / F"uv{frame_code}.exr"),
            "out_txt": self.txt_path,
            "render": str(self.tmp_path / F"Image{frame_code}.png"),
            "alpha": str(self.tmp_path / F"alpha{frame_code}.png"),
            "diffuse": str(self.tmp_path / F"diffuse{frame_code}.bmp"),
            "depth_based_mixing": self.sd_tool.depth_based_blending,
            "steps": self.sd_tool.num_inference_steps,
            "guidance_scale": self.sd_tool.guidance_scale,
            "seed": self.sd_tool.seed
        }
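
    # For frame 1 the compositor File Output nodes created in
    # setup_composition_nodes_and_material() below write into <out_dir>/tmp,
    # and generate_data() above builds the matching names:
    #
    #   tmp/depth0001.bmp    normalized Z pass
    #   tmp/uv0001.exr       UV AOV (32-bit EXR)
    #   tmp/alpha0001.png    object mask
    #   tmp/diffuse0001.bmp  flat diffuse render
    #   tmp/Image0001.png    full render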
scene..") 229 | # Materials, textures... 230 | bpy.context.scene.use_nodes = True 231 | tree = bpy.context.scene.node_tree 232 | for node in tree.nodes: 233 | if node.label == "Output": 234 | node.base_path = str(pathlib.Path(self.result_path)) 235 | 236 | bpy.context.window_manager.progress_update(33) 237 | 238 | # Create/use material 239 | if self.sd_tool.clear_txt and os.path.exists(self.txt_path): 240 | os.remove(self.txt_path) 241 | mat = create_material(self.txt_path) 242 | 243 | if self.sd_tool.target.data.materials: 244 | self.sd_tool.target.data.materials[0] = mat 245 | else: 246 | # no slots 247 | self.sd_tool.target.data.materials.append(mat) 248 | 249 | # Render and composition 250 | bpy.context.scene.use_nodes = True 251 | tree = bpy.context.scene.node_tree 252 | for node in tree.nodes: 253 | tree.nodes.remove(node) 254 | 255 | bpy.data.scenes["Scene"].view_layers["ViewLayer"].use_pass_diffuse_color = True 256 | bpy.data.scenes["Scene"].view_layers["ViewLayer"].use_pass_combined = True 257 | bpy.data.scenes["Scene"].view_layers["ViewLayer"].use_pass_z = True 258 | try: 259 | bpy.data.scenes["Scene"].view_layers["ViewLayer"].uv = False 260 | except Exception: 261 | pass 262 | bpy.ops.scene.view_layer_add_aov() 263 | bpy.data.scenes["Scene"].view_layers["ViewLayer"].active_aov.name = "UV" 264 | 265 | bpy.data.scenes["Scene"].render.film_transparent = True 266 | bpy.data.scenes["Scene"].render.engine = 'BLENDER_EEVEE' 267 | 268 | render_layers = tree.nodes.new('CompositorNodeRLayers') 269 | 270 | # depth 271 | depth_file_output = tree.nodes.new(type="CompositorNodeOutputFile") 272 | depth_file_output.label = 'Depth Output' 273 | depth_file_output.base_path = str(self.tmp_path) 274 | depth_file_output.file_slots[0].path = "depth" 275 | depth_file_output.file_slots[0].use_node_format = True 276 | depth_file_output.format.file_format = "BMP" 277 | 278 | normalize_node = tree.nodes.new(type="CompositorNodeNormalize") 279 | 280 | tree.links.new(render_layers.outputs['Depth'], normalize_node.inputs[0]) 281 | tree.links.new(normalize_node.outputs['Value'], depth_file_output.inputs[0]) 282 | 283 | bpy.context.window_manager.progress_update(70) 284 | 285 | # uv 286 | uv_file_output = tree.nodes.new(type="CompositorNodeOutputFile") 287 | uv_file_output.label = 'UV Output' 288 | uv_file_output.base_path = str(self.tmp_path) 289 | uv_file_output.file_slots[0].path = "uv" 290 | uv_file_output.file_slots[0].use_node_format = True 291 | uv_file_output.format.file_format = "OPEN_EXR" 292 | uv_file_output.format.color_depth = '32' 293 | tree.links.new(render_layers.outputs['UV'], uv_file_output.inputs[0]) 294 | 295 | bpy.context.window_manager.progress_update(80) 296 | 297 | # alpha 298 | alpha_file_output = tree.nodes.new(type="CompositorNodeOutputFile") 299 | alpha_file_output.label = 'Alpha Output' 300 | alpha_file_output.base_path = str(self.tmp_path) 301 | alpha_file_output.file_slots[0].path = "alpha" 302 | alpha_file_output.file_slots[0].use_node_format = True 303 | alpha_file_output.format.file_format = "PNG" 304 | alpha_file_output.format.color_mode = "BW" 305 | tree.links.new(render_layers.outputs['Alpha'], alpha_file_output.inputs[0]) 306 | 307 | bpy.context.window_manager.progress_update(90) 308 | 309 | # diffuse 310 | diffuse_file_output = tree.nodes.new(type="CompositorNodeOutputFile") 311 | diffuse_file_output.label = 'Diffuse Output' 312 | diffuse_file_output.base_path = str(self.tmp_path) 313 | diffuse_file_output.file_slots[0].path = "diffuse" 314 | 

    def modal(self, context, event):
        if event.type in {'ESC'}:
            self.cancel(context)
            return {'CANCELLED'}

        if event.type == 'TIMER' and self.t is not None:
            # Ease the progress bar toward the next milestone between updates.
            min_value = 100 * (max(self.t.iteration - 1, 0) / (self.t.views_num + 2))
            max_value = 100 * (self.t.iteration / (self.t.views_num + 2))
            delta = max_value - min_value
            self.progress += 0.05 * (delta - self.progress)
            bpy.context.window_manager.progress_update(min_value + self.progress)

        if self.t is not None and self.t.is_alive():
            if self.t.waiting_for_render:
                print("RENDER!")
                # Reload textures:
                for img in bpy.data.images:
                    img.reload()

                # Basic parameters
                scene = bpy.data.scenes['Scene']
                render = scene.render

                # Resolution change
                render.resolution_x = self.sd_tool.resolution_x
                render.resolution_y = self.sd_tool.resolution_y

                bpy.context.scene.camera.location = self.t.camera_location

                bpy.ops.render.render()
                self.t.waiting_for_render = False
                self.progress = 0.0
            if self.t.waiting_for_refresh:
                for img in bpy.data.images:
                    img.reload()
                self.t.waiting_for_refresh = False
            return {'PASS_THROUGH'}
        print("END")
        time.sleep(1)
        for img in bpy.data.images:
            img.reload()
        return {'FINISHED'}

    def cancel(self, context):
        self.t.stop = True
        wm = context.window_manager
        wm.event_timer_remove(self._timer)
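
    # Lifecycle: invoke() validates the settings, pings the server, rebuilds
    # the compositor tree, adds a tracking camera and starts the SDProcessor
    # worker, then returns 'RUNNING_MODAL'. Blender keeps calling modal() on
    # every timer tick until the worker finishes ('FINISHED') or the user
    # presses ESC ('CANCELLED').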

    def invoke(self, context, event):
        print("Invoked")
        self.progress = 0.0
        scene = context.scene
        self.sd_tool = scene.sd_txt_tool

        for value_name in ("prompt", "target", "out_dir"):
            if not getattr(self.sd_tool, value_name):
                self.report({"ERROR"}, F"{value_name} value cannot be empty")
                return {'CANCELLED'}

        if self.sd_tool.out_dir and self.sd_tool.out_dir != "cwd":
            self.out_path = pathlib.Path(self.sd_tool.out_dir)
        else:
            self.out_path = pathlib.Path(bpy.path.abspath("//"))

        if not self.out_path.exists() or not self.out_path.is_dir():
            self.report({"ERROR"}, F"Output directory '{self.out_path}' is not correct")
            return {'CANCELLED'}

        self.tmp_path = self.out_path / "tmp"
        self.result_path = self.out_path / "result"
        self.tmp_path.mkdir(exist_ok=True)
        self.result_path.mkdir(exist_ok=True)

        if self.sd_tool.out_txt == "cwd" + os.sep + "txt.png" or not self.sd_tool.out_txt:
            self.txt_path = str(self.result_path / "txt.png")
        else:
            self.txt_path = self.sd_tool.out_txt

        self.api_url = F"http://{self.sd_tool.host}:{self.sd_tool.port}"
        try:
            requests.get(self.api_url + "/status", json={}, timeout=1)
        except (requests.exceptions.ConnectionError, requests.exceptions.Timeout):
            self.report({"ERROR"},
                        F"""Port {self.sd_tool.port} of host {self.sd_tool.host} is not open.
Start the Stable Diffusion server by:
- Executing the file start_sd_server.py
- SD Server Settings >> Run SD Server
If the server is running, make sure that the port and host of the SD server are correct.""")
            return {'CANCELLED'}

        bpy.context.window_manager.progress_begin(0, 100)
        bpy.context.window_manager.progress_update(0)

        self.setup_composition_nodes_and_material()

        bpy.context.window_manager.progress_update(0)

        bpy.ops.object.camera_add(enter_editmode=False)
        camera = bpy.context.object
        bpy.context.scene.camera = camera

        # Add a Track To constraint so the camera always points at the target
        track_to = camera.constraints.new('TRACK_TO')
        track_to.target = self.sd_tool.target
        track_to.track_axis = 'TRACK_NEGATIVE_Z'
        track_to.up_axis = 'UP_Y'

        self.t_cache.append(self.t)
        self.t = SDProcessor(data=self.generate_data(),
                             api_url=self.api_url,
                             resolution_x=self.sd_tool.resolution_x, resolution_y=self.sd_tool.resolution_y,
                             camera_r=self.sd_tool.camera_r, camera_z=self.sd_tool.camera_z,
                             wm=bpy.context.window_manager,
                             views_num=self.sd_tool.views_num,
                             camera=camera)

        self.t.start()
        self._timer = bpy.context.window_manager.event_timer_add(1, window=context.window)
        context.window_manager.modal_handler_add(self)
        return {'RUNNING_MODAL'}
--------------------------------------------------------------------------------