├── .gitignore
├── images
│   └── screenshot.jpg
├── Readme.md
├── old_sd_firstpasser
│   ├── ui.py
│   └── tools.py
└── scripts
    ├── old_sd_firstpasser_txt2img.py
    └── old_sd_firstpasser_img2img.py

--------------------------------------------------------------------------------
/.gitignore:
--------------------------------------------------------------------------------
__pycache__
.vscode
.directory

--------------------------------------------------------------------------------
/images/screenshot.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/light-and-ray/sd-webui-old-sd-firstpasser/HEAD/images/screenshot.jpg

--------------------------------------------------------------------------------
/Readme.md:
--------------------------------------------------------------------------------
# Old SD firstpasser

This is an extension for [AUTOMATIC1111/stable-diffusion-webui](https://github.com/AUTOMATIC1111/stable-diffusion-webui) which adds a selectable script to both txt2img and img2img that runs a first pass of the generation with an SD 1.x model. It is useful if you want to use LoRAs, embeddings, ControlNet, etc. made for SD 1.x or SDXL inside SDXL or SD3 Medium.

![](/images/screenshot.jpg)

Hires fix is supported.

For quick access to SD 1.x LoRAs in SDXL, I recommend adding `lora_show_all` to the quick settings.

Inspired by [X-Adapter](https://github.com/showlab/X-Adapter), which [also just runs a first pass of the generation with SD 1.5](https://github.com/showlab/X-Adapter/issues/25).

--------------------------------------------------------------------------------
/old_sd_firstpasser/ui.py:
--------------------------------------------------------------------------------
import json
import gradio as gr
from modules import ui_settings, shared
from old_sd_firstpasser.tools import quote_swap, NAME


def makeUI(script):
    with gr.Row():
        firstpass_steps = gr.Slider(
            label='Firstpass steps',
            value=20,
            step=1,
            minimum=1,
            maximum=150,
            elem_id="firstpass_steps"
        )
        firstpass_denoising = gr.Slider(label='Firstpass denoising',
            value=0.55, elem_id="firstpass_denoising",
            minimum=0.0, maximum=1.0, step=0.01
        )
    with gr.Row():
        firstpass_upscaler = gr.Dropdown(
            value="ESRGAN_4x",
            choices=[x.name for x in shared.sd_upscalers],
            label="Firstpass upscaler",
            elem_id="firstpass_upscaler",
        )
    with gr.Row():
        sd_1_checkpoint = ui_settings.create_setting_component('sd_model_checkpoint')
        sd_1_checkpoint.label = "Checkpoint for SD 1.x pass"
    with gr.Row():
        sdxl_checkpoint = ui_settings.create_setting_component('sd_model_checkpoint')
        sdxl_checkpoint.label = "Checkpoint for SDXL pass"
    with gr.Row():
        network_type = gr.Radio(value="Auto", choices=["Auto", "SD1", "SDXL"],
            label="Firstpass network type", info="Auto means guess by loras metadata. "
" 37 | "For ControlNet and other networks set it up manually", 38 | elem_classes=['compact-checkbox-group']) 39 | def get_infotext_field(d, field): 40 | if NAME in d: 41 | return d[NAME].get(field) 42 | 43 | script.infotext_fields = [ 44 | (firstpass_steps, lambda d: get_infotext_field(d, 'steps')), 45 | (firstpass_denoising, lambda d: get_infotext_field(d, 'denoising')), 46 | (firstpass_upscaler, lambda d: get_infotext_field(d, 'upscaler')), 47 | (sd_1_checkpoint, lambda d: get_infotext_field(d, 'model_sd1')), 48 | (sdxl_checkpoint, lambda d: get_infotext_field(d, 'model_sdxl')), 49 | (network_type, lambda d: get_infotext_field(d, 'network_type')), 50 | ] 51 | 52 | return [firstpass_steps, firstpass_denoising, firstpass_upscaler, sd_1_checkpoint, 53 | sdxl_checkpoint, network_type] 54 | 55 | 56 | def pares_infotext(infotext, params): 57 | try: 58 | params[NAME] = json.loads(params[NAME].translate(quote_swap)) 59 | except Exception: 60 | pass -------------------------------------------------------------------------------- /old_sd_firstpasser/tools.py: -------------------------------------------------------------------------------- 1 | import math, copy, re 2 | import gradio as gr 3 | from modules import shared, sd_models 4 | from modules.processing import (Processed, StableDiffusionProcessingTxt2Img, 5 | StableDiffusionProcessingImg2Img, StableDiffusionProcessing, 6 | ) 7 | import networks 8 | from modules import ui_extra_networks 9 | 10 | 11 | IS_WEBUI_1_9 = hasattr(shared.cmd_opts, 'unix_filenames_sanitization') 12 | quote_swap = str.maketrans('\'"', '"\'') 13 | 14 | 15 | def limitSizeByOneDimension(size: tuple, limit: int): 16 | w, h = size 17 | if h > w: 18 | if h > limit: 19 | w = limit / h * w 20 | h = limit 21 | else: 22 | if w > limit: 23 | h = limit / w * h 24 | w = limit 25 | 26 | return (int(w), int(h)) 27 | 28 | 29 | def getJobsCountTxt2Img(originalP: StableDiffusionProcessingTxt2Img) -> int: 30 | jobs = originalP.n_iter 31 | secondpass_count = originalP.batch_size * originalP.n_iter 32 | jobs += secondpass_count 33 | if originalP.enable_hr: 34 | jobs += secondpass_count 35 | return jobs 36 | 37 | 38 | def getTotalStepsTxt2Img(originalP: StableDiffusionProcessingTxt2Img, firstpass_steps: int, firstpass_denoising: float) -> int: 39 | totalSteps = firstpass_steps * originalP.n_iter 40 | secondpass_count = originalP.batch_size * originalP.n_iter 41 | totalSteps += secondpass_count * min(math.ceil(originalP.steps * firstpass_denoising + 1), originalP.steps) 42 | if originalP.enable_hr: 43 | totalSteps += secondpass_count * originalP.hr_second_pass_steps 44 | return totalSteps 45 | 46 | 47 | def getJobsCountImg2Img(originalP: StableDiffusionProcessingImg2Img) -> int: 48 | return 1 + originalP.n_iter 49 | 50 | 51 | def getTotalStepsImg2Img(originalP: StableDiffusionProcessingImg2Img, firstpass_steps: int, firstpass_denoising: float) -> int: 52 | totalSteps = min(math.ceil(firstpass_steps * originalP.denoising_strength + 1), firstpass_steps) 53 | totalSteps += originalP.n_iter * min(math.ceil(originalP.steps * firstpass_denoising + 1), originalP.steps) 54 | return totalSteps 55 | 56 | def getSecondPassBeginFromImg2Img(originalP: StableDiffusionProcessingImg2Img, firstpass_steps: int) -> int: 57 | totalSteps = min(math.ceil(firstpass_steps * originalP.denoising_strength + 1), firstpass_steps) 58 | return totalSteps 59 | 60 | 61 | def convert_txt2img_to_img2img(txt2img: StableDiffusionProcessingTxt2Img) -> StableDiffusionProcessingImg2Img: 62 | txt2imgKWArgs = {} 63 | txt2imgArgs = 
        'sd_model', 'outpath_samples', 'outpath_grids', 'prompt', 'negative_prompt', 'styles',
        'sampler_name', 'batch_size', 'n_iter', 'steps', 'cfg_scale', 'width', 'height', 'override_settings',
        'do_not_save_samples', *(['scheduler'] if IS_WEBUI_1_9 else []),
    ]
    for arg in txt2imgArgs:
        txt2imgKWArgs[arg] = getattr(txt2img, arg, None)

    img2imgKWArgs = {
        'init_images': [],
        'mask': None,
        'mask_blur': 2,
        'inpainting_fill': 0,
        'resize_mode': 0,
        'denoising_strength': 0.5,
        'image_cfg_scale': 1.5,
        'inpaint_full_res': False,
        'inpaint_full_res_padding': 90,
        'inpainting_mask_invert': False,
    }

    img2img = StableDiffusionProcessingImg2Img(**txt2imgKWArgs, **img2imgKWArgs)

    otherArgs = ['seed', 'subseed', 'subseed_strength', 'refiner_checkpoint',
        'refiner_switch_at', 'seed_resize_from_h', 'seed_resize_from_w', 'extra_generation_params']

    for arg in otherArgs:
        value = getattr(txt2img, arg, None)
        setattr(img2img, arg, value)

    return img2img


def interrupted():
    return shared.state.interrupted or getattr(shared.state, 'stopping_generation', False)


def _removeAllNetworksWithErrorsWarnings(string: str) -> str:
    resLines = []
    for line in string.split('\n'):
        if not line.startswith('Networks with errors:'):
            resLines.append(line)
    return '\n'.join(resLines)


def removeAllNetworksWithErrorsWarnings(processed: Processed):
    processed.comments = _removeAllNetworksWithErrorsWarnings(processed.comments)


NAME = "Old SD firstpasser"


def get_model_short_title(model_aliases):
    if model := sd_models.get_closet_checkpoint_match(model_aliases):
        return model.short_title
    return model_aliases


def getSDVersion(lora: str):
    lora_on_disk = networks.available_networks.get(lora)
    if not lora_on_disk:
        lora_on_disk = networks.available_network_aliases.get(lora)
    if not lora_on_disk:
        return None

    loraPage = None
    for page in ui_extra_networks.extra_pages:
        if page.title == "Lora":
            loraPage = page
            break
    assert loraPage

    for item in loraPage.list_items():
        if item['filename'] == lora_on_disk.filename:
            return item['sd_version']

    return None


def guessNetworkType(p: StableDiffusionProcessing):
    p = copy.copy(p)
    fullPrompt = shared.prompt_styles.apply_styles_to_prompt(p.prompt, p.styles)
    loras = re.findall('<lora:([^:>]+)', fullPrompt)
    # any SDXL lora in the prompt switches the firstpass to SDXL, otherwise SD1
    networkType = "SD1"
    for lora in loras:
        if getSDVersion(lora) == "SDXL":
            networkType = "SDXL"
    return networkType

--------------------------------------------------------------------------------
/scripts/old_sd_firstpasser_img2img.py:
--------------------------------------------------------------------------------
import copy
import json
from contextlib import closing
from PIL import Image
from modules import scripts, shared, sd_models
from modules.processing import (Processed, StableDiffusionProcessingImg2Img,
    process_images,
)
from old_sd_firstpasser.ui import makeUI
from old_sd_firstpasser.tools import (NAME, quote_swap, limitSizeByOneDimension,
    getJobsCountImg2Img, getTotalStepsImg2Img, getSecondPassBeginFromImg2Img,
    get_model_short_title, removeAllNetworksWithErrorsWarnings, guessNetworkType,
)

try:
    # InputAccordion is only available in webui >= 1.7
    from modules.ui_components import InputAccordion
except ImportError:
    InputAccordion = None


class ScriptSelectable(scripts.Script):
    def __init__(self):
        self.scriptsImages = []
        self.scriptsInfotexts = []
        self.originalUpscaler = None
        self.firstpass_upscaler = None
        self.total_tqdm_total = None
        self.total_tqdm_second_pass_begin_from = 0

    def title(self):
        return NAME

    def show(self, is_img2img):
        return is_img2img

    def ui(self, is_img2img):
        ui = makeUI(self)
        return ui

    def run(self, originalP: StableDiffusionProcessingImg2Img, firstpass_steps, firstpass_denoising,
            firstpass_upscaler, sd_1_checkpoint, sdxl_checkpoint, network_type):
        if network_type == "Auto":
            network_type = guessNetworkType(originalP)
        originalCheckpoint = (shared.opts.sd_model_checkpoint if 'sd_model_checkpoint' not in originalP.override_settings
                              else originalP.override_settings['sd_model_checkpoint'])
        self.originalUpscaler = shared.opts.upscaler_for_img2img
        try:
            shared.state.textinfo = "switching sd checkpoint"
            if network_type == "SD1":
                shared.opts.sd_model_checkpoint = sd_1_checkpoint
            else:  # SDXL
                shared.opts.sd_model_checkpoint = sdxl_checkpoint
            sd_models.reload_model_weights()

            originalP.do_not_save_grid = True

            originalP.extra_generation_params['Script'] = NAME
            originalP.extra_generation_params[NAME] = json.dumps({
                'steps': firstpass_steps,
                'denoising': firstpass_denoising,
                'upscaler': firstpass_upscaler,
                'model_sd1': get_model_short_title(sd_1_checkpoint),
                'model_sdxl': get_model_short_title(sdxl_checkpoint),
                'network_type': network_type,
            }).translate(quote_swap)

            img2imgP = copy.copy(originalP)
            if network_type == 'SD1':
                img2imgP.width, img2imgP.height = limitSizeByOneDimension((originalP.width, originalP.height), 512)
            else:  # SDXL
                img2imgP.width, img2imgP.height = limitSizeByOneDimension((originalP.width, originalP.height), 1024)
            img2imgP.steps = firstpass_steps
            img2imgP.batch_size = 1
            img2imgP.n_iter = 1
            img2imgP.override_settings['sd_vae'] = 'Automatic'

            if not originalP.init_images or not all(originalP.init_images):  # txt2img equivalent
                dummy_image = Image.new('RGB', (originalP.width, originalP.height))
                img2imgP.init_images = [dummy_image]
                img2imgP.image_mask = Image.new('L', (img2imgP.width, img2imgP.height), 255)
                img2imgP.inpaint_full_res = False
                img2imgP.inpainting_fill = 2  # latent noise
                img2imgP.denoising_strength = 1.0
                originalP.denoising_strength = 1.0
            shared.state.job_count = getJobsCountImg2Img(originalP)
            self.total_tqdm_total = getTotalStepsImg2Img(originalP, firstpass_steps, firstpass_denoising)
            self.total_tqdm_second_pass_begin_from = getSecondPassBeginFromImg2Img(originalP, firstpass_steps)
            shared.total_tqdm.updateTotal(self.total_tqdm_total)

            with closing(img2imgP):
                img2imgP.old_sd_firstpasser_prevent_recursion = True
                shared.state.textinfo = f"firstpassing with {network_type.lower()}"
                processed1: Processed = process_images(img2imgP)
                # throwing away all extra images, e.g. ControlNet preprocessor previews
                n = len(processed1.all_seeds)
                self.scriptsImages = processed1.images[n:]
                self.scriptsInfotexts = processed1.infotexts[n:]
                originalP.init_images = processed1.images[:n]
                originalP.denoising_strength = firstpass_denoising
                originalP.seed = processed1.all_seeds[0]
                originalP.subseed = processed1.all_subseeds[0]
        finally:
            shared.state.textinfo = "switching sd checkpoint"
            shared.opts.sd_model_checkpoint = originalCheckpoint
            sd_models.reload_model_weights()
        shared.state.textinfo = "generating"
        self.firstpass_upscaler = firstpass_upscaler
        originalP.selectable_old_sd_firstpasser_script = self


class ScriptBackground(scripts.Script):
    def title(self):
        return NAME + " background"

    def show(self, is_img2img):
        return scripts.AlwaysVisible if is_img2img else False

    def ui(self, is_img2img):
        return []

    def before_process(self, originalP: StableDiffusionProcessingImg2Img, *args):
        selectable: ScriptSelectable = getattr(originalP, 'selectable_old_sd_firstpasser_script', None)
        if selectable is None:
            return

        if 'upscaler_for_img2img' in originalP.override_settings:
            del originalP.override_settings['upscaler_for_img2img']
        shared.opts.upscaler_for_img2img = selectable.firstpass_upscaler

        shared.total_tqdm.updateTotal(selectable.total_tqdm_total)
        for _ in range(selectable.total_tqdm_second_pass_begin_from):
            shared.total_tqdm.update()

    def postprocess(self, originalP: StableDiffusionProcessingImg2Img, processed: Processed, *args):
        selectable: ScriptSelectable = getattr(originalP, 'selectable_old_sd_firstpasser_script', None)
        if selectable is None:
            return
        processed.images += selectable.scriptsImages
        processed.infotexts += selectable.scriptsInfotexts
        removeAllNetworksWithErrorsWarnings(processed)
        if selectable.originalUpscaler:
            shared.opts.upscaler_for_img2img = selectable.originalUpscaler

--------------------------------------------------------------------------------