├── .gitignore
├── LICENSE
├── README.md
├── composable_lora.py
└── scripts
    └── composable_lora_script.py
--------------------------------------------------------------------------------
/.gitignore:
--------------------------------------------------------------------------------
# Byte-compiled / optimized / DLL files
__pycache__/
*.py[cod]
*$py.class

# C extensions
*.so

# Distribution / packaging
.Python
build/
develop-eggs/
dist/
downloads/
eggs/
.eggs/
lib/
lib64/
parts/
sdist/
var/
wheels/
pip-wheel-metadata/
share/python-wheels/
*.egg-info/
.installed.cfg
*.egg
MANIFEST

# PyInstaller
#  Usually these files are written by a python script from a template
#  before PyInstaller builds the exe, so as to inject date/other infos into it.
*.manifest
*.spec

# Installer logs
pip-log.txt
pip-delete-this-directory.txt

# Unit test / coverage reports
htmlcov/
.tox/
.nox/
.coverage
.coverage.*
.cache
nosetests.xml
coverage.xml
*.cover
*.py,cover
.hypothesis/
.pytest_cache/

# Translations
*.mo
*.pot

# Django stuff:
*.log
local_settings.py
db.sqlite3
db.sqlite3-journal

# Flask stuff:
instance/
.webassets-cache

# Scrapy stuff:
.scrapy

# Sphinx documentation
docs/_build/

# PyBuilder
target/

# Jupyter Notebook
.ipynb_checkpoints

# IPython
profile_default/
ipython_config.py

# pyenv
.python-version

# pipenv
#   According to pypa/pipenv#598, it is recommended to include Pipfile.lock in version control.
#   However, in case of collaboration, if having platform-specific dependencies or dependencies
#   having no cross-platform support, pipenv may install dependencies that don't work, or not
#   install all needed dependencies.
#Pipfile.lock

# PEP 582; used by e.g. github.com/David-OConnor/pyflow
__pypackages__/

# Celery stuff
celerybeat-schedule
celerybeat.pid

# SageMath parsed files
*.sage.py

# Environments
.env
.venv
env/
venv/
ENV/
env.bak/
venv.bak/

# Spyder project settings
.spyderproject
.spyproject

# Rope project settings
.ropeproject

# mkdocs documentation
/site

# mypy
.mypy_cache/
.dmypy.json
dmypy.json

# Pyre type checker
.pyre/
--------------------------------------------------------------------------------
/LICENSE:
--------------------------------------------------------------------------------
MIT License

Copyright (c) 2023 opparco

Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:

The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.

THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
--------------------------------------------------------------------------------
/README.md:
--------------------------------------------------------------------------------
# Composable LoRA
This extension replaces the built-in LoRA forward procedure.

## Features
### Compatible with Composable-Diffusion
By associating each LoRA with its position in the prompt relative to the "AND" syntax, a LoRA's influence is limited to the subprompt in which it appears. For example, in `forest <lora:treeA:0.8> AND castle <lora:wallB:0.5>` (placeholder names), each LoRA affects only its own subprompt.

### Eliminate the impact on negative prompts
With the built-in LoRA support, the negative prompt is always affected by every loaded LoRA, which often degrades the output. This extension therefore offers options to exclude LoRA from the negative prompt.

## How to use
### Enabled
When checked, Composable LoRA is enabled.

### Use Lora in uc text model encoder
Apply LoRA to the unconditional (negative prompt) pass of the text model encoder.
With this disabled, you can expect better output.

### Use Lora in uc diffusion model
Apply LoRA to the unconditional (negative prompt) pass of the diffusion model (denoiser).
With this disabled, you can expect better output.

## Compatibility
`--always-batch-cond-uncond` must be enabled when `--medvram` or `--lowvram` is used.
--------------------------------------------------------------------------------
/composable_lora.py:
--------------------------------------------------------------------------------
from typing import List, Dict
import re
import torch

from modules import extra_networks, shared

re_AND = re.compile(r"\bAND\b")


def load_prompt_loras(prompt: str):
    """Parse the positive prompt and cache one {lora_name: multiplier} dict per subprompt."""
    prompt_loras.clear()
    subprompts = re_AND.split(prompt)
    tmp_prompt_loras = []
    for subprompt in subprompts:
        loras = {}
        _, extra_network_data = extra_networks.parse_prompt(subprompt)
        for params in extra_network_data['lora']:
            name = params.items[0]
            multiplier = float(params.items[1]) if len(params.items) > 1 else 1.0
            loras[name] = multiplier

        tmp_prompt_loras.append(loras)
    prompt_loras.extend(tmp_prompt_loras * num_batches)
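
# A worked example of the resulting layout (hypothetical LoRA names): with
# num_batches == 2 and the prompt "forest <lora:treeA:0.8> AND castle <lora:wallB:0.5>",
# prompt_loras becomes
#     [{"treeA": 0.8}, {"wallB": 0.5}, {"treeA": 0.8}, {"wallB": 0.5}]
# i.e. one name -> multiplier dict per subprompt, repeated once per batch.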


def reset_counters():
    global text_model_encoder_counter
    global diffusion_model_counter

    # reset counters to the uc head
    text_model_encoder_counter = -1
    diffusion_model_counter = 0


def lora_forward(compvis_module, input, res):
    global text_model_encoder_counter
    global diffusion_model_counter

    import lora  # the web UI's built-in LoRA extension

    if len(lora.loaded_loras) == 0:
        return res

    lora_layer_name: str | None = getattr(compvis_module, 'lora_layer_name', None)
    if lora_layer_name is None:
        return res

    num_loras = len(lora.loaded_loras)
    num_prompts = len(prompt_loras)
    if text_model_encoder_counter == -1:
        text_model_encoder_counter = len(prompt_loras) * num_loras

    # print(f"lora.forward lora_layer_name={lora_layer_name} in.shape={input.shape} res.shape={res.shape} num_batches={num_batches} num_prompts={num_prompts}")

    # the loop variable is named loaded_lora so it does not shadow the lora module
    for loaded_lora in lora.loaded_loras:
        module = loaded_lora.modules.get(lora_layer_name, None)
        if module is None:
            continue

        if shared.opts.lora_apply_to_outputs and res.shape == input.shape:
            patch = module.up(module.down(res))
        else:
            patch = module.up(module.down(input))

        alpha = module.alpha / module.up.weight.shape[1] if module.alpha else 1.0

        # print(f"lora.name={loaded_lora.name} lora.mul={loaded_lora.multiplier} alpha={alpha} pat.shape={patch.shape}")

        if enabled:
            if lora_layer_name.startswith("transformer_"):  # "transformer_text_model_encoder_"
                if 0 <= text_model_encoder_counter // num_loras < len(prompt_loras):
                    # c: apply the multiplier of the current subprompt
                    loras = prompt_loras[text_model_encoder_counter // num_loras]
                    multiplier = loras.get(loaded_lora.name, 0.0)
                    if multiplier != 0.0:
                        # print(f"c #{text_model_encoder_counter // num_loras} lora.name={loaded_lora.name} mul={multiplier}")
                        res += multiplier * alpha * patch
                else:
                    # uc: negative prompt pass
                    if opt_uc_text_model_encoder and loaded_lora.multiplier != 0.0:
                        # print(f"uc #{text_model_encoder_counter // num_loras} lora.name={loaded_lora.name} lora.mul={loaded_lora.multiplier}")
                        res += loaded_lora.multiplier * alpha * patch

                if lora_layer_name.endswith("_11_mlp_fc2"):  # last lora_layer_name of the text model encoder
                    text_model_encoder_counter += 1
                    # c1 c1 c2 c2 .. .. uc uc
                    if text_model_encoder_counter == (len(prompt_loras) + num_batches) * num_loras:
                        text_model_encoder_counter = 0

            elif lora_layer_name.startswith("diffusion_model_"):
                # number of prompts handled by this forward pass; also needed by the
                # counter update below, so compute it before branching
                cur_num_prompts = res.shape[0]

                if res.shape[0] == num_batches * num_prompts + num_batches:
                    # tensor.shape[1] == uncond.shape[1]: cond and uncond are batched together
                    tensor_off = 0
                    uncond_off = num_batches * num_prompts
                    for b in range(num_batches):
                        # c
                        for p, loras in enumerate(prompt_loras):
                            multiplier = loras.get(loaded_lora.name, 0.0)
                            if multiplier != 0.0:
                                # print(f"tensor #{b}.{p} lora.name={loaded_lora.name} mul={multiplier}")
                                res[tensor_off] += multiplier * alpha * patch[tensor_off]
                            tensor_off += 1

                        # uc
                        if opt_uc_diffusion_model and loaded_lora.multiplier != 0.0:
                            # print(f"uncond lora.name={loaded_lora.name} lora.mul={loaded_lora.multiplier}")
                            res[uncond_off] += loaded_lora.multiplier * alpha * patch[uncond_off]
                        uncond_off += 1
                else:
                    # tensor.shape[1] != uncond.shape[1]: cond and uncond run in separate passes
                    base = (diffusion_model_counter // cur_num_prompts) // num_loras * cur_num_prompts
                    if 0 <= base < len(prompt_loras):
                        # c
                        for off in range(cur_num_prompts):
                            loras = prompt_loras[base + off]
                            multiplier = loras.get(loaded_lora.name, 0.0)
                            if multiplier != 0.0:
                                # print(f"c #{base + off} {lora_layer_name} lora.name={loaded_lora.name} mul={multiplier}")
                                res[off] += multiplier * alpha * patch[off]
                    else:
                        # uc
                        if opt_uc_diffusion_model and loaded_lora.multiplier != 0.0:
                            # print(f"uc {lora_layer_name} lora.name={loaded_lora.name} lora.mul={loaded_lora.multiplier}")
                            res += loaded_lora.multiplier * alpha * patch

                if lora_layer_name.endswith("_11_1_proj_out"):  # last lora_layer_name of the diffusion model
                    diffusion_model_counter += cur_num_prompts
                    # c1 c2 .. uc
                    if diffusion_model_counter >= (len(prompt_loras) + num_batches) * num_loras:
                        diffusion_model_counter = 0
            else:
                # default: layer belongs to neither stack, fall back to the built-in behaviour
                if loaded_lora.multiplier != 0.0:
                    # print(f"default {lora_layer_name} lora.name={loaded_lora.name} lora.mul={loaded_lora.multiplier}")
                    res += loaded_lora.multiplier * alpha * patch
        else:
            # default: extension disabled, reproduce the built-in behaviour
            if loaded_lora.multiplier != 0.0:
                # print(f"DEFAULT {lora_layer_name} lora.name={loaded_lora.name} lora.mul={loaded_lora.multiplier}")
                res += loaded_lora.multiplier * alpha * patch

    return res
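
# A minimal sketch of the monkey-patching contract the two wrappers below rely
# on (installed by scripts/composable_lora_script.py): the original forwards
# are stashed on torch.nn once, then every Linear/Conv2d call is routed through
# lora_forward:
#
#     torch.nn.Linear_forward_before_lora = torch.nn.Linear.forward
#     torch.nn.Linear.forward = composable_lora.lora_Linear_forward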


def lora_Linear_forward(self, input):
    return lora_forward(self, input, torch.nn.Linear_forward_before_lora(self, input))


def lora_Conv2d_forward(self, input):
    return lora_forward(self, input, torch.nn.Conv2d_forward_before_lora(self, input))


# Shared state; set from scripts/composable_lora_script.py before each job.
enabled = False
opt_uc_text_model_encoder = False
opt_uc_diffusion_model = False
verbose = True

num_batches: int = 0
prompt_loras: List[Dict[str, float]] = []
text_model_encoder_counter: int = -1
diffusion_model_counter: int = 0
--------------------------------------------------------------------------------
/scripts/composable_lora_script.py:
--------------------------------------------------------------------------------
#
# Composable-Diffusion with Lora
#
import torch
import gradio as gr

import composable_lora
import modules.scripts as scripts
from modules import script_callbacks
from modules.processing import StableDiffusionProcessing


def unload():
    torch.nn.Linear.forward = torch.nn.Linear_forward_before_lora
    torch.nn.Conv2d.forward = torch.nn.Conv2d_forward_before_lora


if not hasattr(torch.nn, 'Linear_forward_before_lora'):
    torch.nn.Linear_forward_before_lora = torch.nn.Linear.forward

if not hasattr(torch.nn, 'Conv2d_forward_before_lora'):
    torch.nn.Conv2d_forward_before_lora = torch.nn.Conv2d.forward

torch.nn.Linear.forward = composable_lora.lora_Linear_forward
torch.nn.Conv2d.forward = composable_lora.lora_Conv2d_forward

script_callbacks.on_script_unloaded(unload)


class ComposableLoraScript(scripts.Script):
    def title(self):
        return "Composable Lora"

    def show(self, is_img2img):
        return scripts.AlwaysVisible

    def ui(self, is_img2img):
        with gr.Group():
            with gr.Accordion("Composable Lora", open=False):
                enabled = gr.Checkbox(value=False, label="Enabled")
                opt_uc_text_model_encoder = gr.Checkbox(value=False, label="Use Lora in uc text model encoder")
                opt_uc_diffusion_model = gr.Checkbox(value=False, label="Use Lora in uc diffusion model")

        return [enabled, opt_uc_text_model_encoder, opt_uc_diffusion_model]

    def process(self, p: StableDiffusionProcessing, enabled: bool, opt_uc_text_model_encoder: bool, opt_uc_diffusion_model: bool):
        composable_lora.enabled = enabled
        composable_lora.opt_uc_text_model_encoder = opt_uc_text_model_encoder
        composable_lora.opt_uc_diffusion_model = opt_uc_diffusion_model

        composable_lora.num_batches = p.batch_size

        prompt = p.all_prompts[0]
        composable_lora.load_prompt_loras(prompt)

    def process_batch(self, p: StableDiffusionProcessing, *args, **kwargs):
        composable_lora.reset_counters()
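
# Note on the callback lifecycle (as wired above): process() runs once per job
# and caches the batch size and per-subprompt multipliers, while process_batch()
# runs before every batch so the layer counters restart in sync with the new
# cond/uncond schedule.
--------------------------------------------------------------------------------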