├── requirements.txt ├── screenshot.png ├── LICENSE ├── run_lora_merger_windows.bat ├── run_lora_merger_linux.sh ├── .gitignore ├── README.md └── flux_lora_merger_gui.py /requirements.txt: -------------------------------------------------------------------------------- 1 | PyQt5>=5.15 2 | safetensors>=0.4.0 3 | tqdm>=4.60 -------------------------------------------------------------------------------- /screenshot.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/tarkansarim/Flux-Lora-Block-Merger/HEAD/screenshot.png -------------------------------------------------------------------------------- /LICENSE: -------------------------------------------------------------------------------- 1 | MIT License 2 | 3 | Copyright (c) 2025 tarkansarim 4 | 5 | Permission is hereby granted, free of charge, to any person obtaining a copy 6 | of this software and associated documentation files (the "Software"), to deal 7 | in the Software without restriction, including without limitation the rights 8 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell 9 | copies of the Software, and to permit persons to whom the Software is 10 | furnished to do so, subject to the following conditions: 11 | 12 | The above copyright notice and this permission notice shall be included in all 13 | copies or substantial portions of the Software. 14 | 15 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 16 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 17 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE 18 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 19 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, 20 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE 21 | SOFTWARE. 22 | -------------------------------------------------------------------------------- /run_lora_merger_windows.bat: -------------------------------------------------------------------------------- 1 | @echo off 2 | cd /d "%~dp0" 3 | 4 | set VENV_DIR=venv 5 | 6 | echo Checking Python... 7 | python --version || (echo ERROR: Python not found. Ensure it's installed/in PATH. & pause & exit /b 1) 8 | 9 | if not exist "%VENV_DIR%\Scripts\activate.bat" ( 10 | echo Creating virtual environment... 11 | python -m venv "%VENV_DIR%" || (echo ERROR: Failed to create venv. & pause & exit /b 1) 12 | ) else ( 13 | echo Virtual environment found. 14 | ) 15 | 16 | echo Activating virtual environment... 17 | call "%VENV_DIR%\Scripts\activate.bat" || (echo ERROR: Failed to activate venv. & pause & exit /b 1) 18 | 19 | echo Updating pip... 20 | python -m pip install --upgrade pip || (echo ERROR: Failed to update pip. & pause & exit /b 1) 21 | 22 | echo Checking PyTorch... 23 | pip show torch >nul 2>&1 24 | if errorlevel 1 ( 25 | echo Installing PyTorch... 26 | pip install torch torchvision torchaudio --index-url https://download.pytorch.org/whl/cpu || (echo ERROR: Failed to install PyTorch. & pause & exit /b 1) 27 | ) else ( 28 | echo PyTorch already installed. 29 | ) 30 | 31 | if exist requirements.txt ( 32 | echo Installing requirements.txt... 33 | pip install -r requirements.txt || (echo ERROR: Failed to install dependencies. & pause & exit /b 1) 34 | ) else ( 35 | echo ERROR: requirements.txt missing. & pause & exit /b 1 36 | ) 37 | 38 | if exist flux_lora_merger_gui.py ( 39 | echo Running GUI... 
40 | python flux_lora_merger_gui.py || (echo ERROR: GUI script failed. & pause & exit /b 1) 41 | ) else ( 42 | echo ERROR: flux_lora_merger_gui.py missing. & pause & exit /b 1 43 | ) 44 | 45 | echo Script finished. 46 | pause 47 | exit /b 0 -------------------------------------------------------------------------------- /run_lora_merger_linux.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | 3 | # Get the directory where the script is located 4 | SCRIPT_DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" &> /dev/null && pwd )" 5 | echo "Script directory: $SCRIPT_DIR" 6 | 7 | # Define the virtual environment directory name within the script directory 8 | VENV_DIR="$SCRIPT_DIR/venv" 9 | echo "Virtual environment target: $VENV_DIR" 10 | 11 | # Create virtual environment if the activate script doesn't exist 12 | if [ ! -f "$VENV_DIR/bin/activate" ]; then 13 | echo "Creating virtual environment in $VENV_DIR..." 14 | python3 -m venv "$VENV_DIR" 15 | if [ $? -ne 0 ]; then 16 | echo "--------------------------------------------------" 17 | echo "ERROR: Failed to create virtual environment." 18 | echo "Ensure python3 and python3-venv package are installed." 19 | echo "--------------------------------------------------" 20 | exit 1 21 | fi 22 | else 23 | echo "Virtual environment found." 24 | fi 25 | 26 | # Activate virtual environment 27 | echo "Activating virtual environment..." 28 | source "$VENV_DIR/bin/activate" 29 | if [ $? -ne 0 ]; then 30 | echo "--------------------------------------------------" 31 | echo "ERROR: Failed to activate virtual environment." 32 | echo "--------------------------------------------------" 33 | exit 1 34 | fi 35 | 36 | # Ensure pip is up-to-date within the venv 37 | echo "Updating pip..." 38 | pip install --upgrade pip 39 | 40 | # Crucial Step: Remind user about PyTorch BEFORE installing others 41 | echo "--------------------------------------------------" 42 | echo "IMPORTANT: This script assumes PyTorch (appropriate CPU/CUDA version)" 43 | echo "has ALREADY been installed in the '$VENV_DIR' environment." 44 | echo "If not, please stop this script (Ctrl+C), install PyTorch manually:" 45 | echo " source \"$VENV_DIR/bin/activate\"" 46 | echo " pip install torch torchvision torchaudio <...options based on your system...>" 47 | echo " (See: https://pytorch.org/get-started/locally/)" 48 | echo "and then re-run this script." 49 | echo "--------------------------------------------------" 50 | # Optional: Add a short pause/prompt? 51 | # read -p "Press Enter to continue if PyTorch is installed, or Ctrl+C to exit..." 52 | 53 | # Install other dependencies from requirements.txt 54 | echo "Installing other dependencies (PyQt5, safetensors, tqdm)..." 55 | # Ensure we are in script dir to find requirements.txt reliably 56 | cd "$SCRIPT_DIR" 57 | pip install -r requirements.txt 58 | if [ $? -ne 0 ]; then 59 | echo "--------------------------------------------------" 60 | echo "ERROR: Failed to install dependencies from requirements.txt." 61 | echo "Check the file contents and your internet connection." 62 | echo "--------------------------------------------------" 63 | exit 1 64 | fi 65 | 66 | # Launch the GUI 67 | echo "Starting Flux LoRA Merger GUI..." 68 | python3 flux_lora_merger_gui.py 69 | if [ $? -ne 0 ]; then 70 | echo "--------------------------------------------------" 71 | echo "ERROR: Failed to run the Python script." 72 | echo "Check the script for errors or missing dependencies (like PyTorch)." 
73 | echo "--------------------------------------------------" 74 | exit 1 75 | fi 76 | 77 | echo "--------------------------------------------------" 78 | echo "GUI closed. Script finished." 79 | echo "To deactivate the virtual environment manually, type: deactivate" 80 | echo "--------------------------------------------------" 81 | 82 | exit 0 -------------------------------------------------------------------------------- /.gitignore: -------------------------------------------------------------------------------- 1 | # Byte-compiled / optimized / DLL files 2 | __pycache__/ 3 | *.py[cod] 4 | *$py.class 5 | 6 | # C extensions 7 | *.so 8 | 9 | # Distribution / packaging 10 | .Python 11 | build/ 12 | develop-eggs/ 13 | dist/ 14 | downloads/ 15 | eggs/ 16 | .eggs/ 17 | lib/ 18 | lib64/ 19 | parts/ 20 | sdist/ 21 | var/ 22 | wheels/ 23 | share/python-wheels/ 24 | *.egg-info/ 25 | .installed.cfg 26 | *.egg 27 | MANIFEST 28 | 29 | # PyInstaller 30 | # Usually these files are written by a python script from a template 31 | # before PyInstaller builds the exe, so as to inject date/other infos into it. 32 | *.manifest 33 | *.spec 34 | 35 | # Installer logs 36 | pip-log.txt 37 | pip-delete-this-directory.txt 38 | 39 | # Unit test / coverage reports 40 | htmlcov/ 41 | .tox/ 42 | .nox/ 43 | .coverage 44 | .coverage.* 45 | .cache 46 | nosetests.xml 47 | coverage.xml 48 | *.cover 49 | *.py,cover 50 | .hypothesis/ 51 | .pytest_cache/ 52 | cover/ 53 | 54 | # Translations 55 | *.mo 56 | *.pot 57 | 58 | # Django stuff: 59 | *.log 60 | local_settings.py 61 | db.sqlite3 62 | db.sqlite3-journal 63 | 64 | # Flask stuff: 65 | instance/ 66 | .webassets-cache 67 | 68 | # Scrapy stuff: 69 | .scrapy 70 | 71 | # Sphinx documentation 72 | docs/_build/ 73 | 74 | # PyBuilder 75 | .pybuilder/ 76 | target/ 77 | 78 | # Jupyter Notebook 79 | .ipynb_checkpoints 80 | 81 | # IPython 82 | profile_default/ 83 | ipython_config.py 84 | 85 | # pyenv 86 | # For a library or package, you might want to ignore these files since the code is 87 | # intended to run in multiple environments; otherwise, check them in: 88 | # .python-version 89 | 90 | # pipenv 91 | # According to pypa/pipenv#598, it is recommended to include Pipfile.lock in version control. 92 | # However, in case of collaboration, if having platform-specific dependencies or dependencies 93 | # having no cross-platform support, pipenv may install dependencies that don't work, or not 94 | # install all needed dependencies. 95 | #Pipfile.lock 96 | 97 | # UV 98 | # Similar to Pipfile.lock, it is generally recommended to include uv.lock in version control. 99 | # This is especially recommended for binary packages to ensure reproducibility, and is more 100 | # commonly ignored for libraries. 101 | #uv.lock 102 | 103 | # poetry 104 | # Similar to Pipfile.lock, it is generally recommended to include poetry.lock in version control. 105 | # This is especially recommended for binary packages to ensure reproducibility, and is more 106 | # commonly ignored for libraries. 107 | # https://python-poetry.org/docs/basic-usage/#commit-your-poetrylock-file-to-version-control 108 | #poetry.lock 109 | 110 | # pdm 111 | # Similar to Pipfile.lock, it is generally recommended to include pdm.lock in version control. 112 | #pdm.lock 113 | # pdm stores project-wide configurations in .pdm.toml, but it is recommended to not include it 114 | # in version control. 
115 | # https://pdm.fming.dev/latest/usage/project/#working-with-version-control 116 | .pdm.toml 117 | .pdm-python 118 | .pdm-build/ 119 | 120 | # PEP 582; used by e.g. github.com/David-OConnor/pyflow and github.com/pdm-project/pdm 121 | __pypackages__/ 122 | 123 | # Celery stuff 124 | celerybeat-schedule 125 | celerybeat.pid 126 | 127 | # SageMath parsed files 128 | *.sage.py 129 | 130 | # Environments 131 | .env 132 | .venv 133 | env/ 134 | venv/ 135 | ENV/ 136 | env.bak/ 137 | venv.bak/ 138 | 139 | # Spyder project settings 140 | .spyderproject 141 | .spyproject 142 | 143 | # Rope project settings 144 | .ropeproject 145 | 146 | # mkdocs documentation 147 | /site 148 | 149 | # mypy 150 | .mypy_cache/ 151 | .dmypy.json 152 | dmypy.json 153 | 154 | # Pyre type checker 155 | .pyre/ 156 | 157 | # pytype static type analyzer 158 | .pytype/ 159 | 160 | # Cython debug symbols 161 | cython_debug/ 162 | 163 | # PyCharm 164 | # JetBrains specific template is maintained in a separate JetBrains.gitignore that can 165 | # be found at https://github.com/github/gitignore/blob/main/Global/JetBrains.gitignore 166 | # and can be added to the global gitignore or merged into this file. For a more nuclear 167 | # option (not recommended) you can uncomment the following to ignore the entire idea folder. 168 | #.idea/ 169 | 170 | # Ruff stuff: 171 | .ruff_cache/ 172 | 173 | # PyPI configuration file 174 | .pypirc 175 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | # Flux LoRA Merger GUI (Rank Padding) 2 | 3 | A graphical user interface (GUI) built with PyQt5 for merging LoRA (Low-Rank Adaptation) `.safetensors` files, designed specifically for models following the Black Forest Labs Flux architecture (featuring double and single blocks). The tool handles LoRAs of different ranks by zero-padding the lower-rank LoRA to match the higher-rank one before performing a weighted average based on user-defined ratios. 4 | 5 | Repository: [https://github.com/tarkansarim/Flux-Lora-Block-Merger.git](https://github.com/tarkansarim/Flux-Lora-Block-Merger.git) 6 | 7 | ## Features 8 | 9 | * **Graphical Interface:** Easy-to-use interface for selecting LoRAs and setting merge parameters. 10 | * **Rank Mismatch Handling:** Uses zero-padding to merge LoRAs of different ranks, preserving the structure of the higher-rank LoRA (see the sketch below the feature list). 11 | * **Block-Level Control:** Individual sliders for all identified `double_blocks` (19) and `single_blocks` (38) in the Flux architecture, allowing fine-grained merging ratios. 12 | * **CLIP Ratio Control:** Separate ratio control for non-block weights (assumed to be Text Encoder/CLIP). The default ratio of 0.0 keeps LoRA 1's text-encoder weights entirely. 13 | * **Presets:** Quickly set all block sliders to favor LoRA 1 or LoRA 2, or target specific block types. 14 | * **Merge to Full Model (Optional):** Ability to merge the resulting LoRA directly into base UNet (Flux), CLIP-L, and T5-XXL models. Merged full models are saved to the *original base model directories*. 15 | * **Settings Persistence:** Remembers your last used file paths and slider settings between sessions. 16 | * **Installer Scripts:** Includes convenience scripts for Windows (`run_lora_merger_windows.bat`) and Linux (`run_lora_merger_linux.sh`) with robust error checking to set up a virtual environment and install dependencies using `requirements.txt`.
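**How rank padding works:** when both LoRAs contain the same module at different ranks, the lower-rank pair is zero-padded before averaging: `lora_down` (rank × in) gains zero rows and `lora_up` (out × rank) gains zero columns until both reach the higher rank. The padded dimensions contribute nothing to the product `up @ down`, so each LoRA's raw delta is preserved. A minimal standalone sketch of this step (illustrative tensor shapes; `pad_and_merge` is a name made up for this sketch — the actual implementation lives in `merge_loras` inside `flux_lora_merger_gui.py`):

```python
import torch

def pad_and_merge(d1, u1, d2, u2, ratio):
    """Zero-pad the lower-rank (down, up) pair to the higher rank, then blend."""
    target = max(d1.size(0), d2.size(0))

    def pad(down, up):
        r = down.size(0)
        if r < target:
            # Extra rank rows/columns are zero, so up @ down is unchanged.
            down = torch.cat([down, torch.zeros(target - r, down.size(1), dtype=down.dtype)], dim=0)
            up = torch.cat([up, torch.zeros(up.size(0), target - r, dtype=up.dtype)], dim=1)
        return down, up

    d1, u1 = pad(d1, u1)
    d2, u2 = pad(d2, u2)
    # 0.0 = 100% LoRA 1, 1.0 = 100% LoRA 2, matching the GUI sliders.
    return d1 * (1 - ratio) + d2 * ratio, u1 * (1 - ratio) + u2 * ratio

# Example: a rank-8 and a rank-16 LoRA module for the same 3072 -> 3072 linear layer.
d1, u1 = torch.randn(8, 3072), torch.randn(3072, 8)
d2, u2 = torch.randn(16, 3072), torch.randn(3072, 16)
down, up = pad_and_merge(d1, u1, d2, u2, ratio=0.25)
assert down.shape == (16, 3072) and up.shape == (3072, 16)
```

One subtlety: the saved `alpha` is blended with the same ratio while a padded module's rank grows to the padded size, so the `alpha / rank` scale applied when the merged LoRA is loaded can shift slightly for padded modules; comparing results at ratios 0.0 and 1.0 against the original LoRAs is a quick sanity check.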
17 | 18 | ## Screenshot 19 | 20 | 21 | ![Flux LoRA Merger GUI Screenshot](screenshot.png) 22 | 23 | ## Requirements 24 | 25 | * Python 3.8+ (Ensure Python is installed and added to your system PATH) 26 | * **PyTorch:** Must be installed (CPU or correct CUDA version). See installation steps. 27 | * Dependencies listed in `requirements.txt`: 28 | * PyQt5 29 | * safetensors 30 | * tqdm 31 | 32 | ## Installation & Usage (Windows - Recommended) 33 | 34 | 1. Download the repository files (e.g., using "Download ZIP" on GitHub or `git clone https://github.com/tarkansarim/Flux-Lora-Block-Merger.git`). 35 | 2. Place the files in a desired location. Ensure `flux_lora_merger_gui.py`, `run_lora_merger_windows.bat`, and `requirements.txt` are in the same folder. 36 | 3. Double-click the `run_lora_merger_windows.bat` file. 37 | 4. The script will: 38 | * Check if Python is accessible. 39 | * Create/activate a Python virtual environment named `venv` in the **same directory**. 40 | * Update pip within the venv. 41 | * Install a **CPU version** of PyTorch if not already present in the venv. 42 | * Check for `requirements.txt` and install other dependencies (`PyQt5`, `safetensors`, `tqdm`) from it. 43 | * Check for `flux_lora_merger_gui.py` and launch the GUI. 44 | * Pause with error messages if any step fails. 45 | 5. Follow the GUI Usage steps below. 46 | *(Note: If you need a GPU/CUDA version of PyTorch, manually install it within the activated `venv` *before* running the batch file. The script will detect it and skip the CPU installation. See [PyTorch installation guide](https://pytorch.org/get-started/locally/)).* 47 | 48 | ## Installation & Usage (Linux) 49 | 50 | 1. **Clone or Download:** Get the repository files. Ensure `flux_lora_merger_gui.py`, `run_lora_merger_linux.sh`, and `requirements.txt` are in the same folder. 51 | ```bash 52 | git clone https://github.com/tarkansarim/Flux-Lora-Block-Merger.git 53 | cd Flux-Lora-Block-Merger # Navigate to the repo folder 54 | ``` 55 | 2. **Create Venv & Install PyTorch Manually:** This step is crucial because the script doesn't handle PyTorch versions automatically. 56 | * Create the environment (in the current directory): `python3 -m venv venv` 57 | * Activate it: `source venv/bin/activate` 58 | * Install PyTorch (CPU or correct CUDA version): Follow official instructions at [https://pytorch.org/get-started/locally/](https://pytorch.org/get-started/locally/). *Ensure the `venv` is active during installation!* 59 | * (Optional: `deactivate` now if you wish) 60 | 3. **Make the script executable (first time only):** 61 | ```bash 62 | chmod +x run_lora_merger_linux.sh 63 | ``` 64 | 4. **Run the setup & launch script:** 65 | ```bash 66 | ./run_lora_merger_linux.sh 67 | ``` 68 | 5. This will: 69 | * Activate the `venv` in the current directory. 70 | * Update pip. 71 | * Install the *other* dependencies (`PyQt5`, `safetensors`, `tqdm`) from `requirements.txt`. 72 | * Launch the GUI (`flux_lora_merger_gui.py`), with error checking. 73 | 6. Follow the GUI Usage steps below. 74 | 75 | ## Installation & Usage (Manual - macOS/Other) 76 | 77 | 1. **Clone or Download:** Get the repository files. 78 | 2. **Create & Activate Virtual Environment (Recommended):** 79 | ```bash 80 | python3 -m venv venv # Creates venv in current dir 81 | # Activate: source venv/bin/activate 82 | ``` 83 | 3.
**Install Dependencies:** 84 | * **Install PyTorch first:** Follow official instructions at [https://pytorch.org/get-started/locally/](https://pytorch.org/get-started/locally/). 85 | * **Install other requirements:** 86 | ```bash 87 | pip install -r requirements.txt 88 | ``` 89 | 4. **Run the script:** 90 | ```bash 91 | python3 flux_lora_merger_gui.py 92 | ``` 93 | 5. Follow the GUI Usage steps below. 94 | 95 | ## Usage (GUI) 96 | 97 | 1. Use the "Browse" buttons to select LoRA 1 (Base) and LoRA 2 (Detail). 98 | 2. Use "Save As" for the output merged LoRA file. 99 | 3. Adjust block sliders (0.0 = 100% LoRA 1, 1.0 = 100% LoRA 2). 100 | 4. Adjust "CLIP Merge Ratio" for text encoder parts (Default: 0.0). 101 | 5. Use "Merge Preset" for quick settings. 102 | 6. **Optional Full Model Merge:** 103 | * Check the "Merge into Full Models..." box. 104 | * Select base UNet, CLIP-L, T5-XXL files. 105 | * Adjust "Strength Multiplier" (Default: 1.4). 106 | 7. Click "Merge and Save". 107 | 8. The merged LoRA is saved to the specified output path. Merged full models are saved in their **original base model directories**. 108 | 109 | ## License 110 | 111 | This project is licensed under the MIT License - see the [LICENSE](LICENSE) file for details. 112 | 113 | ## Contributing 114 | 115 | Feel free to open issues on GitHub for bug reports or feature requests. Pull requests are also welcome. -------------------------------------------------------------------------------- /flux_lora_merger_gui.py: -------------------------------------------------------------------------------- 1 | # --- START OF FILE flux_lora_merger_gui.py --- 2 | 3 | import sys 4 | import os 5 | import logging 6 | from PyQt5.QtWidgets import ( 7 | QApplication, QWidget, QLabel, QPushButton, QFileDialog, QVBoxLayout, 8 | QHBoxLayout, QLineEdit, QMessageBox, QScrollArea, QGridLayout, QDoubleSpinBox, 9 | QSlider, QComboBox, QCheckBox 10 | # QApplication is also used in the __main__ block below for window centering 11 | ) 12 | from PyQt5.QtCore import QSettings, Qt 13 | from safetensors.torch import load_file, save_file 14 | import torch 15 | from tqdm import tqdm # Progress bars for the merge loops 16 | 17 | # --- Basic Logging Setup --- 18 | logging.basicConfig(level=logging.INFO, format='%(asctime)s - %(levelname)s - %(message)s') 19 | logger = logging.getLogger(__name__) 20 | # --- End Logging Setup --- 21 | 22 | class LoraMerger(QWidget): 23 | # Single-window GUI: file pickers, per-block ratio sliders, presets, and the merge logic itself.
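# Merge math used by this class (descriptive summary of merge_loras and
# apply_lora_to_base below):
#   merged_down = down1 * (1 - r) + down2 * r  (likewise for up and alpha),
#   where r comes from the per-block sliders (0.0 = LoRA 1, 1.0 = LoRA 2) or
#   from the CLIP ratio for non-block weights; the optional full-model merge
#   then adds delta = (up @ down) * (alpha / rank) * strength to each matching
#   base weight.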
24 | # --- __init__ (Defaults: Strength=1.4, ClipRatio=0.0, Presets updated) --- 25 | def __init__(self): 26 | super().__init__() 27 | self.setWindowTitle("Flux LoRA Merger (Rank Padding)") 28 | self.settings = QSettings("BlackforestFluxTools", "LoraMergerRankPadding") 29 | self.last_dir = self.settings.value("last_dir", os.getcwd()) 30 | logger.info(f"Loaded last directory: {self.last_dir}") 31 | self.lora1_label = QLabel("LoRA 1 (Base):"); self.lora1_input = QLineEdit(self.settings.value("lora1_path", "")); self.lora1_browse = QPushButton("Browse") 32 | self.lora2_label = QLabel("LoRA 2 (Detail):"); self.lora2_input = QLineEdit(self.settings.value("lora2_path", "")); self.lora2_browse = QPushButton("Browse") 33 | self.output_label = QLabel("Output LoRA File:"); self.output_input = QLineEdit(self.settings.value("output_path", "")); self.output_browse = QPushButton("Save As") 34 | self.unet_label = QLabel("Base UNet Model (Optional):"); self.unet_input = QLineEdit(self.settings.value("unet_path", "")); self.unet_browse = QPushButton("Browse") 35 | self.clip_label = QLabel("Base CLIP-L Model (Optional):"); self.clip_input = QLineEdit(self.settings.value("clip_path", "")); self.clip_browse = QPushButton("Browse") 36 | self.t5xxl_label = QLabel("Base T5-XXL Model (Optional):"); self.t5xxl_input = QLineEdit(self.settings.value("t5xxl_path", "")); self.t5xxl_browse = QPushButton("Browse") 37 | self.preset_label = QLabel("Merge Preset:"); self.preset_dropdown = QComboBox(); self.preset_dropdown.addItems(["", "All Blocks LoRA 1", "All Blocks LoRA 2", "All Double Blocks 1", "All Double Blocks 0", "All Single Blocks 1", "All Single Blocks 0"]) 38 | self.strength_label = QLabel("Strength Multiplier (for Full Model Merge):"); self.strength_spinbox = QDoubleSpinBox(); self.strength_spinbox.setRange(0.0, 5.0); self.strength_spinbox.setSingleStep(0.1); self.strength_spinbox.setValue(float(self.settings.value("strength", 1.4))) # Default 1.4 39 | self.scale_lora_checkbox = QCheckBox("Scale Saved LoRA (Non-standard)"); self.scale_lora_checkbox.setToolTip("Applies strength multiplier directly to saved LoRA weights."); self.scale_lora_checkbox.setChecked(self.settings.value("scale_lora", False, type=bool)) 40 | self.clip_ratio_label = QLabel("CLIP Merge Ratio (0.0 = LoRA1, 1.0 = LoRA2):"); self.clip_ratio_spinbox = QDoubleSpinBox(); self.clip_ratio_spinbox.setRange(0.0, 1.0); self.clip_ratio_spinbox.setSingleStep(0.05); self.clip_ratio_spinbox.setValue(float(self.settings.value("clip_ratio", 0.0))) # Default 0.0 41 | self.merge_full_checkbox = QCheckBox("Merge into Full Models (UNet, CLIP-L, T5-XXL)"); self.merge_full_checkbox.setChecked(self.settings.value("merge_full", False, type=bool)) 42 | self.reset_button = QPushButton("Reset Sliders"); self.merge_button = QPushButton("Merge and Save") 43 | layout = QVBoxLayout() 44 | for label, input_field, button in [(self.lora1_label, self.lora1_input, self.lora1_browse), (self.lora2_label, self.lora2_input, self.lora2_browse), (self.output_label, self.output_input, self.output_browse), (self.unet_label, self.unet_input, self.unet_browse), (self.clip_label, self.clip_input, self.clip_browse), (self.t5xxl_label, self.t5xxl_input, self.t5xxl_browse)]: hbox = QHBoxLayout(); hbox.addWidget(label); hbox.addWidget(input_field); hbox.addWidget(button); layout.addLayout(hbox) 45 | preset_layout = QHBoxLayout(); preset_layout.addWidget(self.preset_label); preset_layout.addWidget(self.preset_dropdown); layout.addLayout(preset_layout) 46 | strength_layout = QHBoxLayout(); 
strength_layout.addWidget(self.strength_label); strength_layout.addWidget(self.strength_spinbox); layout.addLayout(strength_layout) 47 | scale_layout = QHBoxLayout(); scale_layout.addWidget(self.scale_lora_checkbox); layout.addLayout(scale_layout) 48 | clip_layout = QHBoxLayout(); clip_layout.addWidget(self.clip_ratio_label); clip_layout.addWidget(self.clip_ratio_spinbox); layout.addLayout(clip_layout) 49 | block_layout = QGridLayout(); block_layout.addWidget(QLabel("Block"), 0, 0); block_layout.addWidget(QLabel("Blend Ratio (0.0 = LoRA1, 1.0 = LoRA2)"), 0, 1); self.block_sliders = [] 50 | row = 1; double_block_count = 19; single_block_count = 38 51 | logger.info(f"Initializing sliders for {double_block_count} double blocks and {single_block_count} single blocks.") 52 | for block_type, count in [("double_blocks", double_block_count), ("single_blocks", single_block_count)]: 53 | for i in range(count): 54 | block_name = f"{block_type}_{i}"; label = QLabel(block_name); slider = QSlider(Qt.Horizontal); slider.setRange(0, 100); slider.setValue(int(self.settings.value(f"slider_{block_name}", 50))) 55 | spinbox = QDoubleSpinBox(); spinbox.setRange(0.0, 1.0); spinbox.setSingleStep(0.01); spinbox.setValue(slider.value() / 100.0) 56 | slider.valueChanged.connect(lambda val, sp=spinbox, name=block_name: self.update_slider(sp, name, val)); spinbox.valueChanged.connect(lambda val, s=slider, name=block_name: self.update_spinbox(s, name, val)) 57 | block_layout.addWidget(label, row, 0); block_layout.addWidget(slider, row, 1); block_layout.addWidget(spinbox, row, 2); self.block_sliders.append((block_name, slider, spinbox)); row += 1 58 | scroll = QScrollArea(); container = QWidget(); container.setLayout(block_layout); scroll.setWidget(container); scroll.setWidgetResizable(True); scroll.setMinimumHeight(300); layout.addWidget(scroll) 59 | button_layout = QHBoxLayout(); button_layout.addWidget(self.merge_full_checkbox); button_layout.addWidget(self.reset_button); button_layout.addWidget(self.merge_button); layout.addLayout(button_layout) 60 | self.setLayout(layout) 61 | self.lora1_browse.clicked.connect(lambda: self.load_file(self.lora1_input, "lora1_path")); self.lora2_browse.clicked.connect(lambda: self.load_file(self.lora2_input, "lora2_path")) 62 | self.output_browse.clicked.connect(lambda: self.save_file(self.output_input, "output_path")); self.unet_browse.clicked.connect(lambda: self.load_file(self.unet_input, "unet_path")) 63 | self.clip_browse.clicked.connect(lambda: self.load_file(self.clip_input, "clip_path")); self.t5xxl_browse.clicked.connect(lambda: self.load_file(self.t5xxl_input, "t5xxl_path")) 64 | self.merge_button.clicked.connect(self.merge_loras); self.reset_button.clicked.connect(self.reset_sliders); self.preset_dropdown.currentIndexChanged.connect(self.apply_preset) 65 | self.strength_spinbox.valueChanged.connect(lambda val: self.settings.setValue("strength", val)); self.scale_lora_checkbox.stateChanged.connect(lambda state: self.settings.setValue("scale_lora", bool(state))) 66 | self.clip_ratio_spinbox.valueChanged.connect(lambda val: self.settings.setValue("clip_ratio", val)); self.merge_full_checkbox.stateChanged.connect(lambda state: self.settings.setValue("merge_full", bool(state))) 67 | 68 | # --- load_file --- 69 | def load_file(self, line_edit, settings_key): 70 | file, _ = QFileDialog.getOpenFileName(self, "Select File", self.last_dir, "Safetensors files (*.safetensors)") 71 | if file: line_edit.setText(file); self.last_dir = os.path.dirname(file); 
self.settings.setValue("last_dir", self.last_dir); self.settings.setValue(settings_key, file); logger.info(f"Loaded '{settings_key}': {file}") 72 | 73 | # --- save_file --- 74 | def save_file(self, line_edit, settings_key): 75 | suggested_name = "" 76 | if not line_edit.text() and self.lora1_input.text() and self.lora2_input.text(): l1_base = os.path.splitext(os.path.basename(self.lora1_input.text()))[0]; l2_base = os.path.splitext(os.path.basename(self.lora2_input.text()))[0]; suggested_name = f"merged_padded_{l1_base}_{l2_base}.safetensors" 77 | default_path = os.path.join(self.last_dir, suggested_name) 78 | file, _ = QFileDialog.getSaveFileName(self, "Save Merged File", default_path, "Safetensors files (*.safetensors)") 79 | if file: 80 | if not file.endswith(".safetensors"): file += ".safetensors" 81 | line_edit.setText(file); self.last_dir = os.path.dirname(file); self.settings.setValue("last_dir", self.last_dir); self.settings.setValue(settings_key, file); logger.info(f"Set '{settings_key}': {file}") 82 | 83 | # --- update_slider --- 84 | def update_slider(self, spinbox, block_name, value): spinbox.setValue(value / 100.0); self.settings.setValue(f"slider_{block_name}", value) 85 | 86 | # --- update_spinbox --- 87 | def update_spinbox(self, slider, block_name, value): slider_value = int(value * 100); slider.setValue(slider_value) if slider.value() != slider_value else None; self.settings.setValue(f"slider_{block_name}", slider_value) 88 | 89 | # --- reset_sliders (Defaults: Strength=1.4, ClipRatio=0.0) --- 90 | def reset_sliders(self): 91 | logger.info("Resetting all settings to default."); default_slider_val = 50 92 | for _, slider, _ in self.block_sliders: slider.setValue(default_slider_val) 93 | self.clip_ratio_spinbox.setValue(0.0); self.strength_spinbox.setValue(1.4) # Defaults 94 | self.scale_lora_checkbox.setChecked(False); self.merge_full_checkbox.setChecked(False); self.preset_dropdown.setCurrentIndex(0) 95 | 96 | # --- apply_preset (Handles updated preset list) --- 97 | def apply_preset(self): 98 | preset = self.preset_dropdown.currentText(); target_val = 0 99 | if preset == "": return 100 | if "LoRA 2" in preset or "Blocks 1" in preset: target_val = 100 101 | logger.info(f"Applying preset: '{preset}' with target value {target_val}%") 102 | if "All Blocks" in preset: logger.info("Setting all block sliders."); [s.setValue(target_val) for _, s, _ in self.block_sliders] 103 | elif "All Double Blocks" in preset: logger.info("Setting double blocks to target, single blocks to 0."); [s.setValue(target_val if n.startswith("double") else 0) for n, s, _ in self.block_sliders] 104 | elif "All Single Blocks" in preset: logger.info("Setting single blocks to target, double blocks to 0."); [s.setValue(target_val if n.startswith("single") else 0) for n, s, _ in self.block_sliders] 105 | 106 | # --- get_block_from_lora_name --- 107 | def get_block_from_lora_name(self, lora_name): 108 | parts = lora_name.split('.'); relevant_part = parts[-1] 109 | for prefix in ["double_blocks_", "single_blocks_"]: 110 | if prefix in relevant_part: sub_parts = relevant_part.split(prefix); block_num_str = sub_parts[1].split('_')[0] if len(sub_parts) > 1 else None; return f"{prefix}{block_num_str}" if block_num_str and block_num_str.isdigit() else None 111 | return None 112 | 113 | # --- has_unet_weights --- 114 | def has_unet_weights(self, lora_sd): return any(k.startswith(("lora_unet_", "lora_flux_")) for k in lora_sd.keys()) 115 | 116 | # --- has_clip_weights --- 117 | def has_clip_weights(self, lora_sd): 
return any(k.startswith(("lora_te_", "lora_clip_l_", "lora_t5_")) for k in lora_sd.keys()) 118 | 119 | # --- apply_lora_to_base (Corrected Exception Handling) --- 120 | def apply_lora_to_base(self, lora_sd, base_sd, lora_name_to_base_key, strength_factor, working_device="cpu"): 121 | logger.info(f"Applying LoRA to base model on {working_device} with strength {strength_factor}") 122 | skipped_keys = []; applied_count = 0 123 | if not base_sd: logger.warning("Base state dict is empty during apply_lora_to_base."); return 124 | base_device = next(iter(base_sd.values())).device if base_sd else "cpu" 125 | for key in tqdm(list(lora_sd.keys()), desc="Applying LoRA to base"): 126 | if key.endswith(".lora_down.weight"): 127 | lora_name = key.replace(".lora_down.weight", ""); base_key = lora_name_to_base_key.get(lora_name) 128 | if base_key is None or base_key not in base_sd: 129 | if lora_name not in skipped_keys: logger.warning(f"Base key not found for LoRA '{lora_name}'. Skipping apply."); skipped_keys.append(lora_name) 130 | continue 131 | up_key = key.replace(".lora_down.weight", ".lora_up.weight"); alpha_key = lora_name + ".alpha" 132 | if up_key not in lora_sd: logger.warning(f"Missing up key '{up_key}' for '{lora_name}' during apply. Skipping."); skipped_keys.append(lora_name); continue 133 | try: # Try applying delta 134 | down_weight = lora_sd[key].to(working_device, dtype=torch.float32); up_weight = lora_sd[up_key].to(working_device, dtype=torch.float32) 135 | dim = down_weight.size(0); alpha = lora_sd.get(alpha_key, torch.tensor(dim)).item(); scale = alpha / dim if dim > 0 else 0 136 | if base_key not in base_sd: logger.warning(f"Base key '{base_key}' missing before applying delta. Skipping."); skipped_keys.append(lora_name); continue 137 | original_dtype = base_sd[base_key].dtype; weight = base_sd[base_key].to(working_device, dtype=torch.float32); delta = None 138 | if len(weight.size()) == 2: delta = (up_weight @ down_weight) * scale * strength_factor 139 | elif len(weight.size()) == 4 and down_weight.size()[2:4] == (1, 1): delta = (up_weight.squeeze(3).squeeze(2) @ down_weight.squeeze(3).squeeze(2)).unsqueeze(2).unsqueeze(3) * scale * strength_factor 140 | elif len(weight.size()) == 4: delta = torch.nn.functional.conv2d(down_weight.permute(1, 0, 2, 3), up_weight).permute(1, 0, 2, 3) * scale * strength_factor 141 | else: logger.warning(f"Unsupported weight dim {len(weight.size())} for key {base_key} during apply."); skipped_keys.append(lora_name); continue 142 | if delta is not None: weight += delta; base_sd[base_key] = weight.to(base_device, dtype=original_dtype); applied_count += 1 143 | else: logger.warning(f"Delta calculation resulted in None for key {base_key}."); skipped_keys.append(lora_name) 144 | except Exception as e: # Catch errors during delta application 145 | logger.error(f"Error applying LoRA delta to key {base_key}: {e}", exc_info=True); skipped_keys.append(lora_name) 146 | try: # Attempt to restore original weight ON ERROR 147 | if base_key in base_sd: base_sd[base_key] = base_sd[base_key].to(base_device, dtype=original_dtype) 148 | else: logger.warning(f"Could not attempt restore for {base_key}: Key missing.") 149 | except Exception as restore_e: logger.error(f"Failed to restore weight for key {base_key} after error: {restore_e}", exc_info=True) 150 | logger.info(f"Finished applying LoRA. Applied {applied_count} modules. 
Skipped/errored: {len(set(skipped_keys))} unique modules.") 151 | if skipped_keys: logger.warning(f"Unique skipped/errored LoRA modules during base apply: {list(set(skipped_keys))}") 152 | 153 | # --- Main Merge Logic (Includes Rank Padding) --- 154 | def merge_loras(self): 155 | lora1_path = self.lora1_input.text(); lora2_path = self.lora2_input.text(); output_path = self.output_input.text() 156 | if not (lora1_path and os.path.exists(lora1_path)): QMessageBox.warning(self, "Input Error", "Select valid LoRA 1."); return 157 | if not (lora2_path and os.path.exists(lora2_path)): QMessageBox.warning(self, "Input Error", "Select valid LoRA 2."); return 158 | if not output_path: self.save_file(self.output_input, "output_path"); output_path = self.output_input.text(); 159 | if not output_path: QMessageBox.warning(self, "Input Error", "Specify output file path."); return 160 | 161 | unet_path = self.unet_input.text(); clip_path = self.clip_input.text(); t5xxl_path = self.t5xxl_input.text() 162 | merge_full = self.merge_full_checkbox.isChecked(); scale_lora = self.scale_lora_checkbox.isChecked() 163 | 164 | try: 165 | logger.info(f"Loading LoRA 1: {lora1_path}"); lora_sd1 = load_file(lora1_path, device="cpu") 166 | logger.info(f"Loading LoRA 2: {lora2_path}"); lora_sd2 = load_file(lora2_path, device="cpu") 167 | 168 | if merge_full: # Base model check 169 | missing = []; has_unet = self.has_unet_weights(lora_sd1) or self.has_unet_weights(lora_sd2); has_clip = self.has_clip_weights(lora_sd1) or self.has_clip_weights(lora_sd2); has_t5 = any("lora_t5_" in k for k in set(lora_sd1.keys()).union(lora_sd2.keys())) 170 | if has_unet and not (unet_path and os.path.exists(unet_path)): missing.append("UNet (Flux)") 171 | if has_clip and not (clip_path and os.path.exists(clip_path)): missing.append("CLIP-L") 172 | if has_t5 and not (t5xxl_path and os.path.exists(t5xxl_path)): missing.append("T5-XXL") 173 | if missing: QMessageBox.warning(self, "Missing Base Models", f"Full merge checked, but missing:\n- {', '.join(missing)}"); return 174 | 175 | block_ratios = {name: spinbox.value() for name, _, spinbox in self.block_sliders} 176 | clip_ratio = self.clip_ratio_spinbox.value(); strength_factor = self.strength_spinbox.value() 177 | 178 | merged_data = {}; all_keys = set(lora_sd1.keys()).union(lora_sd2.keys()) 179 | padded_count = 0; skipped_pairs = [] 180 | 181 | logger.info("Starting LoRA merge process with rank padding...") 182 | for key in tqdm(all_keys, desc="Merging LoRA keys"): 183 | if key.endswith(".lora_down.weight"): 184 | name = key.replace(".lora_down.weight", ""); up_key = key.replace(".lora_down.weight", ".lora_up.weight"); alpha_key = name + ".alpha" 185 | pair1 = key in lora_sd1 and up_key in lora_sd1; pair2 = key in lora_sd2 and up_key in lora_sd2 186 | if not (pair1 or pair2): 187 | if name not in skipped_pairs: logger.warning(f"Skipping '{name}': Missing up/down pair."); skipped_pairs.append(name) 188 | continue 189 | block = self.get_block_from_lora_name(name); is_text = block is None; ratio = block_ratios.get(block, clip_ratio) if not is_text else clip_ratio 190 | t1d = lora_sd1.get(key); t1u = lora_sd1.get(up_key); t2d = lora_sd2.get(key); t2u = lora_sd2.get(up_key); a1 = lora_sd1.get(alpha_key); a2 = lora_sd2.get(alpha_key) 191 | eff_t1d, eff_t1u = t1d, t1u; eff_t2d, eff_t2u = t2d, t2u # Start with original tensors 192 | 193 | if t1d is not None and t2d is not None: # Both LoRAs have this module, check rank 194 | r1 = t1d.size(0); r2 = t2d.size(0) 195 | if r1 != r2: # Ranks differ, apply 
padding 196 | padded_count += 1; target = max(r1, r2) 197 | if r1 < r2: # Pad 1 to match 2 198 | eff_t1d = torch.zeros(target, t1d.size(1), dtype=t1d.dtype, device='cpu'); eff_t1d[:r1, :] = t1d 199 | eff_t1u = torch.zeros(t1u.size(0), target, dtype=t1u.dtype, device='cpu'); eff_t1u[:, :r1] = t1u 200 | else: # Pad 2 to match 1 (r2 < r1) 201 | eff_t2d = torch.zeros(target, t2d.size(1), dtype=t2d.dtype, device='cpu'); eff_t2d[:r2, :] = t2d 202 | eff_t2u = torch.zeros(t2u.size(0), target, dtype=t2u.dtype, device='cpu'); eff_t2u[:, :r2] = t2u 203 | 204 | down_w, up_w, alpha_v = None, None, None # Calculate final weights 205 | if eff_t1d is not None and eff_t2d is not None: # Average effective tensors 206 | down_w = eff_t1d*(1-ratio) + eff_t2d*ratio; up_w = eff_t1u*(1-ratio) + eff_t2u*ratio 207 | a1v = a1.item() if a1 is not None else t1d.size(0); a2v = a2.item() if a2 is not None else t2d.size(0); alpha_v = a1v*(1-ratio) + a2v*ratio 208 | elif pair1: # Only LoRA 1 has pair 209 | down_w = eff_t1d; up_w = eff_t1u; alpha_v = a1.item() if a1 is not None else eff_t1d.size(0) 210 | elif pair2: # Only LoRA 2 has pair 211 | down_w = eff_t2d; up_w = eff_t2u; alpha_v = a2.item() if a2 is not None else eff_t2d.size(0) 212 | else: continue # Should not happen if pair check passed 213 | 214 | if scale_lora: # Apply direct scaling if requested 215 | if down_w is not None: down_w *= strength_factor 216 | if up_w is not None: up_w *= strength_factor 217 | 218 | # Store results 219 | if down_w is not None: merged_data[key] = down_w 220 | if up_w is not None: merged_data[up_key] = up_w 221 | if alpha_v is not None: merged_data[alpha_key] = torch.tensor(alpha_v) 222 | 223 | if not merged_data: QMessageBox.critical(self, "Merge Error", "No mergeable keys found."); return # Final check 224 | logger.info(f"Merge complete. Padding applied to {padded_count} modules."); msg = f"Merged LoRA saved:\n{output_path}"; 225 | if skipped_pairs: logger.warning(f"Skipped {len(skipped_pairs)} modules missing pairs.") 226 | if padded_count > 0: msg += f"\n\nNote: Rank padding applied to {padded_count} modules." 
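# Why the zero-padding above is safe: the zero rows added to lora_down and the
# zero columns added to lora_up contribute nothing to the product up @ down,
# so padding leaves each LoRA's raw delta unchanged; only the ratio blend and
# the alpha/rank scaling applied downstream mix or rescale the result.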
227 | logger.info(f"Saving merged LoRA ({len(merged_data)} tensors) to: {output_path}"); save_file(merged_data, output_path) 228 | 229 | if merge_full: # Merge into Full Models (Save to Original Dirs) 230 | logger.info("Merging into full models..."); parts = []; dev = "cuda" if torch.cuda.is_available() else "cpu"; logger.info(f"Using device: {dev}") 231 | m_str = strength_factor if not scale_lora else 1.0 # Use strength only if LoRA wasn't scaled 232 | for base_path, prefix_list, out_name_part, model_name in [(unet_path, ["lora_unet_", "lora_flux_"], "unet", "UNet"), (clip_path, ["lora_te_", "lora_clip_l_"], "clip-l", "CLIP-L"), (t5xxl_path, ["lora_t5_"], "t5xxl", "T5-XXL")]: 233 | if base_path and os.path.exists(base_path): 234 | try: 235 | logger.info(f"Loading Base {model_name}: {base_path}"); base_sd = load_file(base_path, device="cpu") 236 | key_map = {f"{pfx}{k.replace('.weight', '').replace('.', '_')}": k for k in base_sd if k.endswith(".weight") for pfx in prefix_list} 237 | self.apply_lora_to_base(merged_data, base_sd, key_map, m_str, dev) 238 | lora_n = os.path.splitext(os.path.basename(output_path))[0]; out_dir = os.path.dirname(base_path) # Original dir 239 | out_path = os.path.join(out_dir, f"merged_{out_name_part}_{lora_n}.safetensors") 240 | logger.info(f"Saving merged {model_name} to: {out_path}"); save_file(base_sd, out_path); parts.append(f"{model_name}: {out_path}"); del base_sd 241 | except Exception as e: logger.error(f"Failed to merge {model_name}: {e}", exc_info=True); QMessageBox.warning(self, f"{model_name} Merge Error", f"Failed to merge {model_name}:\n{e}") 242 | if parts: msg += "\n\nFull models saved to original folders." 243 | 244 | logger.info("Saving final settings."); self.settings.setValue("lora1_path", lora1_path); self.settings.setValue("lora2_path", lora2_path); self.settings.setValue("output_path", output_path); self.settings.setValue("unet_path", unet_path); self.settings.setValue("clip_path", clip_path); self.settings.setValue("t5xxl_path", t5xxl_path); self.settings.setValue("strength", strength_factor); self.settings.setValue("scale_lora", scale_lora); self.settings.setValue("clip_ratio", clip_ratio); self.settings.setValue("merge_full", merge_full) 245 | QMessageBox.information(self, "Success", msg) 246 | 247 | except Exception as e: logger.error(f"Operation failed: {e}", exc_info=True); QMessageBox.critical(self, "Error", f"Failed:\n{e}\n\nCheck console log.") 248 | finally: logger.debug("Cleaning up tensors."); del lora_sd1, lora_sd2, merged_data; torch.cuda.empty_cache() if torch.cuda.is_available() else None 249 | 250 | # --- Main execution block --- 251 | if __name__ == "__main__": 252 | app = QApplication(sys.argv) 253 | app.setOrganizationName("BlackforestFluxTools"); app.setApplicationName("LoraMergerRankPadding") 254 | 255 | window = LoraMerger() 256 | 257 | # --- Center and Resize Window --- 258 | desired_width = 800 259 | desired_height = 750 # Adjusted slightly 260 | window.resize(desired_width, desired_height) 261 | 262 | try: 263 | # Get available screen geometry (excludes taskbar, etc.) 
264 | screen_geometry = app.primaryScreen().availableGeometry() 265 | screen_center = screen_geometry.center() 266 | 267 | # Get window frame geometry AFTER resizing 268 | frame_geometry = window.frameGeometry() 269 | frame_geometry.moveCenter(screen_center) # Move the frame's center 270 | 271 | # Move the window's top-left to the calculated frame top-left 272 | window.move(frame_geometry.topLeft()) 273 | logger.info(f"Window resized to {desired_width}x{desired_height} and centered.") 274 | except Exception as center_e: 275 | logger.warning(f"Could not center or resize window: {center_e}") 276 | # --- End Center and Resize --- 277 | 278 | window.show() 279 | sys.exit(app.exec_()) 280 | # --- END OF FILE --- --------------------------------------------------------------------------------