The response has been limited to 50k tokens of the smallest files in the repo. You can remove this limitation by removing the max tokens filter.
├── .augmentignore
├── .cache
    ├── config
    │   └── .gitkeep
    ├── keras
    │   └── .gitkeep
    ├── nv
    │   └── .gitkeep
    ├── triton
    │   └── .gitkeep
    └── user
    │   └── .gitkeep
├── .dockerignore
├── .env
├── .gitattributes
├── .github
    ├── FUNDING.yml
    ├── dependabot.yml
    └── workflows
    │   ├── docker_publish.yml
    │   └── typos.yaml
├── .gitignore
├── .gitmodules
├── .hadolint.yml
├── .python-version
├── .release
├── .vscode
    └── settings.json
├── Dockerfile
├── LICENSE.md
├── README.md
├── SECURITY.md
├── _typos.toml
├── assets
    ├── js
    │   ├── localization.js
    │   └── script.js
    └── style.css
├── config example.toml
├── config_files
    └── accelerate
    │   ├── default_config.yaml
    │   └── runpod.yaml
├── dataset
    ├── images
    │   └── .gitkeep
    ├── logs
    │   └── .gitkeep
    ├── outputs
    │   └── .gitkeep
    └── regularization
    │   └── .gitkeep
├── docker-compose.yaml
├── docs
    ├── Finetuning
    │   └── top_level.md
    ├── Installation
    │   ├── pip_linux.md
    │   ├── pip_windows.md
    │   ├── uv_linux.md
    │   └── uv_windows.md
    ├── LoRA
    │   ├── options.md
    │   └── top_level.md
    ├── config_README-ja.md
    ├── fine_tune_README_ja.md
    ├── gen_img_README-ja.md
    ├── image_folder_structure.md
    ├── installation_docker.md
    ├── installation_novita.md
    ├── installation_runpod.md
    ├── train_README-ja.md
    ├── train_README-zh.md
    ├── train_README.md
    ├── train_db_README-ja.md
    ├── train_db_README-zh.md
    ├── train_lllite_README-ja.md
    ├── train_lllite_README.md
    ├── train_network_README-ja.md
    ├── train_network_README-zh.md
    ├── train_ti_README-ja.md
    └── troubleshooting_tesla_v100.md
├── examples
    ├── LoRA based finetuning 2 phase.ps1
    ├── caption.ps1
    ├── caption_subfolders.ps1
    ├── finetune_latent.ps1
    ├── kohya-1-folders.ps1
    ├── kohya-3-folders.ps1
    ├── kohya.ps1
    ├── kohya_finetune.ps1
    ├── kohya_new-v3.ps1
    ├── kohya_train_db_fixed_with-reg_SDv2 512 base.ps1
    ├── lucoris extract examples.txt
    ├── pull kohya_ss sd-scripts updates in.md
    ├── stable_cascade
    │   └── test.toml
    └── word_frequency.ps1
├── gui-uv.bat
├── gui-uv.sh
├── gui.bat
├── gui.ps1
├── gui.sh
├── kohya_gui.py
├── kohya_gui
    ├── __init__.py
    ├── basic_caption_gui.py
    ├── blip2_caption_gui.py
    ├── blip_caption_gui.py
    ├── class_accelerate_launch.py
    ├── class_advanced_training.py
    ├── class_basic_training.py
    ├── class_command_executor.py
    ├── class_configuration_file.py
    ├── class_flux1.py
    ├── class_folders.py
    ├── class_gui_config.py
    ├── class_huggingface.py
    ├── class_lora_tab.py
    ├── class_metadata.py
    ├── class_sample_images.py
    ├── class_sd3.py
    ├── class_sdxl_parameters.py
    ├── class_source_model.py
    ├── class_tensorboard.py
    ├── common_gui.py
    ├── convert_lcm_gui.py
    ├── convert_model_gui.py
    ├── custom_logging.py
    ├── dataset_balancing_gui.py
    ├── dreambooth_folder_creation_gui.py
    ├── dreambooth_gui.py
    ├── extract_lora_from_dylora_gui.py
    ├── extract_lora_gui.py
    ├── extract_lycoris_locon_gui.py
    ├── finetune_gui.py
    ├── flux_extract_lora_gui.py
    ├── flux_merge_lora_gui.py
    ├── git_caption_gui.py
    ├── group_images_gui.py
    ├── localization.py
    ├── localization_ext.py
    ├── lora_gui.py
    ├── manual_caption_gui.py
    ├── merge_lora_gui.py
    ├── merge_lycoris_gui.py
    ├── resize_lora_gui.py
    ├── sd_modeltype.py
    ├── svd_merge_lora_gui.py
    ├── textual_inversion_gui.py
    ├── utilities.py
    ├── verify_lora_gui.py
    └── wd14_caption_gui.py
├── localizations
    ├── Put localization files here.txt
    ├── chinese-sample.json
    ├── en-GB.json
    ├── zh-CN.json
    └── zh-TW.json
├── models
    └── .keep
├── presets
    ├── dreambooth
    │   ├── sd3_bdsqlsz_v1.json
    │   └── sd3_bdsqlsz_v2.json
    ├── finetune
    │   ├── SDXL - AI_Now PagedAdamW8bit v1.0.json
    │   ├── SDXL - Essenz series by AI_Characters_Training v1.0.json
    │   ├── adafactor.json
    │   ├── lion.json
    │   ├── prepare_presets.md
    │   └── user_presets
    │   │   └── .put your own presets here
    └── lora
    │   ├── SDXL - 1 image LoRA v1.0.json
    │   ├── SDXL - LoHA AI_Characters v1.0.json
    │   ├── SDXL - LoKR v1.0.json
    │   ├── SDXL - LoRA AI_Now ADamW v1.0.json
    │   ├── SDXL - LoRA AI_Now prodigy v1.0.json
    │   ├── SDXL - LoRA AI_characters standard v1.0.json
    │   ├── SDXL - LoRA AI_characters standard v1.1.json
    │   ├── SDXL - LoRA adafactor v1.0.json
    │   ├── SDXL - LoRA aitrepreneur clothing v1.0.json
    │   ├── SDXL - LoRA by malcolmrey training v1.0.json
    │   ├── SDXL - LoRA face dogu_cat v1.0.json
    │   ├── SDXL - LoRA finetuning phase 1_v1.1.json
    │   ├── SDXL - LoRA finetuning phase 2_v1.1.json
    │   ├── SDXL - LoRA kudou-reira dadaptadam v1.0.json
    │   ├── SDXL - LoRA kudou-reira dadaptadam v1.1.json
    │   ├── SDXL - LoRA kudou-reira prodigy v4.0.json
    │   ├── SDXL - edgLoRAXL AI_Now.json
    │   ├── SDXL - edgLoRAXL.json
    │   ├── flux1D - adamw8bit fp8.json
    │   ├── iA3-Prodigy-sd15.json
    │   ├── ia3-sd15.json
    │   ├── locon-dadaptation-sdxl.json
    │   ├── loha-sd15.json
    │   ├── lokr-sd15.json
    │   ├── prepare_presets.md
    │   ├── sd15 - EDG_LoConOptiSettings.json
    │   ├── sd15 - EDG_LoHaOptiSettings.json
    │   ├── sd15 - EDG_LoraOptiSettings.json
    │   ├── sd15 - GLoRA v1.0.json
    │   ├── sd15 - LoKR v1.0.json
    │   ├── sd15 - LoKr v1.1.json
    │   ├── sd15 - LoKr v2.0.json
    │   └── user_presets
    │       └── .put your own presets here
├── pyproject.toml
├── requirements.txt
├── requirements_ipex_xpu.txt
├── requirements_linux.txt
├── requirements_linux_ipex.txt
├── requirements_linux_rocm.txt
├── requirements_macos_amd64.txt
├── requirements_macos_arm64.txt
├── requirements_pytorch_windows.txt
├── requirements_runpod.txt
├── requirements_windows.txt
├── setup-3.10.bat
├── setup-runpod.sh
├── setup.bat
├── setup.ps1
├── setup.sh
├── setup
    ├── check_local_modules.py
    ├── create_user_files.py
    ├── debug_info.py
    ├── docker_setup.py
    ├── setup_common.py
    ├── setup_linux.py
    ├── setup_runpod.py
    ├── setup_windows.py
    ├── update_bitsandbytes.py
    └── validate_requirements.py
├── test
    ├── config
    │   ├── Diag-OFT-AdamW8bit-toml.json
    │   ├── DyLoRA-Adafactor-toml.json
    │   ├── LoKR-AdamW8bit-toml.json
    │   ├── SDXL-Standard-Adafactor.json
    │   ├── SDXL-Standard-AdamW.json
    │   ├── SDXL-Standard-AdamW8bit.json
    │   ├── Standard-AdamW.json
    │   ├── Standard-AdamW8bit.json
    │   ├── TI-AdamW8bit-SDXL.json
    │   ├── TI-AdamW8bit-toml.json
    │   ├── TI-AdamW8bit.json
    │   ├── dataset-finetune.toml
    │   ├── dataset-masked_loss.toml
    │   ├── dataset-multires.toml
    │   ├── dataset.toml
    │   ├── dreambooth-Adafactor.json
    │   ├── dreambooth-AdamW.json
    │   ├── dreambooth-AdamW8bit-masked_loss-toml.json
    │   ├── dreambooth-AdamW8bit-toml.json
    │   ├── dreambooth-AdamW8bit.json
    │   ├── dreambooth-DAdaptAdam.json
    │   ├── dreambooth-Prodigy-SDXL.json
    │   ├── dreambooth-Prodigy.json
    │   ├── dreambooth.json
    │   ├── finetune-AdamW-toml.json
    │   ├── finetune-AdamW.json
    │   ├── iA3-Prodigy.json
    │   ├── locon-Adafactor.json
    │   ├── locon-AdamW.json
    │   ├── locon-AdamW8bit-masked_loss-toml.json
    │   ├── locon-AdamW8bit-toml.json
    │   ├── locon-AdamW8bit.json
    │   ├── locon-Prodigy.json
    │   ├── loha-Prodigy.json
    │   ├── meta-1_lat.json
    │   └── t5clrs.json
    ├── ft
    │   └── .keep
    ├── img with spaces
    │   └── 10_darius kawasaki person
    │   │   ├── Dariusz_Zawadzki.jpg
    │   │   ├── Dariusz_Zawadzki.txt
    │   │   ├── Dariusz_Zawadzki_2.jpg
    │   │   ├── Dariusz_Zawadzki_2.txt
    │   │   ├── Dariusz_Zawadzki_3.jpg
    │   │   ├── Dariusz_Zawadzki_3.txt
    │   │   ├── Dariusz_Zawadzki_4.jpg
    │   │   ├── Dariusz_Zawadzki_4.txt
    │   │   ├── Dariusz_Zawadzki_5.jpg
    │   │   ├── Dariusz_Zawadzki_5.txt
    │   │   ├── Dariusz_Zawadzki_6.jpg
    │   │   ├── Dariusz_Zawadzki_6.txt
    │   │   ├── Dariusz_Zawadzki_7.jpg
    │   │   ├── Dariusz_Zawadzki_7.txt
    │   │   ├── Dariusz_Zawadzki_8.jpg
    │   │   └── Dariusz_Zawadzki_8.txt
    ├── img
    │   └── 10_darius kawasaki person
    │   │   ├── Dariusz_Zawadzki.jpg
    │   │   ├── Dariusz_Zawadzki.txt
    │   │   ├── Dariusz_Zawadzki_2.jpg
    │   │   ├── Dariusz_Zawadzki_2.txt
    │   │   ├── Dariusz_Zawadzki_3.jpg
    │   │   ├── Dariusz_Zawadzki_3.txt
    │   │   ├── Dariusz_Zawadzki_4.jpg
    │   │   ├── Dariusz_Zawadzki_4.txt
    │   │   ├── Dariusz_Zawadzki_5.jpg
    │   │   ├── Dariusz_Zawadzki_5.txt
    │   │   ├── Dariusz_Zawadzki_6.jpg
    │   │   ├── Dariusz_Zawadzki_6.txt
    │   │   ├── Dariusz_Zawadzki_7.jpg
    │   │   ├── Dariusz_Zawadzki_7.txt
    │   │   ├── Dariusz_Zawadzki_8.jpg
    │   │   └── Dariusz_Zawadzki_8.txt
    ├── log
    │   └── .keep
    ├── logs
    │   └── .keep
    └── masked_loss
    │   ├── Dariusz_Zawadzki.jpg
    │   ├── Dariusz_Zawadzki_2.jpg
    │   ├── Dariusz_Zawadzki_3.jpg
    │   ├── Dariusz_Zawadzki_4.jpg
    │   ├── Dariusz_Zawadzki_5.jpg
    │   ├── Dariusz_Zawadzki_6.jpg
    │   ├── Dariusz_Zawadzki_7.jpg
    │   └── Dariusz_Zawadzki_8.jpg
├── tools
    ├── analyse_loha.py
    ├── caption.py
    ├── caption_from_filename.py
    ├── cleanup_captions.py
    ├── convert_html_to_md.py
    ├── convert_images_to_hq_jpg.py
    ├── convert_images_to_webp.py
    ├── create_txt_from_images.py
    ├── crop_images_to_n_buckets.py
    ├── dummy_loha.py
    ├── extract loha and lora examples.txt
    ├── extract_locon.py
    ├── extract_loha_from_model.py
    ├── extract_lora_from_models-new.py
    ├── extract_model_difference.py
    ├── gradio_theme_builder.py
    ├── group_images.py
    ├── group_images_recommended_size.py
    ├── lcm_convert.py
    ├── lycoris_locon_extract.py
    ├── lycoris_utils.py
    ├── merge_lycoris.py
    ├── prepare_presets.py
    ├── prune.py
    ├── rename_depth_mask.py
    └── resize_lora.py
└── uv.lock


/.augmentignore:
--------------------------------------------------------------------------------
 1 | .env
 2 | .cache
 3 | .vscode
 4 | __pycache__
 5 | bitsandbytes_windows
 6 | cudnn_windows
 7 | data
 8 | dataset
 9 | docs
10 | examples
11 | outputs
12 | SmilingWolf
13 | test
14 | v2_inference
15 | venv


--------------------------------------------------------------------------------
/.cache/config/.gitkeep:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/bmaltais/kohya_ss/4161d1d80ad554f7801c584632665d6825994062/.cache/config/.gitkeep


--------------------------------------------------------------------------------
/.cache/keras/.gitkeep:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/bmaltais/kohya_ss/4161d1d80ad554f7801c584632665d6825994062/.cache/keras/.gitkeep


--------------------------------------------------------------------------------
/.cache/nv/.gitkeep:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/bmaltais/kohya_ss/4161d1d80ad554f7801c584632665d6825994062/.cache/nv/.gitkeep


--------------------------------------------------------------------------------
/.cache/triton/.gitkeep:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/bmaltais/kohya_ss/4161d1d80ad554f7801c584632665d6825994062/.cache/triton/.gitkeep


--------------------------------------------------------------------------------
/.cache/user/.gitkeep:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/bmaltais/kohya_ss/4161d1d80ad554f7801c584632665d6825994062/.cache/user/.gitkeep


--------------------------------------------------------------------------------
/.dockerignore:
--------------------------------------------------------------------------------
 1 | .cache/
 2 | cudnn_windows/
 3 | bitsandbytes_windows/
 4 | bitsandbytes_windows_deprecated/
 5 | dataset/
 6 | models/
 7 | __pycache__/
 8 | venv/
 9 | **/.hadolint.yml
10 | **/*.log
11 | **/.git
12 | **/.gitignore
13 | **/.env
14 | **/.github
15 | **/.vscode
16 | **/*.ps1
17 | 


--------------------------------------------------------------------------------
/.env:
--------------------------------------------------------------------------------
1 | TENSORBOARD_PORT=6006
2 | 


--------------------------------------------------------------------------------
/.gitattributes:
--------------------------------------------------------------------------------
1 | *.sh text eol=lf
2 | *.ps1 text eol=crlf
3 | *.bat text eol=crlf
4 | *.cmd text eol=crlf


--------------------------------------------------------------------------------
/.github/FUNDING.yml:
--------------------------------------------------------------------------------
1 | # These are supported funding model platforms
2 | 
3 | github: [bmaltais]
4 | 


--------------------------------------------------------------------------------
/.github/dependabot.yml:
--------------------------------------------------------------------------------
1 | ---
2 | version: 2
3 | updates:
4 |   - package-ecosystem: "github-actions"
5 |     directory: "/"
6 |     schedule:
7 |       interval: "monthly"
8 | 


--------------------------------------------------------------------------------
/.github/workflows/typos.yaml:
--------------------------------------------------------------------------------
 1 | ---
 2 | # yamllint disable rule:line-length
 3 | name: Typos
 4 | 
 5 | on:  # yamllint disable-line rule:truthy
 6 |   push:
 7 |   pull_request:
 8 |     types:
 9 |       - opened
10 |       - synchronize
11 |       - reopened
12 | 
13 | jobs:
14 |   build:
15 |     runs-on: ubuntu-latest
16 | 
17 |     steps:
18 |       - uses: actions/checkout@v4
19 | 
20 |       - name: typos-action
21 |         uses: crate-ci/typos@v1.32.0
22 | 


--------------------------------------------------------------------------------
/.gitignore:
--------------------------------------------------------------------------------
 1 | # Python
 2 | .venv
 3 | venv
 4 | venv2
 5 | __pycache__
 6 | *.egg-info
 7 | build
 8 | wd14_tagger_model
 9 | 
10 | # IDE and Editor specific
11 | .vscode
12 | 
13 | # CUDNN for Windows
14 | cudnn_windows
15 | 
16 | # Cache and temporary files
17 | .cache
18 | .DS_Store
19 | 
20 | # Scripts and executables
21 | locon
22 | gui-user.bat
23 | gui-user.ps1
24 | 
25 | # Version control
26 | SmilingWolf
27 | wandb
28 | 
29 | # Setup and logs
30 | setup.log
31 | logs
32 | 
33 | # Miscellaneous
34 | uninstall.txt
35 | 
36 | # Test files
37 | test/output
38 | test/log*
39 | test/*.json
40 | test/ft
41 | 
42 | # Temporary requirements
43 | requirements_tmp_for_setup.txt
44 | 
45 | *.npz
46 | presets/*/user_presets/*
47 | inputs
48 | outputs
49 | dataset/**
50 | !dataset/**/
51 | !dataset/**/.gitkeep
52 | models
53 | data
54 | config.toml
55 | sd-scripts
56 | venv
57 | venv*
58 | .python-version


--------------------------------------------------------------------------------
/.gitmodules:
--------------------------------------------------------------------------------
1 | [submodule "sd-scripts"]
2 |     path = sd-scripts
3 |     url = https://github.com/kohya-ss/sd-scripts.git


--------------------------------------------------------------------------------
/.hadolint.yml:
--------------------------------------------------------------------------------
1 | ignored:
2 |   - DL3042 # Avoid use of cache directory with pip. Use `pip install --no-cache-dir <package>`
3 |   - DL3013 # Pin versions in pip. Instead of `pip install <package>` use `pip install <package>==<version>`
4 |   - DL3008 # Pin versions in apt get install. Instead of `apt-get install <package>` use `apt-get install <package>=<version>`
5 |   - DL4006 # Set the SHELL option -o pipefail before RUN with a pipe in it
6 |   - SC2015 # Note that A && B || C is not if-then-else. C may run when A is true.


--------------------------------------------------------------------------------
/.python-version:
--------------------------------------------------------------------------------
1 | 3.11
2 | 


--------------------------------------------------------------------------------
/.release:
--------------------------------------------------------------------------------
1 | v25.2.1
2 | 


--------------------------------------------------------------------------------
/.vscode/settings.json:
--------------------------------------------------------------------------------
1 | {
2 |     "python.linting.enabled": true,
3 |     "python.formatting.provider": "yapf",
4 |     "DockerRun.DisableDockerrc": true,
5 |     "augment.enableAutomaticCompletions": true
6 | }


--------------------------------------------------------------------------------
/SECURITY.md:
--------------------------------------------------------------------------------
 1 | # Security Policy
 2 | 
 3 | ## Supported Versions
 4 | 
 5 | Versions that are currently being supported with security updates.
 6 | 
 7 | | Version | Supported          |
 8 | | ------- | ------------------ |
 9 | | 23.2.x   | :white_check_mark: |
10 | | < 23.2.x | :x:                |
11 | 
12 | ## Reporting a Vulnerability
13 | 
14 | Please open an issue if you discover a security issue.
15 | 


--------------------------------------------------------------------------------
/_typos.toml:
--------------------------------------------------------------------------------
 1 | # Files for typos
 2 | # Instruction:  https://github.com/marketplace/actions/typos-action#getting-started
 3 | 
 4 | [default.extend-identifiers]
 5 | 
 6 | [default.extend-words]
 7 | NIN="NIN"
 8 | parms="parms"
 9 | nin="nin"
10 | extention="extention" # Intentionally left
11 | nd="nd"
12 | pn="pn"
13 | shs="shs"
14 | sts="sts"
15 | scs="scs"
16 | cpc="cpc"
17 | coc="coc"
18 | cic="cic"
19 | msm="msm"
20 | usu="usu"
21 | ici="ici"
22 | lvl="lvl"
23 | dii="dii"
24 | muk="muk"
25 | ori="ori"
26 | hru="hru"
27 | rik="rik"
28 | koo="koo"
29 | yos="yos"
30 | wn="wn"
31 | parm = "parm"
32 | 
33 | 
34 | [files]
35 | extend-exclude = ["_typos.toml", "venv"]
36 | 


--------------------------------------------------------------------------------
/assets/js/localization.js:
--------------------------------------------------------------------------------
  1 | var re_num = /^[.\d]+$/;
  2 | var re_emoji = /[\p{Extended_Pictographic}\u{1F3FB}-\u{1F3FF}\u{1F9B0}-\u{1F9B3}]/u;
  3 | 
  4 | var original_lines = {};
  5 | var translated_lines = {};
  6 | 
  7 | function hasLocalization() {
  8 |     return window.localization && Object.keys(window.localization).length > 0;
  9 | }
 10 | 
 11 | function textNodesUnder(el) {
 12 |     var n, a = [], walk = document.createTreeWalker(el, NodeFilter.SHOW_TEXT, null, false);
 13 |     while ((n = walk.nextNode())) a.push(n);
 14 |     return a;
 15 | }
 16 | 
 17 | function canBeTranslated(node, text) {
 18 |     if (!text) return false;
 19 |     if (!node.parentElement) return false;
 20 | 
 21 |     var parentType = node.parentElement.nodeName;
 22 |     if (parentType == 'SCRIPT' || parentType == 'STYLE' || parentType == 'TEXTAREA') return false;
 23 | 
 24 |     if (parentType == 'OPTION' || parentType == 'SPAN') {
 25 |         var pnode = node;
 26 |         for (var level = 0; level < 4; level++) {
 27 |             pnode = pnode.parentElement;
 28 |             if (!pnode) break;
 29 |         }
 30 |     }
 31 | 
 32 |     if (re_num.test(text)) return false;
 33 |     if (re_emoji.test(text)) return false;
 34 |     return true;
 35 | }
 36 | 
 37 | function getTranslation(text) {
 38 |     if (!text) return undefined;
 39 | 
 40 |     if (translated_lines[text] === undefined) {
 41 |         original_lines[text] = 1;
 42 |     }
 43 | 
 44 |     var tl = localization[text];
 45 |     if (tl !== undefined) {
 46 |         translated_lines[tl] = 1;
 47 |     }
 48 | 
 49 |     return tl;
 50 | }
 51 | 
 52 | function processTextNode(node) {
 53 |     var text = node.textContent.trim();
 54 | 
 55 |     if (!canBeTranslated(node, text)) return;
 56 | 
 57 |     var tl = getTranslation(text);
 58 |     if (tl !== undefined) {
 59 |         node.textContent = tl;
 60 |     }
 61 | }
 62 | 
 63 | function processNode(node) {
 64 |     console.log(node.nodeType + " " + node.nodeName + " " + node.nodeValue)
 65 |     if (node.nodeType == 3) {
 66 |         processTextNode(node);
 67 |         return;
 68 |     }
 69 | 
 70 |     if (node.title) {
 71 |         let tl = getTranslation(node.title);
 72 |         if (tl !== undefined) {
 73 |             node.title = tl;
 74 |         }
 75 |     }
 76 | 
 77 |     if (node.placeholder) {
 78 |         let tl = getTranslation(node.placeholder);
 79 |         if (tl !== undefined) {
 80 |             node.placeholder = tl;
 81 |         }
 82 |     }
 83 | 
 84 |     textNodesUnder(node).forEach(function(node) {
 85 |         processTextNode(node);
 86 |     });
 87 | }
 88 | 
 89 | document.addEventListener("DOMContentLoaded", function() {
 90 |     if (!hasLocalization()) {
 91 |         return;
 92 |     }
 93 | 
 94 |     onUiUpdate(function(m) {
 95 |         m.forEach(function(mutation) {
 96 |             mutation.addedNodes.forEach(function(node) {
 97 |                 processNode(node);
 98 |             });
 99 |         });
100 |     });
101 | 
102 |     processNode(gradioApp());
103 | });


--------------------------------------------------------------------------------
/config_files/accelerate/default_config.yaml:
--------------------------------------------------------------------------------
 1 | command_file: null
 2 | commands: null
 3 | compute_environment: LOCAL_MACHINE
 4 | deepspeed_config: {}
 5 | distributed_type: 'NO'
 6 | downcast_bf16: 'no'
 7 | dynamo_backend: 'NO'
 8 | fsdp_config: {}
 9 | gpu_ids: all
10 | machine_rank: 0
11 | main_process_ip: null
12 | main_process_port: null
13 | main_training_function: main
14 | megatron_lm_config: {}
15 | mixed_precision: 'no'
16 | num_machines: 1
17 | num_processes: 1
18 | rdzv_backend: static
19 | same_network: true
20 | tpu_name: null
21 | tpu_zone: null
22 | use_cpu: false


--------------------------------------------------------------------------------
/config_files/accelerate/runpod.yaml:
--------------------------------------------------------------------------------
 1 | command_file: null
 2 | commands: null
 3 | compute_environment: LOCAL_MACHINE
 4 | deepspeed_config: {}
 5 | distributed_type: 'NO'
 6 | downcast_bf16: 'no'
 7 | dynamo_backend: 'NO'
 8 | fsdp_config: {}
 9 | gpu_ids: all
10 | machine_rank: 0
11 | main_process_ip: null
12 | main_process_port: null
13 | main_training_function: main
14 | megatron_lm_config: {}
15 | mixed_precision: 'no'
16 | num_machines: 1
17 | num_processes: 1
18 | rdzv_backend: static
19 | same_network: true
20 | tpu_name: null
21 | tpu_zone: null
22 | use_cpu: false


--------------------------------------------------------------------------------
/dataset/images/.gitkeep:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/bmaltais/kohya_ss/4161d1d80ad554f7801c584632665d6825994062/dataset/images/.gitkeep


--------------------------------------------------------------------------------
/dataset/logs/.gitkeep:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/bmaltais/kohya_ss/4161d1d80ad554f7801c584632665d6825994062/dataset/logs/.gitkeep


--------------------------------------------------------------------------------
/dataset/outputs/.gitkeep:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/bmaltais/kohya_ss/4161d1d80ad554f7801c584632665d6825994062/dataset/outputs/.gitkeep


--------------------------------------------------------------------------------
/dataset/regularization/.gitkeep:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/bmaltais/kohya_ss/4161d1d80ad554f7801c584632665d6825994062/dataset/regularization/.gitkeep


--------------------------------------------------------------------------------
/docker-compose.yaml:
--------------------------------------------------------------------------------
 1 | services:
 2 |   kohya-ss-gui:
 3 |     container_name: kohya-ss-gui
 4 |     image: ghcr.io/bmaltais/kohya-ss-gui:latest
 5 |     user: 1000:0
 6 |     build:
 7 |       context: .
 8 |       args:
 9 |         - UID=1000
10 |       cache_from:
11 |         - ghcr.io/bmaltais/kohya-ss-gui:cache
12 |       cache_to:
13 |         - type=inline
14 |     ports:
15 |       - 7860:7860
16 |     environment:
17 |       SAFETENSORS_FAST_GPU: 1
18 |       TENSORBOARD_PORT: ${TENSORBOARD_PORT:-6006}
19 |     tmpfs:
20 |       - /tmp
21 |     volumes:
22 |       - /tmp/.X11-unix:/tmp/.X11-unix
23 |       - ./models:/app/models
24 |       - ./dataset:/dataset
25 |       - ./dataset/images:/app/data
26 |       - ./dataset/logs:/app/logs
27 |       - ./dataset/outputs:/app/outputs
28 |       - ./dataset/regularization:/app/regularization
29 |       - ./models:/app/models
30 |       - ./.cache/config:/app/config
31 |       - ./.cache/user:/home/1000/.cache
32 |       - ./.cache/triton:/home/1000/.triton
33 |       - ./.cache/nv:/home/1000/.nv
34 |       - ./.cache/keras:/home/1000/.keras
35 |       - ./.cache/config:/home/1000/.config # For backward compatibility
36 |     deploy:
37 |       resources:
38 |         reservations:
39 |           devices:
40 |             - driver: nvidia
41 |               capabilities: [gpu]
42 |               device_ids: ["all"]
43 | 
44 |   tensorboard:
45 |     container_name: tensorboard
46 |     image: tensorflow/tensorflow:latest-gpu
47 |     ports:
48 |       # Note: set the TENSORBOARD_PORT variable in the .env file to change this port
49 |       - ${TENSORBOARD_PORT:-6006}:6006
50 |     volumes:
51 |       - ./dataset/logs:/app/logs
52 |     command: tensorboard --logdir=/app/logs --bind_all
53 |     deploy:
54 |       resources:
55 |         reservations:
56 |           devices:
57 |             - driver: nvidia
58 |               capabilities: [gpu]
59 |               device_ids: ["all"]
60 | 


--------------------------------------------------------------------------------
/docs/Finetuning/top_level.md:
--------------------------------------------------------------------------------
 1 | # Finetuning Resource Guide
 2 | 
 3 | This guide is a resource compilation to facilitate the development of robust finetuned models.
 4 | 
 5 | - Need to add resources here
 6 | 
 7 | ## Guidelines for SDXL Finetuning 
 8 | 
 9 | - Set the `Max resolution` to at least 1024x1024, as this is the standard resolution for SDXL.
10 | - The fine-tuning can be done with 24GB GPU memory with the batch size of 1.
11 |   - Train U-Net only.
12 |   - Use gradient checkpointing.
13 |   - Use `--cache_text_encoder_outputs` option and caching latents.
14 |   - Use Adafactor optimizer. RMSprop 8bit or Adagrad 8bit may work. AdamW 8bit doesn't seem to work.
15 | - PyTorch 2 seems to use slightly less GPU memory than PyTorch 1.
16 | 
17 | Example of the optimizer settings for Adafactor with the fixed learning rate:
18 | ```
19 | optimizer_type = "adafactor"
20 | optimizer_args = [ "scale_parameter=False", "relative_step=False", "warmup_init=False" ]
21 | lr_scheduler = "constant_with_warmup"
22 | lr_warmup_steps = 100
23 | learning_rate = 4e-7 # SDXL original learning rate
24 | ```
25 | 
26 | ## Resource Contributions
27 | 
28 | If you have valuable resources to add, kindly create a PR on Github.


--------------------------------------------------------------------------------
/docs/LoRA/top_level.md:
--------------------------------------------------------------------------------
 1 | # LoRA Resource Guide
 2 | 
 3 | This guide is a resource compilation to facilitate the development of robust LoRA models.
 4 | 
 5 | Access EDG's tutorials here: https://ko-fi.com/post/EDGs-tutorials-P5P6KT5MT
 6 | 
 7 | ## Guidelines for SDXL LoRA Training 
 8 | 
 9 | - Set the `Max resolution` to at least 1024x1024, as this is the standard resolution for SDXL.
10 | - Use a GPU that has at least 12GB memory for the LoRA training process.
11 | - We strongly recommend using the `--network_train_unet_only` option for SDXL LoRA to avoid unforeseen training results caused by dual text encoders in SDXL.
12 | - PyTorch 2 tends to use less GPU memory than PyTorch 1.
13 | 
14 | Here's an example configuration for the Adafactor optimizer with a fixed learning rate:
15 | 
16 | ```
17 | optimizer_type = "adafactor"
18 | optimizer_args = [ "scale_parameter=False", "relative_step=False", "warmup_init=False" ]
19 | lr_scheduler = "constant_with_warmup"
20 | lr_warmup_steps = 100
21 | learning_rate = 4e-7 # This is the standard learning rate for SDXL
22 | ```
23 | 
24 | ## Resource Contributions
25 | 
26 | If you have valuable resources to add, kindly create a PR on Github.


--------------------------------------------------------------------------------
/docs/image_folder_structure.md:
--------------------------------------------------------------------------------
 1 | # Dreambooth, LoRA and TI image folder structure
 2 | 
 3 | To ensure successful training with Kohya, it is crucial to follow a specific folder structure that provides the necessary image repeats. Please adhere to the following structure precisely:
 4 | 
 5 | Folder Structure Example:
 6 | 
 7 | ```txt
 8 | c:
 9 | |
10 | ├──images
11 | |   |
12 | |   ├── 30_cat
13 | |   |   |
14 | |   |   ├── image1.jpg
15 | |   |   ├── image1.txt
16 | |   |   ├── image2.png
17 | |   |   └── image2.txt
18 | |   |
19 | |   ├── 30_dog
20 | |   |   |
21 | |   |   ├── image1.jpg
22 | |   |   ├── image1.txt
23 | |   |   ├── image2.png
24 | |   |   └── image2.txt
25 | |   |
26 | |   └── 40_black mamba
27 | |       |
28 | |       ├── image1.jpg
29 | |       ├── image1.txt
30 | |       ├── image2.png
31 | |       └── image2.txt
32 | |
33 | ├──regularization
34 | |   |
35 | |   ├── 1_cat
36 | |   |   |
37 | |   |   ├── reg1.jpg
38 | |   |   ├── reg2.jpg
39 | |   |
40 | |   ├── 1_dog
41 | |   |   |
42 | |   |   ├── reg1.jpg
43 | |   |   ├── reg2.jpg
44 | |   |
45 | |   └── 1_black mamba
46 | |       |
47 | |       ├── reg1.jpg
48 | |       ├── reg2.jpg
49 | 
50 | ```
51 | 
52 | Please note the following important information regarding file extensions and their impact on concept names during model training:
53 | 
54 | If a file with a .txt or .caption extension and the same name as an image is present in the image subfolder, it will take precedence over the concept name during the model training process.
55 | For example, if there is an image file named image1.jpg in the 30_cat subfolder, and there is a corresponding text file named image1.txt or image1.caption in the same subfolder, the concept name used during training will be determined by the content of that text file rather than the subfolder name.
56 | 
57 | Ensure that the content of such text files accurately reflects the desired concept name or any relevant caption information associated with the corresponding image.
58 | 
59 | By considering this information and maintaining the proper folder structure, including any necessary text or caption files, you can ensure a smooth and effective training process with Kohya.


--------------------------------------------------------------------------------
/docs/installation_docker.md:
--------------------------------------------------------------------------------
 1 | ### Docker
 2 | 
 3 | #### Get your Docker ready for GPU support
 4 | 
 5 | ##### Windows
 6 | 
 7 | Once you have installed [**Docker Desktop**](https://www.docker.com/products/docker-desktop/), [**CUDA Toolkit**](https://developer.nvidia.com/cuda-downloads), [**NVIDIA Windows Driver**](https://www.nvidia.com.tw/Download/index.aspx), and ensured that your Docker is running with [**WSL2**](https://docs.docker.com/desktop/wsl/#turn-on-docker-desktop-wsl-2), you are ready to go.
 8 | 
 9 | Here is the official documentation for further reference.  
10 | <https://docs.nvidia.com/cuda/wsl-user-guide/index.html#nvidia-compute-software-support-on-wsl-2>
11 | <https://docs.docker.com/desktop/wsl/use-wsl/#gpu-support>
12 | 
13 | ##### Linux, OSX
14 | 
15 | Install an NVIDIA GPU Driver if you do not already have one installed.  
16 | <https://docs.nvidia.com/datacenter/tesla/tesla-installation-notes/index.html>
17 | 
18 | Install the NVIDIA Container Toolkit with this guide.  
19 | <https://docs.nvidia.com/datacenter/cloud-native/container-toolkit/latest/install-guide.html>
20 | 
21 | #### Design of our Dockerfile
22 | 
23 | - It is required that all training data is stored in the `dataset` subdirectory, which is mounted into the container at `/dataset`.
24 | - Please note that the file picker functionality is not available. Instead, you will need to manually input the folder path and configuration file path.
25 | - TensorBoard has been separated from the project.
26 |   - TensorBoard is not included in the Docker image.
27 |   - The "Start TensorBoard" button has been hidden.
28 |   - TensorBoard is launched from a distinct container [as shown here](/docker-compose.yaml#L41).
29 | - The browser won't be launched automatically. You will need to manually open the browser and navigate to [http://localhost:7860/](http://localhost:7860/) and [http://localhost:6006/](http://localhost:6006/)
30 | - This Dockerfile has been designed to be easily disposable. You can discard the container at any time and restart it with the new code version.
31 | 
32 | #### Use the pre-built Docker image
33 | 
34 | ```bash
35 | git clone --recursive https://github.com/bmaltais/kohya_ss.git
36 | cd kohya_ss
37 | docker compose up -d
38 | ```
39 | 
40 | To update the system, do `docker compose down && docker compose up -d --pull always`
41 | 
42 | #### Local docker build
43 | 
44 | > [!IMPORTANT]  
45 | > Clone the Git repository ***recursively*** to include submodules:  
46 | > `git clone --recursive https://github.com/bmaltais/kohya_ss.git`
47 | 
48 | ```bash
49 | git clone --recursive https://github.com/bmaltais/kohya_ss.git
50 | cd kohya_ss
51 | docker compose up -d --build
52 | ```
53 | 
54 | > [!NOTE]  
55 | > Building the image may take up to 20 minutes to complete.
56 | 
57 | To update the system, ***checkout to the new code version*** and rebuild using `docker compose down && docker compose up -d --build --pull always`
58 | 
59 | > [!NOTE]
60 | > If you are running on Linux, an alternative Docker container port with fewer limitations is available [here](https://github.com/P2Enjoy/kohya_ss-docker).
61 | 
62 | #### ashleykleynhans runpod docker builds
63 | 
64 | You may want to use the following repositories when running on runpod:
65 | 
66 | - Standalone Kohya_ss template: <https://github.com/ashleykleynhans/kohya-docker>
67 | - Auto1111 + Kohya_ss GUI template: <https://github.com/ashleykleynhans/stable-diffusion-docker>
68 | 


--------------------------------------------------------------------------------
/docs/installation_novita.md:
--------------------------------------------------------------------------------
 1 | ### Novita
 2 | 
 3 | #### Pre-built Novita template
 4 | 
 5 | 1. Open the Novita template by clicking on <https://novita.ai/gpus-console?templateId=312>.
 6 | 
 7 | 2. Deploy the template on the desired host.
 8 | 
 9 | 3. Once deployed, connect to the Novita instance on HTTP port 7860 to access the kohya_ss GUI.
10 | 


--------------------------------------------------------------------------------
/docs/installation_runpod.md:
--------------------------------------------------------------------------------
 1 | ### Runpod
 2 | 
 3 | #### Manual installation
 4 | 
 5 | To install the necessary components for Runpod and run kohya_ss, follow these steps:
 6 | 
 7 | 1. Select the Runpod pytorch 2.2.0 template. This is important. Other templates may not work.
 8 | 
 9 | 2. SSH into the Runpod.
10 | 
11 | 3. Clone the repository by running the following command:
12 | 
13 |    ```shell
14 |    cd /workspace
15 |    git clone --recursive https://github.com/bmaltais/kohya_ss.git
16 |    ```
17 | 
18 | 4. Run the setup script:
19 | 
20 |    ```shell
21 |    cd kohya_ss
22 |    ./setup-runpod.sh
23 |    ```
24 | 
25 | 5. Run the GUI with:
26 | 
27 |    ```shell
28 |    ./gui.sh --share --headless
29 |    ```
30 | 
31 |    or with this if you expose 7860 directly via the runpod configuration:
32 | 
33 |    ```shell
34 |    ./gui.sh --listen=0.0.0.0 --headless
35 |    ```
36 | 
37 | 6. Connect to the public URL displayed after the installation process is completed.
38 | 
39 | #### Pre-built Runpod template
40 | 
41 | To run from a pre-built Runpod template, you can:
42 | 
43 | 1. Open the Runpod template by clicking on <https://runpod.io/gsc?template=ya6013lj5a&ref=w18gds2n>.
44 | 
45 | 2. Deploy the template on the desired host.
46 | 
47 | 3. Once deployed, connect to the Runpod instance on HTTP port 3010 to access the kohya_ss GUI. You can also connect to auto1111 on HTTP port 3000.
48 | 


--------------------------------------------------------------------------------
/docs/troubleshooting_tesla_v100.md:
--------------------------------------------------------------------------------
 1 | ### LORA Training on TESLA V100 - GPU Utilization Issue
 2 | 
 3 | #### Issue Summary
 4 | 
 5 | When training LORA on a TESLA V100, users reported low GPU utilization. Additionally, there was difficulty in specifying GPUs other than the default for training.
 6 | 
 7 | #### Potential Solutions
 8 | 
 9 | - **GPU Selection:** Users can specify GPU IDs in the setup configuration to select the desired GPUs for training.
10 | - **Improving GPU Load:** Utilizing `adamW8bit` optimizer and increasing the batch size can help achieve 70-80% GPU utilization without exceeding GPU memory limits.
11 | 


--------------------------------------------------------------------------------
/examples/LoRA based finetuning 2 phase.ps1:
--------------------------------------------------------------------------------
# Two-phase SDXL LoRA fine-tuning:
#   phase 1 trains a fresh LoRA; phase 2 resumes from its saved weights for a
#   short constant-LR polish pass stored under "<name>e2".
$pretrainedModel = "D:\models\sdxl\nsfw_v1.0_00002_.safetensors"
$trainDataDir = "D:\dataset\harold\img"
$loggingDir = "D:\dataset\harold\lora\sdxl-logs"
$outputName = "harold_v1.0a"
$outputDir = "d:\lycoris\sdxl"

# Phase 2 resumes from the phase 1 output file.
$networkWeights = Join-Path -Path $outputDir -ChildPath "$outputName.safetensors"
$outputName2 = "$outputName" + "e2"

# Phase 1: cosine LR schedule, batch size 4, up to 40 steps / 10 epochs.
accelerate launch --num_cpu_threads_per_process=2 "./sdxl_train_network.py" --enable_bucket --pretrained_model_name_or_path="$pretrainedModel" --train_data_dir="$trainDataDir" --resolution="1024,1024" --output_dir="$outputDir" --logging_dir="$loggingDir" --network_alpha="256" --training_comment="trigger words: " --save_model_as=safetensors --network_module=networks.lora --unet_lr=1e-05 --network_train_unet_only --network_dim=256 --output_name="$outputName" --lr_scheduler_num_cycles="1" --scale_weight_norms="1" --network_dropout="0.1" --cache_text_encoder_outputs --no_half_vae --lr_scheduler="cosine" --train_batch_size="4" --max_train_steps="40" --save_every_n_epochs="10" --mixed_precision="bf16" --save_precision="bf16" --seed="17415" --caption_extension=".txt" --cache_latents --cache_latents_to_disk --optimizer_type="AdamW" --optimizer_args weight_decay=0.05 betas=0.9,0.98 --max_train_epochs="10" --max_data_loader_n_workers="0" --keep_tokens="1" --bucket_reso_steps=32 --min_snr_gamma=5 --gradient_checkpointing --xformers --bucket_no_upscale --noise_offset=0.0357 --adaptive_noise_scale=0.00357 --log_prefix=xl-loha

# Phase 2: constant LR schedule, batch size 1, initialized from $networkWeights.
accelerate launch --num_cpu_threads_per_process=2 "./sdxl_train_network.py" --enable_bucket --pretrained_model_name_or_path="$pretrainedModel" --train_data_dir="$trainDataDir" --resolution="1024,1024" --output_dir="$outputDir" --logging_dir="$loggingDir" --network_alpha="256" --training_comment="trigger: portrait" --save_model_as=safetensors --network_module=networks.lora --unet_lr=1e-05 --network_train_unet_only --network_dim=256 --network_weights="$networkWeights" --output_name="$outputName2" --lr_scheduler_num_cycles="1" --scale_weight_norms="1" --network_dropout="0.1" --cache_text_encoder_outputs --no_half_vae --lr_scheduler="constant" --train_batch_size="1" --max_train_steps="16" --save_every_n_epochs="1" --mixed_precision="bf16" --save_precision="bf16" --seed="17415" --caption_extension=".txt" --cache_latents --cache_latents_to_disk --optimizer_type="AdamW" --optimizer_args weight_decay=0.05 betas=0.9,0.98 --max_train_epochs="1" --max_data_loader_n_workers="0" --keep_tokens="1" --bucket_reso_steps=32 --min_snr_gamma=5 --gradient_checkpointing --xformers --bucket_no_upscale --noise_offset=0.0357 --adaptive_noise_scale=0.00357 --log_prefix=xl-loha
13 | 


--------------------------------------------------------------------------------
/examples/caption.ps1:
--------------------------------------------------------------------------------
 1 | # This powershell script will create a text file for each files in the folder
 2 | #
 3 | # Useful to create base caption that will be augmented on a per image basis
 4 | 
 5 | $folder = "D:\some\folder\location\"
 6 | $file_pattern="*.*"
 7 | $caption_text="some caption text"
 8 | 
 9 | $files = Get-ChildItem $folder$file_pattern -Include *.png, *.jpg, *.webp -File
10 | foreach ($file in $files) {
11 |     if (-not(Test-Path -Path $folder\"$($file.BaseName).txt" -PathType Leaf)) {
12 |         New-Item -ItemType file -Path $folder -Name "$($file.BaseName).txt" -Value $caption_text
13 |     }
14 | }


--------------------------------------------------------------------------------
/examples/caption_subfolders.ps1:
--------------------------------------------------------------------------------
 1 | # This powershell script will create a text file for each files in the folder
 2 | #
 3 | # Useful to create base caption that will be augmented on a per image basis
 4 | 
 5 | $folder = "D:\test\t2\"
 6 | $file_pattern="*.*"
 7 | $text_fir_file="bigeyes style"
 8 | 
 9 | foreach ($file in Get-ChildItem $folder\$file_pattern -File)
10 | {
11 |     New-Item -ItemType file -Path $folder -Name "$($file.BaseName).txt" -Value $text_fir_file
12 | }
13 | 
14 | foreach($directory in Get-ChildItem -path $folder -Directory)
15 | {
16 |     foreach ($file in Get-ChildItem $folder\$directory\$file_pattern)
17 |     {
18 |         New-Item -ItemType file -Path $folder\$directory -Name "$($file.BaseName).txt" -Value $text_fir_file
19 |     }
20 | }
21 | 


--------------------------------------------------------------------------------
/examples/finetune_latent.ps1:
--------------------------------------------------------------------------------
 1 | # Command 1: merge_captions_to_metadata.py
 2 | $captionExtension = "--caption_extension=.txt"
 3 | $sourceDir1 = "d:\test\1_1960-1969"
 4 | $targetFile1 = "d:\test\1_1960-1969/meta_cap.json"
 5 | 
 6 | # Command 2: prepare_buckets_latents.py
 7 | $targetLatentFile = "d:\test\1_1960-1969/meta_lat.json"
 8 | $modelFile = "E:\models\sdxl\sd_xl_base_0.9.safetensors"
 9 | 
10 | ./venv/Scripts/python.exe finetune/merge_captions_to_metadata.py $captionExtension $sourceDir1 $targetFile1 --full_path
11 | ./venv/Scripts/python.exe finetune/prepare_buckets_latents.py $sourceDir1 $targetFile1 $targetLatentFile $modelFile --batch_size=4 --max_resolution=1024,1024 --min_bucket_reso=64 --max_bucket_reso=2048 --mixed_precision=bf16 --full_path
12 | 


--------------------------------------------------------------------------------
/examples/kohya-1-folders.ps1:
--------------------------------------------------------------------------------
# This powershell script will create a model using the fine tuning dreambooth method. It will require landscape,
# portrait and square images.
#
# Adjust the script to your own needs

# Sylvia Ritter
# variable values
$pretrained_model_name_or_path = "D:\models\v1-5-pruned-mse-vae.ckpt"
$data_dir = "D:\test\squat"
$train_dir = "D:\test\"
$resolution = "512,512"

# Count the training images (recursive, .png files only).
$image_num = Get-ChildItem $data_dir -Recurse -File -Include *.png | Measure-Object | %{$_.Count}

Write-Output "image_num: $image_num"

$learning_rate = 1e-6
$dataset_repeats = 40
$train_batch_size = 8
$epoch = 1
$save_every_n_epochs=1
$mixed_precision="fp16"
$num_cpu_threads_per_process=6

# You should not have to change values past this point

$output_dir = $train_dir + "\model"
$repeats = $image_num * $dataset_repeats
# Max train steps = (total repeats / batch size) * epochs, rounded up
# (PowerShell evaluates / and * left to right).
$mts = [Math]::Ceiling($repeats / $train_batch_size * $epoch)

Write-Output "Repeats: $repeats"

.\venv\Scripts\activate

# Pass 1: train from the base checkpoint at the full dataset repeat count.
accelerate launch --num_cpu_threads_per_process $num_cpu_threads_per_process train_db.py `
    --pretrained_model_name_or_path=$pretrained_model_name_or_path `
    --train_data_dir=$data_dir `
    --output_dir=$output_dir `
    --resolution=$resolution `
    --train_batch_size=$train_batch_size `
    --learning_rate=$learning_rate `
    --max_train_steps=$mts `
    --use_8bit_adam `
    --xformers `
    --mixed_precision=$mixed_precision `
    --cache_latents `
    --save_every_n_epochs=$save_every_n_epochs `
    --fine_tuning `
    --dataset_repeats=$dataset_repeats `
    --save_precision="fp16"
    
# 2nd pass at half the dataset repeat value

# Pass 2: resume from pass 1's last.ckpt with half the steps and repeats,
# writing into "<output_dir>2".
accelerate launch --num_cpu_threads_per_process $num_cpu_threads_per_process train_db.py `
    --pretrained_model_name_or_path=$output_dir"\last.ckpt" `
    --train_data_dir=$data_dir `
    --output_dir=$output_dir"2" `
    --resolution=$resolution `
    --train_batch_size=$train_batch_size `
    --learning_rate=$learning_rate `
    --max_train_steps=$([Math]::Ceiling($mts/2)) `
    --use_8bit_adam `
    --xformers `
    --mixed_precision=$mixed_precision `
    --cache_latents `
    --save_every_n_epochs=$save_every_n_epochs `
    --fine_tuning `
    --dataset_repeats=$([Math]::Ceiling($dataset_repeats/2)) `
    --save_precision="fp16"
    
# NOTE(review): this third pass reuses pass 2's source checkpoint and output
# directory but with the full step/repeat counts - it looks like a leftover
# duplicate; confirm it is intentional before relying on this script.
    accelerate launch --num_cpu_threads_per_process $num_cpu_threads_per_process train_db.py `
    --pretrained_model_name_or_path=$output_dir"\last.ckpt" `
    --train_data_dir=$data_dir `
    --output_dir=$output_dir"2" `
    --resolution=$resolution `
    --train_batch_size=$train_batch_size `
    --learning_rate=$learning_rate `
    --max_train_steps=$mts `
    --use_8bit_adam `
    --xformers `
    --mixed_precision=$mixed_precision `
    --cache_latents `
    --save_every_n_epochs=$save_every_n_epochs `
    --fine_tuning `
    --dataset_repeats=$dataset_repeats `
    --save_precision="fp16"
    


--------------------------------------------------------------------------------
/examples/kohya_train_db_fixed_with-reg_SDv2 512 base.ps1:
--------------------------------------------------------------------------------
# This powershell script will create a model using the fine tuning dreambooth method. It will require landscape,
# portrait and square images.
#
# Adjust the script to your own needs

# variable values
$pretrained_model_name_or_path = "D:\models\512-base-ema.ckpt"
$data_dir = "D:\models\dariusz_zawadzki\kohya_reg\data"
$reg_data_dir = "D:\models\dariusz_zawadzki\kohya_reg\reg"
$logging_dir = "D:\models\dariusz_zawadzki\logs"
$output_dir = "D:\models\dariusz_zawadzki\train_db_model_reg_v2"
$resolution = "512,512"
$lr_scheduler="polynomial"
$cache_latents = 1 # 1 = true, 0 = false

# Count the training images (recursive; png/jpg/webp).
$image_num = Get-ChildItem $data_dir -Recurse -File -Include *.png, *.jpg, *.webp | Measure-Object | %{$_.Count}

Write-Output "image_num: $image_num"

$dataset_repeats = 200
$learning_rate = 2e-6
$train_batch_size = 4
$epoch = 1
$save_every_n_epochs=1
$mixed_precision="bf16"
$num_cpu_threads_per_process=6

# You should not have to change values past this point
# Translate the 0/1 toggle into an optional CLI flag.
if ($cache_latents -eq 1) {
    $cache_latents_value="--cache_latents"
}
else {
    # NOTE(review): when disabled this expands to an empty-string argument on
    # the accelerate command line below - confirm the trainer tolerates that.
    $cache_latents_value=""
}

$repeats = $image_num * $dataset_repeats
# Max train steps = (total repeats / batch size) * epochs, rounded up.
$mts = [Math]::Ceiling($repeats / $train_batch_size * $epoch)

Write-Output "Repeats: $repeats"

cd D:\kohya_ss
.\venv\Scripts\activate

# SD 2.x (512 base) DreamBooth run with regularization images and fixed seed.
accelerate launch --num_cpu_threads_per_process $num_cpu_threads_per_process train_db.py `
    --v2 `
    --pretrained_model_name_or_path=$pretrained_model_name_or_path `
    --train_data_dir=$data_dir `
    --output_dir=$output_dir `
    --resolution=$resolution `
    --train_batch_size=$train_batch_size `
    --learning_rate=$learning_rate `
    --max_train_steps=$mts `
    --use_8bit_adam `
    --xformers `
    --mixed_precision=$mixed_precision `
    $cache_latents_value `
    --save_every_n_epochs=$save_every_n_epochs `
    --logging_dir=$logging_dir `
    --save_precision="fp16" `
    --reg_data_dir=$reg_data_dir `
    --seed=494481440 `
    --lr_scheduler=$lr_scheduler

# Add the inference yaml file along with the model for proper loading. Need to have the same name as model... Most likely "last.yaml" in our case.
65 | 


--------------------------------------------------------------------------------
/examples/lucoris extract examples.txt:
--------------------------------------------------------------------------------
 1 | python tools\lycoris_locon_extract.py --mode quantile --safetensors --linear_ratio 0.9 --conv_ratio 0.9 --device cuda D:/models/v1-5-pruned.ckpt D:/models/cyberrealistic_v12.safetensors "D:/lora/sd1.5/cyberrealistic_v12.safetensors"
 2 | 
 3 | python tools\lycoris_locon_extract.py --mode quantile --safetensors --linear_quantile 0.75 --conv_quantile 0.75 --device cuda D:/models/v1-5-pruned.ckpt "C:\Users\berna\Downloads\deliberate_v2.safetensors" "D:/lora/sd1.5/deliberate_v2.safetensors"
 4 | 
 5 | python tools\lycoris_locon_extract.py --mode fixed --safetensors --linear_dim 512 --conv_dim 512 --device cuda D:/models/v1-5-pruned.ckpt D:/models/cyberrealistic_v12.safetensors "D:/lora/sd1.5/cyberrealistic_v12.safetensors"
 6 | 
 7 | python tools\lycoris_locon_extract.py --use_sparse_bias --sparsity 0.98 --mode quantile --safetensors --linear_quantile 0.75 --conv_quantile 0.75 --device cuda D:/models/v1-5-pruned.ckpt "C:\Users\berna\Downloads\deliberate_v2.safetensors" "D:/lora/sd1.5/deliberate_v2.safetensors"
 8 | 
 9 | python tools\lycoris_locon_extract.py --use_sparse_bias --sparsity 0.98 --mode quantile --safetensors --linear_quantile 0.75 --conv_quantile 0.75 --device cuda D:/models/v1-5-pruned.ckpt "D:/models/test\claire_v1.0ee2-000003.safetensors" "D:/lora/sd1.5/claire_v1.0ee2-000003.safetensors"
10 | 
11 | python tools\lycoris_locon_extract.py --use_sparse_bias --sparsity 0.98 --mode quantile --safetensors --linear_quantile 0.5 --conv_quantile 0.5 --device cuda D:/models/v1-5-pruned.ckpt "D:/models/test\claire_v1.0ee2-000003.safetensors" "D:/lora/sd1.5/claire_v1.0ee2-0.5.safetensors"
12 | 
13 | python tools\lycoris_locon_extract.py --use_sparse_bias --sparsity 0.98 --mode quantile --safetensors --linear_quantile 0.5 --conv_quantile 0.5 --device cuda D:/models/v1-5-pruned.ckpt "D:/models/test\claire_v1.0f.safetensors" "D:/lora/sd1.5/claire_v1.0f0.5.safetensors"


--------------------------------------------------------------------------------
/examples/pull kohya_ss sd-scripts updates in.md:
--------------------------------------------------------------------------------
 1 | ## Updating a Local Submodule with the Latest sd-scripts Changes
 2 | 
 3 | To update your local branch with the most recent changes from kohya/sd-scripts, follow these steps:
 4 | 
 5 | 1. When you wish to perform an update of the dev branch, execute the following commands:
 6 | 
 7 |    ```bash
 8 |    cd sd-scripts
 9 |    git fetch
10 |    git checkout dev
11 |    git pull origin dev
12 |    cd ..
13 |    git add sd-scripts
14 |    git commit -m "Update sd-scripts submodule to the latest on dev"
15 |    ```
16 | 
17 |    Alternatively, if you want to obtain the latest code from main:
18 | 
19 |    ```bash
20 |    cd sd-scripts
21 |    git fetch
22 |    git checkout main
23 |    git pull origin main
24 |    cd ..
25 |    git add sd-scripts
26 |    git commit -m "Update sd-scripts submodule to the latest on main"
27 |    ```
28 | 


--------------------------------------------------------------------------------
/examples/stable_cascade/test.toml:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/bmaltais/kohya_ss/4161d1d80ad554f7801c584632665d6825994062/examples/stable_cascade/test.toml


--------------------------------------------------------------------------------
/examples/word_frequency.ps1:
--------------------------------------------------------------------------------
 1 | $txt_files_folder = "D:\dataset\"
 2 | $txt_prefix_to_ignore = "asds"
 3 | $txt_postfix_ti_ignore = "asds"
 4 | 
 5 | # Should not need to touch anything below
 6 | 
 7 | # (Get-Content $txt_files_folder"\*.txt" ).Replace(",", "") -Split '\W' | Group-Object -NoElement | Sort-Object -Descending -Property Count
 8 | 
 9 | $combined_txt = Get-Content $txt_files_folder"\*.txt"
10 | $combined_txt = $combined_txt.Replace(",", "")
11 | $combined_txt = $combined_txt.Replace("$txt_prefix_to_ignore", "")
12 | $combined_txt = $combined_txt.Replace("$txt_postfix_ti_ignore", "") -Split '\W' | Group-Object -NoElement | Sort-Object -Descending -Property Count
13 | 
14 | Write-Output "Sorted by count"
15 | Write-Output $combined_txt.Name


--------------------------------------------------------------------------------
/gui-uv.bat:
--------------------------------------------------------------------------------
@echo off
set VIRTUAL_ENV=.venv
echo VIRTUAL_ENV is set to %VIRTUAL_ENV%

:: Check if uv is installed
setlocal enabledelayedexpansion
where uv >nul 2>nul
if %errorlevel% neq 0 (
    set /p INSTALL_UV="uv is not installed. We can try to install it for you, or you can install it manually from https://astral.sh/uv before running this script again. Would you like to attempt automatic installation now? (Y/N) "
    if /i "!INSTALL_UV!"=="Y" (
        REM NOTE(review): a winget-installed uv may not be on PATH in this
        REM console session until a new one is opened - confirm.
        winget install --id=astral-sh.uv  -e
    ) else (
        echo Okay, please install uv manually from https://astral.sh/uv and then re-run this script. Exiting.
        exit /b 1
    )
)
endlocal

:: Expose torch's bundled DLLs to the process.
:: NOTE(review): this references "venv" while uv manages ".venv" (see
:: VIRTUAL_ENV above) - confirm which environment is intended.
set PATH=%PATH%;%~dp0venv\Lib\site-packages\torch\lib

echo Starting the GUI... this might take some time... Especially on 1st run after install or update...

:: Make sure we are on the right sd-scripts commit
git submodule update --init --recursive

:: If the exit code is 0, run the kohya_gui.py script with the command-line arguments
if %errorlevel% equ 0 (
    REM Check if the batch was started via double-click
    IF /i "%comspec% /c %~0 " equ "%cmdcmdline:"=%" (
        REM echo This script was started by double clicking.
        REM "cmd /k" keeps the console window open after the GUI exits.
        cmd /k uv run --link-mode=copy --index-strategy unsafe-best-match kohya_gui.py --noverify %*
    ) ELSE (
        REM echo This script was started from a command prompt.
        uv run --link-mode=copy --index-strategy unsafe-best-match kohya_gui.py --noverify %*
    )
)


--------------------------------------------------------------------------------
/gui-uv.sh:
--------------------------------------------------------------------------------
 1 | #!/usr/bin/env bash
 2 | export VIRTUAL_ENV=.venv
 3 | 
 4 | env_var_exists() {
 5 |   if [[ -n "${!1}" ]]; then
 6 |     return 0
 7 |   else
 8 |     return 1
 9 |   fi
10 | }
11 | 
12 | lib_path="/usr/lib/wsl/lib/"
13 | 
14 | if [ -d "$lib_path" ]; then
15 |     if [ -z "${LD_LIBRARY_PATH}" ]; then
16 |         export LD_LIBRARY_PATH="$lib_path"
17 |     fi
18 | fi
19 | 
20 | if [ -n "$SUDO_USER" ] || [ -n "$SUDO_COMMAND" ]; then
21 |     echo "The sudo command resets the non-essential environment variables, we keep the LD_LIBRARY_PATH variable."
22 |     export LD_LIBRARY_PATH=$(sudo -i printenv LD_LIBRARY_PATH)
23 | fi
24 | 
25 | SCRIPT_DIR=$(cd -- "$(dirname -- "$0")" && pwd)
26 | cd "$SCRIPT_DIR" || exit 1
27 | 
28 | # Check if --quiet is in the arguments
29 | uv_quiet=""
30 | args=()
31 | for arg in "$@"; do
32 |   if [[ "$arg" == "--quiet" ]]; then
33 |     uv_quiet="--quiet"
34 |   else
35 |     args+=("$arg")
36 |   fi
37 | done
38 | 
39 | if ! command -v uv &> /dev/null; then
40 |   read -p "uv is not installed. We can try to install it for you, or you can install it manually from https://astral.sh/uv before running this script again. Would you like to attempt automatic installation now? [Y/n]: " install_uv
41 |   if [[ "$install_uv" =~ ^[Yy]$ ]]; then
42 |     curl -LsSf https://astral.sh/uv/install.sh | sh
43 |     source $HOME/.local/bin/env
44 |   else
45 |     echo "Okay, please install uv manually from https://astral.sh/uv and then re-run this script. Exiting."
46 |     exit 1
47 |   fi
48 | fi
49 | 
50 | if [[ "$uv_quiet" == "--quiet" ]]; then
51 |   echo "Notice: uv will run in quiet mode. No indication of the uv module download and install process will be displayed."
52 | fi
53 | 
54 | git submodule update --init --recursive
55 | uv run $uv_quiet kohya_gui.py --noverify "${args[@]}"
56 | 


--------------------------------------------------------------------------------
/gui.bat:
--------------------------------------------------------------------------------
@echo off

:: NOTE(review): PYTHON_VER is set but never used in this script - confirm
:: before removing.
set PYTHON_VER=3.10.9

:: Deactivate the virtual environment
call .\venv\Scripts\deactivate.bat

:: Activate the virtual environment
call .\venv\Scripts\activate.bat

:: Update pip to latest version
python -m pip install --upgrade pip -q

:: Expose torch's bundled DLLs to the process.
set PATH=%PATH%;%~dp0venv\Lib\site-packages\torch\lib

echo Starting the GUI... this might take some time...

:: NOTE(review): nothing jumps to this label; it looks like a leftover from a
:: removed validation step - confirm before deleting.
:skip_validation

:: If the exit code is 0, run the kohya_gui.py script with the command-line arguments
if %errorlevel% equ 0 (
    REM Check if the batch was started via double-click
    IF /i "%comspec% /c %~0 " equ "%cmdcmdline:"=%" (
        REM echo This script was started by double clicking.
        REM "cmd /k" keeps the console window open after the GUI exits.
        cmd /k python.exe kohya_gui.py %*
    ) ELSE (
        REM echo This script was started from a command prompt.
        python.exe kohya_gui.py %*
    )
)
31 | 


--------------------------------------------------------------------------------
/gui.ps1:
--------------------------------------------------------------------------------
 1 | # Check if a virtual environment is active and deactivate it if necessary
 2 | if ($env:VIRTUAL_ENV) {
 3 |     # Write-Host "Deactivating the virtual environment to test for modules installed locally..."
 4 |     & deactivate
 5 | }
 6 | 
 7 | # Activate the virtual environment
 8 | # Write-Host "Activating the virtual environment..."
 9 | & .\venv\Scripts\activate
10 | 
11 | python.exe -m pip install --upgrade pip -q
12 | 
13 | $env:PATH += ";$($MyInvocation.MyCommand.Path)\venv\Lib\site-packages\torch\lib"
14 | 
15 | Write-Host "Starting the GUI... this might take some time..."
16 | 
17 | $argsFromFile = @()
18 | if (Test-Path .\gui_parameters.txt) {
19 |     $argsFromFile = Get-Content .\gui_parameters.txt -Encoding UTF8 | Where-Object { $_ -notmatch "^#" } | Foreach-Object { $_ -split " " }
20 | }
21 | $args_combo = $argsFromFile + $args
22 | # Write-Host "The arguments passed to this script were: $args_combo"
23 | python.exe kohya_gui.py $args_combo
24 | 
25 | 


--------------------------------------------------------------------------------
/kohya_gui/__init__.py:
--------------------------------------------------------------------------------
1 | """empty"""
2 | 


--------------------------------------------------------------------------------
/kohya_gui/class_lora_tab.py:
--------------------------------------------------------------------------------
 1 | import gradio as gr
 2 | from .merge_lora_gui import GradioMergeLoRaTab
 3 | from .svd_merge_lora_gui import gradio_svd_merge_lora_tab
 4 | from .verify_lora_gui import gradio_verify_lora_tab
 5 | from .resize_lora_gui import gradio_resize_lora_tab
 6 | from .extract_lora_gui import gradio_extract_lora_tab
 7 | from .flux_extract_lora_gui import gradio_flux_extract_lora_tab
 8 | from .convert_lcm_gui import gradio_convert_lcm_tab
 9 | from .extract_lycoris_locon_gui import gradio_extract_lycoris_locon_tab
10 | from .extract_lora_from_dylora_gui import gradio_extract_dylora_tab
11 | from .merge_lycoris_gui import gradio_merge_lycoris_tab
12 | from .flux_merge_lora_gui import GradioFluxMergeLoRaTab
13 | 
14 | 
class LoRATools:
    """Gradio section hosting the collection of LoRA utility sub-tabs.

    Instantiating this class builds the UI as a side effect: each
    ``gradio_*_tab`` helper registers its components into the currently
    open Gradio context.

    Args:
        headless: Forwarded to every sub-tab; when True the tabs are built
            without interactive file/folder picker support.
    """

    def __init__(
        self,
        headless: bool = False,
    ):
        # Fixed grammar in the user-facing text ("provide" -> "provides").
        gr.Markdown("This section provides various LoRA tools...")
        # Each call below registers one tool sub-tab, in declaration order.
        gradio_extract_dylora_tab(headless=headless)
        gradio_convert_lcm_tab(headless=headless)
        gradio_extract_lora_tab(headless=headless)
        gradio_flux_extract_lora_tab(headless=headless)
        gradio_extract_lycoris_locon_tab(headless=headless)
        # Instantiated for its UI side effects only; the unused local
        # binding from the original has been removed.
        GradioMergeLoRaTab()
        gradio_merge_lycoris_tab(headless=headless)
        gradio_svd_merge_lora_tab(headless=headless)
        gradio_resize_lora_tab(headless=headless)
        gradio_verify_lora_tab(headless=headless)
        GradioFluxMergeLoRaTab(headless=headless)
32 | 


--------------------------------------------------------------------------------
/kohya_gui/class_metadata.py:
--------------------------------------------------------------------------------
 1 | import gradio as gr
 2 | 
 3 | from .class_gui_config import KohyaSSGUIConfig
 4 | 
 5 | 
 6 | class MetaData:
 7 |     def __init__(
 8 |         self,
 9 |         config: KohyaSSGUIConfig = {},
10 |     ) -> None:
11 |         self.config = config
12 | 
13 |         with gr.Row():
14 |             self.metadata_title = gr.Textbox(
15 |                 label="Metadata title",
16 |                 placeholder="(optional) title for model metadata (default is output_name)",
17 |                 interactive=True,
18 |                 value=self.config.get("metadata.title", ""),
19 |             )
20 |             self.metadata_author = gr.Textbox(
21 |                 label="Metadata author",
22 |                 placeholder="(optional) author name for model metadata",
23 |                 interactive=True,
24 |                 value=self.config.get("metadata.author", ""),
25 |             )
26 |         self.metadata_description = gr.Textbox(
27 |             label="Metadata description",
28 |             placeholder="(optional) description for model metadata",
29 |             interactive=True,
30 |             value=self.config.get("metadata.description", ""),
31 |         )
32 |         with gr.Row():
33 |             self.metadata_license = gr.Textbox(
34 |                 label="Metadata license",
35 |                 placeholder="(optional) license for model metadata",
36 |                 interactive=True,
37 |                 value=self.config.get("metadata.license", ""),
38 |             )
39 |             self.metadata_tags = gr.Textbox(
40 |                 label="Metadata tags",
41 |                 placeholder="(optional) tags for model metadata, separated by comma",
42 |                 interactive=True,
43 |                 value=self.config.get("metadata.tags", ""),
44 |             )
45 | 
46 |     def run_cmd(run_cmd: list, **kwargs):
47 |         if "metadata_title" in kwargs and kwargs.get("metadata_title") != "":
48 |             run_cmd.append("--metadata_title")
49 |             run_cmd.append(kwargs["metadata_title"])
50 | 
51 |         if "metadata_author" in kwargs and kwargs.get("metadata_author") != "":
52 |             run_cmd.append("--metadata_author")
53 |             run_cmd.append(kwargs["metadata_author"])
54 | 
55 |         if "metadata_description" in kwargs and kwargs.get("metadata_description") != "":
56 |             run_cmd.append("--metadata_description")
57 |             run_cmd.append(kwargs["metadata_description"])
58 | 
59 |         if "metadata_license" in kwargs and kwargs.get("metadata_license") != "":
60 |             run_cmd.append("--metadata_license")
61 |             run_cmd.append(kwargs["metadata_license"])
62 | 
63 |         if "metadata_tags" in kwargs and kwargs.get("metadata_tags") != "":
64 |             run_cmd.append("--metadata_tags")
65 |             run_cmd.append(kwargs["metadata_tags"])
66 | 
67 |         return run_cmd
68 | 


--------------------------------------------------------------------------------
/kohya_gui/custom_logging.py:
--------------------------------------------------------------------------------
 1 | import os
 2 | import logging
 3 | import time
 4 | import sys
 5 | 
 6 | from rich.theme import Theme
 7 | from rich.logging import RichHandler
 8 | from rich.console import Console
 9 | from rich.pretty import install as pretty_install
10 | from rich.traceback import install as traceback_install
11 | 
# Module-level cache: the configured "sd" logger, or None until setup_logging() runs.
log = None
13 | 
14 | 
def setup_logging(clean=False, debug=False):
    """Configure and return the shared "sd" logger (idempotent).

    Every record is written to setup.log at DEBUG level; a rich console
    handler shows DEBUG when *debug* is True, otherwise INFO.

    Args:
        clean: delete any pre-existing setup.log before configuring.
        debug: lower the console handler threshold to DEBUG.

    Returns:
        The configured ``logging.Logger`` (cached in the module-level ``log``,
        so repeated calls return the same object without re-adding handlers).
    """
    global log

    # Already initialized by an earlier call — reuse it.
    if log is not None:
        return log

    try:
        if clean and os.path.isfile("setup.log"):
            os.remove("setup.log")
        time.sleep(0.1)  # prevent race condition
    except OSError:
        # Narrowed from a bare `except:`: only filesystem errors are expected
        # here, and log-file cleanup is best effort — never block startup.
        pass

    # logging.basicConfig gained the `encoding` parameter in Python 3.9.
    if sys.version_info >= (3, 9):
        logging.basicConfig(
            level=logging.DEBUG,
            format="%(asctime)s | %(levelname)s | %(pathname)s | %(message)s",
            filename="setup.log",
            filemode="a",
            encoding="utf-8",
            force=True,
        )
    else:
        logging.basicConfig(
            level=logging.DEBUG,
            format="%(asctime)s | %(levelname)s | %(pathname)s | %(message)s",
            filename="setup.log",
            filemode="a",
            force=True,
        )

    console = Console(
        log_time=True,
        log_time_format="%H:%M:%S-%f",
        theme=Theme(
            {
                "traceback.border": "black",
                "traceback.border.syntax_error": "black",
                "inspect.value.border": "black",
            }
        ),
    )
    pretty_install(console=console)
    traceback_install(
        console=console,
        extra_lines=1,
        width=console.width,
        word_wrap=False,
        indent_guides=False,
        suppress=[],
    )
    rh = RichHandler(
        show_time=True,
        omit_repeated_times=False,
        show_level=True,
        show_path=False,
        markup=False,
        rich_tracebacks=True,
        log_time_format="%H:%M:%S-%f",
        level=logging.DEBUG if debug else logging.INFO,
        console=console,
    )
    # NOTE(review): Handler.set_name expects a string; passing the int level
    # names the handler "10"/"20". Kept as-is to avoid a behavior change.
    rh.set_name(logging.DEBUG if debug else logging.INFO)
    log = logging.getLogger("sd")
    log.addHandler(rh)

    return log
82 | 


--------------------------------------------------------------------------------
/kohya_gui/localization.py:
--------------------------------------------------------------------------------
 1 | import json
 2 | import logging
 3 | import os
 4 | 
# Maps language name (localization file stem) -> path of its JSON file.
localizationMap = {}
 6 | 
 7 | 
 8 | def load_localizations():
 9 |     localizationMap.clear()
10 |     dirname = "./localizations"
11 |     for file in os.listdir(dirname):
12 |         fn, ext = os.path.splitext(file)
13 |         if ext.lower() != ".json":
14 |             continue
15 |         localizationMap[fn] = os.path.join(dirname, file)
16 | 
17 | 
def load_language_js(language_name: str) -> str:
    """Return a JS snippet assigning the language's translations to window.localization.

    Unknown languages or unreadable files yield an empty mapping, so the page
    always gets a valid assignment.
    """
    fn = localizationMap.get(language_name, None)
    data = {}
    if fn is not None:
        try:
            with open(fn, "r", encoding="utf-8") as file:
                data = json.load(file)
        except Exception:
            # Bug fix: logging.ERROR is the integer level constant (not
            # callable) — calling it raised TypeError inside this handler.
            # logging.error() is the module-level logging function.
            logging.error(f"Error loading localization from {fn}")

    return f"window.localization = {json.dumps(data)}"
29 | 
30 | 
31 | load_localizations()
32 | 


--------------------------------------------------------------------------------
/kohya_gui/localization_ext.py:
--------------------------------------------------------------------------------
 1 | import os
 2 | import gradio as gr
 3 | import kohya_gui.localization as localization
 4 | 
 5 | 
 6 | def file_path(fn):
 7 |     return f"file={os.path.abspath(fn)}?{os.path.getmtime(fn)}"
 8 | 
 9 | 
def js_html_str(language):
    """Build the <script> tags to inject into the page <head>.

    Includes the localization data for *language* followed by the static
    helper scripts from ./assets/js, in that order.
    """
    head = f'<script type="text/javascript">{localization.load_language_js(language)}</script>\n'
    # Fix: use context managers so the script files are closed
    # deterministically instead of leaking until garbage collection.
    for script in ("./assets/js/script.js", "./assets/js/localization.js"):
        with open(script, "r", encoding="utf-8") as fh:
            head += f'<script type="text/javascript">{fh.read()}</script>\n'
    return head
17 | 
18 | 
def add_javascript(language):
    """Monkey-patch gradio's TemplateResponse so every rendered page embeds
    the localization <script> tags for *language* just before </head>.

    A None language leaves gradio untouched.
    """
    if language is None:
        # print('no language')
        return
    jsStr = js_html_str(language)

    def template_response(*args, **kwargs):
        # Delegate to the saved original TemplateResponse, then splice the
        # script tags into the response body and refresh Content-Length etc.
        res = localization.GrRoutesTemplateResponse(*args, **kwargs)
        res.body = res.body.replace(b"</head>", f"{jsStr}</head>".encode("utf-8"))
        res.init_headers()
        return res

    gr.routes.templates.TemplateResponse = template_response
32 | 
33 | 
# Save gradio's original TemplateResponse exactly once (guard against module
# re-import) so the patched response can still delegate to the real one.
if not hasattr(localization, "GrRoutesTemplateResponse"):
    localization.GrRoutesTemplateResponse = gr.routes.templates.TemplateResponse
36 | 


--------------------------------------------------------------------------------
/kohya_gui/sd_modeltype.py:
--------------------------------------------------------------------------------
 1 | from os.path import isfile
 2 | from safetensors import safe_open
 3 | import enum
 4 | 
 5 | # methodology is based on https://github.com/AUTOMATIC1111/stable-diffusion-webui/blob/82a973c04367123ae98bd9abdf80d9eda9b910e2/modules/sd_models.py#L379-L403
 6 | 
 7 | 
 8 | class ModelType(enum.Enum):
 9 |     UNKNOWN = 0
10 |     SD1 = 1
11 |     SD2 = 2
12 |     SDXL = 3
13 |     SD3 = 4
14 |     FLUX1 = 5
15 | 
16 | 
17 | class SDModelType:
18 |     def __init__(self, safetensors_path):
19 |         self.model_type = ModelType.UNKNOWN
20 | 
21 |         if not isfile(safetensors_path):
22 |             return
23 | 
24 |         try:
25 |             st = safe_open(filename=safetensors_path, framework="numpy", device="cpu")
26 | 
27 |             # print(st.keys())
28 | 
29 |             def hasKeyPrefix(pfx):
30 |                 return any(k.startswith(pfx) for k in st.keys())
31 | 
32 |             if "model.diffusion_model.x_embedder.proj.weight" in st.keys():
33 |                 self.model_type = ModelType.SD3
34 |             elif (
35 |                 "model.diffusion_model.double_blocks.0.img_attn.norm.key_norm.scale"
36 |                 in st.keys()
37 |                 or "double_blocks.0.img_attn.norm.key_norm.scale" in st.keys()
38 |             ):
39 |                 # print("flux1 model detected...")
40 |                 self.model_type = ModelType.FLUX1
41 |             elif hasKeyPrefix("conditioner."):
42 |                 self.model_type = ModelType.SDXL
43 |             elif hasKeyPrefix("cond_stage_model.model."):
44 |                 self.model_type = ModelType.SD2
45 |             elif hasKeyPrefix("model."):
46 |                 self.model_type = ModelType.SD1
47 |         except:
48 |             pass
49 |         
50 |         # print(f"Model type: {self.model_type}")
51 | 
52 |     def Is_SD1(self):
53 |         return self.model_type == ModelType.SD1
54 | 
55 |     def Is_SD2(self):
56 |         return self.model_type == ModelType.SD2
57 | 
58 |     def Is_SDXL(self):
59 |         return self.model_type == ModelType.SDXL
60 | 
61 |     def Is_SD3(self):
62 |         return self.model_type == ModelType.SD3
63 | 
64 |     def Is_FLUX1(self):
65 |         return self.model_type == ModelType.FLUX1
66 | 


--------------------------------------------------------------------------------
/kohya_gui/utilities.py:
--------------------------------------------------------------------------------
 1 | import gradio as gr
 2 | 
 3 | from .basic_caption_gui import gradio_basic_caption_gui_tab
 4 | from .convert_model_gui import gradio_convert_model_tab
 5 | from .blip_caption_gui import gradio_blip_caption_gui_tab
 6 | from .blip2_caption_gui import gradio_blip2_caption_gui_tab
 7 | from .git_caption_gui import gradio_git_caption_gui_tab
 8 | from .wd14_caption_gui import gradio_wd14_caption_gui_tab
 9 | from .manual_caption_gui import gradio_manual_caption_gui_tab
10 | from .group_images_gui import gradio_group_images_gui_tab
11 | from .class_gui_config import KohyaSSGUIConfig
12 | 
13 | 
def utilities_tab(
    train_data_dir_input=gr.Dropdown(),
    reg_data_dir_input=gr.Dropdown(),
    output_dir_input=gr.Dropdown(),
    logging_dir_input=gr.Dropdown(),
    headless=False,
    config: KohyaSSGUIConfig = {},
):
    """Assemble the Utilities GUI: a Captioning tab hosting the caption tools,
    plus the model-conversion and image-grouping tools.

    NOTE(review): the gr.Dropdown() defaults are constructed once at import
    time and shared across calls — presumably intentional for this GUI, but
    worth confirming.

    Args:
        headless: disable interactions that need a display (folder pickers).
        config: GUI configuration forwarded to tabs that persist settings.

    Returns:
        The four directory input components, unchanged, for wiring by the caller.
    """
    with gr.Tab("Captioning"):
        gradio_basic_caption_gui_tab(headless=headless)
        gradio_blip_caption_gui_tab(headless=headless)
        gradio_blip2_caption_gui_tab(headless=headless)
        gradio_git_caption_gui_tab(headless=headless)
        gradio_wd14_caption_gui_tab(headless=headless, config=config)
        gradio_manual_caption_gui_tab(headless=headless)
    gradio_convert_model_tab(headless=headless)
    gradio_group_images_gui_tab(headless=headless)

    return (
        train_data_dir_input,
        reg_data_dir_input,
        output_dir_input,
        logging_dir_input,
    )
38 | 


--------------------------------------------------------------------------------
/localizations/Put localization files here.txt:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/bmaltais/kohya_ss/4161d1d80ad554f7801c584632665d6825994062/localizations/Put localization files here.txt


--------------------------------------------------------------------------------
/localizations/chinese-sample.json:
--------------------------------------------------------------------------------
 1 | {
 2 |   "Loading...": "载入中...",
 3 |   "Use via API": "通过API使用",
 4 |   "Built with Gradio": "使用Gradio构建",
 5 |   "Dreambooth":"梦想阁",
 6 |   "Training": "训练",
  7 |   "Train a custom model using kohya dreambooth python code…": "使用kohya dreambooth python代码 训练个性化模型",
 8 |   "Configuration file": "配置文件",
 9 |   "Open": "打开",
10 |   "Save": "保存",
11 |   "Load": "加载",
12 |   "Source model": "模型来源",
13 |   "Model Quick Pick": "快速选择模型",
14 |   "Save trained model as": "保存训练模型为",
15 |   "Folders": "文件夹",
16 |   "Start training": "开始训练",
17 |   "Stop training": "停止训练",
18 |   "Print training command": "打印训练命令",
19 |   "Start tensorboard": "开始 tensorboard",
20 |   "Stop tensorboard": "结束 tensorboard",
21 |   "Image folder": "图片文件夹",
22 |   "Regularisation folder": "正则化文件夹",
23 |   "Output folder": "输出文件夹",
24 |   "Logging folder": "日志文件夹",
 25 |   "Model output name": "模型输出名称",
26 |   "Training comment": "训练注释",
27 |   "(Optional) Add training comment to be included in metadata": "(可选)增加训练注释到元数据",
28 |   "Parameters": "参数",
29 |   "Basic": "基础",
30 |   "Train batch size": "训练批次大小",
 31 |   "Epoch": "训练轮数",
 32 |   "Max train epoch": "最大训练轮数",
33 |   "(Optional) Enforce number of epoch": "(可选)强制每批数量",
34 |   "Advanced": "增强",
35 |   "Samples": "样例",
36 |   "Tools": "工具",
37 |   "This section provide Dreambooth tools to help setup your dataset…": "这些选择帮助设置自己的数据集",
38 |   "Dreambooth/LoRA Folder preparation": "Dreambooth/LoRA文件准备",
39 |   "This utility will create the necessary folder structure for the training images and optional regularization images needed for the kohys_ss Dreambooth/LoRA method to function correctly.": "为训练文件创建文件夹",
40 |   "Instance prompt": "实例提示",
41 |   "Class prompt": "类提示",
42 |   "Training images": "训练图片",
43 |   "Directory containing the training images": "直接包含训练图片",
44 |   "Repeats": "重复",
45 |   "Regularisation images": "正则化图像",
46 |   "Destination training directory": "训练结果目录",
47 |   "Directory where formatted training and regularisation folders will be placed": "训练和正则化文件会被取代",
48 |   "Prepare training data": "准备训练数据",
49 |   "Copy info to Folders Tab": "复制信息到文件夹",
50 |   "Train a custom model using kohya train network LoRA python code…": "使用kohya训练网络LoRA训练个性化模型"
51 | }


--------------------------------------------------------------------------------
/localizations/en-GB.json:
--------------------------------------------------------------------------------
 1 | {
 2 |   "analyze": "analyse",
 3 |   "behavior": "behaviour",
 4 |   "color": "colour",
 5 |   "flavor": "flavour",
 6 |   "honor": "honour",
 7 |   "humor": "humour",
 8 |   "localization": "localisation",
 9 |   "localize": "localise",
10 |   "neighbor": "neighbour",
11 |   "offense": "offence",
12 |   "oriented": "orientated",
13 |   "practice": "practise",
14 |   "pretense": "pretence",
15 |   "program": "programme",
16 |   "recognize": "recognise",
17 |   "regularization": "regularisation",
18 |   "savior": "saviour",
19 |   "signaling": "signalling",
20 |   "specialization": "specialisation",
21 |   "stabilization": "stabilisation",
22 |   "standardization": "standardisation",
23 |   "utilize": "utilise"
24 | }


--------------------------------------------------------------------------------
/models/.keep:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/bmaltais/kohya_ss/4161d1d80ad554f7801c584632665d6825994062/models/.keep


--------------------------------------------------------------------------------
/presets/finetune/SDXL - AI_Now PagedAdamW8bit v1.0.json:
--------------------------------------------------------------------------------
 1 | {
 2 |     "adaptive_noise_scale": 0.00375,
 3 |     "additional_parameters": "",
 4 |     "batch_size": "4",
 5 |     "block_lr": "",
 6 |     "bucket_no_upscale": true,
 7 |     "bucket_reso_steps": 64,
 8 |     "cache_latents": true,
 9 |     "cache_latents_to_disk": false,
10 |     "caption_dropout_every_n_epochs": 0.0,
11 |     "caption_dropout_rate": 0,
12 |     "caption_extension": ".txt",
13 |     "clip_skip": "1",
14 |     "color_aug": false,
15 |     "create_buckets": true,
16 |     "create_caption": true,
17 |     "dataset_repeats": "1",
18 |     "epoch": 240,
19 |     "flip_aug": false,
20 |     "full_bf16": true,
21 |     "full_fp16": false,
22 |     "full_path": true,
23 |     "gradient_accumulation_steps": 6.0,
24 |     "gradient_checkpointing": true,
25 |     "keep_tokens": "0",
26 |     "learning_rate": 5e-05,
27 |     "lr_scheduler": "constant",
28 |     "lr_scheduler_args": "",
29 |     "lr_warmup": 0,
30 |     "max_bucket_reso": "1024",
31 |     "max_data_loader_n_workers": "0",
32 |     "max_resolution": "1024,1024",
33 |     "max_timestep": 900,
34 |     "max_token_length": "75",
35 |     "max_train_epochs": "240",
36 |     "mem_eff_attn": false,
37 |     "min_bucket_reso": "64",
38 |     "min_snr_gamma": 5,
39 |     "min_timestep": 100,
40 |     "mixed_precision": "bf16",
41 |     "multires_noise_discount": 0,
42 |     "multires_noise_iterations": 0,
43 |     "noise_offset": 0.0375,
44 |     "noise_offset_type": "Original",
45 |     "num_cpu_threads_per_process": 2,
46 |     "optimizer": "PagedAdamW8bit",
47 |     "optimizer_args": "",
48 |     "persistent_data_loader_workers": false,
49 |     "random_crop": false,
50 |     "save_every_n_epochs": 240,
51 |     "save_every_n_steps": 0,
52 |     "save_last_n_steps": 0,
53 |     "save_last_n_steps_state": 0,
54 |     "save_precision": "bf16",
55 |     "scale_v_pred_loss_like_noise_pred": false,
56 |     "sdxl_cache_text_encoder_outputs": true,
57 |     "sdxl_checkbox": true,
58 |     "sdxl_no_half_vae": true,
59 |     "seed": "1234",
60 |     "shuffle_caption": false,
61 |     "train_batch_size": 2,
62 |     "train_text_encoder": false,
63 |     "use_latent_files": "No",
64 |     "log_with": "",
65 |     "v2": false,
66 |     "v_parameterization": false,
67 |     "v_pred_like_loss": 0,
68 |     "vae_batch_size": 0,
69 |     "weighted_captions": false,
70 |     "xformers": "xformers"
71 | }


--------------------------------------------------------------------------------
/presets/finetune/SDXL - Essenz series by AI_Characters_Training v1.0.json:
--------------------------------------------------------------------------------
 1 | {
 2 |   "adaptive_noise_scale": 0,
 3 |   "additional_parameters": "",
 4 |   "batch_size": "1",
 5 |   "block_lr": "",
 6 |   "bucket_no_upscale": false,
 7 |   "bucket_reso_steps": 64,
 8 |   "cache_latents": true,
 9 |   "cache_latents_to_disk": true,
10 |   "caption_dropout_every_n_epochs": 0.0,
11 |   "caption_dropout_rate": 0,
12 |   "caption_extension": ".txt",
13 |   "caption_metadata_filename": "meta_cap.json",
14 |   "clip_skip": "1",
15 |   "color_aug": false,
16 |   "dataset_repeats": "1",
17 |   "epoch": 1,
18 |   "flip_aug": false,
19 |   "full_bf16": false,
20 |   "full_fp16": false,
21 |   "full_path": true,
22 |   "generate_caption_database": true,
23 |   "generate_image_buckets": true,
24 |   "gradient_accumulation_steps": 1.0,
25 |   "gradient_checkpointing": true,
26 |   "image_folder": "/kohya_ss/dataset/1_/",
27 |   "keep_tokens": 0,
28 |   "latent_metadata_filename": "meta_lat.json",
29 |   "learning_rate": 1e-06,
30 |   "logging_dir": "/kohya_ss/output/SDXL1.0_Essenz-series-by-AI_Characters_Concept_Morphing-v1.0",
31 |   "lr_scheduler": "constant",
32 |   "lr_scheduler_args": "",
33 |   "lr_warmup": 0,
34 |   "max_bucket_reso": "4096",
35 |   "max_data_loader_n_workers": "0",
36 |   "max_resolution": "1024,1024",
37 |   "max_timestep": 1000,
38 |   "max_token_length": "75",
39 |   "max_train_epochs": "100",
40 |   "mem_eff_attn": false,
41 |   "min_bucket_reso": "64",
42 |   "min_snr_gamma": 0,
43 |   "min_timestep": 0,
44 |   "mixed_precision": "fp16",
45 |   "model_list": "stabilityai/stable-diffusion-xl-base-1.0",
46 |   "multires_noise_discount": 0,
47 |   "multires_noise_iterations": 0,
48 |   "noise_offset": 0,
49 |   "noise_offset_type": "Original",
50 |   "num_cpu_threads_per_process": 2,
51 |   "optimizer": "AdamW8bit",
52 |   "optimizer_args": "",
53 |   "output_dir": "/kohya_ss/output/SDXL1.0_Essenz-series-by-AI_Characters_Concept_Morphing-v1.0",
54 |   "output_name": "SDXL1.0_Essenz-series-by-AI_Characters_Concept_Morphing-v1.0",
55 |   "persistent_data_loader_workers": false,
56 |   "pretrained_model_name_or_path": "stabilityai/stable-diffusion-xl-base-1.0",
57 |   "random_crop": false,
58 |   "resume": "",
59 |   "sample_every_n_epochs": 0,
60 |   "sample_every_n_steps": 0,
61 |   "sample_prompts": "",
62 |   "sample_sampler": "k_dpm_2",
63 |   "save_every_n_epochs": 10,
64 |   "save_every_n_steps": 0,
65 |   "save_last_n_steps": 0,
66 |   "save_last_n_steps_state": 0,
67 |   "save_model_as": "safetensors",
68 |   "save_precision": "fp16",
69 |   "save_state": false,
70 |   "scale_v_pred_loss_like_noise_pred": false,
71 |   "sdxl_cache_text_encoder_outputs": false,
72 |   "sdxl_checkbox": true,
73 |   "sdxl_no_half_vae": true,
74 |   "seed": "",
75 |   "shuffle_caption": false,
76 |   "train_batch_size": 1,
77 |   "train_dir": "/kohya_ss/output/SDXL1.0_Essenz-series-by-AI_Characters_Concept_Morphing-v1.0",
78 |   "train_text_encoder": true,
79 |   "use_latent_files": "Yes",
80 |   "log_with": "",
81 |   "v2": false,
82 |   "v_parameterization": false,
83 |   "v_pred_like_loss": 0,
84 |   "vae_batch_size": 0,
85 |   "wandb_api_key": "",
86 |   "weighted_captions": false,
87 |   "xformers": "xformers"
88 | }


--------------------------------------------------------------------------------
/presets/finetune/adafactor.json:
--------------------------------------------------------------------------------
 1 | {
 2 |     "batch_size": "1",
 3 |     "bucket_no_upscale": true,
 4 |     "bucket_reso_steps": 1.0,
 5 |     "cache_latents": true,
 6 |     "caption_dropout_every_n_epochs": 0.0,
 7 |     "caption_dropout_rate": 0.1,
 8 |     "caption_extension": ".txt",
 9 |     "clip_skip": 1,
10 |     "color_aug": false,
11 |     "create_buckets": false,
12 |     "create_caption": true,
13 |     "dataset_repeats": "10",
14 |     "epoch": "2",
15 |     "flip_aug": false,
16 |     "full_fp16": false,
17 |     "full_path": true,
18 |     "gradient_accumulation_steps": 1.0,
19 |     "gradient_checkpointing": false,
20 |     "keep_tokens": 1,
21 |     "learning_rate": "1e-6",
22 |     "lr_scheduler": "adafactor",
23 |     "lr_warmup": "10",
24 |     "max_bucket_reso": "1024",
25 |     "max_data_loader_n_workers": "0",
26 |     "max_resolution": "512,512",
27 |     "max_token_length": "150",
28 |     "max_train_epochs": "",
29 |     "mem_eff_attn": false,
30 |     "min_bucket_reso": "256",
31 |     "mixed_precision": "bf16",
32 |     "noise_offset": "",
33 |     "num_cpu_threads_per_process": 2,
34 |     "optimizer": "Adafactor",
35 |     "optimizer_args": "scale_parameter=True relative_step=True warmup_init=True weight_decay=2",
36 |     "persistent_data_loader_workers": false,
37 |     "random_crop": false,
38 |     "save_every_n_epochs": "1",
39 |     "save_precision": "fp16",
40 |     "seed": "1234",
41 |     "shuffle_caption": true,
42 |     "train_batch_size": 4,
43 |     "train_text_encoder": true,
44 |     "use_8bit_adam": false,
45 |     "use_latent_files": "No",
46 |     "v2": false,
47 |     "v_parameterization": false,
48 |     "xformers": true
49 | }


--------------------------------------------------------------------------------
/presets/finetune/lion.json:
--------------------------------------------------------------------------------
 1 | {
 2 |     "batch_size": "1",
 3 |     "bucket_no_upscale": true,
 4 |     "bucket_reso_steps": 1.0,
 5 |     "cache_latents": true,
 6 |     "caption_dropout_every_n_epochs": 0.0,
 7 |     "caption_dropout_rate": 0.1,
 8 |     "caption_extension": ".txt",
 9 |     "clip_skip": 1,
10 |     "color_aug": false,
11 |     "create_buckets": false,
12 |     "create_caption": true,
13 |     "dataset_repeats": "10",
14 |     "epoch": "2",
15 |     "flip_aug": false,
16 |     "full_fp16": false,
17 |     "full_path": true,
18 |     "gradient_accumulation_steps": 1.0,
19 |     "gradient_checkpointing": false,
20 |     "keep_tokens": 1,
21 |     "learning_rate": "0.0000166666666",
22 |     "lr_scheduler": "cosine",
23 |     "lr_warmup": "10",
24 |     "max_bucket_reso": "1024",
25 |     "max_data_loader_n_workers": "0",
26 |     "max_resolution": "512,512",
27 |     "max_token_length": "150",
28 |     "max_train_epochs": "",
29 |     "mem_eff_attn": false,
30 |     "min_bucket_reso": "256",
31 |     "mixed_precision": "bf16",
32 |     "noise_offset": "",
33 |     "num_cpu_threads_per_process": 2,
34 |     "optimizer": "Lion",
35 |     "optimizer_args": "",
36 |     "persistent_data_loader_workers": false,
37 |     "random_crop": false,
38 |     "save_every_n_epochs": "1",
39 |     "save_precision": "fp16",
40 |     "seed": "1234",
41 |     "shuffle_caption": true,
42 |     "train_batch_size": 4,
43 |     "train_text_encoder": true,
44 |     "use_8bit_adam": false,
45 |     "use_latent_files": "No",
46 |     "v2": false,
47 |     "v_parameterization": false,
48 |     "xformers": true
49 | }


--------------------------------------------------------------------------------
/presets/finetune/prepare_presets.md:
--------------------------------------------------------------------------------
1 | # Preparing presets for users
2 | 
3 | Run the following command to prepare new presets for release to users:
4 | 
5 | ```
6 | python.exe .\tools\prepare_presets.py .\presets\finetune\*.json
7 | ```


--------------------------------------------------------------------------------
/presets/finetune/user_presets/.put your own presets here:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/bmaltais/kohya_ss/4161d1d80ad554f7801c584632665d6825994062/presets/finetune/user_presets/.put your own presets here


--------------------------------------------------------------------------------
/presets/lora/SDXL - LoHA AI_Characters v1.0.json:
--------------------------------------------------------------------------------
 1 | {
 2 |     "LoRA_type": "LyCORIS/LoHa",
 3 |     "adaptive_noise_scale": 0,
 4 |     "additional_parameters": "",
 5 |     "block_alphas": "",
 6 |     "block_dims": "",
 7 |     "block_lr_zero_threshold": "",
 8 |     "bucket_no_upscale": false,
 9 |     "bucket_reso_steps": 32,
10 |     "cache_latents": true,
11 |     "cache_latents_to_disk": true,
12 |     "caption_dropout_every_n_epochs": 0.0,
13 |     "caption_dropout_rate": 0.05,
14 |     "caption_extension": ".txt",
15 |     "clip_skip": "1",
16 |     "color_aug": false,
17 |     "conv_alpha": 16,
18 |     "conv_block_alphas": "",
19 |     "conv_block_dims": "",
20 |     "conv_dim": 16,
21 |     "decompose_both": false,
22 |     "dim_from_weights": false,
23 |     "down_lr_weight": "",
24 |     "enable_bucket": true,
25 |     "epoch": 1,
26 |     "factor": -1,
27 |     "flip_aug": false,
28 |     "full_bf16": false,
29 |     "full_fp16": false,
30 |     "gradient_accumulation_steps": 1.0,
31 |     "gradient_checkpointing": true,
32 |     "keep_tokens": "0",
33 |     "learning_rate": 0.001,
34 |     "lora_network_weights": "",
35 |     "lr_scheduler": "constant",
36 |     "lr_scheduler_num_cycles": "1",
37 |     "lr_scheduler_power": "",
38 |     "lr_warmup": 0,
39 |     "max_bucket_reso": 2048,
40 |     "max_data_loader_n_workers": "0",
41 |     "max_resolution": "1024,1024",
42 |     "max_timestep": 1000,
43 |     "max_token_length": "75",
44 |     "max_train_epochs": "100",
45 |     "mem_eff_attn": false,
46 |     "mid_lr_weight": "",
47 |     "min_bucket_reso": 256,
48 |     "min_snr_gamma": 5,
49 |     "min_timestep": 0,
50 |     "mixed_precision": "fp16",
51 |     "module_dropout": 0,
52 |     "multires_noise_discount": 0,
53 |     "multires_noise_iterations": 0,
54 |     "network_alpha": 32,
55 |     "network_dim": 32,
56 |     "network_dropout": 0,
57 |     "no_token_padding": false,
58 |     "noise_offset": 0,
59 |     "noise_offset_type": "Original",
60 |     "num_cpu_threads_per_process": 2,
61 |     "optimizer": "AdamW8bit",
62 |     "optimizer_args": "",
63 |     "persistent_data_loader_workers": false,
64 |     "prior_loss_weight": 1.0,
65 |     "random_crop": false,
66 |     "rank_dropout": 0,
67 |     "save_every_n_epochs": 100,
68 |     "save_every_n_steps": 0,
69 |     "save_last_n_steps": 0,
70 |     "save_last_n_steps_state": 0,
71 |     "save_precision": "fp16",
72 |     "scale_v_pred_loss_like_noise_pred": false,
73 |     "scale_weight_norms": 2.5,
74 |     "sdxl": true,
75 |     "sdxl_cache_text_encoder_outputs": false,
76 |     "sdxl_no_half_vae": true,
77 |     "seed": "",
78 |     "shuffle_caption": false,
79 |     "stop_text_encoder_training_pct": 0,
80 |     "text_encoder_lr": 0.001,
81 |     "train_batch_size": 8,
82 |     "train_on_input": true,
83 |     "training_comment": "",
84 |     "unet_lr": 0.001,
85 |     "unit": 1,
86 |     "up_lr_weight": "",
87 |     "use_cp": false,
88 |     "log_with": "",
89 |     "v2": false,
90 |     "v_parameterization": false,
91 |     "vae_batch_size": 0,
92 |     "weighted_captions": false,
93 |     "xformers": true
94 | }


--------------------------------------------------------------------------------
/presets/lora/SDXL - LoKR v1.0.json:
--------------------------------------------------------------------------------
 1 | {
 2 |     "LoRA_type": "LyCORIS/LoKr",
 3 |     "adaptive_noise_scale": 0,
 4 |     "additional_parameters": "",
 5 |     "block_alphas": "",
 6 |     "block_dims": "",
 7 |     "block_lr_zero_threshold": "",
 8 |     "bucket_no_upscale": true,
 9 |     "bucket_reso_steps": 64,
10 |     "cache_latents": true,
11 |     "cache_latents_to_disk": true,
12 |     "caption_dropout_every_n_epochs": 0.0,
13 |     "caption_dropout_rate": 0,
14 |     "caption_extension": ".txt",
15 |     "clip_skip": "1",
16 |     "color_aug": false,
17 |     "conv_alpha": 64,
18 |     "conv_alphas": "",
19 |     "conv_dim": 64,
20 |     "conv_dims": "",
21 |     "decompose_both": false,
22 |     "dim_from_weights": false,
23 |     "down_lr_weight": "",
24 |     "enable_bucket": true,
25 |     "epoch": 20,
26 |     "factor": -1,
27 |     "flip_aug": false,
28 |     "full_fp16": false,
29 |     "gradient_accumulation_steps": 1.0,
30 |     "gradient_checkpointing": true,
31 |     "keep_tokens": "0",
32 |     "learning_rate": 1.0,
33 |     "lora_network_weights": "",
34 |     "lr_scheduler": "constant",
35 |     "lr_scheduler_num_cycles": "",
36 |     "lr_scheduler_power": "",
37 |     "lr_warmup": 0,
38 |     "max_data_loader_n_workers": "0",
39 |     "max_resolution": "1024,1024",
40 |     "max_timestep": 1000,
41 |     "max_token_length": "75",
42 |     "max_train_epochs": "",
43 |     "mem_eff_attn": false,
44 |     "mid_lr_weight": "",
45 |     "min_snr_gamma": 10,
46 |     "min_timestep": 0,
47 |     "mixed_precision": "bf16",
48 |     "module_dropout": 0.1,
49 |     "multires_noise_discount": 0.2,
50 |     "multires_noise_iterations": 8,
51 |     "network_alpha": 64,
52 |     "network_dim": 64,
53 |     "network_dropout": 0,
54 |     "no_token_padding": false,
55 |     "noise_offset": 0.0357,
56 |     "noise_offset_type": "Multires",
57 |     "num_cpu_threads_per_process": 2,
58 |     "optimizer": "Prodigy",
59 |     "optimizer_args": "",
60 |     "persistent_data_loader_workers": false,
61 |     "prior_loss_weight": 1.0,
62 |     "random_crop": false,
63 |     "rank_dropout": 0.1,
64 |     "save_every_n_epochs": 1,
65 |     "save_every_n_steps": 0,
66 |     "save_last_n_steps": 0,
67 |     "save_last_n_steps_state": 0,
68 |     "save_precision": "fp16",
69 |     "scale_v_pred_loss_like_noise_pred": false,
70 |     "scale_weight_norms": 0,
71 |     "sdxl": true,
72 |     "sdxl_cache_text_encoder_outputs": false,
73 |     "sdxl_no_half_vae": true,
74 |     "seed": "12345",
75 |     "shuffle_caption": false,
76 |     "stop_text_encoder_training": 0,
77 |     "text_encoder_lr": 1.0,
78 |     "train_batch_size": 8,
79 |     "train_on_input": true,
80 |     "training_comment": "",
81 |     "unet_lr": 1.0,
82 |     "unit": 1,
83 |     "up_lr_weight": "",
84 |     "use_cp": true,
85 |     "log_with": "",
86 |     "v2": false,
87 |     "v_parameterization": false,
88 |     "vae_batch_size": 0,
89 |     "weighted_captions": false,
90 |     "xformers": true
91 | }


--------------------------------------------------------------------------------
/presets/lora/SDXL - LoRA AI_Now ADamW v1.0.json:
--------------------------------------------------------------------------------
 1 | {
 2 |     "LoRA_type": "Standard",
 3 |     "adaptive_noise_scale": 0.00375,
 4 |     "additional_parameters": "",
 5 |     "block_alphas": "",
 6 |     "block_dims": "",
 7 |     "block_lr_zero_threshold": "",
 8 |     "bucket_no_upscale": true,
 9 |     "bucket_reso_steps": 64,
10 |     "cache_latents": true,
11 |     "cache_latents_to_disk": true,
12 |     "caption_dropout_every_n_epochs": 0.0,
13 |     "caption_dropout_rate": 0,
14 |     "caption_extension": ".txt-no",
15 |     "clip_skip": "1",
16 |     "color_aug": false,
17 |     "conv_alpha": 32,
18 |     "conv_block_alphas": "",
19 |     "conv_block_dims": "",
20 |     "conv_dim": 32,
21 |     "decompose_both": false,
22 |     "dim_from_weights": false,
23 |     "down_lr_weight": "",
24 |     "enable_bucket": true,
25 |     "epoch": 160,
26 |     "factor": -1,
27 |     "flip_aug": false,
28 |     "full_bf16": false,
29 |     "full_fp16": false,
30 |     "gradient_accumulation_steps": 1,
31 |     "gradient_checkpointing": true,
32 |     "keep_tokens": "0",
33 |     "learning_rate": 0.0001,
34 |     "lora_network_weights": "",
35 |     "lr_scheduler": "constant",
36 |     "lr_scheduler_args": "",
37 |     "lr_scheduler_num_cycles": "1",
38 |     "lr_scheduler_power": "",
39 |     "lr_warmup": 0,
40 |     "max_bucket_reso": 2048,
41 |     "max_data_loader_n_workers": "0",
42 |     "max_resolution": "1024,1024",
43 |     "max_timestep": 900,
44 |     "max_token_length": "75",
45 |     "max_train_epochs": "",
46 |     "max_train_steps": "320",
47 |     "mem_eff_attn": false,
48 |     "mid_lr_weight": "",
49 |     "min_bucket_reso": 256,
50 |     "min_snr_gamma": 5,
51 |     "min_timestep": 100,
52 |     "mixed_precision": "bf16",
53 |     "module_dropout": 0,
54 |     "multires_noise_discount": 0,
55 |     "multires_noise_iterations": 0,
56 |     "network_alpha": 32,
57 |     "network_dim": 32,
58 |     "network_dropout": 0,
59 |     "no_token_padding": false,
60 |     "noise_offset": 0.0375,
61 |     "noise_offset_type": "Original",
62 |     "num_cpu_threads_per_process": 2,
63 |     "optimizer": "AdamW",
64 |     "optimizer_args": "",
65 |     "persistent_data_loader_workers": false,
66 |     "prior_loss_weight": 1.0,
67 |     "random_crop": false,
68 |     "rank_dropout": 0,
69 |     "save_every_n_epochs": 5,
70 |     "save_every_n_steps": 0,
71 |     "save_last_n_steps": 0,
72 |     "save_last_n_steps_state": 0,
73 |     "save_precision": "bf16",
74 |     "scale_v_pred_loss_like_noise_pred": false,
75 |     "scale_weight_norms": 0,
76 |     "sdxl": true,
77 |     "sdxl_cache_text_encoder_outputs": false,
78 |     "sdxl_no_half_vae": true,
79 |     "seed": "12345",
80 |     "shuffle_caption": false,
81 |     "stop_text_encoder_training": 0,
82 |     "text_encoder_lr": 0.0001,
83 |     "train_batch_size": 4,
84 |     "train_on_input": true,
85 |     "training_comment": "trigger: lxndrn woman",
86 |     "unet_lr": 0.0001,
87 |     "unit": 1,
88 |     "up_lr_weight": "",
89 |     "use_cp": false,
90 |     "log_with": "",
91 |     "v2": false,
92 |     "v_parameterization": false,
93 |     "v_pred_like_loss": 0,
94 |     "vae_batch_size": 0,
95 |     "weighted_captions": false,
96 |     "xformers": "xformers"
97 | }


--------------------------------------------------------------------------------
/presets/lora/SDXL - LoRA AI_Now prodigy v1.0.json:
--------------------------------------------------------------------------------
 1 | {
 2 |     "LoRA_type": "Standard",
 3 |     "adaptive_noise_scale": 0,
 4 |     "additional_parameters": "",
 5 |     "block_alphas": "",
 6 |     "block_dims": "",
 7 |     "block_lr_zero_threshold": "",
 8 |     "bucket_no_upscale": true,
 9 |     "bucket_reso_steps": 32,
10 |     "cache_latents": true,
11 |     "cache_latents_to_disk": true,
12 |     "caption_dropout_every_n_epochs": 0.0,
13 |     "caption_dropout_rate": 0,
14 |     "caption_extension": ".txt",
15 |     "clip_skip": "1",
16 |     "color_aug": false,
17 |     "conv_alpha": 32,
18 |     "conv_block_alphas": "",
19 |     "conv_block_dims": "",
20 |     "conv_dim": 32,
21 |     "decompose_both": false,
22 |     "dim_from_weights": false,
23 |     "down_lr_weight": "",
24 |     "enable_bucket": true,
25 |     "epoch": 160,
26 |     "factor": -1,
27 |     "flip_aug": false,
28 |     "full_bf16": false,
29 |     "full_fp16": false,
30 |     "gradient_accumulation_steps": 1,
31 |     "gradient_checkpointing": true,
32 |     "keep_tokens": "0",
33 |     "learning_rate": 1.0,
34 |     "lora_network_weights": "",
35 |     "lr_scheduler": "constant",
36 |     "lr_scheduler_num_cycles": "1",
37 |     "lr_scheduler_power": "",
38 |     "lr_warmup": 0,
39 |     "max_bucket_reso": 2048,
40 |     "max_data_loader_n_workers": "0",
41 |     "max_resolution": "1024,1024",
42 |     "max_timestep": 1000,
43 |     "max_token_length": "75",
44 |     "max_train_epochs": "",
45 |     "mem_eff_attn": false,
46 |     "mid_lr_weight": "",
47 |     "min_bucket_reso": 256,
48 |     "min_snr_gamma": 5,
49 |     "min_timestep": 0,
50 |     "mixed_precision": "bf16",
51 |     "module_dropout": 0,
52 |     "multires_noise_discount": 0,
53 |     "multires_noise_iterations": 0,
54 |     "network_alpha": 16,
55 |     "network_dim": 32,
56 |     "network_dropout": 0,
57 |     "no_token_padding": false,
58 |     "noise_offset": 0,
59 |     "noise_offset_type": "Original",
60 |     "num_cpu_threads_per_process": 2,
61 |     "optimizer": "Prodigy",
62 |     "optimizer_args": "weight_decay=0.01 decouple=True d0=0.0001 use_bias_correction=True",
63 |     "persistent_data_loader_workers": false,
64 |     "prior_loss_weight": 1.0,
65 |     "random_crop": false,
66 |     "rank_dropout": 0,
67 |     "save_every_n_epochs": 10,
68 |     "save_every_n_steps": 0,
69 |     "save_last_n_steps": 0,
70 |     "save_last_n_steps_state": 0,
71 |     "save_precision": "bf16",
72 |     "scale_v_pred_loss_like_noise_pred": false,
73 |     "scale_weight_norms": 5,
74 |     "sdxl": true,
75 |     "sdxl_cache_text_encoder_outputs": false,
76 |     "sdxl_no_half_vae": true,
77 |     "seed": "",
78 |     "shuffle_caption": false,
79 |     "stop_text_encoder_training_pct": 0,
80 |     "text_encoder_lr": 1.0,
81 |     "train_batch_size": 8,
82 |     "train_on_input": true,
83 |     "training_comment": "trigger: the queen of heart 1a",
84 |     "unet_lr": 1.0,
85 |     "unit": 1,
86 |     "up_lr_weight": "",
87 |     "use_cp": false,
88 |     "log_with": "",
89 |     "v2": false,
90 |     "v_parameterization": false,
91 |     "vae_batch_size": 0,
92 |     "weighted_captions": false,
93 |     "xformers": "xformers"
94 | }


--------------------------------------------------------------------------------
/presets/lora/SDXL - LoRA AI_characters standard v1.0.json:
--------------------------------------------------------------------------------
 1 | {
 2 |     "LoRA_type": "Standard",
 3 |     "adaptive_noise_scale": 0,
 4 |     "additional_parameters": "",
 5 |     "block_alphas": "",
 6 |     "block_dims": "",
 7 |     "block_lr_zero_threshold": "",
 8 |     "bucket_no_upscale": false,
 9 |     "bucket_reso_steps": 64,
10 |     "cache_latents": true,
11 |     "cache_latents_to_disk": true,
12 |     "caption_dropout_every_n_epochs": 0.0,
13 |     "caption_dropout_rate": 0.05,
14 |     "caption_extension": ".txt",
15 |     "clip_skip": "1",
16 |     "color_aug": false,
17 |     "conv_alpha": 1,
18 |     "conv_block_alphas": "",
19 |     "conv_block_dims": "",
20 |     "conv_dim": 1,
21 |     "decompose_both": false,
22 |     "dim_from_weights": false,
23 |     "down_lr_weight": "",
24 |     "enable_bucket": true,
25 |     "epoch": 100,
26 |     "factor": -1,
27 |     "flip_aug": false,
28 |     "full_bf16": false,
29 |     "full_fp16": false,
30 |     "gradient_accumulation_steps": "1",
31 |     "gradient_checkpointing": true,
32 |     "keep_tokens": "0",
33 |     "learning_rate": 2e-05,
34 |     "lora_network_weights": "",
35 |     "lr_scheduler": "constant",
36 |     "lr_scheduler_args": "",
37 |     "lr_scheduler_num_cycles": "",
38 |     "lr_scheduler_power": "",
39 |     "lr_warmup": 0,
40 |     "max_bucket_reso": 2048,
41 |     "max_data_loader_n_workers": "0",
42 |     "max_resolution": "1024,1024",
43 |     "max_timestep": 1000,
44 |     "max_token_length": "75",
45 |     "max_train_epochs": "100",
46 |     "max_train_steps": "",
47 |     "mem_eff_attn": false,
48 |     "mid_lr_weight": "",
49 |     "min_bucket_reso": 256,
50 |     "min_snr_gamma": 5,
51 |     "min_timestep": 0,
52 |     "mixed_precision": "fp16",
53 |     "module_dropout": 0,
54 |     "multires_noise_discount": 0,
55 |     "multires_noise_iterations": 0,
56 |     "network_alpha": 32,
57 |     "network_dim": 32,
58 |     "network_dropout": 0,
59 |     "no_token_padding": false,
60 |     "noise_offset": 0,
61 |     "noise_offset_type": "Original",
62 |     "num_cpu_threads_per_process": 2,
63 |     "optimizer": "AdamW8bit",
64 |     "optimizer_args": "",
65 |     "persistent_data_loader_workers": false,
66 |     "prior_loss_weight": 1.0,
67 |     "random_crop": false,
68 |     "rank_dropout": 0,
69 |     "save_every_n_epochs": 1,
70 |     "save_every_n_steps": 0,
71 |     "save_last_n_steps": 0,
72 |     "save_last_n_steps_state": 0,
73 |     "save_precision": "fp16",
74 |     "scale_v_pred_loss_like_noise_pred": false,
75 |     "scale_weight_norms": 0,
76 |     "sdxl": true,
77 |     "sdxl_cache_text_encoder_outputs": false,
78 |     "sdxl_no_half_vae": true,
79 |     "seed": "",
80 |     "shuffle_caption": false,
81 |     "stop_text_encoder_training_pct": 0,
82 |     "text_encoder_lr": 2e-05,
83 |     "train_batch_size": 8,
84 |     "train_on_input": true,
85 |     "training_comment": "2 repeats for styles, 3 repeats for characters, 1 repeat for styles when used together with characters",
86 |     "unet_lr": 2e-05,
87 |     "unit": 1,
88 |     "up_lr_weight": "",
89 |     "use_cp": false,
90 |     "log_with": "",
91 |     "v2": false,
92 |     "v_parameterization": false,
93 |     "v_pred_like_loss": 0,
94 |     "vae_batch_size": 0,
95 |     "weighted_captions": false,
96 |     "xformers": "xformers"
97 | }


--------------------------------------------------------------------------------
/presets/lora/SDXL - LoRA AI_characters standard v1.1.json:
--------------------------------------------------------------------------------
 1 | {
 2 |     "LoRA_type": "Standard",
 3 |     "adaptive_noise_scale": 0,
 4 |     "additional_parameters": "",
 5 |     "block_alphas": "",
 6 |     "block_dims": "",
 7 |     "block_lr_zero_threshold": "",
 8 |     "bucket_no_upscale": false,
 9 |     "bucket_reso_steps": 64,
10 |     "cache_latents": true,
11 |     "cache_latents_to_disk": true,
12 |     "caption_dropout_every_n_epochs": 0.0,
13 |     "caption_dropout_rate": 0.05,
14 |     "caption_extension": ".txt",
15 |     "clip_skip": "1",
16 |     "color_aug": false,
17 |     "conv_alpha": 1,
18 |     "conv_block_alphas": "",
19 |     "conv_block_dims": "",
20 |     "conv_dim": 1,
21 |     "decompose_both": false,
22 |     "dim_from_weights": false,
23 |     "down_lr_weight": "",
24 |     "enable_bucket": true,
25 |     "epoch": 50,
26 |     "factor": -1,
27 |     "flip_aug": false,
28 |     "full_bf16": false,
29 |     "full_fp16": false,
30 |     "gradient_accumulation_steps": "1",
31 |     "gradient_checkpointing": true,
32 |     "keep_tokens": "0",
33 |     "learning_rate": 3e-05,
34 |     "lora_network_weights": "",
35 |     "lr_scheduler": "constant",
36 |     "lr_scheduler_args": "",
37 |     "lr_scheduler_num_cycles": "",
38 |     "lr_scheduler_power": "",
39 |     "lr_warmup": 0,
40 |     "max_bucket_reso": 2048,
41 |     "max_data_loader_n_workers": "0",
42 |     "max_resolution": "1024,1024",
43 |     "max_timestep": 1000,
44 |     "max_token_length": "75",
45 |     "max_train_epochs": "50",
46 |     "max_train_steps": "",
47 |     "mem_eff_attn": false,
48 |     "mid_lr_weight": "",
49 |     "min_bucket_reso": 256,
50 |     "min_snr_gamma": 5,
51 |     "min_timestep": 0,
52 |     "mixed_precision": "fp16",
53 |     "module_dropout": 0,
54 |     "multires_noise_discount": 0,
55 |     "multires_noise_iterations": 0,
56 |     "network_alpha": 32,
57 |     "network_dim": 32,
58 |     "network_dropout": 0,
59 |     "no_token_padding": false,
60 |     "noise_offset": 0,
61 |     "noise_offset_type": "Original",
62 |     "num_cpu_threads_per_process": 2,
63 |     "optimizer": "AdamW",
64 |     "optimizer_args": "",
65 |     "persistent_data_loader_workers": false,
66 |     "prior_loss_weight": 1.0,
67 |     "random_crop": false,
68 |     "rank_dropout": 0,
69 |     "save_every_n_epochs": 1,
70 |     "save_every_n_steps": 0,
71 |     "save_last_n_steps": 0,
72 |     "save_last_n_steps_state": 0,
73 |     "save_precision": "fp16",
74 |     "scale_v_pred_loss_like_noise_pred": false,
75 |     "scale_weight_norms": 0,
76 |     "sdxl": true,
77 |     "sdxl_cache_text_encoder_outputs": false,
78 |     "sdxl_no_half_vae": true,
79 |     "seed": "",
80 |     "shuffle_caption": false,
81 |     "stop_text_encoder_training_pct": 0,
82 |     "text_encoder_lr": 3e-05,
83 |     "train_batch_size": 3,
84 |     "train_on_input": true,
85 |     "training_comment": "3 repeats. More info: https://civitai.com/articles/1771",
86 |     "unet_lr": 3e-05,
87 |     "unit": 1,
88 |     "up_lr_weight": "",
89 |     "use_cp": false,
90 |     "log_with": "",
91 |     "v2": false,
92 |     "v_parameterization": false,
93 |     "v_pred_like_loss": 0,
94 |     "vae_batch_size": 0,
95 |     "weighted_captions": false,
96 |     "xformers": "xformers"
97 | }


--------------------------------------------------------------------------------
/presets/lora/SDXL - LoRA adafactor v1.0.json:
--------------------------------------------------------------------------------
 1 | {
 2 |     "LoRA_type": "Standard",
 3 |     "adaptive_noise_scale": 0.00357,
 4 |     "additional_parameters": "--log_prefix=xl-loha",
 5 |     "block_alphas": "",
 6 |     "block_dims": "",
 7 |     "block_lr_zero_threshold": "",
 8 |     "bucket_no_upscale": false,
 9 |     "bucket_reso_steps": 32,
10 |     "cache_latents": true,
11 |     "cache_latents_to_disk": true,
12 |     "caption_dropout_every_n_epochs": 0.0,
13 |     "caption_dropout_rate": 0,
14 |     "caption_extension": ".txt2",
15 |     "clip_skip": "1",
16 |     "color_aug": false,
17 |     "conv_alpha": 4,
18 |     "conv_block_alphas": "",
19 |     "conv_block_dims": "",
20 |     "conv_dim": 4,
21 |     "decompose_both": false,
22 |     "dim_from_weights": false,
23 |     "down_lr_weight": "",
24 |     "enable_bucket": true,
25 |     "epoch": 30,
26 |     "factor": -1,
27 |     "flip_aug": false,
28 |     "full_bf16": false,
29 |     "full_fp16": false,
30 |     "gradient_accumulation_steps": 1,
31 |     "gradient_checkpointing": true,
32 |     "keep_tokens": 1,
33 |     "learning_rate": 1.0,
34 |     "lora_network_weights": "",
35 |     "lr_scheduler": "adafactor",
36 |     "lr_scheduler_num_cycles": "1",
37 |     "lr_scheduler_power": "",
38 |     "lr_warmup": 0,
39 |     "max_bucket_reso": 2048,
40 |     "max_data_loader_n_workers": "0",
41 |     "max_resolution": "1024,1024",
42 |     "max_timestep": 1000,
43 |     "max_token_length": "75",
44 |     "max_train_epochs": "30",
45 |     "mem_eff_attn": false,
46 |     "mid_lr_weight": "",
47 |     "min_bucket_reso": 64,
48 |     "min_snr_gamma": 0,
49 |     "min_timestep": 0,
50 |     "mixed_precision": "bf16",
51 |     "module_dropout": 0,
52 |     "multires_noise_discount": 0,
53 |     "multires_noise_iterations": 0,
54 |     "network_alpha": 128,
55 |     "network_dim": 128,
56 |     "network_dropout": 0,
57 |     "no_token_padding": false,
58 |     "noise_offset": 0.0357,
59 |     "noise_offset_type": "Original",
60 |     "num_cpu_threads_per_process": 2,
61 |     "optimizer": "Adafactor",
62 |     "optimizer_args": "",
63 |     "persistent_data_loader_workers": false,
64 |     "prior_loss_weight": 1.0,
65 |     "random_crop": false,
66 |     "rank_dropout": 0,
67 |     "save_every_n_epochs": 5,
68 |     "save_every_n_steps": 0,
69 |     "save_last_n_steps": 0,
70 |     "save_last_n_steps_state": 0,
71 |     "save_precision": "bf16",
72 |     "scale_v_pred_loss_like_noise_pred": false,
73 |     "scale_weight_norms": 1,
74 |     "sdxl": true,
75 |     "sdxl_cache_text_encoder_outputs": false,
76 |     "sdxl_no_half_vae": true,
77 |     "seed": "",
78 |     "shuffle_caption": false,
79 |     "stop_text_encoder_training_pct": 0,
80 |     "text_encoder_lr": 1.0,
81 |     "train_batch_size": 5,
82 |     "train_on_input": false,
83 |     "training_comment": "trigger: the white queen",
84 |     "unet_lr": 1.0,
85 |     "unit": 1,
86 |     "up_lr_weight": "",
87 |     "use_cp": false,
88 |     "log_with": "",
89 |     "v2": false,
90 |     "v_parameterization": false,
91 |     "vae_batch_size": 0,
92 |     "weighted_captions": false,
93 |     "xformers": true
94 | }


--------------------------------------------------------------------------------
/presets/lora/SDXL - LoRA aitrepreneur clothing v1.0.json:
--------------------------------------------------------------------------------
 1 | {
 2 |     "LoRA_type": "Standard",
 3 |     "adaptive_noise_scale": 0,
 4 |     "additional_parameters": "",
 5 |     "block_alphas": "",
 6 |     "block_dims": "",
 7 |     "block_lr_zero_threshold": "",
 8 |     "bucket_no_upscale": true,
 9 |     "bucket_reso_steps": 64,
10 |     "cache_latents": true,
11 |     "cache_latents_to_disk": true,
12 |     "caption_dropout_every_n_epochs": 0.0,
13 |     "caption_dropout_rate": 0,
14 |     "caption_extension": ".txt",
15 |     "clip_skip": "1",
16 |     "color_aug": false,
17 |     "conv_alpha": 32,
18 |     "conv_block_alphas": "",
19 |     "conv_block_dims": "",
20 |     "conv_dim": 32,
21 |     "decompose_both": false,
22 |     "dim_from_weights": false,
23 |     "down_lr_weight": "",
24 |     "enable_bucket": true,
25 |     "epoch": 15,
26 |     "factor": -1,
27 |     "flip_aug": false,
28 |     "full_bf16": false,
29 |     "full_fp16": false,
30 |     "gradient_accumulation_steps": 2,
31 |     "gradient_checkpointing": true,
32 |     "keep_tokens": "0",
33 |     "learning_rate": 0.0009,
34 |     "lora_network_weights": "",
35 |     "lr_scheduler": "constant",
36 |     "lr_scheduler_args": "",
37 |     "lr_scheduler_num_cycles": "1",
38 |     "lr_scheduler_power": "",
39 |     "lr_warmup": 0,
40 |     "max_bucket_reso": 2048,
41 |     "max_data_loader_n_workers": "0",
42 |     "max_resolution": "1024,1024",
43 |     "max_timestep": 1000,
44 |     "max_token_length": "75",
45 |     "max_train_epochs": "",
46 |     "max_train_steps": "",
47 |     "mem_eff_attn": false,
48 |     "mid_lr_weight": "",
49 |     "min_bucket_reso": 256,
50 |     "min_snr_gamma": 0,
51 |     "min_timestep": 0,
52 |     "mixed_precision": "bf16",
53 |     "module_dropout": 0,
54 |     "multires_noise_discount": 0,
55 |     "multires_noise_iterations": 0,
56 |     "network_alpha": 1,
57 |     "network_dim": 128,
58 |     "network_dropout": 0,
59 |     "no_token_padding": false,
60 |     "noise_offset": 0,
61 |     "noise_offset_type": "Original",
62 |     "num_cpu_threads_per_process": 2,
63 |     "optimizer": "Adafactor",
64 |     "optimizer_args": "scale_parameter=False relative_step=False warmup_init=False",
65 |     "persistent_data_loader_workers": false,
66 |     "prior_loss_weight": 1.0,
67 |     "random_crop": false,
68 |     "rank_dropout": 0,
69 |     "save_every_n_epochs": 1,
70 |     "save_every_n_steps": 0,
71 |     "save_last_n_steps": 0,
72 |     "save_last_n_steps_state": 0,
73 |     "save_precision": "bf16",
74 |     "scale_v_pred_loss_like_noise_pred": false,
75 |     "scale_weight_norms": 0,
76 |     "sdxl": true,
77 |     "sdxl_cache_text_encoder_outputs": false,
78 |     "sdxl_no_half_vae": true,
79 |     "seed": "12345",
80 |     "shuffle_caption": false,
81 |     "stop_text_encoder_training": 0,
82 |     "text_encoder_lr": 0.0009,
83 |     "train_batch_size": 1,
84 |     "train_on_input": true,
85 |     "training_comment": "trigger: supergirl costume",
86 |     "unet_lr": 0.0009,
87 |     "unit": 1,
88 |     "up_lr_weight": "",
89 |     "use_cp": false,
90 |     "log_with": "",
91 |     "v2": false,
92 |     "v_parameterization": false,
93 |     "v_pred_like_loss": 0,
94 |     "vae_batch_size": 0,
95 |     "weighted_captions": false,
96 |     "xformers": "xformers"
97 | }


--------------------------------------------------------------------------------
/presets/lora/SDXL - LoRA by malcolmrey training v1.0.json:
--------------------------------------------------------------------------------
 1 | {
 2 |     "LoRA_type": "Standard",
 3 |     "adaptive_noise_scale": 0,
 4 |     "additional_parameters": "",
 5 |     "block_alphas": "",
 6 |     "block_dims": "",
 7 |     "block_lr_zero_threshold": "",
 8 |     "bucket_no_upscale": false,
 9 |     "bucket_reso_steps": 32,
10 |     "cache_latents": true,
11 |     "cache_latents_to_disk": true,
12 |     "caption_dropout_every_n_epochs": 0.0,
13 |     "caption_dropout_rate": 0,
14 |     "caption_extension": ".txt",
15 |     "clip_skip": "1",
16 |     "color_aug": false,
17 |     "conv_alpha": 32,
18 |     "conv_block_alphas": "",
19 |     "conv_block_dims": "",
20 |     "conv_dim": 32,
21 |     "decompose_both": false,
22 |     "dim_from_weights": false,
23 |     "down_lr_weight": "",
24 |     "enable_bucket": true,
25 |     "epoch": 16,
26 |     "factor": -1,
27 |     "flip_aug": false,
28 |     "full_bf16": false,
29 |     "full_fp16": false,
30 |     "gradient_accumulation_steps": 1,
31 |     "gradient_checkpointing": true,
32 |     "keep_tokens": "0",
33 |     "learning_rate": 0.0001,
34 |     "lora_network_weights": "",
35 |     "lr_scheduler": "constant",
36 |     "lr_scheduler_num_cycles": "1",
37 |     "lr_scheduler_power": "",
38 |     "lr_warmup": 0,
39 |     "max_bucket_reso": 2048,
40 |     "max_data_loader_n_workers": "0",
41 |     "max_resolution": "1024,1024",
42 |     "max_timestep": 1000,
43 |     "max_token_length": "75",
44 |     "max_train_epochs": "16",
45 |     "mem_eff_attn": false,
46 |     "mid_lr_weight": "",
47 |     "min_bucket_reso": 256,
48 |     "min_snr_gamma": 5,
49 |     "min_timestep": 0,
50 |     "mixed_precision": "bf16",
51 |     "module_dropout": 0,
52 |     "multires_noise_discount": 0,
53 |     "multires_noise_iterations": 0,
54 |     "network_alpha": 13,
55 |     "network_dim": 32,
56 |     "network_dropout": 0,
57 |     "no_token_padding": false,
58 |     "noise_offset": 0,
59 |     "noise_offset_type": "Original",
60 |     "num_cpu_threads_per_process": 2,
61 |     "optimizer": "AdamW8bit",
62 |     "optimizer_args": "",
63 |     "persistent_data_loader_workers": false,
64 |     "prior_loss_weight": 1.0,
65 |     "random_crop": false,
66 |     "rank_dropout": 0,
67 |     "save_every_n_epochs": 2,
68 |     "save_every_n_steps": 0,
69 |     "save_last_n_steps": 0,
70 |     "save_last_n_steps_state": 0,
71 |     "save_precision": "bf16",
72 |     "scale_v_pred_loss_like_noise_pred": false,
73 |     "scale_weight_norms": 2.5,
74 |     "sdxl": true,
75 |     "sdxl_cache_text_encoder_outputs": false,
76 |     "sdxl_no_half_vae": true,
77 |     "seed": "",
78 |     "shuffle_caption": false,
79 |     "stop_text_encoder_training_pct": 0,
80 |     "text_encoder_lr": 0.0001,
81 |     "train_batch_size": 8,
82 |     "train_on_input": true,
83 |     "training_comment": "trigger: playboy centerfold",
84 |     "unet_lr": 0.0001,
85 |     "unit": 1,
86 |     "up_lr_weight": "",
87 |     "use_cp": false,
88 |     "log_with": "",
89 |     "v2": false,
90 |     "v_parameterization": false,
91 |     "vae_batch_size": 0,
92 |     "weighted_captions": false,
93 |     "xformers": "xformers"
94 | }


--------------------------------------------------------------------------------
/presets/lora/SDXL - LoRA face dogu_cat v1.0.json:
--------------------------------------------------------------------------------
 1 | {
 2 |     "LoRA_type": "Standard",
 3 |     "adaptive_noise_scale": 0.00357,
 4 |     "additional_parameters": "",
 5 |     "block_alphas": "",
 6 |     "block_dims": "",
 7 |     "block_lr_zero_threshold": "",
 8 |     "bucket_no_upscale": false,
 9 |     "bucket_reso_steps": 32,
10 |     "cache_latents": true,
11 |     "cache_latents_to_disk": true,
12 |     "caption_dropout_every_n_epochs": 0.0,
13 |     "caption_dropout_rate": 0,
14 |     "caption_extension": ".txt",
15 |     "clip_skip": "1",
16 |     "color_aug": false,
17 |     "conv_alpha": 4,
18 |     "conv_block_alphas": "",
19 |     "conv_block_dims": "",
20 |     "conv_dim": 4,
21 |     "decompose_both": false,
22 |     "dim_from_weights": false,
23 |     "down_lr_weight": "",
24 |     "enable_bucket": true,
25 |     "epoch": 10,
26 |     "factor": -1,
27 |     "flip_aug": false,
28 |     "full_bf16": false,
29 |     "full_fp16": false,
30 |     "gradient_accumulation_steps": 1,
31 |     "gradient_checkpointing": true,
32 |     "keep_tokens": 1,
33 |     "learning_rate": 0.0001,
34 |     "lora_network_weights": "",
35 |     "lr_scheduler": "cosine",
36 |     "lr_scheduler_args": "",
37 |     "lr_scheduler_num_cycles": "1",
38 |     "lr_scheduler_power": "",
39 |     "lr_warmup": 0,
40 |     "max_bucket_reso": 2048,
41 |     "max_data_loader_n_workers": "0",
42 |     "max_resolution": "1024,1024",
43 |     "max_timestep": 1000,
44 |     "max_token_length": "75",
45 |     "max_train_epochs": "",
46 |     "max_train_steps": "",
47 |     "mem_eff_attn": false,
48 |     "mid_lr_weight": "",
49 |     "min_bucket_reso": 64,
50 |     "min_snr_gamma": 0,
51 |     "min_timestep": 0,
52 |     "mixed_precision": "bf16",
53 |     "module_dropout": 0,
54 |     "multires_noise_discount": 0,
55 |     "multires_noise_iterations": 0,
56 |     "network_alpha": 46,
57 |     "network_dim": 92,
58 |     "network_dropout": 0,
59 |     "no_token_padding": false,
60 |     "noise_offset": 0.0357,
61 |     "noise_offset_type": "Original",
62 |     "num_cpu_threads_per_process": 2,
63 |     "optimizer": "AdamW8bit",
64 |     "optimizer_args": "",
65 |     "persistent_data_loader_workers": false,
66 |     "prior_loss_weight": 1.0,
67 |     "random_crop": false,
68 |     "rank_dropout": 0,
69 |     "save_every_n_epochs": 2,
70 |     "save_every_n_steps": 0,
71 |     "save_last_n_steps": 0,
72 |     "save_last_n_steps_state": 0,
73 |     "save_precision": "bf16",
74 |     "scale_v_pred_loss_like_noise_pred": false,
75 |     "scale_weight_norms": 1,
76 |     "sdxl": true,
77 |     "sdxl_cache_text_encoder_outputs": false,
78 |     "sdxl_no_half_vae": true,
79 |     "seed": "12345",
80 |     "shuffle_caption": false,
81 |     "stop_text_encoder_training_pct": 0,
82 |     "text_encoder_lr": 0.0001,
83 |     "train_batch_size": 1,
84 |     "train_on_input": false,
85 |     "training_comment": "Good for faces. Use 20 1024x1024 cropped images, 20 repeats, Blip captions, but 'woman' replaced with 'khls woman', https://civitai.com/user/dogu_cat/models",
86 |     "unet_lr": 0.0001,
87 |     "unit": 1,
88 |     "up_lr_weight": "",
89 |     "use_cp": false,
90 |     "log_with": "",
91 |     "v2": false,
92 |     "v_parameterization": false,
93 |     "v_pred_like_loss": 0,
94 |     "vae_batch_size": 0,
95 |     "weighted_captions": false,
96 |     "xformers": "xformers"
97 | }


--------------------------------------------------------------------------------
/presets/lora/SDXL - LoRA finetuning phase 1_v1.1.json:
--------------------------------------------------------------------------------
 1 | {
 2 |     "LoRA_type": "Standard",
 3 |     "adaptive_noise_scale": 0,
 4 |     "additional_parameters": "",
 5 |     "block_alphas": "",
 6 |     "block_dims": "",
 7 |     "block_lr_zero_threshold": "",
 8 |     "bucket_no_upscale": false,
 9 |     "bucket_reso_steps": 32,
10 |     "cache_latents": true,
11 |     "cache_latents_to_disk": true,
12 |     "caption_dropout_every_n_epochs": 0.0,
13 |     "caption_dropout_rate": 0,
14 |     "caption_extension": ".txt2",
15 |     "clip_skip": "1",
16 |     "color_aug": false,
17 |     "conv_alpha": 64,
18 |     "conv_alphas": "",
19 |     "conv_dim": 64,
20 |     "conv_dims": "",
21 |     "decompose_both": false,
22 |     "dim_from_weights": false,
23 |     "down_lr_weight": "",
24 |     "enable_bucket": true,
25 |     "epoch": 8,
26 |     "factor": -1,
27 |     "flip_aug": false,
28 |     "full_bf16": false,
29 |     "full_fp16": false,
30 |     "gradient_accumulation_steps": 1,
31 |     "gradient_checkpointing": true,
32 |     "keep_tokens": "0",
33 |     "learning_rate": 0.0001,
34 |     "lora_network_weights": "",
35 |     "lr_scheduler": "constant",
36 |     "lr_scheduler_num_cycles": "",
37 |     "lr_scheduler_power": "",
38 |     "lr_warmup": 0,
39 |     "max_bucket_reso": 2048,
40 |     "max_data_loader_n_workers": "0",
41 |     "max_resolution": "1024,1024",
42 |     "max_timestep": 1000,
43 |     "max_token_length": "75",
44 |     "max_train_epochs": "",
45 |     "mem_eff_attn": false,
46 |     "mid_lr_weight": "",
47 |     "min_bucket_reso": 64,
48 |     "min_snr_gamma": 0,
49 |     "min_timestep": 0,
50 |     "mixed_precision": "bf16",
51 |     "module_dropout": 0,
52 |     "multires_noise_discount": 0,
53 |     "multires_noise_iterations": 0,
54 |     "network_alpha": 128,
55 |     "network_dim": 128,
56 |     "network_dropout": 0,
57 |     "no_token_padding": false,
58 |     "noise_offset": 0,
59 |     "noise_offset_type": "Original",
60 |     "num_cpu_threads_per_process": 2,
61 |     "optimizer": "AdamW",
62 |     "optimizer_args": "",
63 |     "persistent_data_loader_workers": false,
64 |     "prior_loss_weight": 1.0,
65 |     "random_crop": false,
66 |     "rank_dropout": 0,
67 |     "save_every_n_epochs": 1,
68 |     "save_every_n_steps": 0,
69 |     "save_last_n_steps": 0,
70 |     "save_last_n_steps_state": 1,
71 |     "save_precision": "bf16",
72 |     "scale_v_pred_loss_like_noise_pred": false,
73 |     "scale_weight_norms": 0,
74 |     "sdxl": true,
75 |     "sdxl_cache_text_encoder_outputs": false,
76 |     "sdxl_no_half_vae": true,
77 |     "seed": "12345",
78 |     "shuffle_caption": false,
79 |     "stop_text_encoder_training_pct": 0,
80 |     "text_encoder_lr": 0.0001,
81 |     "train_batch_size": 8,
82 |     "train_on_input": true,
83 |     "training_comment": "kill bill, the bride",
84 |     "unet_lr": 0.0001,
85 |     "unit": 1,
86 |     "up_lr_weight": "",
87 |     "use_cp": true,
88 |     "log_with": "",
89 |     "v2": false,
90 |     "v_parameterization": false,
91 |     "vae_batch_size": 0,
92 |     "weighted_captions": false,
93 |     "xformers": true
94 | }


--------------------------------------------------------------------------------
/presets/lora/SDXL - LoRA finetuning phase 2_v1.1.json:
--------------------------------------------------------------------------------
 1 | {
 2 |     "LoRA_type": "Standard",
 3 |     "adaptive_noise_scale": 0.00357,
 4 |     "additional_parameters": "--log_prefix=xl-loha",
 5 |     "block_alphas": "",
 6 |     "block_dims": "",
 7 |     "block_lr_zero_threshold": "",
 8 |     "bucket_no_upscale": false,
 9 |     "bucket_reso_steps": 32,
10 |     "cache_latents": true,
11 |     "cache_latents_to_disk": true,
12 |     "caption_dropout_every_n_epochs": 0.0,
13 |     "caption_dropout_rate": 0,
14 |     "caption_extension": ".txt2",
15 |     "clip_skip": "1",
16 |     "color_aug": false,
17 |     "conv_alpha": 4,
18 |     "conv_alphas": "",
19 |     "conv_dim": 4,
20 |     "conv_dims": "",
21 |     "decompose_both": false,
22 |     "dim_from_weights": true,
23 |     "down_lr_weight": "",
24 |     "enable_bucket": true,
25 |     "epoch": 1,
26 |     "factor": -1,
27 |     "flip_aug": false,
28 |     "full_bf16": false,
29 |     "full_fp16": false,
30 |     "gradient_accumulation_steps": 1.0,
31 |     "gradient_checkpointing": true,
32 |     "keep_tokens": 1,
33 |     "learning_rate": 0.0001,
34 |     "lora_network_weights": "D:/lycoris/sdxl\\sdxl-kill bill, the bride-lora-1.0av2.safetensors",
35 |     "lr_scheduler": "constant",
36 |     "lr_scheduler_num_cycles": "1",
37 |     "lr_scheduler_power": "",
38 |     "lr_warmup": 0,
39 |     "max_bucket_reso": 2048,
40 |     "max_data_loader_n_workers": "0",
41 |     "max_resolution": "1024,1024",
42 |     "max_timestep": 1000,
43 |     "max_token_length": "75",
44 |     "max_train_epochs": "1",
45 |     "mem_eff_attn": false,
46 |     "mid_lr_weight": "",
47 |     "min_bucket_reso": 64,
48 |     "min_snr_gamma": 0,
49 |     "min_timestep": 0,
50 |     "mixed_precision": "bf16",
51 |     "module_dropout": 0,
52 |     "multires_noise_discount": 0,
53 |     "multires_noise_iterations": 0,
54 |     "network_alpha": 128,
55 |     "network_dim": 128,
56 |     "network_dropout": 0,
57 |     "no_token_padding": false,
58 |     "noise_offset": 0.0357,
59 |     "noise_offset_type": "Original",
60 |     "num_cpu_threads_per_process": 2,
61 |     "optimizer": "AdamW",
62 |     "optimizer_args": "",
63 |     "persistent_data_loader_workers": false,
64 |     "prior_loss_weight": 1.0,
65 |     "random_crop": false,
66 |     "rank_dropout": 0,
67 |     "save_every_n_epochs": 1,
68 |     "save_every_n_steps": 0,
69 |     "save_last_n_steps": 0,
70 |     "save_last_n_steps_state": 0,
71 |     "save_precision": "bf16",
72 |     "scale_v_pred_loss_like_noise_pred": false,
73 |     "scale_weight_norms": 0,
74 |     "sdxl": true,
75 |     "sdxl_cache_text_encoder_outputs": false,
76 |     "sdxl_no_half_vae": true,
77 |     "seed": "17415",
78 |     "shuffle_caption": false,
79 |     "stop_text_encoder_training_pct": 0,
80 |     "text_encoder_lr": 0.0001,
81 |     "train_batch_size": 1,
82 |     "train_on_input": false,
83 |     "training_comment": "trigger: portrait",
84 |     "unet_lr": 0.0001,
85 |     "unit": 1,
86 |     "up_lr_weight": "",
87 |     "use_cp": false,
88 |     "log_with": "",
89 |     "v2": false,
90 |     "v_parameterization": false,
91 |     "vae_batch_size": 0,
92 |     "weighted_captions": false,
93 |     "xformers": true
94 | }


--------------------------------------------------------------------------------
/presets/lora/SDXL - LoRA kudou-reira dadaptadam v1.0.json:
--------------------------------------------------------------------------------
 1 | {
 2 |     "LoRA_type": "Standard",
 3 |     "adaptive_noise_scale": 0,
 4 |     "additional_parameters": "--network_train_unet_only",
 5 |     "block_alphas": "",
 6 |     "block_dims": "",
 7 |     "block_lr_zero_threshold": "",
 8 |     "bucket_no_upscale": true,
 9 |     "bucket_reso_steps": 64,
10 |     "cache_latents": true,
11 |     "cache_latents_to_disk": true,
12 |     "caption_dropout_every_n_epochs": 0.0,
13 |     "caption_dropout_rate": 0,
14 |     "caption_extension": ".txt",
15 |     "clip_skip": "1",
16 |     "color_aug": false,
17 |     "conv_alpha": 1,
18 |     "conv_alphas": "",
19 |     "conv_dim": 1,
20 |     "conv_dims": "",
21 |     "decompose_both": false,
22 |     "dim_from_weights": false,
23 |     "down_lr_weight": "",
24 |     "enable_bucket": true,
25 |     "epoch": 25,
26 |     "factor": -1,
27 |     "flip_aug": false,
28 |     "full_bf16": false,
29 |     "full_fp16": false,
30 |     "gradient_accumulation_steps": 1.0,
31 |     "gradient_checkpointing": true,
32 |     "keep_tokens": 2,
33 |     "learning_rate": 1.0,
34 |     "lora_network_weights": "",
35 |     "lr_scheduler": "cosine",
36 |     "lr_scheduler_num_cycles": "",
37 |     "lr_scheduler_power": "",
38 |     "lr_warmup": 5,
39 |     "max_bucket_reso": 2048,
40 |     "max_data_loader_n_workers": "0",
41 |     "max_resolution": "1024,1024",
42 |     "max_timestep": 1000,
43 |     "max_token_length": "225",
44 |     "max_train_epochs": "",
45 |     "mem_eff_attn": false,
46 |     "mid_lr_weight": "",
47 |     "min_bucket_reso": 256,
48 |     "min_snr_gamma": 5,
49 |     "min_timestep": 0,
50 |     "mixed_precision": "bf16",
51 |     "module_dropout": 0,
52 |     "multires_noise_discount": 0,
53 |     "multires_noise_iterations": 0,
54 |     "network_alpha": 256,
55 |     "network_dim": 256,
56 |     "network_dropout": 0,
57 |     "no_token_padding": false,
58 |     "noise_offset": 0.0357,
59 |     "noise_offset_type": "Original",
60 |     "num_cpu_threads_per_process": 2,
61 |     "optimizer": "DAdaptAdam",
62 |     "optimizer_args": "\"decouple=True\" \"weight_decay=0.2\" \"betas=0.9,0.99\" \"growth_rate=1.02\"",
63 |     "persistent_data_loader_workers": false,
64 |     "prior_loss_weight": 1.0,
65 |     "random_crop": false,
66 |     "rank_dropout": 0,
67 |     "save_every_n_epochs": 1,
68 |     "save_every_n_steps": 0,
69 |     "save_last_n_steps": 0,
70 |     "save_last_n_steps_state": 0,
71 |     "save_precision": "bf16",
72 |     "scale_v_pred_loss_like_noise_pred": false,
73 |     "scale_weight_norms": 0,
74 |     "sdxl": true,
75 |     "sdxl_cache_text_encoder_outputs": false,
76 |     "sdxl_no_half_vae": true,
77 |     "seed": "1337",
78 |     "shuffle_caption": true,
79 |     "stop_text_encoder_training": 0,
80 |     "text_encoder_lr": 1.0,
81 |     "train_batch_size": 6,
82 |     "train_on_input": true,
83 |     "training_comment": "",
84 |     "unet_lr": 1.0,
85 |     "unit": 1,
86 |     "up_lr_weight": "",
87 |     "use_cp": false,
88 |     "log_with": "wandb",
89 |     "v2": false,
90 |     "v_parameterization": false,
91 |     "vae_batch_size": 0,
92 |     "weighted_captions": false,
93 |     "xformers": true
94 | }


--------------------------------------------------------------------------------
/presets/lora/SDXL - LoRA kudou-reira dadaptadam v1.1.json:
--------------------------------------------------------------------------------
 1 | {
 2 |     "LoRA_type": "Standard",
 3 |     "adaptive_noise_scale": 0,
 4 |     "additional_parameters": "--network_train_unet_only",
 5 |     "block_alphas": "",
 6 |     "block_dims": "",
 7 |     "block_lr_zero_threshold": "",
 8 |     "bucket_no_upscale": true,
 9 |     "bucket_reso_steps": 64,
10 |     "cache_latents": true,
11 |     "cache_latents_to_disk": true,
12 |     "caption_dropout_every_n_epochs": 0.0,
13 |     "caption_dropout_rate": 0,
14 |     "caption_extension": ".txt",
15 |     "clip_skip": "1",
16 |     "color_aug": false,
17 |     "conv_alpha": 1,
18 |     "conv_alphas": "",
19 |     "conv_dim": 1,
20 |     "conv_dims": "",
21 |     "decompose_both": false,
22 |     "dim_from_weights": false,
23 |     "down_lr_weight": "",
24 |     "enable_bucket": true,
25 |     "epoch": 25,
26 |     "factor": -1,
27 |     "flip_aug": false,
28 |     "full_bf16": false,
29 |     "full_fp16": false,
30 |     "gradient_accumulation_steps": 1.0,
31 |     "gradient_checkpointing": true,
32 |     "keep_tokens": 2,
33 |     "learning_rate": 1.0,
34 |     "lora_network_weights": "",
35 |     "lr_scheduler": "cosine",
36 |     "lr_scheduler_num_cycles": "",
37 |     "lr_scheduler_power": "",
38 |     "lr_warmup": 5,
39 |     "max_bucket_reso": 2048,
40 |     "max_data_loader_n_workers": "0",
41 |     "max_resolution": "1024,1024",
42 |     "max_timestep": 1000,
43 |     "max_token_length": "225",
44 |     "max_train_epochs": "",
45 |     "mem_eff_attn": false,
46 |     "mid_lr_weight": "",
47 |     "min_bucket_reso": 256,
48 |     "min_snr_gamma": 5,
49 |     "min_timestep": 0,
50 |     "mixed_precision": "bf16",
51 |     "module_dropout": 0,
52 |     "multires_noise_discount": 0,
53 |     "multires_noise_iterations": 0,
54 |     "network_alpha": 64,
55 |     "network_dim": 64,
56 |     "network_dropout": 0,
57 |     "no_token_padding": false,
58 |     "noise_offset": 0.0357,
59 |     "noise_offset_type": "Original",
60 |     "num_cpu_threads_per_process": 2,
61 |     "optimizer": "DAdaptAdam",
62 |     "optimizer_args": "\"decouple=True\" \"weight_decay=0.1\" \"betas=0.9,0.91\"",
63 |     "persistent_data_loader_workers": false,
64 |     "prior_loss_weight": 1.0,
65 |     "random_crop": false,
66 |     "rank_dropout": 0,
67 |     "save_every_n_epochs": 1,
68 |     "save_every_n_steps": 0,
69 |     "save_last_n_steps": 0,
70 |     "save_last_n_steps_state": 0,
71 |     "save_precision": "bf16",
72 |     "scale_v_pred_loss_like_noise_pred": false,
73 |     "scale_weight_norms": 0,
74 |     "sdxl": true,
75 |     "sdxl_cache_text_encoder_outputs": false,
76 |     "sdxl_no_half_vae": true,
77 |     "seed": "1337",
78 |     "shuffle_caption": true,
79 |     "stop_text_encoder_training": 0,
80 |     "text_encoder_lr": 1.0,
81 |     "train_batch_size": 6,
82 |     "train_on_input": true,
83 |     "training_comment": "",
84 |     "unet_lr": 1.0,
85 |     "unit": 1,
86 |     "up_lr_weight": "",
87 |     "use_cp": false,
88 |     "log_with": "wandb",
89 |     "v2": false,
90 |     "v_parameterization": false,
91 |     "vae_batch_size": 0,
92 |     "weighted_captions": false,
93 |     "xformers": true
94 | }


--------------------------------------------------------------------------------
/presets/lora/SDXL - LoRA kudou-reira prodigy v4.0.json:
--------------------------------------------------------------------------------
 1 | {
 2 |     "LoRA_type": "Standard",
 3 |     "adaptive_noise_scale": 0,
 4 |     "additional_parameters": "--lr_scheduler_type \"CosineAnnealingLR\" --lr_scheduler_args \"T_max=30\" --network_train_unet_only",
 5 |     "block_alphas": "",
 6 |     "block_dims": "",
 7 |     "block_lr_zero_threshold": "",
 8 |     "bucket_no_upscale": true,
 9 |     "bucket_reso_steps": 64,
10 |     "cache_latents": true,
11 |     "cache_latents_to_disk": true,
12 |     "caption_dropout_every_n_epochs": 0.0,
13 |     "caption_dropout_rate": 0,
14 |     "caption_extension": ".txt",
15 |     "clip_skip": "1",
16 |     "color_aug": false,
17 |     "conv_alpha": 1,
18 |     "conv_block_alphas": "",
19 |     "conv_block_dims": "",
20 |     "conv_dim": 1,
21 |     "decompose_both": false,
22 |     "dim_from_weights": false,
23 |     "down_lr_weight": "",
24 |     "enable_bucket": true,
25 |     "epoch": 30,
26 |     "factor": -1,
27 |     "flip_aug": false,
28 |     "full_bf16": false,
29 |     "full_fp16": false,
30 |     "gradient_accumulation_steps": 1.0,
31 |     "gradient_checkpointing": true,
32 |     "keep_tokens": 2,
33 |     "learning_rate": 1.0,
34 |     "lora_network_weights": "",
35 |     "lr_scheduler": "cosine_with_restarts",
36 |     "lr_scheduler_num_cycles": "",
37 |     "lr_scheduler_power": "",
38 |     "lr_warmup": 0,
39 |     "max_bucket_reso": 2048,
40 |     "max_data_loader_n_workers": "0",
41 |     "max_resolution": "1024,1024",
42 |     "max_timestep": 1000,
43 |     "max_token_length": "225",
44 |     "max_train_epochs": "",
45 |     "mem_eff_attn": false,
46 |     "mid_lr_weight": "",
47 |     "min_bucket_reso": 256,
48 |     "min_snr_gamma": 5,
49 |     "min_timestep": 0,
50 |     "mixed_precision": "bf16",
51 |     "module_dropout": 0,
52 |     "multires_noise_discount": 0,
53 |     "multires_noise_iterations": 0,
54 |     "network_alpha": 256,
55 |     "network_dim": 256,
56 |     "network_dropout": 0,
57 |     "no_token_padding": false,
58 |     "noise_offset": 0.0357,
59 |     "noise_offset_type": "Original",
60 |     "num_cpu_threads_per_process": 2,
61 |     "optimizer": "Prodigy",
62 |     "optimizer_args": "decouple=True weight_decay=0.45 d_coef=2 use_bias_correction=True safeguard_warmup=True",
63 |     "persistent_data_loader_workers": false,
64 |     "prior_loss_weight": 1.0,
65 |     "random_crop": false,
66 |     "rank_dropout": 0,
67 |     "save_every_n_epochs": 1,
68 |     "save_every_n_steps": 0,
69 |     "save_last_n_steps": 0,
70 |     "save_last_n_steps_state": 0,
71 |     "save_precision": "bf16",
72 |     "scale_v_pred_loss_like_noise_pred": false,
73 |     "scale_weight_norms": 0,
74 |     "sdxl": true,
75 |     "sdxl_cache_text_encoder_outputs": false,
76 |     "sdxl_no_half_vae": true,
77 |     "seed": "1337",
78 |     "shuffle_caption": true,
79 |     "stop_text_encoder_training_pct": 0,
80 |     "text_encoder_lr": 1.0,
81 |     "train_batch_size": 6,
82 |     "train_on_input": true,
83 |     "training_comment": "",
84 |     "unet_lr": 1.0,
85 |     "unit": 1,
86 |     "up_lr_weight": "",
87 |     "use_cp": false,
88 |     "log_with": "wandb",
89 |     "v2": false,
90 |     "v_parameterization": false,
91 |     "vae_batch_size": 0,
92 |     "weighted_captions": false,
93 |     "xformers": true
94 | }


--------------------------------------------------------------------------------
/presets/lora/SDXL - edgLoRAXL AI_Now.json:
--------------------------------------------------------------------------------
 1 | {
 2 |     "LoRA_type": "Standard",
 3 |     "adaptive_noise_scale": 0,
 4 |     "additional_parameters": "--max_grad_norm=1",
 5 |     "block_alphas": "",
 6 |     "block_dims": "",
 7 |     "block_lr_zero_threshold": "",
 8 |     "bucket_no_upscale": true,
 9 |     "bucket_reso_steps": 32,
10 |     "cache_latents": true,
11 |     "cache_latents_to_disk": true,
12 |     "caption_dropout_every_n_epochs": 0.0,
13 |     "caption_dropout_rate": 0,
14 |     "caption_extension": ".txt",
15 |     "clip_skip": "1",
16 |     "color_aug": false,
17 |     "conv_alpha": 4,
18 |     "conv_block_alphas": "",
19 |     "conv_block_dims": "",
20 |     "conv_dim": 8,
21 |     "decompose_both": false,
22 |     "dim_from_weights": false,
23 |     "down_lr_weight": "",
24 |     "enable_bucket": true,
25 |     "epoch": 160,
26 |     "factor": -1,
27 |     "flip_aug": false,
28 |     "full_bf16": false,
29 |     "full_fp16": false,
30 |     "gradient_accumulation_steps": 1.0,
31 |     "gradient_checkpointing": true,
32 |     "keep_tokens": "0",
33 |     "learning_rate": 1.0,
34 |     "lora_network_weights": "",
35 |     "lr_scheduler": "cosine",
36 |     "lr_scheduler_num_cycles": "",
37 |     "lr_scheduler_power": "",
38 |     "lr_warmup": 0,
39 |     "max_bucket_reso": 2048,
40 |     "max_data_loader_n_workers": "0",
41 |     "max_resolution": "1024,1024",
42 |     "max_timestep": 1000,
43 |     "max_token_length": "75",
44 |     "max_train_epochs": "",
45 |     "max_train_steps": "320",
46 |     "mem_eff_attn": false,
47 |     "mid_lr_weight": "",
48 |     "min_bucket_reso": 256,
49 |     "min_snr_gamma": 0,
50 |     "min_timestep": 0,
51 |     "mixed_precision": "bf16",
52 |     "module_dropout": 0,
53 |     "multires_noise_discount": 0.2,
54 |     "multires_noise_iterations": 8,
55 |     "network_alpha": 32,
56 |     "network_dim": 32,
57 |     "network_dropout": 0,
58 |     "no_token_padding": false,
59 |     "noise_offset": 0.0357,
60 |     "noise_offset_type": "Original",
61 |     "num_cpu_threads_per_process": 2,
62 |     "optimizer": "Prodigy",
63 |     "optimizer_args": "decouple=True weight_decay=0.5 betas=0.9,0.99 use_bias_correction=False",
64 |     "persistent_data_loader_workers": false,
65 |     "prior_loss_weight": 1.0,
66 |     "random_crop": false,
67 |     "rank_dropout": 0,
68 |     "save_every_n_epochs": 10,
69 |     "save_every_n_steps": 0,
70 |     "save_last_n_steps": 0,
71 |     "save_last_n_steps_state": 0,
72 |     "save_precision": "bf16",
73 |     "scale_v_pred_loss_like_noise_pred": false,
74 |     "scale_weight_norms": 1,
75 |     "sdxl": true,
76 |     "sdxl_cache_text_encoder_outputs": false,
77 |     "sdxl_no_half_vae": true,
78 |     "seed": "12345",
79 |     "shuffle_caption": false,
80 |     "stop_text_encoder_training": 0,
81 |     "text_encoder_lr": 1.0,
82 |     "train_batch_size": 4,
83 |     "train_on_input": false,
84 |     "training_comment": "",
85 |     "unet_lr": 1.0,
86 |     "unit": 1,
87 |     "up_lr_weight": "",
88 |     "use_cp": true,
89 |     "log_with": "",
90 |     "v2": false,
91 |     "v_parameterization": false,
92 |     "vae_batch_size": 0,
93 |     "weighted_captions": false,
94 |     "xformers": true
95 | }


--------------------------------------------------------------------------------
/presets/lora/SDXL - edgLoRAXL.json:
--------------------------------------------------------------------------------
 1 | {
 2 |     "LoRA_type": "Standard",
 3 |     "adaptive_noise_scale": 0,
 4 |     "additional_parameters": "--max_grad_norm=0",
 5 |     "block_alphas": "",
 6 |     "block_dims": "",
 7 |     "block_lr_zero_threshold": "",
 8 |     "bucket_no_upscale": true,
 9 |     "bucket_reso_steps": 64,
10 |     "cache_latents": true,
11 |     "cache_latents_to_disk": true,
12 |     "caption_dropout_every_n_epochs": 0.0,
13 |     "caption_dropout_rate": 0,
14 |     "caption_extension": ".txt",
15 |     "clip_skip": "1",
16 |     "color_aug": false,
17 |     "conv_alpha": 4,
18 |     "conv_block_alphas": "",
19 |     "conv_block_dims": "",
20 |     "conv_dim": 8,
21 |     "decompose_both": false,
22 |     "dim_from_weights": false,
23 |     "down_lr_weight": "",
24 |     "enable_bucket": true,
25 |     "epoch": 1,
26 |     "factor": -1,
27 |     "flip_aug": false,
28 |     "full_bf16": false,
29 |     "full_fp16": false,
30 |     "gradient_accumulation_steps": 1.0,
31 |     "gradient_checkpointing": true,
32 |     "keep_tokens": "0",
33 |     "learning_rate": 1.0,
34 |     "lora_network_weights": "",
35 |     "lr_scheduler": "cosine",
36 |     "lr_scheduler_num_cycles": "",
37 |     "lr_scheduler_power": "",
38 |     "lr_warmup": 0,
39 |     "max_bucket_reso": 2048,
40 |     "max_data_loader_n_workers": "0",
41 |     "max_resolution": "1024,1024",
42 |     "max_timestep": 1000,
43 |     "max_token_length": "75",
44 |     "max_train_epochs": "",
45 |     "mem_eff_attn": false,
46 |     "mid_lr_weight": "",
47 |     "min_bucket_reso": 256,
48 |     "min_snr_gamma": 0,
49 |     "min_timestep": 0,
50 |     "mixed_precision": "fp16",
51 |     "module_dropout": 0,
52 |     "multires_noise_discount": 0.2,
53 |     "multires_noise_iterations": 8,
54 |     "network_alpha": 32,
55 |     "network_dim": 32,
56 |     "network_dropout": 0,
57 |     "no_token_padding": false,
58 |     "noise_offset": 0.0357,
59 |     "noise_offset_type": "Original",
60 |     "num_cpu_threads_per_process": 2,
61 |     "optimizer": "Prodigy",
62 |     "optimizer_args": "decouple=True weight_decay=0.5 betas=0.9,0.99 use_bias_correction=False",
63 |     "persistent_data_loader_workers": false,
64 |     "prior_loss_weight": 1.0,
65 |     "random_crop": false,
66 |     "rank_dropout": 0,
67 |     "save_every_n_epochs": 1,
68 |     "save_every_n_steps": 0,
69 |     "save_last_n_steps": 0,
70 |     "save_last_n_steps_state": 0,
71 |     "save_precision": "fp16",
72 |     "scale_v_pred_loss_like_noise_pred": false,
73 |     "scale_weight_norms": 1,
74 |     "sdxl": true,
75 |     "sdxl_cache_text_encoder_outputs": false,
76 |     "sdxl_no_half_vae": true,
77 |     "seed": "12345",
78 |     "shuffle_caption": false,
79 |     "stop_text_encoder_training": 0,
80 |     "text_encoder_lr": 1.0,
81 |     "train_batch_size": 4,
82 |     "train_on_input": false,
83 |     "training_comment": "",
84 |     "unet_lr": 1.0,
85 |     "unit": 1,
86 |     "up_lr_weight": "",
87 |     "use_cp": true,
88 |     "log_with": "",
89 |     "v2": false,
90 |     "v_parameterization": false,
91 |     "vae_batch_size": 0,
92 |     "weighted_captions": false,
93 |     "xformers": true
94 | }


--------------------------------------------------------------------------------
/presets/lora/iA3-Prodigy-sd15.json:
--------------------------------------------------------------------------------
 1 | {
 2 |     "LoRA_type": "LyCORIS/iA3",
 3 |     "adaptive_noise_scale": 0.005,
 4 |     "caption_dropout_rate": 0.5,
 5 |     "epoch": 300,
 6 |     "gradient_accumulation_steps": 1,
 7 |     "gradient_checkpointing": true,
 8 |     "keep_tokens": 1,
 9 |     "learning_rate": 1.0,
10 |     "lr_scheduler": "constant",
11 |     "lr_warmup": 0,
12 |     "min_snr_gamma": 5,
13 |     "network_alpha": 1024,
14 |     "network_dim": 1024,
15 |     "network_dropout": 0.3,
16 |     "noise_offset": 0.05,
17 |     "noise_offset_type": "Original",
18 |     "optimizer": "Prodigy",
19 |     "optimizer_args": "d_coef=1.0 weight_decay=0.01 safeguard_warmup=False use_bias_correction=False",
20 |     "save_every_n_epochs": 10,
21 |     "save_every_n_steps": 0,
22 |     "save_last_n_steps": 0,
23 |     "scale_weight_norms": 1,
24 |     "seed": "31337",
25 |     "shuffle_caption": true,
26 |     "text_encoder_lr": 1.0,
27 |     "train_batch_size": 1,
28 |     "training_comment": "rentry.co/ProdiAgy",
29 |     "unet_lr": 1.0
30 | }


--------------------------------------------------------------------------------
/presets/lora/ia3-sd15.json:
--------------------------------------------------------------------------------
 1 | {
 2 |     "LoRA_type": "LyCORIS/iA3",
 3 |     "adaptive_noise_scale": 0,
 4 |     "additional_parameters": "",
 5 |     "block_alphas": "",
 6 |     "block_dims": "",
 7 |     "block_lr_zero_threshold": "",
 8 |     "bucket_no_upscale": true,
 9 |     "bucket_reso_steps": 1,
10 |     "cache_latents": true,
11 |     "cache_latents_to_disk": true,
12 |     "caption_dropout_every_n_epochs": 0.0,
13 |     "caption_dropout_rate": 0,
14 |     "caption_extension": ".none-use-foldername",
15 |     "clip_skip": "1",
16 |     "color_aug": false,
17 |     "conv_alpha": 64,
18 |     "conv_alphas": "",
19 |     "conv_dim": 64,
20 |     "conv_dims": "",
21 |     "decompose_both": false,
22 |     "dim_from_weights": false,
23 |     "down_lr_weight": "",
24 |     "enable_bucket": true,
25 |     "epoch": 4,
26 |     "factor": -1,
27 |     "flip_aug": false,
28 |     "full_fp16": false,
29 |     "gradient_accumulation_steps": 1,
30 |     "gradient_checkpointing": false,
31 |     "keep_tokens": "0",
32 |     "learning_rate": 1.0,
33 |     "lora_network_weights": "",
34 |     "lr_scheduler": "cosine",
35 |     "lr_scheduler_num_cycles": "",
36 |     "lr_scheduler_power": "",
37 |     "lr_warmup": 0,
38 |     "max_data_loader_n_workers": "0",
39 |     "max_resolution": "512,512",
40 |     "max_token_length": "75",
41 |     "max_train_epochs": "",
42 |     "mem_eff_attn": false,
43 |     "mid_lr_weight": "",
44 |     "min_snr_gamma": 10,
45 |     "mixed_precision": "bf16",
46 |     "module_dropout": 0,
47 |     "multires_noise_discount": 0.2,
48 |     "multires_noise_iterations": 8,
49 |     "network_alpha": 64,
50 |     "network_dim": 64,
51 |     "network_dropout": 0,
52 |     "no_token_padding": false,
53 |     "noise_offset": 0,
54 |     "noise_offset_type": "Multires",
55 |     "num_cpu_threads_per_process": 2,
56 |     "optimizer": "Prodigy",
57 |     "optimizer_args": "",
58 |     "persistent_data_loader_workers": false,
59 |     "prior_loss_weight": 1.0,
60 |     "random_crop": false,
61 |     "rank_dropout": 0,
62 |     "save_every_n_epochs": 1,
63 |     "save_every_n_steps": 0,
64 |     "save_last_n_steps": 0,
65 |     "save_last_n_steps_state": 0,
66 |     "save_precision": "fp16",
67 |     "scale_v_pred_loss_like_noise_pred": false,
68 |     "scale_weight_norms": 0,
69 |     "seed": "",
70 |     "shuffle_caption": false,
71 |     "stop_text_encoder_training": 0,
72 |     "text_encoder_lr": 1.0,
73 |     "train_batch_size": 1,
74 |     "train_on_input": true,
75 |     "training_comment": "",
76 |     "unet_lr": 1.0,
77 |     "unit": 1,
78 |     "up_lr_weight": "",
79 |     "use_cp": false,
80 |     "log_with": "",
81 |     "v2": false,
82 |     "v_parameterization": false,
83 |     "vae_batch_size": 0,
84 |     "weighted_captions": false,
85 |     "xformers": true
86 | }


--------------------------------------------------------------------------------
/presets/lora/locon-dadaptation-sdxl.json:
--------------------------------------------------------------------------------
 1 | {
 2 |     "LoRA_type": "Standard",
 3 |     "adaptive_noise_scale": 0,
 4 |     "additional_parameters": "",
 5 |     "block_alphas": "",
 6 |     "block_dims": "",
 7 |     "block_lr_zero_threshold": "",
 8 |     "bucket_no_upscale": true,
 9 |     "bucket_reso_steps": 1,
10 |     "cache_latents": true,
11 |     "cache_latents_to_disk": true,
12 |     "caption_dropout_every_n_epochs": 0.0,
13 |     "caption_dropout_rate": 0,
14 |     "caption_extension": ".none-use-foldername",
15 |     "clip_skip": "1",
16 |     "color_aug": false,
17 |     "conv_alpha": 64,
18 |     "conv_alphas": "",
19 |     "conv_dim": 64,
20 |     "conv_dims": "",
21 |     "decompose_both": false,
22 |     "dim_from_weights": false,
23 |     "down_lr_weight": "",
24 |     "enable_bucket": true,
25 |     "epoch": 4,
26 |     "factor": -1,
27 |     "flip_aug": false,
28 |     "full_fp16": false,
29 |     "gradient_accumulation_steps": 1,
30 |     "gradient_checkpointing": false,
31 |     "keep_tokens": "0",
32 |     "learning_rate": 4e-07,
33 |     "lora_network_weights": "",
34 |     "lr_scheduler": "constant_with_warmup",
35 |     "lr_scheduler_num_cycles": "",
36 |     "lr_scheduler_power": "",
37 |     "lr_warmup": 8,
38 |     "max_data_loader_n_workers": "0",
39 |     "max_resolution": "512,512",
40 |     "max_token_length": "75",
41 |     "max_train_epochs": "",
42 |     "mem_eff_attn": false,
43 |     "mid_lr_weight": "",
44 |     "min_snr_gamma": 10,
45 |     "mixed_precision": "bf16",
46 |     "module_dropout": 0,
47 |     "multires_noise_discount": 0.2,
48 |     "multires_noise_iterations": 8,
49 |     "network_alpha": 64,
50 |     "network_dim": 64,
51 |     "network_dropout": 0,
52 |     "no_token_padding": false,
53 |     "noise_offset": 0.0357,
54 |     "noise_offset_type": "Original",
55 |     "num_cpu_threads_per_process": 2,
56 |     "optimizer": "Adafactor",
57 |     "optimizer_args": "scale_parameter=False relative_step=False warmup_init=False",
58 |     "persistent_data_loader_workers": false,
59 |     "prior_loss_weight": 1.0,
60 |     "random_crop": false,
61 |     "rank_dropout": 0,
62 |     "save_every_n_epochs": 1,
63 |     "save_every_n_steps": 0,
64 |     "save_last_n_steps": 0,
65 |     "save_last_n_steps_state": 0,
66 |     "save_precision": "fp16",
67 |     "scale_v_pred_loss_like_noise_pred": false,
68 |     "scale_weight_norms": 0,
69 |     "sdxl": true,
70 |     "seed": "",
71 |     "shuffle_caption": false,
72 |     "stop_text_encoder_training": 0,
73 |     "text_encoder_lr": 0.0,
74 |     "train_batch_size": 1,
75 |     "train_on_input": true,
76 |     "training_comment": "",
77 |     "unet_lr": 4e-07,
78 |     "unit": 1,
79 |     "up_lr_weight": "",
80 |     "use_cp": false,
81 |     "log_with": "",
82 |     "v2": false,
83 |     "v_parameterization": false,
84 |     "vae_batch_size": 0,
85 |     "weighted_captions": false,
86 |     "xformers": true
87 | }


--------------------------------------------------------------------------------
/presets/lora/loha-sd15.json:
--------------------------------------------------------------------------------
 1 | {
 2 |     "LoRA_type": "LyCORIS/LoHa",
 3 |     "adaptive_noise_scale": 0,
 4 |     "additional_parameters": "",
 5 |     "block_alphas": "",
 6 |     "block_dims": "",
 7 |     "block_lr_zero_threshold": "",
 8 |     "bucket_no_upscale": true,
 9 |     "bucket_reso_steps": 1,
10 |     "cache_latents": true,
11 |     "cache_latents_to_disk": true,
12 |     "caption_dropout_every_n_epochs": 0.0,
13 |     "caption_dropout_rate": 0,
14 |     "caption_extension": ".none-use-foldername",
15 |     "clip_skip": "1",
16 |     "color_aug": false,
17 |     "conv_alpha": 4,
18 |     "conv_alphas": "",
19 |     "conv_dim": 8,
20 |     "conv_dims": "",
21 |     "decompose_both": false,
22 |     "dim_from_weights": false,
23 |     "down_lr_weight": "",
24 |     "enable_bucket": true,
25 |     "epoch": 2,
26 |     "factor": -1,
27 |     "flip_aug": false,
28 |     "full_fp16": false,
29 |     "gradient_accumulation_steps": 4,
30 |     "gradient_checkpointing": false,
31 |     "keep_tokens": "0",
32 |     "learning_rate": 0.0001,
33 |     "lora_network_weights": "",
34 |     "lr_scheduler": "cosine",
35 |     "lr_scheduler_num_cycles": "",
36 |     "lr_scheduler_power": "",
37 |     "lr_warmup": 0,
38 |     "max_data_loader_n_workers": "0",
39 |     "max_resolution": "512,512",
40 |     "max_token_length": "75",
41 |     "max_train_epochs": "",
42 |     "mem_eff_attn": false,
43 |     "mid_lr_weight": "",
44 |     "min_snr_gamma": 10,
45 |     "mixed_precision": "bf16",
46 |     "module_dropout": 0,
47 |     "multires_noise_discount": 0.2,
48 |     "multires_noise_iterations": 8,
49 |     "network_alpha": 16,
50 |     "network_dim": 32,
51 |     "network_dropout": 0,
52 |     "no_token_padding": false,
53 |     "noise_offset": 0,
54 |     "noise_offset_type": "Multires",
55 |     "num_cpu_threads_per_process": 2,
56 |     "optimizer": "AdamW",
57 |     "optimizer_args": "",
58 |     "persistent_data_loader_workers": false,
59 |     "prior_loss_weight": 1.0,
60 |     "random_crop": false,
61 |     "rank_dropout": 0,
62 |     "save_every_n_epochs": 1,
63 |     "save_every_n_steps": 0,
64 |     "save_last_n_steps": 0,
65 |     "save_last_n_steps_state": 0,
66 |     "save_precision": "fp16",
67 |     "scale_v_pred_loss_like_noise_pred": false,
68 |     "scale_weight_norms": 1,
69 |     "seed": "",
70 |     "shuffle_caption": false,
71 |     "stop_text_encoder_training": 0,
72 |     "text_encoder_lr": 0.0001,
73 |     "train_batch_size": 1,
74 |     "train_on_input": true,
75 |     "training_comment": "",
76 |     "unet_lr": 0.0001,
77 |     "unit": 1,
78 |     "up_lr_weight": "",
79 |     "use_cp": true,
80 |     "log_with": "",
81 |     "v2": false,
82 |     "v_parameterization": false,
83 |     "vae_batch_size": 0,
84 |     "weighted_captions": false,
85 |     "xformers": true
86 | }


--------------------------------------------------------------------------------
/presets/lora/lokr-sd15.json:
--------------------------------------------------------------------------------
 1 | {
 2 |     "LoRA_type": "LyCORIS/LoKr",
 3 |     "adaptive_noise_scale": 0,
 4 |     "additional_parameters": "",
 5 |     "block_alphas": "",
 6 |     "block_dims": "",
 7 |     "block_lr_zero_threshold": "",
 8 |     "bucket_no_upscale": true,
 9 |     "bucket_reso_steps": 1,
10 |     "cache_latents": true,
11 |     "cache_latents_to_disk": true,
12 |     "caption_dropout_every_n_epochs": 0.0,
13 |     "caption_dropout_rate": 0,
14 |     "caption_extension": ".none-use-foldername",
15 |     "clip_skip": "1",
16 |     "color_aug": false,
17 |     "conv_alpha": 64,
18 |     "conv_alphas": "",
19 |     "conv_dim": 64,
20 |     "conv_dims": "",
21 |     "decompose_both": false,
22 |     "dim_from_weights": false,
23 |     "down_lr_weight": "",
24 |     "enable_bucket": true,
25 |     "factor": -1,
26 |     "flip_aug": false,
27 |     "full_fp16": false,
28 |     "gradient_accumulation_steps": 4,
29 |     "gradient_checkpointing": false,
30 |     "keep_tokens": "0",
31 |     "learning_rate": 1.0,
32 |     "lora_network_weights": "",
33 |     "lr_scheduler": "cosine",
34 |     "lr_scheduler_num_cycles": "",
35 |     "lr_scheduler_power": "",
36 |     "lr_warmup": 0,
37 |     "max_data_loader_n_workers": "0",
38 |     "max_resolution": "512,512",
39 |     "max_token_length": "75",
40 |     "max_train_epochs": "",
41 |     "mem_eff_attn": false,
42 |     "mid_lr_weight": "",
43 |     "min_snr_gamma": 10,
44 |     "mixed_precision": "bf16",
45 |     "module_dropout": 0,
46 |     "multires_noise_discount": 0.2,
47 |     "multires_noise_iterations": 8,
48 |     "network_alpha": 64,
49 |     "network_dim": 64,
50 |     "network_dropout": 0,
51 |     "no_token_padding": false,
52 |     "noise_offset": 0,
53 |     "noise_offset_type": "Multires",
54 |     "num_cpu_threads_per_process": 2,
55 |     "optimizer": "Prodigy",
56 |     "optimizer_args": "",
57 |     "persistent_data_loader_workers": false,
58 |     "prior_loss_weight": 1.0,
59 |     "random_crop": false,
60 |     "rank_dropout": 0,
61 |     "save_every_n_steps": 0,
62 |     "save_last_n_steps": 0,
63 |     "save_last_n_steps_state": 0,
64 |     "save_precision": "fp16",
65 |     "scale_v_pred_loss_like_noise_pred": false,
66 |     "scale_weight_norms": 0,
67 |     "seed": "",
68 |     "shuffle_caption": false,
69 |     "stop_text_encoder_training": 0,
70 |     "text_encoder_lr": 1.0,
71 |     "train_batch_size": 1,
72 |     "train_on_input": false,
73 |     "training_comment": "",
74 |     "unet_lr": 1.0,
75 |     "unit": 1,
76 |     "up_lr_weight": "",
77 |     "use_cp": false,
78 |     "log_with": "",
79 |     "v2": false,
80 |     "v_parameterization": false,
81 |     "vae_batch_size": 0,
82 |     "weighted_captions": false,
83 |     "xformers": true
84 | }


--------------------------------------------------------------------------------
/presets/lora/prepare_presets.md:
--------------------------------------------------------------------------------
1 | # Preparing presets for users
2 | 
Run the following command to prepare new presets for release to users:
4 | 
5 | ```
6 | python.exe .\tools\prepare_presets.py .\presets\lora\*.json
7 | ```


--------------------------------------------------------------------------------
/presets/lora/sd15 - EDG_LoConOptiSettings.json:
--------------------------------------------------------------------------------
 1 | {
 2 |     "LoRA_type": "LyCORIS/LoCon",
 3 |     "additional_parameters": "",
 4 |     "block_alphas": "",
 5 |     "block_dims": "",
 6 |     "block_lr_zero_threshold": "",
 7 |     "bucket_no_upscale": true,
 8 |     "bucket_reso_steps": 64.0,
 9 |     "cache_latents": true,
10 |     "caption_dropout_every_n_epochs": 0.0,
11 |     "caption_dropout_rate": 0,
12 |     "caption_extension": ".txt",
13 |     "clip_skip": 2,
14 |     "color_aug": false,
15 |     "conv_alpha": 1,
16 |     "conv_alphas": "",
17 |     "conv_dim": 32,
18 |     "conv_dims": "",
19 |     "down_lr_weight": "",
20 |     "enable_bucket": false,
21 |     "epoch": 1,
22 |     "flip_aug": false,
23 |     "full_fp16": false,
24 |     "gradient_accumulation_steps": 1.0,
25 |     "gradient_checkpointing": false,
26 |     "keep_tokens": "0",
27 |     "learning_rate": "0.0001",
28 |     "lora_network_weights": "",
29 |     "lr_scheduler": "constant",
30 |     "lr_scheduler_num_cycles": "",
31 |     "lr_scheduler_power": "",
32 |     "lr_warmup": "0",
33 |     "max_data_loader_n_workers": "1",
34 |     "max_resolution": "512,650",
35 |     "max_token_length": "75",
36 |     "max_train_epochs": "",
37 |     "mem_eff_attn": true,
38 |     "mid_lr_weight": "",
39 |     "min_snr_gamma": 0,
40 |     "mixed_precision": "bf16",
41 |     "network_alpha": 64,
42 |     "network_dim": 64,
43 |     "no_token_padding": false,
44 |     "noise_offset": "0.05",
45 |     "num_cpu_threads_per_process": 2,
46 |     "optimizer": "AdamW8bit",
47 |     "optimizer_args": "",
48 |     "persistent_data_loader_workers": false,
49 |     "prior_loss_weight": 1.0,
50 |     "random_crop": false,
51 |     "save_every_n_epochs": 1,
52 |     "save_precision": "bf16",
53 |     "seed": "1234",
54 |     "shuffle_caption": false,
55 |     "stop_text_encoder_training": 0,
56 |     "text_encoder_lr": "5e-05",
57 |     "train_batch_size": 3,
58 |     "training_comment": "",
59 |     "unet_lr": "0.0001",
60 |     "up_lr_weight": "",
61 |     "v2": false,
62 |     "v_parameterization": false,
63 |     "vae_batch_size": 0,
64 |     "xformers": true
65 | }


--------------------------------------------------------------------------------
/presets/lora/sd15 - EDG_LoHaOptiSettings.json:
--------------------------------------------------------------------------------
 1 | {
 2 |     "LoRA_type": "LyCORIS/LoHa",
 3 |     "additional_parameters": "",
 4 |     "block_alphas": "",
 5 |     "block_dims": "",
 6 |     "block_lr_zero_threshold": "",
 7 |     "bucket_no_upscale": true,
 8 |     "bucket_reso_steps": 64.0,
 9 |     "cache_latents": true,
10 |     "caption_dropout_every_n_epochs": 0.0,
11 |     "caption_dropout_rate": 0,
12 |     "caption_extension": ".txt",
13 |     "clip_skip": 2,
14 |     "color_aug": false,
15 |     "conv_alpha": 1,
16 |     "conv_alphas": "",
17 |     "conv_dim": 32,
18 |     "conv_dims": "",
19 |     "down_lr_weight": "",
20 |     "enable_bucket": false,
21 |     "epoch": 1,
22 |     "flip_aug": false,
23 |     "full_fp16": false,
24 |     "gradient_accumulation_steps": 1.0,
25 |     "gradient_checkpointing": false,
26 |     "keep_tokens": "0",
27 |     "learning_rate": "0.0001",
28 |     "lora_network_weights": "",
29 |     "lr_scheduler": "constant",
30 |     "lr_scheduler_num_cycles": "1",
31 |     "lr_scheduler_power": "",
32 |     "lr_warmup": "0",
33 |     "max_data_loader_n_workers": "1",
34 |     "max_resolution": "512,650",
35 |     "max_token_length": "75",
36 |     "max_train_epochs": "",
37 |     "mem_eff_attn": true,
38 |     "mid_lr_weight": "",
39 |     "min_snr_gamma": 0,
40 |     "mixed_precision": "bf16",
41 |     "network_alpha": 32,
42 |     "network_dim": 32,
43 |     "no_token_padding": false,
44 |     "noise_offset": "",
45 |     "num_cpu_threads_per_process": 2,
46 |     "optimizer": "AdamW8bit",
47 |     "optimizer_args": "",
48 |     "persistent_data_loader_workers": false,
49 |     "prior_loss_weight": 1.0,
50 |     "random_crop": false,
51 |     "save_every_n_epochs": 1,
52 |     "save_precision": "bf16",
53 |     "seed": "1234",
54 |     "shuffle_caption": false,
55 |     "stop_text_encoder_training": 0,
56 |     "text_encoder_lr": "5e-5",
57 |     "train_batch_size": 3,
58 |     "training_comment": "",
59 |     "unet_lr": "0.0001",
60 |     "up_lr_weight": "",
61 |     "v2": false,
62 |     "v_parameterization": false,
63 |     "vae_batch_size": 0,
64 |     "xformers": true
65 | }


--------------------------------------------------------------------------------
/presets/lora/sd15 - EDG_LoraOptiSettings.json:
--------------------------------------------------------------------------------
 1 | {
 2 |     "LoRA_type": "Standard",
 3 |     "additional_parameters": "",
 4 |     "block_alphas": "",
 5 |     "block_dims": "",
 6 |     "block_lr_zero_threshold": "",
 7 |     "bucket_no_upscale": true,
 8 |     "bucket_reso_steps": 64.0,
 9 |     "cache_latents": true,
10 |     "caption_dropout_every_n_epochs": 0.0,
11 |     "caption_dropout_rate": 0,
12 |     "caption_extension": ".txt",
13 |     "clip_skip": 2,
14 |     "color_aug": false,
15 |     "conv_alpha": 1,
16 |     "conv_alphas": "",
17 |     "conv_dim": 1,
18 |     "conv_dims": "",
19 |     "down_lr_weight": "",
20 |     "enable_bucket": false,
21 |     "epoch": 1,
22 |     "flip_aug": false,
23 |     "full_fp16": false,
24 |     "gradient_accumulation_steps": 1.0,
25 |     "gradient_checkpointing": false,
26 |     "keep_tokens": "0",
27 |     "learning_rate": "0.0001",
28 |     "lora_network_weights": "",
29 |     "lr_scheduler": "constant",
30 |     "lr_scheduler_num_cycles": "",
31 |     "lr_scheduler_power": "",
32 |     "lr_warmup": "0",
33 |     "max_data_loader_n_workers": "1",
34 |     "max_resolution": "512,650",
35 |     "max_token_length": "75",
36 |     "max_train_epochs": "",
37 |     "mem_eff_attn": true,
38 |     "mid_lr_weight": "",
39 |     "min_snr_gamma": 0,
40 |     "mixed_precision": "bf16",
41 |     "network_alpha": 64,
42 |     "network_dim": 64,
43 |     "no_token_padding": false,
44 |     "noise_offset": "0.05",
45 |     "num_cpu_threads_per_process": 2,
46 |     "optimizer": "AdamW8bit",
47 |     "optimizer_args": "",
48 |     "persistent_data_loader_workers": false,
49 |     "prior_loss_weight": 1.0,
50 |     "random_crop": false,
51 |     "save_every_n_epochs": 1,
52 |     "save_precision": "bf16",
53 |     "seed": "1234",
54 |     "shuffle_caption": false,
55 |     "stop_text_encoder_training": 0,
56 |     "text_encoder_lr": "5e-05",
57 |     "train_batch_size": 3,
58 |     "training_comment": "",
59 |     "unet_lr": "0.0001",
60 |     "up_lr_weight": "",
61 |     "v2": false,
62 |     "v_parameterization": false,
63 |     "vae_batch_size": 0,
64 |     "xformers": true
65 | }


--------------------------------------------------------------------------------
/presets/lora/user_presets/.put your own presets here:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/bmaltais/kohya_ss/4161d1d80ad554f7801c584632665d6825994062/presets/lora/user_presets/.put your own presets here


--------------------------------------------------------------------------------
/pyproject.toml:
--------------------------------------------------------------------------------
 1 | [project]
 2 | name = "kohya-ss"
 3 | version = "25.2.1"
 4 | description = "Kohya_ss GUI"
 5 | readme = "README.md"
 6 | requires-python = ">=3.10,<3.12"
 7 | dependencies = [
 8 |     "accelerate>=1.7.0",
 9 |     "aiofiles==23.2.1",
10 |     "altair==4.2.2",
11 |     "bitsandbytes>=0.45.0",
12 |     "dadaptation==3.2",
13 |     "diffusers[torch]==0.32.2",
14 |     "easygui==0.98.3",
15 |     "einops==0.7.0",
16 |     "fairscale==0.4.13",
17 |     "ftfy==6.1.1",
18 |     "gradio>=5.34.1",
19 |     "huggingface-hub==0.29.3",
20 |     "imagesize==1.4.1",
21 |     "invisible-watermark==0.2.0",
22 |     "library",
23 |     "lion-pytorch==0.0.6",
24 |     "lycoris-lora==3.2.0.post2",
25 |     "omegaconf==2.3.0",
26 |     "onnx==1.16.1",
27 |     "onnxruntime-gpu==1.19.2",
28 |     "open-clip-torch==2.20.0",
29 |     "opencv-python==4.10.0.84",
30 |     "pip",
31 |     "prodigy-plus-schedule-free==1.8.0",
32 |     "prodigyopt==1.1.2",
33 |     "protobuf==3.20.3",
34 |     "pytorch-lightning==1.9.0",
35 |     "pytorch-optimizer==3.5.0",
36 |     "rich>=13.7.1",
37 |     "safetensors==0.4.4",
38 |     "schedulefree==1.4",
39 |     "scipy==1.11.4",
40 |     "sentencepiece==0.2.0",
41 |     "tensorboard>=2.18.0",
42 |     "tensorflow>=2.16.1",
43 |     "tensorflow-io-gcs-filesystem==0.31.0; sys_platform == 'win32'",
44 |     "tensorflow-io-gcs-filesystem>=0.37.1; sys_platform == 'linux'",
45 |     "timm>=0.9.2",
46 |     "tk==0.1.0",
47 |     "toml==0.10.2",
48 |     "torch>=2.5.0",
49 |     "torchvision>=0.20.0",
50 |     "transformers==4.44.2",
51 |     "triton>=3.1.0; sys_platform == 'linux'",
52 |     "voluptuous==0.13.1",
53 |     "wandb==0.18.0",
54 |     "xformers>=0.0.30",
55 | ]
56 | 
57 | [tool.uv.sources]
58 | torch = [
59 |   { index = "pytorch-cu128", marker = "sys_platform == 'linux'" },
60 |   { index = "pytorch-cu128", marker = "sys_platform == 'win32'" }
61 | ]
62 | torchvision = [
63 |   { index = "pytorch-cu128", marker = "sys_platform == 'linux'" },
64 |   { index = "pytorch-cu128", marker = "sys_platform == 'win32'" }
65 | ]
66 | xformers = [
67 |   { index = "pytorch-cu128", marker = "sys_platform == 'linux'" },
68 |   { index = "pytorch-cu128", marker = "sys_platform == 'win32'" }
69 | ]
70 | library = { path = "sd-scripts" }
71 | 
72 | [[tool.uv.index]]
73 | name = "pytorch-cu124"
74 | url = "https://download.pytorch.org/whl/cu124"
75 | explicit = true
76 | 
77 | [[tool.uv.index]]
78 | name = "pytorch-cu126"
79 | url = "https://download.pytorch.org/whl/cu126"
80 | explicit = true
81 | 
82 | [[tool.uv.index]]
83 | name = "pytorch-cu128"
84 | url = "https://download.pytorch.org/whl/cu128"
85 | explicit = true
86 | 


--------------------------------------------------------------------------------
/requirements.txt:
--------------------------------------------------------------------------------
 1 | accelerate>=1.7.0
 2 | aiofiles==23.2.1
 3 | altair==4.2.2
 4 | dadaptation==3.2
 5 | diffusers[torch]==0.32.2
 6 | easygui==0.98.3
 7 | einops==0.7.0
 8 | fairscale==0.4.13
 9 | ftfy==6.1.1
10 | gradio>=5.34.1
11 | huggingface-hub==0.29.3
12 | imagesize==1.4.1
13 | invisible-watermark==0.2.0
14 | lion-pytorch==0.0.6
15 | lycoris_lora==3.2.0.post2
16 | omegaconf==2.3.0
17 | onnx==1.16.1
18 | prodigyopt==1.1.2
19 | protobuf==3.20.3
20 | open-clip-torch==2.20.0
21 | opencv-python==4.10.0.84
22 | prodigy-plus-schedule-free==1.8.0
23 | pytorch-lightning==1.9.0
24 | pytorch-optimizer==3.5.0
25 | rich>=13.7.1
26 | safetensors==0.4.4
27 | schedulefree==1.4
28 | scipy==1.11.4
29 | # for T5XXL tokenizer (SD3/FLUX)
30 | sentencepiece==0.2.0
31 | timm==1.0.15
32 | tk==0.1.0
33 | toml==0.10.2
34 | transformers==4.44.2
35 | voluptuous==0.13.1
36 | wandb==0.18.0
37 | # for kohya_ss sd-scripts library
38 | -e ./sd-scripts
39 | 


--------------------------------------------------------------------------------
/requirements_ipex_xpu.txt:
--------------------------------------------------------------------------------
 1 | # Custom index URL for specific packages
 2 | --extra-index-url https://download.pytorch.org/whl/xpu
 3 | 
 4 | torch==2.7.1+xpu
 5 | torchvision==0.22.1+xpu
 6 | 
 7 | # Intel TensorFlow extension is Linux only and is too outdated to work with new OneAPI versions
 8 | # Using CPU only TensorFlow with PyTorch 2.5+ instead
 9 | tensorboard==2.15.2
10 | tensorflow==2.15.1
11 | onnxruntime-openvino==1.22.0
12 | 
13 | -r requirements.txt
14 | 


--------------------------------------------------------------------------------
/requirements_linux.txt:
--------------------------------------------------------------------------------
 1 | # Custom index URL for specific packages
 2 | --extra-index-url https://download.pytorch.org/whl/cu128
 3 | 
 4 | torch==2.7.0+cu128
 5 | torchvision==0.22.0+cu128
 6 | xformers>=0.0.30
 7 | 
 8 | bitsandbytes>=0.45.0
 9 | tensorboard==2.15.2
10 | tensorflow==2.15.0.post1
11 | onnxruntime-gpu==1.19.2
12 | 
13 | -r requirements.txt
14 | 


--------------------------------------------------------------------------------
/requirements_linux_ipex.txt:
--------------------------------------------------------------------------------
 1 | # Custom index URL for specific packages
 2 | --extra-index-url https://pytorch-extension.intel.com/release-whl/stable/xpu/us/
 3 | 
 4 | torch==2.3.1+cxx11.abi
 5 | torchvision==0.18.1+cxx11.abi
 6 | intel-extension-for-pytorch==2.3.110+xpu
 7 | oneccl_bind_pt==2.3.100+xpu
 8 | 
 9 | tensorboard==2.15.2
10 | tensorflow==2.15.1
11 | intel-extension-for-tensorflow[xpu]==2.15.0.1
12 | onnxruntime-openvino==1.22.0
13 | 
14 | mkl==2024.2.1
15 | mkl-dpcpp==2024.2.1
16 | oneccl-devel==2021.13.1
17 | impi-devel==2021.13.1
18 | 
19 | -r requirements.txt
20 | 


--------------------------------------------------------------------------------
/requirements_linux_rocm.txt:
--------------------------------------------------------------------------------
 1 | # Custom index URL for specific packages
 2 | --extra-index-url https://download.pytorch.org/whl/rocm6.3
 3 | --find-links https://repo.radeon.com/rocm/manylinux/rocm-rel-6.4.1
 4 | 
 5 | torch==2.7.1+rocm6.3
 6 | torchvision==0.22.1+rocm6.3
 7 | 
 8 | tensorboard==2.14.1; python_version=='3.11'
 9 | tensorboard==2.16.2; python_version!='3.11'
10 | tensorflow-rocm==2.14.0.600; python_version=='3.11'
11 | tensorflow-rocm==2.16.2; python_version!='3.11'
12 | 
13 | # no support for python 3.11
14 | onnxruntime-rocm==1.21.0
15 | 
16 | -r requirements.txt
17 | 


--------------------------------------------------------------------------------
/requirements_macos_amd64.txt:
--------------------------------------------------------------------------------
1 | torch==2.0.0 torchvision==0.15.1 -f https://download.pytorch.org/whl/cpu/torch_stable.html
2 | xformers bitsandbytes==0.43.3
3 | tensorflow-macos tensorboard==2.14.1
4 | onnxruntime==1.17.1
5 | -r requirements.txt
6 | 


--------------------------------------------------------------------------------
/requirements_macos_arm64.txt:
--------------------------------------------------------------------------------
 1 | --extra-index-url https://download.pytorch.org/whl/nightly/cpu
 2 | torch==2.8.0.*
 3 | torchvision==0.22.*
 4 | xformers==0.0.29.* 
 5 | git+https://github.com/bitsandbytes-foundation/bitsandbytes.git/#0.45.5
 6 | tensorflow-macos 
 7 | tensorflow-metal
 8 | tensorboard==2.14.1
 9 | onnxruntime==1.17.1
10 | -r requirements.txt
11 | 


--------------------------------------------------------------------------------
/requirements_pytorch_windows.txt:
--------------------------------------------------------------------------------
1 | # Custom index URL for specific packages
2 | --extra-index-url https://download.pytorch.org/whl/cu128
3 | 
4 | torch==2.7.0+cu128
5 | torchvision==0.22.0+cu128
6 | xformers>=0.0.30
7 | 
8 | -r requirements_windows.txt


--------------------------------------------------------------------------------
/requirements_runpod.txt:
--------------------------------------------------------------------------------
 1 | --extra-index-url https://download.pytorch.org/whl/cu124
 2 | torch==2.5.0+cu124
 3 | torchvision==0.20.0+cu124
 4 | xformers==0.0.28.post2
 5 | 
 6 | bitsandbytes==0.44.0
 7 | tensorboard==2.14.1
 8 | tensorflow==2.14.0
 9 | wheel
10 | tensorrt
11 | onnxruntime-gpu==1.19.2
12 | 
13 | -r requirements.txt
14 | 


--------------------------------------------------------------------------------
/requirements_windows.txt:
--------------------------------------------------------------------------------
1 | bitsandbytes>=0.45.0
2 | tensorboard
3 | tensorflow>=2.16.1
4 | onnxruntime-gpu==1.19.2
5 | 
6 | -r requirements.txt


--------------------------------------------------------------------------------
/setup-3.10.bat:
--------------------------------------------------------------------------------
 1 | @echo off
 2 | 
 3 | IF NOT EXIST venv (
 4 |     echo Creating venv...
 5 |     py -3.10.11 -m venv venv
 6 | )
 7 | 
 8 | :: Create the directory if it doesn't exist
 9 | mkdir ".\logs\setup" > nul 2>&1
10 | 
11 | :: Deactivate the virtual environment to prevent error
12 | call .\venv\Scripts\deactivate.bat
13 | 
14 | call .\venv\Scripts\activate.bat
15 | 
16 | REM first make sure we have setuptools available in the venv    
17 | python -m pip install --require-virtualenv --no-input -q -q  setuptools
18 | 
19 | REM Check if the batch was started via double-click
20 | IF /i "%comspec% /c %~0 " equ "%cmdcmdline:"=%" (
21 |     REM echo This script was started by double clicking.
22 |     cmd /k python .\setup\setup_windows.py
23 | ) ELSE (
24 |     REM echo This script was started from a command prompt.
25 |     python .\setup\setup_windows.py %*
26 | )
27 | 
28 | :: Deactivate the virtual environment
29 | call .\venv\Scripts\deactivate.bat


--------------------------------------------------------------------------------
/setup-runpod.sh:
--------------------------------------------------------------------------------
#!/usr/bin/env bash
# Provision a RunPod host for kohya_ss: install OS packages and cuDNN,
# create/activate a Python venv, install project requirements, and install
# the default accelerate configuration. Must run with root privileges
# (uses apt and writes under /root).

# This gets the directory the script is run from so pathing can work relative to the script where needed.
SCRIPT_DIR="$(cd -- "$(dirname -- "$0")" && pwd)"

# Install tk and python3.10-venv
echo "Installing tk and python3.10-venv..."
apt update -y && apt install -y python3-tk python3.10-venv

# Install required libcudnn release 8.7.0.84-1
# --allow-change-held-packages: RunPod images typically hold cudnn packages,
# so the pin cannot be applied without it.
echo "Installing required libcudnn release 8.7.0.84-1..."
apt install -y libcudnn8=8.7.0.84-1+cuda11.8 libcudnn8-dev=8.7.0.84-1+cuda11.8 --allow-change-held-packages

# Check if the venv folder doesn't exist
if [ ! -d "$SCRIPT_DIR/venv" ]; then
    echo "Creating venv..."
    python3 -m venv "$SCRIPT_DIR/venv"
fi

# Activate the virtual environment
echo "Activating venv..."
source "$SCRIPT_DIR/venv/bin/activate" || exit 1

# Run setup_linux.py script with platform requirements
echo "Running setup_linux.py..."
python "$SCRIPT_DIR/setup/setup_linux.py" --platform-requirements-file=requirements_runpod.txt --show_stdout --no_run_accelerate
# Free pip's download cache to keep the container image small.
pip3 cache purge

# Configure accelerate
# Installs the non-interactive accelerate config so training can start
# without running `accelerate config` by hand.
echo "Configuring accelerate..."
mkdir -p "/root/.cache/huggingface/accelerate"
cp "$SCRIPT_DIR/config_files/accelerate/runpod.yaml" "/root/.cache/huggingface/accelerate/default_config.yaml"

echo "Installation completed... You can start the gui with ./gui.sh --share --headless"

# Deactivate the virtual environment
echo "Deactivating venv..."
deactivate


--------------------------------------------------------------------------------
/setup.bat:
--------------------------------------------------------------------------------
@echo off
REM Windows setup entry point: creates the venv (default python), ensures
REM setuptools, then runs setup\setup_windows.py inside the venv.

IF NOT EXIST venv (
    echo Creating venv...
    python -m venv venv
)

:: Create the directory if it doesn't exist
mkdir ".\logs\setup" > nul 2>&1

:: Deactivate the virtual environment to prevent error
call .\venv\Scripts\deactivate.bat

call .\venv\Scripts\activate.bat

REM first make sure we have setuptools available in the venv    
python -m pip install --require-virtualenv --no-input -q setuptools

REM Check if the batch was started via double-click
REM %cmdcmdline% holds the command line that launched this cmd.exe; on a
REM double-click it matches "%comspec% /c <script>" (quotes stripped before
REM comparing). In that case run with "cmd /k" to keep the console open.
IF /i "%comspec% /c %~0 " equ "%cmdcmdline:"=%" (
    REM echo This script was started by double clicking.
    cmd /k python .\setup\setup_windows.py
) ELSE (
    REM echo This script was started from a command prompt.
    python .\setup\setup_windows.py %*
)

:: Deactivate the virtual environment
call .\venv\Scripts\deactivate.bat


--------------------------------------------------------------------------------
/setup.ps1:
--------------------------------------------------------------------------------
# PowerShell setup entry point: create the venv if missing, then run the
# Windows setup script with the venv's own interpreter.
if (-not (Test-Path -Path "venv")) {
    Write-Host "Creating venv..."
    python -m venv venv
}

# Create the directory if it doesn't exist
$null = New-Item -ItemType Directory -Force -Path ".\logs\setup"

# Deactivate the virtual environment
# NOTE(review): the activate/deactivate .bat files run in a child cmd.exe and
# cannot change this PowerShell session's environment; the correct interpreter
# is still used because it is invoked below by explicit path. Consider
# .\venv\Scripts\Activate.ps1 if activation in this session is actually needed.
& .\venv\Scripts\deactivate.bat

& .\venv\Scripts\activate.bat

# Run the setup script with the venv's python, forwarding all arguments.
& .\venv\Scripts\python.exe .\setup\setup_windows.py $args

# Deactivate the virtual environment
& .\venv\Scripts\deactivate.bat
18 | 


--------------------------------------------------------------------------------
/setup/check_local_modules.py:
--------------------------------------------------------------------------------
 1 | import argparse
 2 | import subprocess
 3 | 
 4 | # Define color variables
 5 | yellow_text = "\033[1;33m"
 6 | blue_text = "\033[1;34m"
 7 | reset_text = "\033[0m"
 8 | 
 9 | # Parse command line arguments
10 | parser = argparse.ArgumentParser()
11 | parser.add_argument('--no_question', action='store_true')
12 | args = parser.parse_args()
13 | 
14 | # Run pip freeze and capture the output
15 | output = subprocess.getoutput("pip freeze")
16 | 
17 | # Remove lines containing "WARNING"
18 | output_lines = [line for line in output.splitlines() if "WARNING" not in line]
19 | 
20 | # Reconstruct the output string without warning lines
21 | output = "\n".join(output_lines)
22 | 
23 | # Check if modules are found in the output
24 | if output:
25 |     print(f"{yellow_text}=============================================================")
26 |     print("Modules installed outside the virtual environment were found.")
27 |     print("This can cause issues. Please review the installed modules.\n")
28 |     print("You can uninstall all local modules with:\n")
29 |     print(f"{blue_text}deactivate")
30 |     print("pip freeze > uninstall.txt")
31 |     print("pip uninstall -y -r uninstall.txt")
32 |     print(f"{yellow_text}============================================================={reset_text}")
33 |     print('')
34 | 


--------------------------------------------------------------------------------
/setup/create_user_files.py:
--------------------------------------------------------------------------------
 1 | import os
 2 | 
 3 | bat_content = r'''@echo off
 4 | REM Example of how to start the GUI with custom arguments. In this case how to auto launch the browser:
 5 | REM call gui.bat --inbrowser
 6 | REM
 7 | REM You can add many arguments on the same line
 8 | REM
 9 | call gui.bat --inbrowser
10 | '''
11 | 
12 | ps1_content = r'''# Example of how to start the GUI with custom arguments. In this case how to auto launch the browser:
13 | # .\gui.ps1 --inbrowser
14 | #
15 | # You can add many arguments on the same line
16 | #
17 | # & .\gui.ps1 --inbrowser --server_port 2345
18 | 
19 | & .\gui.ps1 --inbrowser
20 | '''
21 | 
22 | bat_filename = 'gui-user.bat'
23 | ps1_filename = 'gui-user.ps1'
24 | 
25 | if not os.path.exists(bat_filename):
26 |     with open(bat_filename, 'w') as bat_file:
27 |         bat_file.write(bat_content)
28 |     print(f"File created: {bat_filename}")
29 | else:
30 |     print(f"File already exists: {bat_filename}")
31 | 
32 | if not os.path.exists(ps1_filename):
33 |     with open(ps1_filename, 'w') as ps1_file:
34 |         ps1_file.write(ps1_content)
35 |     print(f"File created: {ps1_filename}")
36 | else:
37 |     print(f"File already exists: {ps1_filename}")
38 | 


--------------------------------------------------------------------------------
/setup/debug_info.py:
--------------------------------------------------------------------------------
 1 | import platform
 2 | import subprocess
 3 | import os
 4 | 
 5 | # Get system information
 6 | system = platform.system()
 7 | release = platform.release()
 8 | version = platform.version()
 9 | machine = platform.machine()
10 | processor = platform.processor()
11 | 
12 | # Print system information
13 | print("System Information:")
14 | print(f"System: {system}, Release: {release}, Version: {version}, Machine: {machine}, Processor: {processor}")
15 | 
16 | # Get Python information
17 | python_version = platform.python_version()
18 | python_implementation = platform.python_implementation()
19 | python_compiler = platform.python_compiler()
20 | 
21 | # Print Python information
22 | print("\nPython Information:")
23 | print(f"Version: {python_version}, Implementation: {python_implementation}, Compiler: {python_compiler}")
24 | 
25 | # Get virtual environment information
26 | venv = os.environ.get('VIRTUAL_ENV', None)
27 | 
28 | # Print virtual environment information
29 | if venv:
30 |     print("\nVirtual Environment Information:")
31 |     print(f"Path: {venv}")
32 | else:
33 |     print("\nVirtual Environment Information:")
34 |     print("Not running inside a virtual environment.")
35 | 
36 | # Get GPU information (requires nvidia-smi to be installed)
37 | try:
38 |     output = subprocess.check_output(['nvidia-smi', '--query-gpu=name,memory.total', '--format=csv'])
39 |     output = output.decode('utf-8').strip().split('\n')[1:]
40 |     gpu_info = [line.split(', ') for line in output]
41 |     gpu_name, gpu_vram = gpu_info[0]
42 |     gpu_vram = gpu_vram.replace(' MiB', '')
43 |     gpu_vram_warning = int(gpu_vram) < 8000
44 | except (subprocess.CalledProcessError, FileNotFoundError):
45 |     gpu_name, gpu_vram = "N/A", "N/A"
46 |     gpu_vram_warning = False
47 | 
48 | # Print GPU information
49 | print("\nGPU Information:")
50 | print(f"Name: {gpu_name}, VRAM: {gpu_vram} MiB")
51 | 
52 | # Print VRAM warning if necessary
53 | if gpu_vram_warning:
54 |     print('\033[33mWarning: GPU VRAM is less than 8GB and will likely result in proper operations.\033[0m')
55 | 
56 | print(' ')
57 | 


--------------------------------------------------------------------------------
/setup/docker_setup.py:
--------------------------------------------------------------------------------
# Minimal setup shim used by the Docker build: setup() takes no arguments, so
# all package metadata is expected to come from declarative configuration
# (setup.cfg / pyproject.toml).
# NOTE(review): find_packages is imported but never used here — confirm it can
# be dropped.
from setuptools import setup, find_packages

setup()


--------------------------------------------------------------------------------
/setup/setup_linux.py:
--------------------------------------------------------------------------------
 1 | import argparse
 2 | import logging
 3 | import setup_common
 4 | 
 5 | errors = 0  # Define the 'errors' variable before using it
 6 | log = logging.getLogger('sd')
 7 | 
 8 | # ANSI escape code for yellow color
 9 | YELLOW = '\033[93m'
10 | RESET_COLOR = '\033[0m'
11 | 
12 | 
def main_menu(platform_requirements_file: str, show_stdout: bool = False, no_run_accelerate: bool = False) -> None:
    """Install python dependencies and optionally configure accelerate.

    Args:
        platform_requirements_file: Path to the platform-specific pip
            requirements file to install in bulk.
        show_stdout: If True, the installer's stdout is shown to the user.
        no_run_accelerate: If True, skip the accelerate configuration step.
    """
    log.info("Installing python dependencies. This could take a few minutes as it downloads files.")
    log.info("If this operation ever runs too long, you can rerun this script in verbose mode to check.")
    
    setup_common.check_repo_version()
    # setup_common.check_python()

    # Upgrade pip if needed
    setup_common.install('pip')
    # Install everything from the requirements file in one pip invocation.
    setup_common.install_requirements_inbulk(
        platform_requirements_file, show_stdout=show_stdout,
    )
    # setup_common.install_requirements(platform_requirements_file, check_no_verify_flag=False, show_stdout=show_stdout)
    if not no_run_accelerate:
        # run_accelerate=False: presumably writes the config without launching
        # the interactive `accelerate config` tool — see setup_common.
        setup_common.configure_accelerate(run_accelerate=False)
28 | 
29 | 
if __name__ == '__main__':
    # Bootstrap: base packages and logging must exist before anything else runs.
    setup_common.ensure_base_requirements()
    setup_common.setup_logging()
    # Abort early on unsupported Python versions.
    if not setup_common.check_python_version():
        exit(1)
    
    # Presumably syncs the sd-scripts git submodule — see setup_common.
    setup_common.update_submodule()

    parser = argparse.ArgumentParser()
    parser.add_argument('--platform-requirements-file', dest='platform_requirements_file', default='requirements_linux.txt', help='Path to the platform-specific requirements file')
    parser.add_argument('--show_stdout', dest='show_stdout', action='store_true', help='Whether to show stdout during installation')
    parser.add_argument('--no_run_accelerate', dest='no_run_accelerate', action='store_true', help='Whether to not run accelerate config')
    args = parser.parse_args()

    main_menu(args.platform_requirements_file, show_stdout=args.show_stdout, no_run_accelerate=args.no_run_accelerate)
45 | 


--------------------------------------------------------------------------------
/setup/setup_runpod.py:
--------------------------------------------------------------------------------
import argparse
import logging
import os
import shutil
import subprocess

import setup_common
 6 | 
 7 | errors = 0  # Define the 'errors' variable before using it
 8 | log = logging.getLogger('sd')
 9 | 
10 | # ANSI escape code for yellow color
11 | YELLOW = '\033[93m'
12 | RESET_COLOR = '\033[0m'
13 | 
def configure_accelerate() -> None:
    """Install the RunPod accelerate config as the user's default.

    Copies config_files/accelerate/runpod.yaml from the repository root to
    /root/.cache/huggingface/accelerate/default_config.yaml so training can
    start without running `accelerate config` interactively.
    """
    # This script lives in the setup/ subdirectory, so the repository root is
    # one level up. The original joined config_files onto the setup/ directory
    # itself, which does not contain config_files/ (cf. setup-runpod.sh, which
    # resolves the file from the repo root).
    script_dir = os.path.dirname(os.path.abspath(__file__))
    repo_root = os.path.dirname(script_dir)
    cache_dir = "/root/.cache/huggingface/accelerate"

    log.info("Configuring accelerate...")
    os.makedirs(cache_dir, exist_ok=True)

    config_file_src = os.path.join(repo_root, "config_files", "accelerate", "runpod.yaml")
    config_file_dest = os.path.join(cache_dir, "default_config.yaml")
    shutil.copyfile(config_file_src, config_file_dest)
24 | 
25 | 
def setup_environment() -> None:
    """Provision OS packages and a Python venv on a RunPod host.

    Installs tk and python3.10-venv via apt, creates ./venv next to this
    script if missing, and attempts to activate it. Relies on the
    module-level `import subprocess`.
    """
    # Get the directory the script is run from
    script_dir = os.path.dirname(os.path.abspath(__file__))

    # Install tk and python3.10-venv
    log.info("Install tk and python3.10-venv...")
    subprocess.run(['apt', 'update', '-y'])
    subprocess.run(['apt', 'install', '-y', 'python3-tk', 'python3.10-venv'])

    # Check if the venv folder doesn't exist
    venv_dir = os.path.join(script_dir, 'venv')
    if not os.path.exists(venv_dir):
        log.info("Creating venv...")
        subprocess.run(['python3', '-m', 'venv', venv_dir])

    # Activate the virtual environment
    # NOTE(review): sourcing the activate script inside a child bash process
    # cannot change this Python process's environment — this step appears to
    # be a no-op for the current process; verify intent.
    log.info("Activate venv...")
    activate_script = os.path.join(venv_dir, 'bin', 'activate')
    activate_command = f'source "{activate_script}" || exit 1'
    subprocess.run(activate_command, shell=True, executable='/bin/bash')
46 | 
47 | 
def main_menu(platform_requirements_file):
    """Install python requirements in bulk, then write the accelerate config.

    Args:
        platform_requirements_file: path to the platform-specific
            requirements file to install from.
    """
    log.info("Installing python dependencies. This could take a few minutes as it downloads files.")
    log.info("If this operation ever runs too long, you can rerun this script in verbose mode to check.")

    setup_common.check_repo_version()
    # setup_common.check_python()

    # Make sure pip itself is current before the bulk install.
    setup_common.install('pip')

    setup_common.install_requirements_inbulk(platform_requirements_file, show_stdout=True)

    # Finish by installing the RunPod accelerate configuration.
    configure_accelerate()
62 | 
63 | 
if __name__ == '__main__':
    # Bootstrap: base requirements and logging must be in place first.
    setup_common.ensure_base_requirements()
    setup_common.setup_logging()

    # Abort early when the interpreter version is unsupported.
    if not setup_common.check_python_version():
        exit(1)

    setup_common.update_submodule()

    arg_parser = argparse.ArgumentParser()
    arg_parser.add_argument(
        '--platform-requirements-file',
        dest='platform_requirements_file',
        default='requirements_runpod.txt',
        help='Path to the platform-specific requirements file',
    )
    parsed = arg_parser.parse_args()

    main_menu(parsed.platform_requirements_file)
77 | 


--------------------------------------------------------------------------------
/setup/update_bitsandbytes.py:
--------------------------------------------------------------------------------
 1 | import os
 2 | import sysconfig
 3 | import filecmp
 4 | import shutil
 5 | 
 6 | def sync_bits_and_bytes_files():
 7 |     """
 8 |     Check for "different" bitsandbytes Files and copy only if necessary.
 9 |     This function is specific for Windows OS.
10 |     """
11 |     
12 |     # Only execute on Windows
13 |     if os.name != "nt":
14 |         print("This function is only applicable to Windows OS.")
15 |         return
16 | 
17 |     try:
18 |         # Define source and destination directories
19 |         source_dir = os.path.join(os.getcwd(), "bitsandbytes_windows")
20 | 
21 |         dest_dir_base = os.path.join(sysconfig.get_paths()["purelib"], "bitsandbytes")
22 |         
23 |         # Clear file comparison cache
24 |         filecmp.clear_cache()
25 |         
26 |         # Iterate over each file in source directory
27 |         for file in os.listdir(source_dir):
28 |             source_file_path = os.path.join(source_dir, file)
29 | 
30 |             # Decide the destination directory based on file name
31 |             if file in ("main.py", "paths.py"):
32 |                 dest_dir = os.path.join(dest_dir_base, "cuda_setup")
33 |             else:
34 |                 dest_dir = dest_dir_base
35 | 
36 |             # Copy file from source to destination, maintaining original file's metadata
37 |             print(f'Copy {source_file_path} to {dest_dir}')
38 |             shutil.copy2(source_file_path, dest_dir)
39 | 
40 |     except FileNotFoundError as fnf_error:
41 |         print(f"File not found error: {fnf_error}")
42 |     except PermissionError as perm_error:
43 |         print(f"Permission error: {perm_error}")
44 |     except Exception as e:
45 |         print(f"An unexpected error occurred: {e}")
46 | 
47 | 
if __name__ == "__main__":
    # Script entry point: run the one-off Windows bitsandbytes file sync.
    sync_bits_and_bytes_files()


--------------------------------------------------------------------------------
/test/config/TI-AdamW8bit-toml.json:
--------------------------------------------------------------------------------
 1 | {
 2 |   "adaptive_noise_scale": 0,
 3 |   "additional_parameters": "",
 4 |   "bucket_no_upscale": true,
 5 |   "bucket_reso_steps": 1,
 6 |   "cache_latents": true,
 7 |   "cache_latents_to_disk": false,
 8 |   "caption_dropout_every_n_epochs": 0.0,
 9 |   "caption_dropout_rate": 0.05,
10 |   "caption_extension": "",
11 |   "clip_skip": 2,
12 |   "color_aug": false,
13 |   "dataset_config": "D:/kohya_ss/test/config/dataset.toml",
14 |   "enable_bucket": true,
15 |   "epoch": 4,
16 |   "flip_aug": false,
17 |   "full_fp16": false,
18 |   "gpu_ids": "",
19 |   "gradient_accumulation_steps": 1,
20 |   "gradient_checkpointing": false,
21 |   "init_word": "*",
22 |   "keep_tokens": "0",
23 |   "learning_rate": 0.0001,
24 |   "log_tracker_config": "",
25 |   "log_tracker_name": "",
26 |   "logging_dir": "./test/logs",
27 |   "lr_scheduler": "cosine",
28 |   "lr_scheduler_args": "",
29 |   "lr_scheduler_num_cycles": "",
30 |   "lr_scheduler_power": "",
31 |   "lr_warmup": 0,
32 |   "max_bucket_reso": 2048,
33 |   "max_data_loader_n_workers": "0",
34 |   "max_resolution": "512,512",
35 |   "max_timestep": 1000,
36 |   "max_token_length": "75",
37 |   "max_train_epochs": "",
38 |   "max_train_steps": "80",
39 |   "mem_eff_attn": false,
40 |   "min_bucket_reso": 256,
41 |   "min_snr_gamma": 10,
42 |   "min_timestep": 0,
43 |   "mixed_precision": "bf16",
44 |   "model_list": "runwayml/stable-diffusion-v1-5",
45 |   "multi_gpu": false,
46 |   "multires_noise_discount": 0.2,
47 |   "multires_noise_iterations": 8,
48 |   "no_token_padding": false,
49 |   "noise_offset": 0.05,
50 |   "noise_offset_type": "Multires",
51 |   "num_cpu_threads_per_process": 2,
52 |   "num_machines": 1,
53 |   "num_processes": 1,
54 |   "num_vectors_per_token": 8,
55 |   "optimizer": "AdamW8bit",
56 |   "optimizer_args": "",
57 |   "output_dir": "./test/output",
58 |   "output_name": "TI-Adamw8bit-toml",
59 |   "persistent_data_loader_workers": false,
60 |   "pretrained_model_name_or_path": "runwayml/stable-diffusion-v1-5",
61 |   "prior_loss_weight": 1.0,
62 |   "random_crop": false,
63 |   "reg_data_dir": "",
64 |   "resume": "",
65 |   "sample_every_n_epochs": 0,
66 |   "sample_every_n_steps": 20,
67 |   "sample_prompts": "a painting of man wearing a gas mask , by darius kawasaki",
68 |   "sample_sampler": "euler_a",
69 |   "save_every_n_epochs": 1,
70 |   "save_every_n_steps": 0,
71 |   "save_last_n_steps": 0,
72 |   "save_last_n_steps_state": 0,
73 |   "save_model_as": "safetensors",
74 |   "save_precision": "fp16",
75 |   "save_state": false,
76 |   "scale_v_pred_loss_like_noise_pred": false,
77 |   "sdxl": false,
78 |   "sdxl_no_half_vae": false,
79 |   "seed": "1234",
80 |   "shuffle_caption": false,
81 |   "stop_text_encoder_training": 0,
82 |   "template": "style template",
83 |   "token_string": "zxc",
84 |   "train_batch_size": 4,
85 |   "train_data_dir": "",
86 |   "use_wandb": false,
87 |   "v2": false,
88 |   "v_parameterization": false,
89 |   "v_pred_like_loss": 0,
90 |   "vae": "",
91 |   "vae_batch_size": 0,
92 |   "wandb_api_key": "",
93 |   "wandb_run_name": "",
94 |   "weights": "",
95 |   "xformers": "xformers"
96 | }


--------------------------------------------------------------------------------
/test/config/TI-AdamW8bit.json:
--------------------------------------------------------------------------------
 1 | {
 2 |   "adaptive_noise_scale": 0.005,
 3 |   "additional_parameters": "",
 4 |   "bucket_no_upscale": true,
 5 |   "bucket_reso_steps": 1,
 6 |   "cache_latents": true,
 7 |   "cache_latents_to_disk": false,
 8 |   "caption_dropout_every_n_epochs": 0.0,
 9 |   "caption_dropout_rate": 0.05,
10 |   "caption_extension": "",
11 |   "clip_skip": 2,
12 |   "color_aug": false,
13 |   "dataset_config": "",
14 |   "enable_bucket": true,
15 |   "epoch": 8,
16 |   "flip_aug": false,
17 |   "full_fp16": false,
18 |   "gpu_ids": "",
19 |   "gradient_accumulation_steps": 1,
20 |   "gradient_checkpointing": false,
21 |   "init_word": "*",
22 |   "ip_noise_gamma": 0.1,
23 |   "ip_noise_gamma_random_strength": true,
24 |   "keep_tokens": "0",
25 |   "learning_rate": 0.0001,
26 |   "log_tracker_config": "",
27 |   "log_tracker_name": "",
28 |   "logging_dir": "./test/logs",
29 |   "lr_scheduler": "cosine",
30 |   "lr_scheduler_args": "",
31 |   "lr_scheduler_num_cycles": "",
32 |   "lr_scheduler_power": "",
33 |   "lr_warmup": 0,
34 |   "max_bucket_reso": 2048,
35 |   "max_data_loader_n_workers": "0",
36 |   "max_resolution": "512,512",
37 |   "max_timestep": 1000,
38 |   "max_token_length": "75",
39 |   "max_train_epochs": "",
40 |   "max_train_steps": "",
41 |   "mem_eff_attn": false,
42 |   "min_bucket_reso": 256,
43 |   "min_snr_gamma": 10,
44 |   "min_timestep": 0,
45 |   "mixed_precision": "bf16",
46 |   "model_list": "runwayml/stable-diffusion-v1-5",
47 |   "multi_gpu": false,
48 |   "multires_noise_discount": 0.2,
49 |   "multires_noise_iterations": 8,
50 |   "no_token_padding": false,
51 |   "noise_offset": 0.05,
52 |   "noise_offset_random_strength": true,
53 |   "noise_offset_type": "Original",
54 |   "num_cpu_threads_per_process": 2,
55 |   "num_machines": 1,
56 |   "num_processes": 1,
57 |   "num_vectors_per_token": 8,
58 |   "optimizer": "AdamW8bit",
59 |   "optimizer_args": "",
60 |   "output_dir": "./test/output",
61 |   "output_name": "TI-Adamw8bit",
62 |   "persistent_data_loader_workers": false,
63 |   "pretrained_model_name_or_path": "runwayml/stable-diffusion-v1-5",
64 |   "prior_loss_weight": 1.0,
65 |   "random_crop": false,
66 |   "reg_data_dir": "",
67 |   "resume": "",
68 |   "sample_every_n_epochs": 0,
69 |   "sample_every_n_steps": 20,
70 |   "sample_prompts": "a painting of man wearing a gas mask , by darius kawasaki",
71 |   "sample_sampler": "euler_a",
72 |   "save_every_n_epochs": 1,
73 |   "save_every_n_steps": 0,
74 |   "save_last_n_steps": 0,
75 |   "save_last_n_steps_state": 0,
76 |   "save_model_as": "safetensors",
77 |   "save_precision": "fp16",
78 |   "save_state": false,
79 |   "scale_v_pred_loss_like_noise_pred": false,
80 |   "sdxl": false,
81 |   "sdxl_no_half_vae": false,
82 |   "seed": "1234",
83 |   "shuffle_caption": false,
84 |   "stop_text_encoder_training": 0,
85 |   "template": "style template",
86 |   "token_string": "zxc",
87 |   "train_batch_size": 4,
88 |   "train_data_dir": "./test/img",
89 |   "use_wandb": false,
90 |   "v2": false,
91 |   "v_parameterization": false,
92 |   "v_pred_like_loss": 0,
93 |   "vae": "",
94 |   "vae_batch_size": 0,
95 |   "wandb_api_key": "",
96 |   "wandb_run_name": "",
97 |   "weights": "",
98 |   "xformers": "xformers"
99 | }


--------------------------------------------------------------------------------
/test/config/dataset-finetune.toml:
--------------------------------------------------------------------------------
 1 | [[datasets]]
 2 | resolution = 512
 3 | batch_size = 4
 4 | keep_tokens = 1
 5 | enable_bucket = true
 6 | min_bucket_reso = 64
 7 | max_bucket_reso = 1024
 8 | bucket_reso_steps = 32
 9 | bucket_no_upscale = false
10 | 
11 |   [[datasets.subsets]]
12 |   image_dir = '.\test\img\10_darius kawasaki person'
13 |   num_repeats = 10
14 |   metadata_file = '.\test\config\meta-1_lat.json'


--------------------------------------------------------------------------------
/test/config/dataset-masked_loss.toml:
--------------------------------------------------------------------------------
 1 | [[datasets]]
 2 | resolution = 512
 3 | batch_size = 4
 4 | keep_tokens = 1
 5 | enable_bucket = true
 6 | min_bucket_reso = 64
 7 | max_bucket_reso = 1024
 8 | bucket_reso_steps = 32
 9 | bucket_no_upscale = true
10 | 
11 |   [[datasets.subsets]]
12 |   image_dir = '.\test\img\10_darius kawasaki person'
13 |   num_repeats = 10
14 |   caption_extension = '.txt'
15 |   conditioning_data_dir = '.\test\masked_loss'


--------------------------------------------------------------------------------
/test/config/dataset-multires.toml:
--------------------------------------------------------------------------------
 1 | [general]
 2 | # define common settings here
 3 | flip_aug = true
 4 | color_aug = false
 5 | keep_tokens_separator= "|||"
 6 | shuffle_caption = false
 7 | caption_tag_dropout_rate = 0
 8 | caption_extension = ".txt"
 9 | min_bucket_reso = 64
10 | max_bucket_reso = 2048
11 | 
12 | [[datasets]]
13 | # define the first resolution here
14 | batch_size = 1
15 | enable_bucket = true
16 | resolution = [1024, 1024]
17 | 
18 |   [[datasets.subsets]]
19 |   image_dir = "./test/img/10_darius kawasaki person"
20 |   num_repeats = 10
21 | 
22 | [[datasets]]
23 | # define the second resolution here
24 | batch_size = 1
25 | enable_bucket = true
26 | resolution = [768, 768]
27 | 
28 |   [[datasets.subsets]]
29 |   image_dir = "./test/img/10_darius kawasaki person"
30 |   num_repeats = 10
31 | 
32 | [[datasets]]
33 | # define the third resolution here
34 | batch_size = 1
35 | enable_bucket = true
36 | resolution = [512, 512]
37 | 
38 |   [[datasets.subsets]]
39 |   image_dir = "./test/img/10_darius kawasaki person"
40 |   num_repeats = 10


--------------------------------------------------------------------------------
/test/config/dataset.toml:
--------------------------------------------------------------------------------
 1 | [[datasets]]
 2 | resolution = 512
 3 | batch_size = 4
 4 | keep_tokens = 1
 5 | enable_bucket = true
 6 | min_bucket_reso = 64
 7 | max_bucket_reso = 1024
 8 | bucket_reso_steps = 32
 9 | bucket_no_upscale = true
10 | 
11 |   [[datasets.subsets]]
12 |   image_dir = './test/img/10_darius kawasaki person'
13 |   num_repeats = 10
14 |   class_tokens = 'darius kawasaki person'
15 |   caption_extension = '.txt'


--------------------------------------------------------------------------------
/test/config/dreambooth-Adafactor.json:
--------------------------------------------------------------------------------
 1 | {
 2 |   "adaptive_noise_scale": 0,
 3 |   "additional_parameters": "",
 4 |   "bucket_no_upscale": true,
 5 |   "bucket_reso_steps": 1,
 6 |   "cache_latents": true,
 7 |   "cache_latents_to_disk": false,
 8 |   "caption_dropout_every_n_epochs": 0.0,
 9 |   "caption_dropout_rate": 0.05,
10 |   "caption_extension": "",
11 |   "clip_skip": 1,
12 |   "color_aug": false,
13 |   "enable_bucket": true,
14 |   "epoch": 1,
15 |   "flip_aug": false,
16 |   "full_fp16": false,
17 |   "gradient_accumulation_steps": 4.0,
18 |   "gradient_checkpointing": false,
19 |   "keep_tokens": "0",
20 |   "learning_rate": 0.0001,
21 |   "logging_dir": "./test/logs",
22 |   "lr_scheduler": "constant",
23 |   "lr_warmup": 0,
24 |   "max_data_loader_n_workers": "0",
25 |   "max_resolution": "512,512",
26 |   "max_timestep": 1000,
27 |   "max_token_length": "75",
28 |   "max_train_epochs": "",
29 |   "mem_eff_attn": false,
30 |   "min_snr_gamma": 0,
31 |   "min_timestep": 0,
32 |   "mixed_precision": "bf16",
33 |   "model_list": "runwayml/stable-diffusion-v1-5",
34 |   "multires_noise_discount": 0,
35 |   "multires_noise_iterations": 0,
36 |   "no_token_padding": false,
37 |   "noise_offset": "0.05",
38 |   "noise_offset_type": "Original",
39 |   "num_cpu_threads_per_process": 2,
40 |   "optimizer": "Adafactor",
41 |   "optimizer_args": "scale_parameter=False relative_step=False warmup_init=False",
42 |   "output_dir": "./test/output",
43 |   "output_name": "dreambooth-Adafactor",
44 |   "persistent_data_loader_workers": false,
45 |   "pretrained_model_name_or_path": "runwayml/stable-diffusion-v1-5",
46 |   "prior_loss_weight": 1.0,
47 |   "random_crop": false,
48 |   "reg_data_dir": "",
49 |   "resume": "",
50 |   "sample_every_n_epochs": 0,
51 |   "sample_every_n_steps": 25,
52 |   "sample_prompts": "a painting of a gas mask , by darius kawasaki",
53 |   "sample_sampler": "euler_a",
54 |   "save_every_n_epochs": 1,
55 |   "save_every_n_steps": 0,
56 |   "save_last_n_steps": 0,
57 |   "save_last_n_steps_state": 0,
58 |   "save_model_as": "safetensors",
59 |   "save_precision": "fp16",
60 |   "save_state": false,
61 |   "scale_v_pred_loss_like_noise_pred": false,
62 |   "sdxl": false,
63 |   "seed": "1234",
64 |   "shuffle_caption": false,
65 |   "stop_text_encoder_training": 0,
66 |   "train_batch_size": 1,
67 |   "train_data_dir": "./test/img",
68 |   "use_wandb": false,
69 |   "v2": false,
70 |   "v_parameterization": false,
71 |   "vae": "",
72 |   "vae_batch_size": 0,
73 |   "wandb_api_key": "",
74 |   "weighted_captions": false,
75 |   "xformers": true
76 | }


--------------------------------------------------------------------------------
/test/config/dreambooth-Prodigy-SDXL.json:
--------------------------------------------------------------------------------
 1 | {
 2 |   "adaptive_noise_scale": 0,
 3 |   "additional_parameters": "",
 4 |   "bucket_no_upscale": true,
 5 |   "bucket_reso_steps": 32,
 6 |   "cache_latents": true,
 7 |   "cache_latents_to_disk": false,
 8 |   "caption_dropout_every_n_epochs": 0.0,
 9 |   "caption_dropout_rate": 0,
10 |   "caption_extension": "",
11 |   "clip_skip": 2,
12 |   "color_aug": false,
13 |   "enable_bucket": true,
14 |   "epoch": 1,
15 |   "flip_aug": false,
16 |   "full_bf16": false,
17 |   "full_fp16": false,
18 |   "gpu_ids": "",
19 |   "gradient_accumulation_steps": 1,
20 |   "gradient_checkpointing": false,
21 |   "keep_tokens": "0",
22 |   "learning_rate": 1.0,
23 |   "learning_rate_te": 1e-05,
24 |   "learning_rate_te1": 1e-05,
25 |   "learning_rate_te2": 0.0,
26 |   "logging_dir": "./test/logs",
27 |   "lr_scheduler": "cosine",
28 |   "lr_scheduler_args": "",
29 |   "lr_scheduler_num_cycles": "",
30 |   "lr_scheduler_power": "",
31 |   "lr_warmup": 0,
32 |   "max_bucket_reso": 2048,
33 |   "max_data_loader_n_workers": "0",
34 |   "max_resolution": "512,512",
35 |   "max_timestep": 1000,
36 |   "max_token_length": "75",
37 |   "max_train_epochs": "",
38 |   "max_train_steps": "",
39 |   "mem_eff_attn": false,
40 |   "min_bucket_reso": 256,
41 |   "min_snr_gamma": 0,
42 |   "min_timestep": 0,
43 |   "mixed_precision": "bf16",
44 |   "model_list": "stabilityai/stable-diffusion-xl-base-1.0",
45 |   "multi_gpu": false,
46 |   "multires_noise_discount": 0.2,
47 |   "multires_noise_iterations": 8,
48 |   "no_token_padding": false,
49 |   "noise_offset": "0.05",
50 |   "noise_offset_type": "Multires",
51 |   "num_cpu_threads_per_process": 2,
52 |   "num_machines": 1,
53 |   "num_processes": 1,
54 |   "optimizer": "Prodigy",
55 |   "optimizer_args": "decouple=True weight_decay=0.6 betas=0.9,0.99 use_bias_correction=True",
56 |   "output_dir": "./test/output",
57 |   "output_name": "db-Prodigy",
58 |   "persistent_data_loader_workers": false,
59 |   "pretrained_model_name_or_path": "stabilityai/stable-diffusion-xl-base-1.0",
60 |   "prior_loss_weight": 1.0,
61 |   "random_crop": false,
62 |   "reg_data_dir": "",
63 |   "resume": "",
64 |   "sample_every_n_epochs": 0,
65 |   "sample_every_n_steps": 25,
66 |   "sample_prompts": "a painting of a gas mask , by darius kawasaki",
67 |   "sample_sampler": "euler_a",
68 |   "save_every_n_epochs": 1,
69 |   "save_every_n_steps": 0,
70 |   "save_last_n_steps": 0,
71 |   "save_last_n_steps_state": 0,
72 |   "save_model_as": "safetensors",
73 |   "save_precision": "fp16",
74 |   "save_state": false,
75 |   "scale_v_pred_loss_like_noise_pred": false,
76 |   "sdxl": true,
77 |   "seed": "1234",
78 |   "shuffle_caption": false,
79 |   "stop_text_encoder_training": 0,
80 |   "train_batch_size": 1,
81 |   "train_data_dir": "./test/img",
82 |   "use_wandb": false,
83 |   "v2": false,
84 |   "v_parameterization": false,
85 |   "v_pred_like_loss": 0,
86 |   "vae": "",
87 |   "vae_batch_size": 0,
88 |   "wandb_api_key": "",
89 |   "weighted_captions": false,
90 |   "xformers": "xformers"
91 | }


--------------------------------------------------------------------------------
/test/config/dreambooth.json:
--------------------------------------------------------------------------------
 1 | {
 2 |   "adaptive_noise_scale": 0,
 3 |   "additional_parameters": "",
 4 |   "bucket_no_upscale": true,
 5 |   "bucket_reso_steps": 1,
 6 |   "cache_latents": true,
 7 |   "cache_latents_to_disk": false,
 8 |   "caption_dropout_every_n_epochs": 0.0,
 9 |   "caption_dropout_rate": 0.05,
10 |   "caption_extension": "",
11 |   "clip_skip": 1,
12 |   "color_aug": false,
13 |   "enable_bucket": true,
14 |   "epoch": 1,
15 |   "flip_aug": false,
16 |   "full_fp16": false,
17 |   "gradient_accumulation_steps": 4.0,
18 |   "gradient_checkpointing": false,
19 |   "keep_tokens": "0",
20 |   "learning_rate": 1.0,
21 |   "logging_dir": "./test/logs",
22 |   "lr_scheduler": "constant",
23 |   "lr_warmup": 0,
24 |   "max_data_loader_n_workers": "0",
25 |   "max_resolution": "512,512",
26 |   "max_timestep": 1000,
27 |   "max_token_length": "75",
28 |   "max_train_epochs": "",
29 |   "mem_eff_attn": false,
30 |   "min_snr_gamma": 0,
31 |   "min_timestep": 0,
32 |   "mixed_precision": "bf16",
33 |   "model_list": "runwayml/stable-diffusion-v1-5",
34 |   "multires_noise_discount": 0,
35 |   "multires_noise_iterations": 0,
36 |   "no_token_padding": false,
37 |   "noise_offset": "0.05",
38 |   "noise_offset_type": "Original",
39 |   "num_cpu_threads_per_process": 2,
40 |   "optimizer": "AdamW8bit",
41 |   "optimizer_args": "",
42 |   "output_dir": "./test/output",
43 |   "output_name": "db",
44 |   "persistent_data_loader_workers": false,
45 |   "pretrained_model_name_or_path": "runwayml/stable-diffusion-v1-5",
46 |   "prior_loss_weight": 1.0,
47 |   "random_crop": false,
48 |   "reg_data_dir": "",
49 |   "resume": "",
50 |   "sample_every_n_epochs": 0,
51 |   "sample_every_n_steps": 25,
52 |   "sample_prompts": "a painting of a gas mask , by darius kawasaki",
53 |   "sample_sampler": "euler_a",
54 |   "save_every_n_epochs": 1,
55 |   "save_every_n_steps": 0,
56 |   "save_last_n_steps": 0,
57 |   "save_last_n_steps_state": 0,
58 |   "save_model_as": "safetensors",
59 |   "save_precision": "fp16",
60 |   "save_state": false,
61 |   "scale_v_pred_loss_like_noise_pred": false,
62 |   "sdxl": false,
63 |   "seed": "1234",
64 |   "shuffle_caption": false,
65 |   "stop_text_encoder_training": 0,
66 |   "train_batch_size": 1,
67 |   "train_data_dir": "./test/img",
68 |   "use_wandb": false,
69 |   "v2": false,
70 |   "v_parameterization": false,
71 |   "vae": "",
72 |   "vae_batch_size": 0,
73 |   "wandb_api_key": "",
74 |   "weighted_captions": false,
75 |   "xformers": true
76 | }


--------------------------------------------------------------------------------
/test/config/locon-Prodigy.json:
--------------------------------------------------------------------------------
  1 | {
  2 |   "LoRA_type": "Kohya LoCon",
  3 |   "adaptive_noise_scale": 0,
  4 |   "additional_parameters": "",
  5 |   "block_alphas": "",
  6 |   "block_dims": "",
  7 |   "block_lr_zero_threshold": "",
  8 |   "bucket_no_upscale": true,
  9 |   "bucket_reso_steps": 1,
 10 |   "cache_latents": true,
 11 |   "cache_latents_to_disk": false,
 12 |   "caption_dropout_every_n_epochs": 0.0,
 13 |   "caption_dropout_rate": 0,
 14 |   "caption_extension": "",
 15 |   "clip_skip": 2,
 16 |   "color_aug": false,
 17 |   "conv_alpha": 8,
 18 |   "conv_alphas": "",
 19 |   "conv_dim": 16,
 20 |   "conv_dims": "",
 21 |   "decompose_both": false,
 22 |   "dim_from_weights": false,
 23 |   "down_lr_weight": "",
 24 |   "enable_bucket": true,
 25 |   "epoch": 1,
 26 |   "factor": -1,
 27 |   "flip_aug": false,
 28 |   "full_fp16": false,
 29 |   "gradient_accumulation_steps": 1,
 30 |   "gradient_checkpointing": false,
 31 |   "keep_tokens": "0",
 32 |   "learning_rate": 1.0,
 33 |   "logging_dir": "./test/logs",
 34 |   "lora_network_weights": "",
 35 |   "lr_scheduler": "cosine",
 36 |   "lr_scheduler_num_cycles": "",
 37 |   "lr_scheduler_power": "",
 38 |   "lr_warmup": 0,
 39 |   "max_data_loader_n_workers": "0",
 40 |   "max_resolution": "512,512",
 41 |   "max_token_length": "75",
 42 |   "max_train_epochs": "",
 43 |   "mem_eff_attn": false,
 44 |   "mid_lr_weight": "",
 45 |   "min_snr_gamma": 10,
 46 |   "mixed_precision": "bf16",
 47 |   "model_list": "runwayml/stable-diffusion-v1-5",
 48 |   "module_dropout": 0.1,
 49 |   "multires_noise_discount": 0.2,
 50 |   "multires_noise_iterations": 8,
 51 |   "network_alpha": 8,
 52 |   "network_dim": 16,
 53 |   "network_dropout": 0.1,
 54 |   "no_token_padding": false,
 55 |   "noise_offset": "0.05",
 56 |   "noise_offset_type": "Multires",
 57 |   "num_cpu_threads_per_process": 2,
 58 |   "optimizer": "Prodigy",
 59 |   "optimizer_args": "decouple=True weight_decay=0.6 betas=0.9,0.99 use_bias_correction=True",
 60 |   "output_dir": "./test/output",
 61 |   "output_name": "locon-Prodigy",
 62 |   "persistent_data_loader_workers": false,
 63 |   "pretrained_model_name_or_path": "runwayml/stable-diffusion-v1-5",
 64 |   "prior_loss_weight": 1.0,
 65 |   "random_crop": false,
 66 |   "rank_dropout": 0.1,
 67 |   "reg_data_dir": "",
 68 |   "resume": "",
 69 |   "sample_every_n_epochs": 0,
 70 |   "sample_every_n_steps": 25,
 71 |   "sample_prompts": "a painting of a gas mask , by darius kawasaki",
 72 |   "sample_sampler": "euler_a",
 73 |   "save_every_n_epochs": 1,
 74 |   "save_every_n_steps": 0,
 75 |   "save_last_n_steps": 0,
 76 |   "save_last_n_steps_state": 0,
 77 |   "save_model_as": "safetensors",
 78 |   "save_precision": "fp16",
 79 |   "save_state": false,
 80 |   "scale_v_pred_loss_like_noise_pred": false,
 81 |   "scale_weight_norms": 1,
 82 |   "seed": "1234",
 83 |   "shuffle_caption": false,
 84 |   "stop_text_encoder_training": 0,
 85 |   "text_encoder_lr": 1.0,
 86 |   "train_batch_size": 4,
 87 |   "train_data_dir": "./test/img",
 88 |   "train_on_input": false,
 89 |   "training_comment": "",
 90 |   "unet_lr": 1.0,
 91 |   "unit": 1,
 92 |   "up_lr_weight": "",
 93 |   "use_cp": true,
 94 |   "use_wandb": false,
 95 |   "v2": false,
 96 |   "v_parameterization": false,
 97 |   "vae_batch_size": 0,
 98 |   "wandb_api_key": "",
 99 |   "weighted_captions": false,
100 |   "xformers": true
101 | }


--------------------------------------------------------------------------------
/test/config/meta-1_lat.json:
--------------------------------------------------------------------------------
 1 | {
 2 |   "test\\img\\10_darius kawasaki person\\Dariusz_Zawadzki.jpg": {
 3 |     "caption": "a painting of a steam punk skull with a gas mask , by darius kawasaki",
 4 |     "train_resolution": [
 5 |       1024,
 6 |       1024
 7 |     ]
 8 |   },
 9 |   "test\\img\\10_darius kawasaki person\\Dariusz_Zawadzki_2.jpg": {
10 |     "caption": "a painting of a man with a skull on his head , by darius kawasaki",
11 |     "train_resolution": [
12 |       1024,
13 |       1024
14 |     ]
15 |   },
16 |   "test\\img\\10_darius kawasaki person\\Dariusz_Zawadzki_3.jpg": {
17 |     "caption": "a painting of a woman with a helmet on her head , by darius kawasaki",
18 |     "train_resolution": [
19 |       1024,
20 |       1024
21 |     ]
22 |   },
23 |   "test\\img\\10_darius kawasaki person\\Dariusz_Zawadzki_4.jpg": {
24 |     "caption": "a painting of a horned man with a goat head , by darius kawasaki",
25 |     "train_resolution": [
26 |       1024,
27 |       1024
28 |     ]
29 |   },
30 |   "test\\img\\10_darius kawasaki person\\Dariusz_Zawadzki_5.jpg": {
31 |     "caption": "a painting of a man playing a piano , by darius kawasaki",
32 |     "train_resolution": [
33 |       1024,
34 |       1024
35 |     ]
36 |   },
37 |   "test\\img\\10_darius kawasaki person\\Dariusz_Zawadzki_6.jpg": {
38 |     "caption": "a painting of a robot sitting on a rock , by darius kawasaki",
39 |     "train_resolution": [
40 |       1024,
41 |       1024
42 |     ]
43 |   },
44 |   "test\\img\\10_darius kawasaki person\\Dariusz_Zawadzki_7.jpg": {
45 |     "caption": "a painting of a soldier with a helmet on , by darius kawasaki",
46 |     "train_resolution": [
47 |       1024,
48 |       1024
49 |     ]
50 |   },
51 |   "test\\img\\10_darius kawasaki person\\Dariusz_Zawadzki_8.jpg": {
52 |     "caption": "a painting of a giant crab with a large body , by darius kawasaki",
53 |     "train_resolution": [
54 |       1024,
55 |       1024
56 |     ]
57 |   }
58 | }


--------------------------------------------------------------------------------
/test/ft/.keep:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/bmaltais/kohya_ss/4161d1d80ad554f7801c584632665d6825994062/test/ft/.keep


--------------------------------------------------------------------------------
/test/img with spaces/10_darius kawasaki person/Dariusz_Zawadzki.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/bmaltais/kohya_ss/4161d1d80ad554f7801c584632665d6825994062/test/img with spaces/10_darius kawasaki person/Dariusz_Zawadzki.jpg


--------------------------------------------------------------------------------
/test/img with spaces/10_darius kawasaki person/Dariusz_Zawadzki.txt:
--------------------------------------------------------------------------------
1 | a painting of a steam punk skull with a gas mask , by darius kawasaki


--------------------------------------------------------------------------------
/test/img with spaces/10_darius kawasaki person/Dariusz_Zawadzki_2.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/bmaltais/kohya_ss/4161d1d80ad554f7801c584632665d6825994062/test/img with spaces/10_darius kawasaki person/Dariusz_Zawadzki_2.jpg


--------------------------------------------------------------------------------
/test/img with spaces/10_darius kawasaki person/Dariusz_Zawadzki_2.txt:
--------------------------------------------------------------------------------
1 | a painting of a man with a skull on his head , by darius kawasaki


--------------------------------------------------------------------------------
/test/img with spaces/10_darius kawasaki person/Dariusz_Zawadzki_3.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/bmaltais/kohya_ss/4161d1d80ad554f7801c584632665d6825994062/test/img with spaces/10_darius kawasaki person/Dariusz_Zawadzki_3.jpg


--------------------------------------------------------------------------------
/test/img with spaces/10_darius kawasaki person/Dariusz_Zawadzki_3.txt:
--------------------------------------------------------------------------------
1 | a painting of a woman with a helmet on her head , by darius kawasaki


--------------------------------------------------------------------------------
/test/img with spaces/10_darius kawasaki person/Dariusz_Zawadzki_4.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/bmaltais/kohya_ss/4161d1d80ad554f7801c584632665d6825994062/test/img with spaces/10_darius kawasaki person/Dariusz_Zawadzki_4.jpg


--------------------------------------------------------------------------------
/test/img with spaces/10_darius kawasaki person/Dariusz_Zawadzki_4.txt:
--------------------------------------------------------------------------------
1 | a painting of a horned man with a goat head , by darius kawasaki


--------------------------------------------------------------------------------
/test/img with spaces/10_darius kawasaki person/Dariusz_Zawadzki_5.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/bmaltais/kohya_ss/4161d1d80ad554f7801c584632665d6825994062/test/img with spaces/10_darius kawasaki person/Dariusz_Zawadzki_5.jpg


--------------------------------------------------------------------------------
/test/img with spaces/10_darius kawasaki person/Dariusz_Zawadzki_5.txt:
--------------------------------------------------------------------------------
1 | a painting of a man playing a piano , by darius kawasaki


--------------------------------------------------------------------------------
/test/img with spaces/10_darius kawasaki person/Dariusz_Zawadzki_6.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/bmaltais/kohya_ss/4161d1d80ad554f7801c584632665d6825994062/test/img with spaces/10_darius kawasaki person/Dariusz_Zawadzki_6.jpg


--------------------------------------------------------------------------------
/test/img with spaces/10_darius kawasaki person/Dariusz_Zawadzki_6.txt:
--------------------------------------------------------------------------------
1 | a painting of a robot sitting on a rock , by darius kawasaki


--------------------------------------------------------------------------------
/test/img with spaces/10_darius kawasaki person/Dariusz_Zawadzki_7.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/bmaltais/kohya_ss/4161d1d80ad554f7801c584632665d6825994062/test/img with spaces/10_darius kawasaki person/Dariusz_Zawadzki_7.jpg


--------------------------------------------------------------------------------
/test/img with spaces/10_darius kawasaki person/Dariusz_Zawadzki_7.txt:
--------------------------------------------------------------------------------
1 | a painting of a soldier with a helmet on , by darius kawasaki


--------------------------------------------------------------------------------
/test/img with spaces/10_darius kawasaki person/Dariusz_Zawadzki_8.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/bmaltais/kohya_ss/4161d1d80ad554f7801c584632665d6825994062/test/img with spaces/10_darius kawasaki person/Dariusz_Zawadzki_8.jpg


--------------------------------------------------------------------------------
/test/img with spaces/10_darius kawasaki person/Dariusz_Zawadzki_8.txt:
--------------------------------------------------------------------------------
1 | a painting of a giant crab with a large body , by darius kawasaki


--------------------------------------------------------------------------------
/test/img/10_darius kawasaki person/Dariusz_Zawadzki.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/bmaltais/kohya_ss/4161d1d80ad554f7801c584632665d6825994062/test/img/10_darius kawasaki person/Dariusz_Zawadzki.jpg


--------------------------------------------------------------------------------
/test/img/10_darius kawasaki person/Dariusz_Zawadzki.txt:
--------------------------------------------------------------------------------
1 | a painting of a steam punk skull with a gas mask , by darius kawasaki


--------------------------------------------------------------------------------
/test/img/10_darius kawasaki person/Dariusz_Zawadzki_2.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/bmaltais/kohya_ss/4161d1d80ad554f7801c584632665d6825994062/test/img/10_darius kawasaki person/Dariusz_Zawadzki_2.jpg


--------------------------------------------------------------------------------
/test/img/10_darius kawasaki person/Dariusz_Zawadzki_2.txt:
--------------------------------------------------------------------------------
1 | a painting of a man with a skull on his head , by darius kawasaki


--------------------------------------------------------------------------------
/test/img/10_darius kawasaki person/Dariusz_Zawadzki_3.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/bmaltais/kohya_ss/4161d1d80ad554f7801c584632665d6825994062/test/img/10_darius kawasaki person/Dariusz_Zawadzki_3.jpg


--------------------------------------------------------------------------------
/test/img/10_darius kawasaki person/Dariusz_Zawadzki_3.txt:
--------------------------------------------------------------------------------
1 | a painting of a woman with a helmet on her head , by darius kawasaki


--------------------------------------------------------------------------------
/test/img/10_darius kawasaki person/Dariusz_Zawadzki_4.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/bmaltais/kohya_ss/4161d1d80ad554f7801c584632665d6825994062/test/img/10_darius kawasaki person/Dariusz_Zawadzki_4.jpg


--------------------------------------------------------------------------------
/test/img/10_darius kawasaki person/Dariusz_Zawadzki_4.txt:
--------------------------------------------------------------------------------
1 | a painting of a horned man with a goat head , by darius kawasaki


--------------------------------------------------------------------------------
/test/img/10_darius kawasaki person/Dariusz_Zawadzki_5.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/bmaltais/kohya_ss/4161d1d80ad554f7801c584632665d6825994062/test/img/10_darius kawasaki person/Dariusz_Zawadzki_5.jpg


--------------------------------------------------------------------------------
/test/img/10_darius kawasaki person/Dariusz_Zawadzki_5.txt:
--------------------------------------------------------------------------------
1 | a painting of a man playing a piano , by darius kawasaki


--------------------------------------------------------------------------------
/test/img/10_darius kawasaki person/Dariusz_Zawadzki_6.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/bmaltais/kohya_ss/4161d1d80ad554f7801c584632665d6825994062/test/img/10_darius kawasaki person/Dariusz_Zawadzki_6.jpg


--------------------------------------------------------------------------------
/test/img/10_darius kawasaki person/Dariusz_Zawadzki_6.txt:
--------------------------------------------------------------------------------
1 | a painting of a robot sitting on a rock , by darius kawasaki


--------------------------------------------------------------------------------
/test/img/10_darius kawasaki person/Dariusz_Zawadzki_7.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/bmaltais/kohya_ss/4161d1d80ad554f7801c584632665d6825994062/test/img/10_darius kawasaki person/Dariusz_Zawadzki_7.jpg


--------------------------------------------------------------------------------
/test/img/10_darius kawasaki person/Dariusz_Zawadzki_7.txt:
--------------------------------------------------------------------------------
1 | a painting of a soldier with a helmet on , by darius kawasaki


--------------------------------------------------------------------------------
/test/img/10_darius kawasaki person/Dariusz_Zawadzki_8.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/bmaltais/kohya_ss/4161d1d80ad554f7801c584632665d6825994062/test/img/10_darius kawasaki person/Dariusz_Zawadzki_8.jpg


--------------------------------------------------------------------------------
/test/img/10_darius kawasaki person/Dariusz_Zawadzki_8.txt:
--------------------------------------------------------------------------------
1 | a painting of a giant crab with a large body , by darius kawasaki


--------------------------------------------------------------------------------
/test/log/.keep:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/bmaltais/kohya_ss/4161d1d80ad554f7801c584632665d6825994062/test/log/.keep


--------------------------------------------------------------------------------
/test/logs/.keep:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/bmaltais/kohya_ss/4161d1d80ad554f7801c584632665d6825994062/test/logs/.keep


--------------------------------------------------------------------------------
/test/masked_loss/Dariusz_Zawadzki.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/bmaltais/kohya_ss/4161d1d80ad554f7801c584632665d6825994062/test/masked_loss/Dariusz_Zawadzki.jpg


--------------------------------------------------------------------------------
/test/masked_loss/Dariusz_Zawadzki_2.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/bmaltais/kohya_ss/4161d1d80ad554f7801c584632665d6825994062/test/masked_loss/Dariusz_Zawadzki_2.jpg


--------------------------------------------------------------------------------
/test/masked_loss/Dariusz_Zawadzki_3.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/bmaltais/kohya_ss/4161d1d80ad554f7801c584632665d6825994062/test/masked_loss/Dariusz_Zawadzki_3.jpg


--------------------------------------------------------------------------------
/test/masked_loss/Dariusz_Zawadzki_4.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/bmaltais/kohya_ss/4161d1d80ad554f7801c584632665d6825994062/test/masked_loss/Dariusz_Zawadzki_4.jpg


--------------------------------------------------------------------------------
/test/masked_loss/Dariusz_Zawadzki_5.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/bmaltais/kohya_ss/4161d1d80ad554f7801c584632665d6825994062/test/masked_loss/Dariusz_Zawadzki_5.jpg


--------------------------------------------------------------------------------
/test/masked_loss/Dariusz_Zawadzki_6.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/bmaltais/kohya_ss/4161d1d80ad554f7801c584632665d6825994062/test/masked_loss/Dariusz_Zawadzki_6.jpg


--------------------------------------------------------------------------------
/test/masked_loss/Dariusz_Zawadzki_7.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/bmaltais/kohya_ss/4161d1d80ad554f7801c584632665d6825994062/test/masked_loss/Dariusz_Zawadzki_7.jpg


--------------------------------------------------------------------------------
/test/masked_loss/Dariusz_Zawadzki_8.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/bmaltais/kohya_ss/4161d1d80ad554f7801c584632665d6825994062/test/masked_loss/Dariusz_Zawadzki_8.jpg


--------------------------------------------------------------------------------
/tools/caption.py:
--------------------------------------------------------------------------------
 1 | # This script will create the caption text files in the specified folder using the specified file pattern and caption text.
 2 | #
 3 | # eg: python caption.py D:\some\folder\location "*.png, *.jpg, *.webp" "some caption text"
 4 | 
 5 | import argparse
 6 | import os
 7 | import logging
 8 | from pathlib import Path
 9 | 
def create_caption_files(image_folder: Path, file_pattern: str, caption_text: str, caption_file_ext: str, overwrite: bool):
    """Write caption files next to every image matching *file_pattern*.

    For each image matched inside ``image_folder``, a sibling file with the
    same stem and ``caption_file_ext`` as its suffix is written containing
    ``caption_text``.  Existing caption files are left untouched unless
    ``overwrite`` is True.
    """
    # A comma-separated pattern list such as "*.png, *.jpg" becomes
    # individual glob patterns with surrounding whitespace removed.
    patterns = (chunk.strip() for chunk in file_pattern.split(","))

    for glob_pattern in patterns:
        for image_path in image_folder.glob(glob_pattern):
            caption_path = image_path.with_suffix(caption_file_ext)
            # Only write when the caption is missing, or overwriting was requested.
            if overwrite or not caption_path.exists():
                caption_path.write_text(caption_text)
                logging.info(f"Caption file created: {caption_path}")
25 |                 
def writable_dir(target_path):
    """Validate *target_path* as an existing, writable directory.

    Returns the path as a ``Path`` on success.  Raises
    ``argparse.ArgumentTypeError`` otherwise, so the function can be used
    directly as an argparse ``type=`` callback.
    """
    candidate = Path(target_path)
    # Guard clauses: reject missing directories first, then unwritable ones.
    if not candidate.is_dir():
        raise argparse.ArgumentTypeError(f"Directory '{candidate}' does not exist.")
    if not os.access(candidate, os.W_OK):
        raise argparse.ArgumentTypeError(f"Directory '{candidate}' is not writable.")
    return candidate
36 | 
def main():
    """Parse CLI arguments and create caption files for matching images.

    The caption text comes either from --caption_text or from the file
    given via --caption_file (exactly one of the two is required).
    """
    # Set up logging
    logging.basicConfig(level=logging.INFO, format='%(levelname)s: %(message)s')

    # Define command-line arguments
    parser = argparse.ArgumentParser()
    parser.add_argument("image_folder", type=writable_dir, help="The folder where the image files are located")
    parser.add_argument("--file_pattern", type=str, default="*.png, *.jpg, *.jpeg, *.webp", help="the pattern to match the image file names")
    parser.add_argument("--caption_file_ext", type=str, default=".caption", help="the caption file extension.")
    parser.add_argument("--overwrite", action="store_true", default=False, help="whether to overwrite existing caption files")

    # Create a mutually exclusive group for the caption_text and caption_file arguments
    caption_group = parser.add_mutually_exclusive_group(required=True)
    caption_group.add_argument("--caption_text", type=str, help="the text to include in the caption files")
    caption_group.add_argument("--caption_file", type=argparse.FileType("r"), help="the file containing the text to include in the caption files")

    # Parse the command-line arguments
    args = parser.parse_args()

    # BUG FIX: the original passed args.caption_text unconditionally, so
    # using --caption_file crashed with write_text(None).  Read the caption
    # from the supplied file when it was given.
    if args.caption_file is not None:
        caption_text = args.caption_file.read()
        args.caption_file.close()
    else:
        caption_text = args.caption_text

    # Create the caption files
    create_caption_files(args.image_folder, args.file_pattern, caption_text, args.caption_file_ext, args.overwrite)

if __name__ == "__main__":
    main()


--------------------------------------------------------------------------------
/tools/cleanup_captions.py:
--------------------------------------------------------------------------------
 1 | import os
 2 | import argparse
 3 | import logging
 4 | from pathlib import Path
 5 | 
 6 | def writable_dir(target_path):
 7 |     """ Check if a path is a valid directory and that it can be written to. """
 8 |     path = Path(target_path)
 9 |     if path.is_dir():
10 |         if os.access(path, os.W_OK):
11 |             return path
12 |         else:
13 |             raise argparse.ArgumentTypeError(f"Directory '{path}' is not writable.")
14 |     else:
15 |         raise argparse.ArgumentTypeError(f"Directory '{path}' does not exist.")
16 |     
def main(folder_path: Path, extension: str, keywords: set = None):
    """Strip unwanted tags from every caption file in *folder_path*.

    Each file ending in *extension* is read as a comma-separated tag list;
    tags found in *keywords* and empty tags are dropped, then the file is
    rewritten in place as a comma-separated string.
    """
    for file_name in os.listdir(folder_path):
        # Skip anything that is not a caption file.
        if not file_name.endswith(extension):
            continue
        file_path = os.path.join(folder_path, file_name)
        try:
            with open(file_path, "r") as f:
                raw_text = f.read()
            # Parse the comma-separated tag list, trimming whitespace.
            cleaned = [tag.strip() for tag in raw_text.split(",")]
            # Drop blacklisted keywords first, then empty entries.
            if keywords:
                cleaned = [tag for tag in cleaned if tag not in keywords]
            cleaned = [tag for tag in cleaned if tag.strip() != ""]
            # Rewrite the file with the surviving tags.
            with open(file_path, "w") as f:
                f.write(", ".join(cleaned))
            logging.info(f"Processed {file_name}")
        except Exception as e:
            logging.error(f"Error processing {file_name}: {e}")
37 | 
if __name__ == "__main__":
    # Set up logging
    logging.basicConfig(level=logging.INFO, format='%(levelname)s: %(message)s')

    # CLI: directory of caption files, extension filter, optional keyword list.
    arg_parser = argparse.ArgumentParser(description="Remove specified keywords from all text files in a directory.")
    arg_parser.add_argument("folder_path", type=writable_dir, help="path to directory containing text files")
    arg_parser.add_argument("-e", "--extension", type=str, default=".txt", help="file extension of text files to be processed (default: .txt)")
    arg_parser.add_argument("-k", "--keywords", type=str, nargs="*", help="Optional: list of keywords to be removed from text files. If not provided, the default list will be used.")
    cli_args = arg_parser.parse_args()

    # Fall back to a built-in blacklist of common auto-tagger tags when
    # no keywords were given on the command line.
    default_keywords = {
        "1girl", "solo", "blue eyes", "brown eyes", "blonde hair", "black hair",
        "realistic", "red lips", "lips", "artist name", "makeup", "brown hair",
        "dark skin", "dark-skinned female", "medium breasts", "breasts", "1boy",
    }
    chosen_keywords = set(cli_args.keywords) if cli_args.keywords else default_keywords

    main(cli_args.folder_path, cli_args.extension, chosen_keywords)
54 | 


--------------------------------------------------------------------------------
/tools/convert_html_to_md.py:
--------------------------------------------------------------------------------
 1 | import argparse
 2 | import os
 3 | import requests
 4 | from bs4 import BeautifulSoup
 5 | from urllib.parse import urljoin
 6 | from html2text import html2text
 7 | from pathlib import Path
 8 | 
 9 | def is_writable_path(target_path):
10 |     """
11 |     Check if a path is writable.
12 |     """
13 |     path = Path(os.path.dirname(target_path))
14 |     if path.is_dir():
15 |         if os.access(path, os.W_OK):
16 |             return target_path
17 |         else:
18 |             raise argparse.ArgumentTypeError(f"Directory '{path}' is not writable.")
19 |     else:
20 |         raise argparse.ArgumentTypeError(f"Directory '{path}' does not exist.")
21 | 
def main(url, markdown_path):
    """Fetch *url*, save its images under ./logs, and write the page to
    *markdown_path* as markdown."""
    # One session reuses the connection for the page and all of its images.
    with requests.Session() as http:
        page = http.get(url)
        page.raise_for_status()  # Check for HTTP issues

        # Parse the HTML so we can locate the image tags.
        dom = BeautifulSoup(page.text, 'html.parser')

        # The image download target directory must exist first.
        os.makedirs("./logs", exist_ok=True)

        # Save every <img> referenced by the page; failures are reported
        # but do not abort the conversion.
        for img_tag in dom.find_all('img'):
            image_url = urljoin(url, img_tag['src'])
            try:
                img_reply = http.get(image_url, stream=True)
                img_reply.raise_for_status()
                image_name = os.path.join("./logs", os.path.basename(image_url))
                with open(image_name, 'wb') as file:
                    file.write(img_reply.content)
            except requests.RequestException as e:
                print(f"Failed to download {image_url}: {e}")

        # Turn the raw HTML into markdown text.
        markdown_content = html2text(page.text)

        # Persist the markdown; report success or failure either way.
        try:
            with open(markdown_path, "w", encoding="utf8") as file:
                file.write(markdown_content)
            print(f"Markdown content successfully written to {markdown_path}")
        except Exception as e:
            print(f"Failed to write markdown to {markdown_path}: {e}")
57 | 
if __name__ == "__main__":
    # CLI entry point: source URL in, markdown file out.
    cli = argparse.ArgumentParser(description="Convert HTML to Markdown")
    cli.add_argument("url", help="The URL of the webpage to convert")
    cli.add_argument("markdown_path", help="The path to save the converted markdown file", type=is_writable_path)
    parsed = cli.parse_args()

    main(parsed.url, parsed.markdown_path)
65 | 


--------------------------------------------------------------------------------
/tools/convert_images_to_hq_jpg.py:
--------------------------------------------------------------------------------
 1 | import argparse
 2 | import glob
 3 | import os
 4 | from pathlib import Path
 5 | from PIL import Image
 6 | 
 7 | 
 8 | def writable_dir(target_path):
 9 |     """ Check if a path is a valid directory and that it can be written to. """
10 |     path = Path(target_path)
11 |     if path.is_dir():
12 |         if os.access(path, os.W_OK):
13 |             return path
14 |         else:
15 |             raise argparse.ArgumentTypeError(f"Directory '{path}' is not writable.")
16 |     else:
17 |         raise argparse.ArgumentTypeError(f"Directory '{path}' does not exist.")
18 | 
def main(directory, in_ext, quality, delete_originals):
    """Convert every *.{in_ext} image in *directory* to a high-quality JPEG.

    Existing .jpg targets are never overwritten.  When *delete_originals*
    is True the source file is removed after a successful conversion.

    quality: JPEG quality (0-100) passed through to Pillow's save().
    """
    out_ext = "jpg"

    # Create the file pattern string using the input file extension
    file_pattern = f"*.{in_ext}"

    # Get the list of files in the directory that match the file pattern
    files = glob.glob(os.path.join(directory, file_pattern))

    for file in files:
        # Create the output path with the .jpg extension
        new_path = Path(file).with_suffix(f".{out_ext}")

        # Skip BEFORE opening the image: the original opened the file first,
        # which wasted work and leaked the open file handle on every skip.
        if new_path.exists():
            print(f"Skipping {file} because {new_path} already exists")
            continue

        # Context manager guarantees the image handle is closed even if
        # save() raises (the original never closed the image at all).
        with Image.open(file) as img:
            # Save the image to the new file as high-quality JPEG
            img.save(new_path, quality=quality, optimize=True)

        # Optionally, delete the original file
        if delete_originals:
            os.remove(file)
48 | 
49 | 
if __name__ == "__main__":
    # CLI: source directory plus conversion options.
    cli = argparse.ArgumentParser()
    cli.add_argument("directory", type=writable_dir,
                     help="the directory containing the images to be converted")
    cli.add_argument("--in_ext", type=str, default="webp",
                     help="the input file extension")
    cli.add_argument("--quality", type=int, default=95,
                     help="the JPEG quality (0-100)")
    cli.add_argument("--delete_originals", action="store_true",
                     help="whether to delete the original files after conversion")

    opts = cli.parse_args()

    main(directory=opts.directory, in_ext=opts.in_ext, quality=opts.quality, delete_originals=opts.delete_originals)
66 | 


--------------------------------------------------------------------------------
/tools/convert_images_to_webp.py:
--------------------------------------------------------------------------------
 1 | import argparse
 2 | from pathlib import Path
 3 | import os
 4 | from PIL import Image
 5 | 
 6 | def writable_dir(target_path):
 7 |     """ Check if a path is a valid directory and that it can be written to. """
 8 |     path = Path(target_path)
 9 |     if path.is_dir():
10 |         if os.access(path, os.W_OK):
11 |             return path
12 |         else:
13 |             raise argparse.ArgumentTypeError(f"Directory '{path}' is not writable.")
14 |     else:
15 |         raise argparse.ArgumentTypeError(f"Directory '{path}' does not exist.")
16 |     
def main():
    """Convert images in a directory to lossless copies with a new extension.

    Parses its own CLI arguments.  Never overwrites an existing output file,
    reports per-file errors without aborting, and optionally deletes the
    source file after a successful conversion.
    """
    # Define the command-line arguments
    parser = argparse.ArgumentParser()
    parser.add_argument("directory", type=writable_dir,
                        help="the directory containing the images to be converted")
    parser.add_argument("--in_ext", type=str, default="webp",
                        help="the input file extension")
    parser.add_argument("--out_ext", type=str, default="webp",
                        help="the output file extension")
    parser.add_argument("--delete_originals", action="store_true",
                        help="whether to delete the original files after conversion")

    # Parse the command-line arguments
    args = parser.parse_args()
    directory = Path(args.directory)
    in_ext = args.in_ext
    delete_originals = args.delete_originals

    # Create the file pattern string using the input file extension
    file_pattern = f"*.{in_ext}"

    # Get the list of files in the directory that match the file pattern
    files = list(directory.glob(file_pattern))

    for file in files:
        try:
            # Create a new file path with the output file extension
            new_path = file.with_suffix(f".{args.out_ext}")
            print(new_path)

            # Check the destination BEFORE opening the source: the original
            # opened the image first and leaked the file handle whenever it
            # skipped (and on any exception before close()).
            if new_path.exists():
                print(f"Skipping {file} because {new_path} already exists")
                continue

            # The context manager closes the image even if save() raises.
            with Image.open(file) as img:
                # Save the image to the new file as lossless
                img.save(new_path, lossless=True)

            # Optionally, delete the original file
            if delete_originals:
                file.unlink()
        except Exception as e:
            print(f"Error processing {file}: {e}")
68 | 
69 | 
# Script entry point: only run the converter when executed directly.
if __name__ == "__main__":
    main()
72 | 


--------------------------------------------------------------------------------
/tools/create_txt_from_images.py:
--------------------------------------------------------------------------------
 1 | import os
 2 | import argparse
 3 | 
 4 | def main(folder_path):
 5 |     # Validate if the folder exists
 6 |     if not os.path.exists(folder_path):
 7 |         print("The specified folder does not exist.")
 8 |         return
 9 |     
10 |     # Loop through all files in the directory
11 |     for filename in os.listdir(folder_path):
12 |         # Check if the file is an image file (webp, jpg, png)
13 |         if filename.lower().endswith(('.webp', '.jpg', '.png')):
14 |             # Remove the file extension from the filename
15 |             name_without_extension = os.path.splitext(filename)[0]
16 |             
17 |             # Construct the name of the txt file
18 |             txt_filename = f"{name_without_extension}.txt"
19 |             
20 |             # Extract the content before the underscore
21 |             content = name_without_extension.split("_")[0]
22 |             
23 |             # Write the content to the txt file
24 |             with open(os.path.join(folder_path, txt_filename), "w") as txt_file:
25 |                 txt_file.write(content)
26 | 
if __name__ == "__main__":
    # Single positional argument: the folder whose images get captions.
    cli = argparse.ArgumentParser(description='Process a folder.')
    cli.add_argument('folder_path', type=str, help='Path to the folder to process')

    opts = cli.parse_args()
    main(opts.folder_path)
33 | 


--------------------------------------------------------------------------------
/tools/extract loha and lora examples.txt:
--------------------------------------------------------------------------------
 1 | D:\kohya_ss\.venv\Scripts\python.exe D:\kohya_ss\tools\extract_lora_from_models-nw.py `
 2 | --save_precision fp16 `
 3 | --model_org E:/models/sdxl/base/sd_xl_base_1.0_0.9vae.safetensors `
 4 | --model_tuned E:/models/sdxl/dreamshaperXL_alpha2Xl10.safetensors `
 5 | --save_to E:/lora/sdxl/dreamshaperXL_alpha2Xl10_sv_fro_0.9_1024.safetensors `
 6 | --dim 1024 `
 7 | --device cuda `
 8 | --sdxl `
 9 | --dynamic_method sv_fro `
10 | --dynamic_param 0.9 `
11 | --verbose
12 | 
13 | D:\kohya_ss\.venv\Scripts\python.exe D:\kohya_ss\tools\extract_lora_from_models-nw.py `
14 | --save_precision fp16 `
15 | --model_org E:/models/sdxl/base/sd_xl_base_1.0_0.9vae.safetensors `
16 | --model_tuned E:/models/sdxl/proteus_v06.safetensors `
17 | --save_to E:/lora/sdxl/proteus_v06_sv_cumulative_knee_1024.safetensors `
18 | --dim 1024 `
19 | --device cuda `
20 | --sdxl `
21 | --dynamic_method sv_cumulative_knee `
22 | --verbose
23 | 
24 | D:\kohya_ss\.venv\Scripts\python.exe D:\kohya_ss\tools\lr_finder.py `
25 | E:/models/sdxl/base/sd_xl_base_1.0_0.9vae.safetensors `
26 | E:/models/sdxl/dreamshaperXL_alpha2Xl10.safetensors `
27 |     --lr_finder_num_layers 16 `
28 |     --lr_finder_min_lr 1e-8 `
29 |     --lr_finder_max_lr 0.2 `
30 |     --lr_finder_num_steps 120 `
31 |     --lr_finder_iters_per_step 40 `
32 |     --rank 8 `
33 |     --initial_alpha 8.0 `
34 |     --precision bf16 `
35 |     --device cuda `
36 |     --lr_finder_plot `
37 |     --lr_finder_show_plot
38 | 
39 | D:\kohya_ss\.venv\Scripts\python.exe D:\kohya_ss\tools\extract_loha_from_tuned_model.py `
40 | E:/models/sdxl/base/sd_xl_base_1.0_0.9vae.safetensors `
41 | E:/models/sdxl/dreamshaperXL_alpha2Xl10.safetensors `
42 | E:/lora/sdxl/dreamshaperXL_alpha2Xl10_loha_1e-7.safetensors `
43 | --rank 2 `
44 | --initial_alpha 2 `
45 | --max_rank_retries 7 `
46 | --rank_increase_factor 2 `
47 | --max_iterations 8000 `
48 | --min_iterations 400 `
49 | --target_loss 1e-7 `
50 | --lr 1e-01 `
51 | --device cuda `
52 | --precision fp32 `
53 | --verbose `
54 | --save_weights_dtype bf16 `
55 | --progress_check_interval 100 `
56 | --save_every_n_layers 10 `
57 | --keep_n_resume_files 10 `
58 | --skip_delta_threshold 1e-7 `
59 | --rank_search_strategy binary_search_min_rank `
60 | --probe_aggressive_early_stop
61 | 
62 | D:\kohya_ss\.venv\Scripts\python.exe D:\kohya_ss\tools\model_diff_report.py `
63 | E:/models/sdxl/base/sd_xl_base_1.0_0.9vae.safetensors `
64 | E:/models/sdxl/dreamshaperXL_alpha2Xl10.safetensors `
65 | --top_n_diff 15 --plot_histograms --plot_histograms_top_n 3 --output_dir ./analysis_results


--------------------------------------------------------------------------------
/tools/gradio_theme_builder.py:
--------------------------------------------------------------------------------
# Tiny utility script: launch Gradio's interactive theme builder UI.
import gradio as gr
gr.themes.builder()
3 | 


--------------------------------------------------------------------------------
/tools/prepare_presets.py:
--------------------------------------------------------------------------------
 1 | import json
 2 | import argparse
 3 | import glob
 4 | 
 5 | 
 6 | def remove_items_with_keywords(json_file_path):
 7 |     keywords = [
 8 |         "caption_metadata_filename",
 9 |         "dir",
10 |         "image_folder",
11 |         "latent_metadata_filename",
12 |         "logging_dir",
13 |         "model_list",
14 |         "output_dir",
15 |         "output_name",
16 |         "pretrained_model_name_or_path",
17 |         "resume",
18 |         "save_model_as",
19 |         "save_state",
20 |         "sample_",
21 |         "train_dir",
22 |         "wandb_api_key",
23 |     ]
24 | 
25 |     with open(json_file_path) as file:
26 |         data = json.load(file)
27 | 
28 |     for key in list(data.keys()):
29 |         for keyword in keywords:
30 |             if keyword in key:
31 |                 del data[key]
32 |                 break
33 | 
34 |     sorted_data = {k: data[k] for k in sorted(data)}
35 | 
36 |     with open(json_file_path, "w") as file:
37 |         json.dump(sorted_data, file, indent=4)
38 | 
39 |     print(
40 |         "Items with keywords have been removed from the JSON file and the list has been sorted alphabetically:",
41 |         json_file_path,
42 |     )
43 | 
44 | 
if __name__ == "__main__":
    # CLI: one or more JSON file paths, each possibly a glob pattern.
    cli = argparse.ArgumentParser(
        description="Remove items from JSON files based on keywords in the keys"
    )
    cli.add_argument(
        "json_files", type=str, nargs="+", help="Path(s) to the JSON file(s)"
    )
    opts = cli.parse_args()

    # Expand each pattern and clean every matching preset file.
    for pattern in opts.json_files:
        for matched_path in glob.glob(pattern):
            remove_items_with_keywords(matched_path)
58 | 


--------------------------------------------------------------------------------
/tools/prune.py:
--------------------------------------------------------------------------------
 1 | import argparse
 2 | import torch
 3 | from tqdm import tqdm
 4 | 
 5 | parser = argparse.ArgumentParser(description="Prune a model")
 6 | parser.add_argument("model_prune", type=str, help="Path to model to prune")
 7 | parser.add_argument("prune_output", type=str, help="Path to pruned ckpt output")
 8 | parser.add_argument("--half", action="store_true", help="Save weights in half precision.")
 9 | args = parser.parse_args()
10 | 
11 | print("Loading model...")
12 | model_prune = torch.load(args.model_prune)
13 | theta_prune = model_prune["state_dict"]
14 | theta = {}
15 | 
16 | print("Pruning model...")
17 | for key in tqdm(theta_prune.keys(), desc="Pruning keys"):
18 |     if "model" in key:
19 |         theta.update({key: theta_prune[key]})
20 | 
21 | del theta_prune
22 | 
23 | if args.half:
24 |     print("Halving model...")
25 |     state_dict = {k: v.half() for k, v in tqdm(theta.items(), desc="Halving weights")}
26 | else:
27 |     state_dict = theta
28 | 
29 | del theta
30 | 
31 | print("Saving pruned model...")
32 | 
33 | torch.save({"state_dict": state_dict}, args.prune_output)
34 | 
35 | del state_dict
36 | 
37 | print("Done pruning!")


--------------------------------------------------------------------------------
/tools/rename_depth_mask.py:
--------------------------------------------------------------------------------
 1 | import os
 2 | import argparse
 3 | 
 4 | # Define the command line arguments
 5 | parser = argparse.ArgumentParser(description='Rename files in a folder')
 6 | parser.add_argument('folder', metavar='folder', type=str, help='the folder containing the files to rename')
 7 | 
 8 | # Parse the arguments
 9 | args = parser.parse_args()
10 | 
11 | # Get the list of files in the folder
12 | files = os.listdir(args.folder)
13 | 
14 | # Loop through each file in the folder
15 | for file in files:
16 |     # Check if the file has the expected format
17 |     if file.endswith('-0000.png'):
18 |         # Get the new file name
19 |         new_file_name = file[:-9] + '.mask'
20 |         # Rename the file
21 |         os.rename(os.path.join(args.folder, file), os.path.join(args.folder, new_file_name))
22 | 


--------------------------------------------------------------------------------