├── .github
└── ISSUE_TEMPLATE
│ ├── bug_report.yaml
│ └── feature_request.yml
├── .gitignore
├── HelpBatchCheckpointsPrompt.md
├── LICENSE
├── README.md
├── img
├── BasePrompt.png
├── CheckpointLoop.png
└── grid.png
├── javascript
└── hints.js
├── pyproject.toml
└── scripts
├── BatchParams.py
├── CivitaihelperPrompts.py
├── Logger.py
├── Save.py
├── Utils.py
├── batchCheckpointsPrompt.py
└── settings.py
/.github/ISSUE_TEMPLATE/bug_report.yaml:
--------------------------------------------------------------------------------
1 | name: Bug Report
2 | description: File a bug report
3 | title: "[Bug]: "
4 | labels: ["bug", "triage"]
5 |
6 | body:
7 | - type: markdown
8 | attributes:
9 | value: |
10 | Thanks for taking the time to fill out this bug report!
11 |
12 | - type: checkboxes
13 | id: otherIssues
14 | attributes:
15 | label: Please search to see if an issue already exists for the bug you encountered, and that it hasn't been fixed in a recent build/commit.
16 | options:
17 | - label: I have searched the existing issues
18 | required: true
19 |
20 |
21 | - type: textarea
22 | id: what-happened
23 | attributes:
24 | label: What happened?
25 | description: Also tell us, what did you expect to happen?
26 | placeholder: Tell us what you see!
27 | value: "A bug happened!"
28 | validations:
29 | required: true
30 |
31 | - type: dropdown
32 | id: webui
33 | attributes:
34 | multiple: true
35 | label: Which WebUI
36 | description: What WebUI do you use?
37 | options:
38 | - AUTOMATIC1111 (Default)
39 | - Vladmandic (not tested)
40 | validations:
41 | required: true
42 |
43 | - type: dropdown
44 | id: webui-version
45 | attributes:
46 | multiple: true
47 | label: WebUI version
48 | description: Which WebUI version do you use?
49 | options:
50 | - 1.9.*
51 | - 1.8.*
52 | - 1.7.*
53 | - 1.6.*
54 | - 1.5.*
55 | - 1.4.*
56 | - 1.3.*
57 | - 1.2.*
58 | - 1.1.*
59 | - other
60 | validations:
61 | required: true
62 |
63 | - type: dropdown
64 | id: browsers
65 | attributes:
66 | label: What browsers are you seeing the problem on?
67 | multiple: true
68 | options:
69 | - Firefox
70 | - Chrome
71 | - Safari
72 | - Microsoft Edge
73 | - Other
74 |
75 | - type: dropdown
76 | id: py-version
77 | attributes:
78 | label: What Python version are you running on?
79 | multiple: false
80 | options:
81 | - Python 3.10.x
82 | - Python 3.11.x (above, not supported yet)
83 | - Python 3.9.x (below, not recommended)
84 |
85 | - type: textarea
86 | id: steps
87 | attributes:
88 | label: Steps to reproduce the problem
89 | description: Please provide us with precise step by step information on how to reproduce the bug
90 | value: |
91 | 1. Go to ....
92 | 2. Press ....
93 | 3. ...
94 | validations:
95 | required: true
96 |
97 | - type: textarea
98 | id: extensions
99 | attributes:
100 | label: List of extensions
101 | description: Are you using any extensions other than built-ins and this one? If yes, provide a list, you can copy it at "Extensions" tab. Write "No" otherwise.
102 | validations:
103 | required: true
104 |
105 | - type: textarea
106 | id: logs
107 | attributes:
108 | label: Relevant log output
109 | description: Please provide **full** cmd/terminal logs from the moment you started UI to the end of it, after your bug happened. If it's very long, provide a link to pastebin or similar service. This will be automatically formatted into code, so no need for backticks.
110 | render: shell
111 |
--------------------------------------------------------------------------------
/.github/ISSUE_TEMPLATE/feature_request.yml:
--------------------------------------------------------------------------------
1 | name: Feature request
2 | description: Request a new feature
3 | title: "[Feature Request]: "
4 | labels: ["enhancement"]
5 |
6 | body:
7 | - type: markdown
8 | attributes:
9 | value: |
10 | Thanks for taking the time to fill out this feature request!
11 |
12 | - type: checkboxes
13 | id: otherIssues
14 | attributes:
15 | label: Please search to see if an issue already exists for the feature you want, and that it hasn't been fixed in a recent build/commit.
16 | options:
17 | - label: I have searched the existing issues
18 | required: true
19 |
20 | - type: textarea
21 | id: what-should-happen
22 | attributes:
23 | label: What would this feature do?
24 | description: Also tell us, what your feature idea would do?
25 | validations:
26 | required: true
27 |
28 | - type: textarea
29 | id: workflow
30 | attributes:
31 | label: What is the workflow for your feature idea?
32 | description: Tell us how your feature idea would work. What Syntax would you use (if applicable)?
33 | validations:
34 | required: true
35 |
36 | - type: textarea
37 | id: other-info
38 | attributes:
39 | label: Any other information?
40 | description: Any other information you want to add?
41 | validations:
42 | required: false
43 |
--------------------------------------------------------------------------------
/.gitignore:
--------------------------------------------------------------------------------
1 | scripts/__pycache__
--------------------------------------------------------------------------------
/HelpBatchCheckpointsPrompt.md:
--------------------------------------------------------------------------------
1 | ## what is this all about?
2 |
3 | This script allows you to try different checkpoints with different prompts. Each checkpoint can have its own prompt, or the prompt from the positive prompt field is inserted into the checkpoint-specific prompt at a position you specify.
4 |
5 |
6 |
7 | ## checkpoint names
8 | either enter the names of the checkpoints yourself, keep in mind that if you have sorted them into sub-folders, you have to add the sub-folders to the name, or press this 📒 button next to the box and all existing checkpoints will be loaded into the textbox.
9 |
10 | ### syntax:
11 | - `checkpointA, checkpointB` Separate the checkpoint names with a comma.
12 | - `@index:number` Is automatically added after the checkpoint name when you load the checkpoint by 📒 button. Ignored by the program, but can help you to see which checkpoint belongs to which prompt.
13 |
14 |
15 |
16 | ## Prompts/prompt templates for Checkpoints
17 | here you enter the prompts for the checkpoints, each in the same order as the checkpoints are.
18 | If you have installed the [Civitai Helper](https://github.com/butaixianran/Stable-Diffusion-Webui-Civitai-Helper) extension, this button automatically loads the prompts from the checkpoint thumbnails into the textbox.
19 | If you don't have this installed it will simply load `{prompt};` for each checkpoint. Just like if there is no prompt for the preview image.
20 |
21 | hires fix always uses the same prompts as for the first pass of the checkpoint, even if extra hires fix prompts were specified.
22 | ### syntax:
23 | - `promptA; promptB` Separate the prompts with a semicolon.
24 | - `@index:number` Is automatically added after the checkpoint name when you load the checkpoint by 📒 button.
25 | - `{prompt}` insert this at the point in the checkpoint specific prompt where you want the positive prompt to be inserted.
26 | - `{{size:WIDTHxHEIGHT}}` defines the size of the image. When not set, the default size is set in the UI.
27 | - `{{count:number}}` Add this to the end of the prompt to set a different batch count for this checkpoint
28 | - `{{clip_skip:number}}` Add this to the end of the prompt to set a different clip skip for this checkpoint
29 | - `{{neg: negative prompt text here}}` Add this to the end of the prompt, the text will be simply added to the back of the negative prompt
30 | - `{{style:StyleName}}` Add this to your prompt to use a Style saved in A1111. If you have `{prompt}` in your style and your prompt template looks something like `template prompt {{style:StyleName}}` the end prompt will look like `part of the style before {prompt}, prompt template, base prompt, part of the style prompt after {prompt}`
31 |
32 | These can be changed in the setting. **you need to know Regexp to use this!** Look at the code in BatchParams.py to see how it works internally.
33 |
34 | **SDXL:** If you use SDXL and 1.X/2.X checkpoints set the *VAE* to `Automatic` and set the appropriate sizes for each prompt.
35 |
36 |
37 | 🔢 adds the `@index:number` to the end of the Checkpoints and Prompts. If already there updates them.
38 |
39 |
40 | ## Grid margins (px)
41 | specifies how many pixels should be between the checkpoint images in the grid created in the end
42 |
43 |
44 |
45 | ## save
46 | You can save the state. Use a unique name for this. I.e. no duplicate names. Press the 💾 button to save.
47 | - To overwrite a saved state, check the `Overwrite existing save` checkbox and press the 💾 button.
48 | - To append to a saved state, check the `append existing save` checkbox and press the 💾 button.
49 |
50 | ### load
51 | To reload a saved state, select it from the drop-down menu and press the 📒 button. This is a multiple selection menu, but only the first selection is taken into account.
52 | To load new saves, press the 🔄 button before selecting a save from the drop-down menu.
53 |
54 |
55 |
56 | ## advanced settings
57 | The base model version of the checkpoint can now be inserted with the 🔢 button next to the index, but this is quite slow and takes about 5 minutes on my system with 90 checkpoints. To enable this, check the checkbox `Add model version to checkpoint names` under advanced settings.
58 |
59 |
60 |
61 | ## more
62 | You found a bug or want to contribute to the script? Visit the github page (link can be found in the extensions tab)
--------------------------------------------------------------------------------
/LICENSE:
--------------------------------------------------------------------------------
1 | MIT License
2 |
3 | Copyright (c) 2023 Tom Haelbich
4 |
5 | Permission is hereby granted, free of charge, to any person obtaining a copy
6 | of this software and associated documentation files (the "Software"), to deal
7 | in the Software without restriction, including without limitation the rights
8 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
9 | copies of the Software, and to permit persons to whom the Software is
10 | furnished to do so, subject to the following conditions:
11 |
12 | The above copyright notice and this permission notice shall be included in all
13 | copies or substantial portions of the Software.
14 |
15 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
18 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
20 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
21 | SOFTWARE.
--------------------------------------------------------------------------------
/README.md:
--------------------------------------------------------------------------------
1 | # The project is no longer being maintained. It might still work in A1111 and Forge, it might not.
2 |
3 |
4 | # batch Checkpoints with Prompt
5 | a script for [AUTOMATIC1111](https://github.com/AUTOMATIC1111/stable-diffusion-webui)
6 |
7 | ### what does it do?
8 | Creates images with different checkpoints and prompts. Either different prompts for each checkpoint or the base prompt is inserted into each template for the checkpoint.
9 |
10 | ##### Forks
11 | Checkout @RandyHaylor [Fork](https://github.com/RandyHaylor/SD-WebUI-BatchCheckpointPrompt) for some advanced features like:
12 | - iterate through all checkpoints without the same amount of prompts
13 | - iterator for prompts (for embedding version testing etc)
14 |
15 | #### why use this and not x/y/z plot?
16 | Different checkpoints need different trigger words, or you want to test the same prompt with a photorealistic model and an anime model, then it would be good if the prompt for the anime model is not `RAW Color photo of ...`
17 | This script takes the positive prompt and always inserts it into the prompt for the respective model, so that the prompt for the anime model is no longer RAW Color photo, etc.
18 |
19 | ### Installation
20 | can now be installed like an extension:
21 | extensions > install from URL
22 |
23 | Copying the script to the scripts folder is deprecated
24 |
25 |
26 | ### Usage
27 |
28 | detailed docs [here](https://github.com/h43lb1t0/BatchCheckpointPrompt/blob/main/HelpBatchCheckpointsPrompt.md) and in the help tab in the script
29 |
30 | #### Load Checkpoint names
31 | press the 📒 button below the checkpoint text field and all checkpoints will be automatically loaded into the text field
32 |
33 | #### get prompts
34 | If you have installed the [Civitai Helper](https://github.com/zixaphir/Stable-Diffusion-Webui-Civitai-Helper) extension the 📒 button below the prompt text field will load the previews prompt for each Checkpoint
35 |
36 | #### Save and Load
37 | Save your checkpoint prompt combinations and load them again at the next startup
38 | Give your saves a unique name, and press the 💾 button. So far only the console shows if the save worked.
39 | Reload a saved state by selecting the name from the Dropdown menu and then pressing the 📒 button below it.
40 |
41 | The saved values appear only after a restart of the UI in the Dropdown menu, that should definitely be fixed later on.
42 |
43 | #### Prompts
44 | In the Positive prompt write your prompt without any trigger words. E.g. "one young woman, long blonde hair, wearing a black skirt and a white blouse,standing in nature".
45 | In the script add the checkpoint names in the first input box. Separate the names with a comma.
46 | In the second textbox you write the prompts for each checkpoint in the same order as the checkpoints. at the place where your base prompt should be inserted you write ``{prompt}``. The prompts are separated with a semicolon.
47 |
48 | hires fix always uses the same prompts as for the first pass of the checkpoint, even if extra hires fix prompts were specified
49 |
50 | An example of replicantV30_v30 and artiusV21_v21:
51 | 
52 | 
53 |
54 |
55 | **The Prompt for replicantV30_v30:**
56 |
57 | `(masterpiece, best quality),from above , one young woman, long blonde hair, wearing a black skirt and a pink blouse,standing in an urban park area`
58 |
59 |
60 | **The prompt for artiusV21_v21:**
61 |
62 | `Portrait photo of one young woman, long blonde hair, wearing a black skirt and a pink blouse,standing in an urban park area, (cinematic:1.5), epic realistic, hyperdetailed, insane details, intricate details, accent lighting, soft volumetric light, bokeh, (dramatic light:1.2), (neutral colors:1.3), cross process`
63 |
64 |
65 | 
66 |
67 | some settings can be changed individually for each checkpoint. Syntax can be found in the help tab of the script
68 |
69 |
70 | ### bugs
71 |
72 | although the correct info texts are displayed, send to img2img and send to inpaint do not pass the correct data, but only the base prompt.
73 |
74 |
75 |
76 | works with [Dynamic Prompts](https://github.com/adieyal/sd-dynamic-prompts), but jinja2 templates can cause unexpected behavior.
77 |
78 | if you find any other bugs besides the ones mentioned above, open a new Issue. Please give as many details as possible
79 |
80 | ### contribution
81 | If you want to contribute something to the script just open a pull request and I will check it. There is still some work to do.
82 |
83 |
84 | ### Roadmap
85 |
86 | - [x] delete checkpoints and prompts at index
87 | - [ ] only load models for a specific sd version
88 | - [x] add negative prompt
89 | - [x] add basemodel version next to the Checkpointname
90 | - [x] reload button for save and load
91 | - [x] add clip skip for each checkpoint
92 |
--------------------------------------------------------------------------------
/img/BasePrompt.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/h43lb1t0/SD-WebUI-BatchCheckpointPrompt/cf99e1897f8f91a40aff4d5c6beac57761817f98/img/BasePrompt.png
--------------------------------------------------------------------------------
/img/CheckpointLoop.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/h43lb1t0/SD-WebUI-BatchCheckpointPrompt/cf99e1897f8f91a40aff4d5c6beac57761817f98/img/CheckpointLoop.png
--------------------------------------------------------------------------------
/img/grid.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/h43lb1t0/SD-WebUI-BatchCheckpointPrompt/cf99e1897f8f91a40aff4d5c6beac57761817f98/img/grid.png
--------------------------------------------------------------------------------
/javascript/hints.js:
--------------------------------------------------------------------------------
(function () {
    // Tooltip text, keyed by the visible label of each script button.
    const TOOLTIPS = {
        '🔄': 'Refresh saves',
        '💾': 'save',
        '\uD83D\uDCD2\u200B': 'load saves',
        '\uD83D\uDCD2\u200D': 'load prompts from Civitai extension',
        '🔢': 'add index to prompt and checkpoints',
    };

    // Re-apply titles whenever Gradio re-renders the UI.
    onUiUpdate(() => {
        const buttons = gradioApp().querySelectorAll('.batch-checkpoint-prompt');
        buttons.forEach((button) => {
            const text = TOOLTIPS[button.textContent];
            if (text) {
                button.title = text;
            }
        });
    });
})();
--------------------------------------------------------------------------------
/pyproject.toml:
--------------------------------------------------------------------------------
1 | [tool.ruff]
2 |
3 |
4 | # Same as Black.
5 | line-length = 88
6 | indent-width = 4
7 |
8 | target-version = "py310"
9 |
10 | [tool.ruff.lint]
11 | # Enable Pyflakes (`F`) and a subset of the pycodestyle (`E`) codes by default.
12 | #select = ["E4", "E7", "E9", "F"]
13 | ignore = ["E402", "E722"]
14 |
15 | # Allow fix for all enabled rules (when `--fix`) is provided.
16 | fixable = ["ALL"]
17 | unfixable = []
18 |
19 | # Allow unused variables when underscore-prefixed.
20 | dummy-variable-rgx = "^(_+|(_+[a-zA-Z0-9_]*[a-zA-Z0-9]+?))$"
21 |
22 | [tool.ruff.format]
23 | # Like Black, use double quotes for strings.
24 | quote-style = "double"
25 |
26 | # Like Black, indent with spaces, rather than tabs.
27 | indent-style = "space"
28 |
29 | # Like Black, respect magic trailing commas.
30 | skip-magic-trailing-comma = false
31 |
32 | # Like Black, automatically detect the appropriate line ending.
33 | line-ending = "auto"
34 |
35 | [tool.mypy]
36 | ignore_missing_imports = true
37 | disallow_untyped_defs = true
38 | explicit_package_bases = true
39 | install_types = true
--------------------------------------------------------------------------------
/scripts/BatchParams.py:
--------------------------------------------------------------------------------
1 | """"This module provides a function to get all"""
2 | from dataclasses import dataclass
3 | from typing import Union, List, Tuple
4 | import re
5 | import os
6 | from scripts.Logger import Logger
7 | from scripts.Utils import Utils
8 |
9 | import modules
10 |
11 | import modules.shared as shared
12 |
@dataclass()
class BatchParams:
    """Parameters for one checkpoint run within a batch.

    Args:
        checkpoint (str): the checkpoint name
        prompt (str): the positive prompt
        hr_prompt (str): the hires. fix prompt
        neg_prompt (str): the negative prompt
        style (List[str]): the styles (A1111 styles)
        batch_count (int, optional): the batch count. Defaults to -1 (keep the UI value).
        clip_skip (int, optional): the clip skip. Defaults to 1.
        width (int, optional): the image width. Defaults to -1 (keep the UI value).
        height (int, optional): the image height. Defaults to -1 (keep the UI value).
    """
    checkpoint: str
    prompt: str
    hr_prompt: str
    neg_prompt: str
    style: List[str]
    batch_count: int = -1
    clip_skip: int = 1
    width: int = -1
    height: int = -1

    def __repr__(self) -> str:
        """Return a multi-line summary; the checkpoint is shown by file name only."""
        base_name: str = os.path.basename(self.checkpoint)
        segments = (
            f"BatchParams: {base_name},\n ",
            f"prompt: {self.prompt},\n",
            f"style: {self.style},\n",
            f"neg_prompt: {self.neg_prompt},\n ",
            f"batch_count: {self.batch_count},\n ",
            f"clip_skip: {self.clip_skip}\n",
            f"size: {self.width}x{self.height}",
        )
        return "".join(segments)
47 |
# Module-level logger shared by get_all_batch_params and its nested helpers.
logger = Logger()

def get_all_batch_params(p: Union[modules.processing.StableDiffusionProcessingTxt2Img, modules.processing.StableDiffusionProcessingImg2Img], checkpoints_as_string: str, prompts_as_string: str) -> List[BatchParams]:
    """Get all batch parameters from the input.

    Splits the user-supplied checkpoint and prompt strings and builds one
    BatchParams per checkpoint. Per-checkpoint overrides (batch count,
    clip skip, styles, image size, negative prompt) are parsed out of each
    prompt with user-configurable regexes taken from the settings.

    Args:
        p (Union[modules.processing.StableDiffusionProcessingTxt2Img, modules.processing.StableDiffusionProcessingImg2Img]): the processing object
        checkpoints_as_string (str): the checkpoints as string
        prompts_as_string (str): the prompts as string

    Returns:
        List[BatchParams]: the batch parameters

    Raises:
        RuntimeError: if a configured regex cannot capture a number, a
            checkpoint name is unknown, the number of prompts does not match
            the number of checkpoints, or the input is empty.
    """

    def getRegexFromOpts(key: str, search_for_number: bool = True) -> Tuple[str, str]:
        """Get the regex from the options. As the user can change the regex,
        it is checked if the regex is valid.

        Args:
            key (str): the settings key holding the regex
            search_for_number (bool, optional): If true checks if the regex is valid. Defaults to True.

        Returns:
            Tuple[str, str]: the search pattern and the sub pattern
        """
        sub_pattern = getattr(shared.opts, key)
        # Wrap every character class in a capture group so re.search() exposes
        # the matched value(s) via group(1), group(2), ...
        search_pattern = sub_pattern.replace("[", "([").replace("]", "])")

        # A number-extracting regex must contain "[0-9]+" or "\d+", otherwise
        # it can never capture a number; fail early with a clear message.
        if not re.search(r"\[0-9\]\+|\\d\+", sub_pattern) and search_for_number:
            raise RuntimeError(f'Can\'t find a number with the regex for {key}: "{sub_pattern}"')

        return search_pattern, sub_pattern

    utils = Utils()

    def get_batch_count_from_prompt(prompt: str) -> Tuple[int, str]:
        """Extracts the batch count from the prompt if specified, else uses the default value

        Args:
            prompt (str): the prompt

        Returns:
            Tuple[int, str]: the batch count and the prompt (directive removed)
        """
        search_pattern, sub_pattern = getRegexFromOpts("batchCountRegex")
        number_match = re.search(search_pattern, prompt)
        if number_match and number_match.group(1):
            # Extract the number from the match object
            number = int(number_match.group(1)) # Use group(1) to get the number inside parentheses
            # Values below 1 fall back to the UI batch count (p.n_iter).
            number = p.n_iter if number < 1 else number
            # Strip the directive so it is not sent to the model as prompt text.
            prompt = re.sub(sub_pattern, '', prompt)
        else:
            number = p.n_iter


        return number, prompt

    def get_clip_skip_from_prompt(prompt: str) -> Tuple[int, str]:
        """Extracts the clip skip from the prompt if specified, else uses the default value

        Args:
            prompt (str): the prompt

        Returns:
            Tuple[int, str]: the clip skip and the prompt (directive removed)
        """
        search_pattern, sub_pattern = getRegexFromOpts("clipSkipRegex")
        number_match = re.search(search_pattern, prompt)
        if number_match and number_match.group(1):
            # Extract the number from the match object
            number = int(number_match.group(1))
            # Values below 1 fall back to the global CLIP-skip setting.
            number = shared.opts.data["CLIP_stop_at_last_layers"] if number < 1 else number
            prompt = (
                re.sub(sub_pattern, '', prompt))
        else:
            number = shared.opts.data["CLIP_stop_at_last_layers"]


        return number, prompt

    def get_style_from_prompt(prompt: str) -> Tuple[List[str], str]:
        """Extracts the style from the prompt if specified.

        Args:
            prompt (str): the prompt

        Returns:
            Tuple[List[str], str]: the styles and the prompt
        """
        styles = []
        search_pattern, sub_pattern = getRegexFromOpts("styleRegex", False)
        style_matches = re.findall(search_pattern, prompt)
        if style_matches:
            for i, stl in enumerate(style_matches):
                styles.append(stl)
                _, prompt_regex = getRegexFromOpts("promptRegex", False)
                # The last style directive is replaced by the {prompt}
                # placeholder so the base prompt gets inserted at that spot;
                # every earlier one is simply removed.
                replacement = prompt_regex if i == len(style_matches) - 1 else ""
                prompt = re.sub(sub_pattern, replacement, prompt, count=1)

                logger.debug_log(f"nr.: {i}, prompt: {prompt}", False)

        return styles, prompt

    def get_image_size_from_prompt(prompt: str) -> Tuple[int, int, str]:
        """Extracts the image size from the prompt if specified, else uses the default value

        Args:
            prompt (str): the prompt

        Returns:
            Tuple[int, int, str]: the width, height and the prompt.
            If the width and height are not specified, -1 is returned.
        """
        search_pattern, sub_pattern = getRegexFromOpts("widthHeightRegex", False) # default pattern: {{size:[0-9]+x[0-9]+}}
        number_matches = re.search(search_pattern, prompt)
        if number_matches:
            try:
                # Both capture groups (width, height) must parse as integers.
                width, height = map(int, number_matches.groups())
            except ValueError:
                raise RuntimeError(f"Can't convert the image size to an integer: {number_matches[0]}")
            prompt = re.sub(sub_pattern, '', prompt)
        else:
            # -1 signals "keep the size set in the UI".
            width, height = -1, -1

        return width, height, prompt

    def split_postive_and_negative_postive_prompt(prompt: str) -> Tuple[str, str]:
        """Splits the prompt into a positive and negative prompt.
        If a negative prompt is specified.

        Args:
            prompt (str): the prompt

        Returns:
            Tuple[str, str]: the positive and negative prompt
        """
        pattern = getattr(shared.opts, "negPromptRegex")
        parts = re.split(pattern, prompt)
        if len(parts) > 1:
            # Everything after the first negative-prompt marker is treated as
            # the negative prompt; any further splits are discarded.
            neg_prompt = parts[1]
        else:
            neg_prompt = ""

        prompt = parts[0]

        return prompt, neg_prompt


    all_batch_params: List[BatchParams] = []

    checkpoints: List[str] = utils.getCheckpointListFromInput(checkpoints_as_string)


    # Prompts are separated by ";"; "@index:n" annotations and line breaks are
    # stripped, and blank entries are dropped.
    prompts: List[str] = utils.remove_index_from_string(prompts_as_string).split(";")
    prompts = [prompt.replace('\n', '').strip() for prompt in prompts if not prompt.isspace() and prompt != '']

    if len(prompts) != len(checkpoints):
        logger.debug_log(f"len prompt: {len(prompts)}, len checkpoints{len(checkpoints)}")
        raise RuntimeError("amount of prompts don't match with amount of checkpoints")

    if len(prompts) == 0:
        raise RuntimeError("can't run without a checkpoint and prompt")


    for i in range(len(checkpoints)):

        # NOTE: "get_closet_checkpoint_match" is the actual (misspelled) name
        # of the A1111 API; it resolves a user-typed name to a checkpoint.
        info = modules.sd_models.get_closet_checkpoint_match(checkpoints[i])
        if info is None:
            raise RuntimeError(f"Unknown checkpoint: {checkpoints[i]}")


        # Each extractor returns the prompt with its directive removed, so the
        # order of these calls determines what the later ones see.
        batch_count, prompts[i] = get_batch_count_from_prompt(prompts[i])
        clip_skip, prompts[i] = get_clip_skip_from_prompt(prompts[i])
        style, prompts[i] = get_style_from_prompt(prompts[i])
        width, height, prompts[i] = get_image_size_from_prompt(prompts[i])
        prompt_template, neg_prompt = split_postive_and_negative_postive_prompt(prompts[i])


        _, prompt_regex = getRegexFromOpts("promptRegex", False)

        # Insert the UI prompts at the {prompt} placeholder of the template.
        prompt = prompt_template.replace(prompt_regex, p.prompt)
        hr_prompt = prompt_template.replace(prompt_regex, p.hr_prompt)
        # Checkpoint-specific negative text is appended after the UI negative prompt.
        neg_prompt = p.negative_prompt + neg_prompt


        all_batch_params.append(BatchParams(checkpoints[i], prompt,hr_prompt, neg_prompt, style, batch_count, clip_skip, width, height))

        logger.debug_log(f"batch_params: {all_batch_params[i]}", False)

    return all_batch_params
--------------------------------------------------------------------------------
/scripts/CivitaihelperPrompts.py:
--------------------------------------------------------------------------------
1 | """This modules uses the JSON files created by the
2 | https://github.com/butaixianran/Stable-Diffusion-Webui-Civitai-Helper extension"""
3 |
4 | import sys
5 | import os
6 | sys.path.insert(0, os.path.join(os.path.dirname(os.path.abspath(__file__)), "scripts"))
7 | from scripts.Logger import Logger
8 | from scripts.Utils import Utils
9 |
10 | import modules.shared as shared
11 | import modules.scripts as scripts
12 | import json
13 |
14 |
class CivitaihelperPrompts():
    """
    This class is used to get the prompts from the JSON files created by the
    Stable-Diffusion-Webui-Civitai-Helper extension
    """

    def __init__(self) -> None:
        self.model_path = self.get_custom_model_folder()
        self.utils = Utils()
        self.logger = Logger()
        self.logger.debug = False

    def get_custom_model_folder(self) -> str:
        """Get the custom model folder from the command line options or the default folder

        Returns:
            str: the model folder path
        """
        # --ckpt-dir overrides the default models folder when it points to an
        # existing directory.
        if shared.cmd_opts.ckpt_dir and os.path.isdir(shared.cmd_opts.ckpt_dir):
            return shared.cmd_opts.ckpt_dir
        else:
            return os.path.join(scripts.basedir(), "models", "Stable-diffusion")


    def get_civitAi_prompt_from_model(self, path: str) -> str:
        """Get the prompt from the civitai.info file

        Falls back to the plain "{prompt};" template when no info file exists
        or no preview image carries a usable prompt.

        Args:
            path (str): the path to the model

        Returns:
            str: the prompt, terminated with a semicolon
        """
        # The Civitai Helper extension stores metadata next to the model file
        # as "<model>.civitai.info".
        path = path.replace(".ckpt", ".civitai.info").replace(
            ".safetensors", ".civitai.info")
        path = self.utils.get_clean_checkpoint_path(path)
        path = os.path.join(self.model_path, path)
        self.logger.debug_log(f"{path} -> is file {os.path.isfile(path)}")
        if not os.path.exists(os.path.realpath(path)):
            return "{prompt};"
        model_info = None
        with open(os.path.realpath(path), 'r') as f:
            try:
                model_info = json.load(f)
            except Exception as e:
                self.logger.debug_log(f"Error loading civitai info: {e}", False)
                return "{prompt};"
        try:
            self.logger.debug_log(f"len: {len(model_info['images'])}")
            # Use the first preview image whose metadata contains a prompt.
            for image in model_info['images']:
                try:
                    info = image['meta']['prompt']
                    self.logger.debug_log(f"Prompt: {info}")
                    if info:
                        return f"{info};"
                except:
                    # Preview entries without meta/prompt keys are skipped.
                    pass
            return "{prompt};"
        except:
            return "{prompt};"

    def createCivitaiPromptString(self, checkpoints: str) -> str:
        """Create a string with all the prompts from the checkpoints
        Indices are added to the prompts

        Args:
            checkpoints (str): the checkpoints

        Returns:
            str: the prompt string with indices
        """
        checkpoints_list = self.utils.getCheckpointListFromInput(checkpoints)
        prompts = ""
        for checkpoint in checkpoints_list:
            prompts += self.get_civitAi_prompt_from_model(checkpoint)

        # Bug fix: indices were previously added inside the loop on the whole
        # accumulated string and the result appended each iteration, so every
        # earlier prompt was duplicated once per remaining checkpoint. Index
        # the complete prompt string exactly once instead.
        prompts_with_info = self.utils.add_index_to_string(
            prompts, is_checkpoint=False)

        self.logger.log_info("loaded all prompts")
        return prompts_with_info
--------------------------------------------------------------------------------
/scripts/Logger.py:
--------------------------------------------------------------------------------
1 | """ Module for logging and debugging"""
2 | import inspect
3 | from pprint import pprint
4 | from typing import Any
5 |
class Logger():
    """
    Log class with different styled logs.
    Debugging can be enabled/disabled for the whole instance.
    """

    def __init__(self) -> None:
        # When True, the debug_* methods print even without their per-call
        # debug override.
        self.debug = False

    def log_caller(self) -> None:
        """Print the class and function name of the caller's caller."""
        # Two frames up: skip this method and the Logger method that called it,
        # landing on the user code that triggered the log.
        caller_frame = inspect.currentframe().f_back.f_back  # type: ignore
        caller_function_name = caller_frame.f_code.co_name  # type: ignore
        caller_self = caller_frame.f_locals.get('self', None)  # type: ignore
        if caller_self is not None:
            caller_class_name = caller_self.__class__.__name__
            print(f"\tat: {caller_class_name}.{caller_function_name}\n")
        else:
            print(f"\tat: {caller_function_name}\n")


    def debug_log(self, msg: str, debug: bool = False) -> None:
        """Print a debug message if debugging is enabled

        Args:
            msg (str): the message to print
            debug (bool, optional): if True, the message will be printed regardless of the debugging state.
                Defaults to False.
        """
        if self.debug or debug:
            print(f"\n\tDEBUG: {msg}")
            self.log_caller()

    def pretty_debug_log(self, msg: Any, debug: bool = False) -> None:
        """Print a debug message with pprint if debugging is enabled

        Args:
            msg (Any): the message to print
            debug (bool, optional): if True, the message will be printed regardless of the debugging state.
                Defaults to False.
        """
        if self.debug or debug:
            print("\n\n\n")
            pprint(msg)
            self.log_caller()


    def log_info(self, msg: str) -> None:
        """Print an info message

        Args:
            msg (str): the message to print
        """
        print(f"INFO: Batch-Checkpoint-Prompt: {msg}")

    def debug_print_attributes(self, obj: Any, debug: bool = False) -> None:
        """Print the non-callable, non-dunder attributes of an object if debugging is enabled

        Args:
            obj (Any): the object to print the attributes from
            debug (bool, optional): if True, prints regardless of the debugging state.
                Defaults to False.
        """
        # Bug fix: the header used to be printed unconditionally (and was
        # misspelled "Atributes"); it now only prints when debugging is active.
        if self.debug or debug:
            print("Attributes: ")
            attributes = dir(obj)
            for attribute in attributes:
                if not attribute.startswith("__"):
                    value = getattr(obj, attribute)
                    if not callable(value):  # Exclude methods
                        try:
                            print(f"{attribute}:")
                            pprint(value)
                        except:
                            print(f"{attribute}: {value}\n")
            print(f"\n{type(obj)}\n")
            self.log_caller()
--------------------------------------------------------------------------------
/scripts/Save.py:
--------------------------------------------------------------------------------
1 | """This module provides methods to save and load checkpoints and prompts in a JSON file."""
2 | import sys
3 | import os
4 |
5 | sys.path.insert(0, os.path.join(os.path.dirname(os.path.abspath(__file__)), "scripts"))
6 | from scripts.Logger import Logger
7 |
8 | import json
9 | from typing import Dict, List, Tuple
10 |
class Save():
    """
    saves and loads checkpoints and prompts in a JSON
    """

    def __init__(self) -> None:
        # JSON file (in the webui working directory) that holds every save.
        self.file_name = "batchCheckpointPromptValues.json"
        self.logger = Logger()
        self.logger.debug = False

    def read_file(self) -> Dict[str, Tuple[str, str]]:
        """Read the JSON file and return the data

        Returns:
            Dict[str, Tuple[str, str]]: the data from the JSON file.
            The key is the name of the save and the value is a tuple of checkpoints and prompts
            (note: JSON round-trips tuples as lists). A missing file yields
            a single empty placeholder save.
        """
        try:
            with open(self.file_name, 'r', encoding='utf-8') as f:
                return json.load(f)
        except FileNotFoundError:
            return {"None": ("", "")}

    def store_values(self, name: str, checkpoints: str, prompts: str, overwrite_existing_save: bool, append_existing_save: bool) -> str:
        """Store the checkpoints and prompts in a JSON file

        Args:
            name (str): the name of the save
            checkpoints (str): the checkpoints
            prompts (str): the prompts
            overwrite_existing_save (bool): if True, overwrite a existing save with the same name
            append_existing_save (bool): if True, append a existing save with the same name

        Returns:
            str: a message that indicates if the save was successful
        """
        data = {}

        # If the JSON file already exists, load the data into the dictionary
        if os.path.exists(self.file_name):
            data = self.read_file()

        # Refuse to clobber an existing save unless explicitly requested.
        if name in data and not overwrite_existing_save and not append_existing_save:
            self.logger.log_info("Name already exists")
            return f'Name "{name}" already exists'

        if append_existing_save:
            if name in data:
                self.logger.debug_log(f"Name: {name}")
                read_values = self.read_value(name)
                self.logger.pretty_debug_log(read_values)
                # Join the old and new values with the list separators the
                # rest of the extension expects ("," / ";").
                checkpoints = ",\n".join([read_values[0], checkpoints])
                prompts = ";\n".join([read_values[1], prompts])
            else:
                # Fix: appending to a save that does not exist used to raise
                # an uncaught KeyError; treat it as creating a new save.
                append_existing_save = False

        # Add the data to the dictionary
        data[name] = (checkpoints, prompts)

        # Write the whole dictionary back to the JSON file
        with open(self.file_name, 'w', encoding='utf-8') as f:
            json.dump(data, f)

        self.logger.log_info("saved checkpoints and Prompts")
        if append_existing_save:
            return f'Appended "{name}"'
        elif overwrite_existing_save:
            return f'Overwrote "{name}"'
        else:
            return f'Saved "{name}"'

    def read_value(self, name: str) -> Tuple[str, str]:
        """Get the checkpoints and prompts from a save

        Args:
            name (str): the name of the save

        Returns:
            Tuple[str, str]: the checkpoints and prompts

        Raises:
            RuntimeError: if no save file exists yet
            KeyError: if the name is not a known save
        """
        if os.path.exists(self.file_name):
            data = self.read_file()
        else:
            raise RuntimeError("no save file found")

        x, y = tuple(data[name])
        self.logger.log_info("loaded save")

        return x, y

    def get_keys(self) -> List[str]:
        """Get the keys from the JSON file

        Returns:
            List[str]: a list of keys
        """
        return list(self.read_file().keys())
113 |
--------------------------------------------------------------------------------
/scripts/Utils.py:
--------------------------------------------------------------------------------
1 | """This module provides utility functions."""
2 | from scripts.Logger import Logger
3 | import os
4 | import re
5 | import requests
6 | from typing import List
7 |
8 | import modules
9 | from modules.sd_models import read_state_dict
10 | from modules.sd_models_config import (find_checkpoint_config, config_default, config_sd2, config_sd2v, config_sd2_inpainting,
11 | config_depth_model, config_unclip, config_unopenclip, config_inpainting, config_instruct_pix2pix, config_alt_diffusion)
12 |
13 | import sys
14 | sys.path.insert(0, os.path.join(os.path.dirname(
15 | os.path.abspath(__file__)), "scripts"))
16 |
17 |
class Utils():
    """
    methods that are needed in different classes
    """

    def __init__(self) -> None:
        self.logger = Logger()
        self.logger.debug = False
        # Root directory of the extension (one level above scripts/).
        script_path = os.path.dirname(
            os.path.dirname(os.path.abspath(__file__)))
        self.held_md_file_name = os.path.join(
            script_path, "HelpBatchCheckpointsPrompt.md")
        # Fix: the raw URL previously embedded the absolute local path and
        # appended a second ".md"; it must reference the file name inside
        # the repository instead.
        self.held_md_url = ("https://raw.githubusercontent.com/h43lb1t0/"
                            "BatchCheckpointPrompt/main/HelpBatchCheckpointsPrompt.md")

    def split_prompts(self, text: str) -> List[str]:
        """Split the prompts by the ; and remove empty strings and newlines

        Args:
            text (str): the input string
        Returns:
            List[str]: a list of prompts
        """
        prompt_list = text.split(";")
        return [prompt.replace('\n', '').strip()
                for prompt in prompt_list
                if not prompt.isspace() and prompt != '']

    def remove_index_from_string(self, input: str) -> str:
        """Remove every "@index:<n>" marker from the string

        Args:
            input (str): the input string
        Returns:
            str: the string without the index
        """
        return re.sub(r"@index:\d+", "", input).strip()

    def remove_model_version_from_string(self, checkpoints_text: str) -> str:
        """Remove the "@version:..." markers from the string

        Args:
            checkpoints_text (str): the input string with all checkpoints
        Returns:
            str: the string without the model version markers
        """
        patterns = [
            '@version:sd1',
            '@version:sd2',
            '@version:sd2v',
            '@version:sd2-inpainting',
            '@version:depth',
            '@version:unclip',
            '@version:unopenclip',
            '@version:sd1-inpainting',
            '@version:pix2pix',
            '@version:alt'
        ]

        # Fix: remove the longest markers first. Substituting in list order
        # let '@version:sd2' eat the front of '@version:sd2v' or
        # '@version:sd2-inpainting' and leave "v"/"-inpainting" behind
        # (same for '@version:sd1' vs '@version:sd1-inpainting').
        for pattern in sorted(patterns, key=len, reverse=True):
            checkpoints_text = checkpoints_text.replace(pattern, '')

        return checkpoints_text

    def get_clean_checkpoint_path(self, checkpoint: str) -> str:
        """Remove the checkpoint hash (" [abcd1234]") from the filename

        Args:
            checkpoint (str): the input string with hash
        Returns:
            str: the string without the hash
        """
        return re.sub(r' \[.*?\]', '', checkpoint).strip()

    def getCheckpointListFromInput(self, checkpoints_text: str, clean: bool = True) -> List[str]:
        """Get a list of checkpoints from the input string

        Args:
            checkpoints_text (str): the input string with all checkpoints
            clean (bool): remove the index and hash from the string
        Returns:
            List[str]: a list of checkpoints
        """
        self.logger.debug_log(f"checkpoints: {checkpoints_text}")
        checkpoints_text = self.remove_model_version_from_string(checkpoints_text)
        if clean:
            checkpoints_text = self.remove_index_from_string(checkpoints_text)
            checkpoints_text = self.get_clean_checkpoint_path(checkpoints_text)
        # (the original comprehension carried a redundant duplicate
        # `if checkpoints` truthiness test; dropped here)
        return [checkpoint.replace('\n', '').strip()
                for checkpoint in checkpoints_text.split(",")
                if not checkpoint.isspace() and checkpoint != '']

    def get_help_md(self) -> str:
        """Gets the help md file.
        If the file is not locally found downloads it from the github repository

        Returns:
            str: the help md file as a string
        """
        md = "could not get help file. Check Github for more information"
        if os.path.isfile(self.held_md_file_name):
            with open(self.held_md_file_name, encoding="utf-8") as f:
                md = f.read()
        else:
            self.logger.debug_log("downloading help md")
            result = requests.get(self.held_md_url)
            if result.status_code == 200:
                with open(self.held_md_file_name, "wb") as file:
                    file.write(result.content)
                # Re-read the file we just wrote.
                return self.get_help_md()
        return md

    def add_index_to_string(self, text: str, is_checkpoint: bool = True) -> str:
        """Add the index to the string

        Args:
            text (str): the input string
            is_checkpoint (bool): if the string is a checkpoint list or a prompt list
        Returns:
            str: the string with the index
        """
        text_string = ""
        if is_checkpoint:
            for i, checkpoint in enumerate(self.getCheckpointListFromInput(text)):
                text_string += f"{self.remove_index_from_string(checkpoint)} @index:{i},\n"
        else:
            for i, prompt in enumerate(self.split_prompts(text)):
                text_string += f"{self.remove_index_from_string(prompt)} @index:{i};\n\n"
        return text_string

    def add_model_version_to_string(self, checkpoints_text: str) -> str:
        """Add the model version to the string.
        EXPERIMENTAL!

        Args:
            checkpoints_text (str): the input string with all checkpoints
        Returns:
            str: the string with the model version
        """
        # Map the webui config files to the short names used by the
        # "@version:" markers; unknown configs fall through unchanged.
        config_to_name = {
            config_default: "sd1",
            config_sd2: "sd2",
            config_sd2v: "sd2v",
            config_sd2_inpainting: "sd2-inpainting",
            config_depth_model: "depth",
            config_unclip: "unclip",
            config_unopenclip: "unopenclip",
            config_inpainting: "sd1-inpainting",
            config_instruct_pix2pix: "pix2pix",
            config_alt_diffusion: "alt",
        }
        text_string = ""
        checkpoints_not_cleaned = self.getCheckpointListFromInput(
            checkpoints_text, clean=False)
        checkpoints = self.getCheckpointListFromInput(checkpoints_text)
        for i, checkpoint in enumerate(checkpoints):
            info = modules.sd_models.get_closet_checkpoint_match(checkpoint)
            # NOTE(review): loading the full state dict per checkpoint is
            # slow; this is why the feature is marked experimental.
            state_dict = read_state_dict(info.filename)
            config = find_checkpoint_config(state_dict, None)
            version_string = config_to_name.get(config, config)
            checkpoint_partly_cleaned = checkpoints_not_cleaned[i].replace(
                "\n", "").replace(",", "")
            text_string += f"{checkpoint_partly_cleaned} @version:{version_string},\n\n"
        return text_string

    def remove_element_at_index(self, checkpoints: str, prompts: str, index: List[int]) -> List[str]:
        """Remove the element at the given index from the string

        Args:
            checkpoints (str): the input string with all checkpoints
            prompts (str): the input string with all prompts
            index (List[int]): the indices to remove
        Returns:
            List[str]: a list with the new checkpoints and prompts
        """

        checkpoints_list = self.getCheckpointListFromInput(checkpoints)
        prompts_list = self.split_prompts(prompts)
        if (len(checkpoints_list) == len(prompts_list)
                or len(prompts_list) - len(index) <= 0):
            if max(index) <= len(checkpoints_list) - 1:
                # Fix: pop the highest indices first (and de-duplicate).
                # Popping in ascending order shifted the remaining elements
                # so later indices removed the wrong entries.
                for i in sorted(set(index), reverse=True):
                    checkpoints_list.pop(i)
                    prompts_list.pop(i)
                checkpoints = ""
                for c in checkpoints_list:
                    checkpoints += f"{c},"
                prompts = ""
                for p in prompts_list:
                    prompts += f"{p};"
                result = [self.add_index_to_string(checkpoints, True),
                          self.add_index_to_string(prompts, False)]
                self.logger.debug_log(f"result: {result}")
                return result
            else:
                self.logger.debug_log("index is out of range")
                return [checkpoints, prompts]
        else:
            self.logger.debug_log(
                f"checkpoints and prompts are not the same length cp: {len(checkpoints_list)} p: {len(prompts_list)}")
            return [checkpoints, prompts]
--------------------------------------------------------------------------------
/scripts/batchCheckpointsPrompt.py:
--------------------------------------------------------------------------------
1 | """This script is used to generate images with different checkpoints and prompts"""
2 | from copy import copy
3 | import os
4 | import re
5 | import subprocess
6 | import sys
7 | from typing import Any, List, Tuple, Union
8 |
9 | sys.path.insert(0, os.path.join(os.path.dirname(os.path.abspath(__file__)), "scripts"))
10 | from scripts.Utils import Utils
11 | from scripts.Logger import Logger
12 | from scripts.CivitaihelperPrompts import CivitaihelperPrompts
13 | from scripts.Save import Save
14 | from scripts.BatchParams import BatchParams, get_all_batch_params
15 |
16 |
17 | import gradio as gr
18 | import modules
19 | import modules.scripts as scripts
20 | import modules.shared as shared
21 | from modules.shared_state import State as shared_state
22 | from modules import processing
23 | from modules.processing import process_images
24 | from modules.ui_components import (FormColumn, FormRow)
25 |
26 | from PIL import Image, ImageDraw, ImageFont
27 |
28 | import PIL
29 |
30 |
31 |
32 | try:
33 | import matplotlib.font_manager as fm
34 | except:
35 | subprocess.check_call(["pip", "install", "matplotlib"])
36 | import matplotlib.font_manager as fm
37 |
class ToolButton(gr.Button, gr.components.FormComponent):
    """Compact button showing a single emoji, sized to fit inside gradio forms."""

    def __init__(self, **kwargs: Any) -> None:
        # Force the small "tool" variant and attach our CSS hook class.
        super().__init__(
            variant="tool",
            elem_classes=["batch-checkpoint-prompt"],
            **kwargs,
        )

    def get_block_name(self) -> str:
        """Gradio block type this component renders as."""
        return "button"
46 |
47 |
48 | class CheckpointLoopScript(scripts.Script):
49 | """Script for generating images with different checkpoints and prompts
50 | This calss is called by A1111
51 | """
52 |
53 | def __init__(self) -> None:
54 | self.margin_size = 0
55 | self.logger = Logger()
56 | self.logger.debug = False
57 | self.font = None
58 | self.text_margin_left_and_right = 16
59 | self.fill_values_symbol = "\U0001f4d2" # 📒
60 | self.zero_width_space = '\u200B' # zero width space
61 | self.zero_width_joiner = '\u200D' # zero width joiner
62 | self.save_symbol = "\U0001F4BE" # 💾
63 | self.reload_symbol = "\U0001F504" # 🔄
64 | self.index_symbol = "\U0001F522" # 🔢
65 | self.rm_index_symbol = "\U0001F5D1" # 🗑️
66 | self.save = Save()
67 | self.utils = Utils()
68 | self.civitai_helper = CivitaihelperPrompts()
69 | self.outdir_txt2img_grids = shared.opts.outdir_txt2img_grids
70 | self.outdir_img2img_grids = shared.opts.outdir_img2img_grids
71 |
72 |
73 | def title(self) -> str:
74 | return "Batch Checkpoint and Prompt"
75 |
76 | def save_inputs(self, save_name: str, checkpoints: str, prompt_templates: str, action : str) -> str:
77 | """Save the inputs to a file
78 |
79 | Args:
80 | save_name (str): the save name
81 | checkpoints (str): the checkpoints
82 | prompt_templates (str): the prompt templates
83 | action (str): Possible values: "No", "Overwrite existing save", "append existing save"
84 |
85 | Returns:
86 | str: the save status
87 | """
88 | overwrite_existing_save = False
89 | append_existing_save = False
90 | if action == "Overwrite existing save":
91 | overwrite_existing_save = True
92 | elif action == "append existing save":
93 | append_existing_save = True
94 | return self.save.store_values(
95 | save_name.strip(), checkpoints.strip(), prompt_templates.strip(), overwrite_existing_save, append_existing_save)
96 |
97 |
98 | """ def load_inputs(self, name: str) -> None:
99 | values = self.save.read_value(name.strip()) """
100 |
101 | def get_checkpoints(self) -> str:
102 | """Get the checkpoints from the sd_models module.
103 | Add the index to the checkpoints
104 |
105 | Returns:
106 | str: the checkpoints
107 | """
108 | checkpoint_list_no_index = list(modules.sd_models.checkpoints_list)
109 | checkpoint_list_with_index = []
110 | for i in range(len(checkpoint_list_no_index)):
111 | checkpoint_list_with_index.append(
112 | f"{checkpoint_list_no_index[i]} @index:{i}")
113 | return ',\n'.join(checkpoint_list_with_index)
114 |
115 | def getCheckpoints_and_prompt_with_index_and_version(self, checkpoint_list: str, prompts: str, add_model_version: bool) -> Tuple[str, str]:
116 | """Add the index to the checkpoints and prompts
117 | and add the model version to the checkpoints
118 |
119 | Args:
120 | checkpoint_list (str): the checkpoint list
121 | prompts (str): the prompts
122 | add_model_version (bool): add the model version to the checkpoints. EXPERIMENTAL!
123 |
124 | Returns:
125 | Tuple[str, str]: the checkpoints and prompts
126 | """
127 | checkpoints = self.utils.add_index_to_string(checkpoint_list)
128 | if add_model_version:
129 | checkpoints = self.utils.add_model_version_to_string(checkpoints)
130 | prompts = self.utils.add_index_to_string(prompts, is_checkpoint=False)
131 | return checkpoints, prompts
132 |
133 | def refresh_saved(self) -> gr.Dropdown:
134 | """Refresh the saved values dropdown
135 |
136 | Returns:
137 | gr.Dropdown: the updated dropdown
138 | """
139 | return gr.Dropdown.update(choices=self.save.get_keys())
140 |
141 | def remove_checkpoints_prompt_at_index(self, checkpoints: str, prompts: str, index: str) -> List[str]:
142 | """Remove the checkpoint and prompt at the specified index
143 |
144 | Args:
145 | checkpoints (str): the checkpoints
146 | prompts (str): the prompts
147 | index (str): the index
148 |
149 | Returns:
150 | List[str]: the checkpoints and prompts
151 | """
152 | index_list = index.split(",")
153 | index_list_num = [int(i) for i in index_list]
154 | return self.utils.remove_element_at_index(checkpoints, prompts, index_list_num)
155 |
156 |
157 |
158 |
    def ui(self, is_img2img: bool) -> List[Union[gr.components.Textbox, gr.components.Slider]]:
        """Create the UI

        Args:
            is_img2img (bool): not used.

        Returns:
            List[Union[gr.components.Textbox, gr.components.Slider]]: the UI components
        """
        # --- main inputs: checkpoint list, prompt list, index tools, grid margin ---
        with gr.Tab("Parameters"):
            with FormRow():
                checkpoints_input = gr.components.Textbox(
                    lines=5, label="Checkpoint Names", placeholder="Checkpoint names (separated with comma)")
                fill_checkpoints_button = ToolButton(
                    value=self.fill_values_symbol, visible=True)
            with FormRow():

                checkpoints_prompt = gr.components.Textbox(
                    lines=5, label="Prompts/prompt templates for Checkpoints", placeholder="prompts/prompt templates (separated with semicolon)")

                # Same glyph as fill_checkpoints_button; the zero-width
                # joiner keeps the button values distinct for gradio.
                civitai_prompt_fill_button = ToolButton(
                    value=self.fill_values_symbol+self.zero_width_joiner, visible=True)
                add_index_button = ToolButton(
                    value=self.index_symbol, visible=True)
            with FormColumn():
                with FormRow():
                    rm_model_prompt_at_indexes_textbox = gr.components.Textbox(lines=1, label="Remove checkpoint and prompt at index", placeholder="Remove checkpoint and prompt at index (separated with comma)")
                    rm_model_prompt_at_indexes_button = ToolButton(value=self.rm_index_symbol, visible=True)
            margin_size = gr.Slider(
                label="Grid margins (px)", minimum=0, maximum=10, value=0, step=1)

            # save and load inputs

            with FormRow():
                keys = self.save.get_keys()
                saved_inputs_dropdown = gr.components.Dropdown(
                    choices=keys, label="Saved values")

                load_button = ToolButton(
                    value=self.fill_values_symbol+self.zero_width_space, visible=True)
                refresh_button = ToolButton(value=self.reload_symbol, visible=True)


            with FormRow():
                save_name = gr.components.Textbox(
                    lines=1, label="save name", placeholder="save name")
                save_button = ToolButton(value=self.save_symbol, visible=True)
            with FormRow():
                # Radio value is translated into boolean flags by save_inputs.
                test = gr.components.Radio(["No", "Overwrite existing save", "append existing save"], label="Change saves?")

            save_status = gr.Textbox(label="", interactive=False)




        with gr.Accordion(label='Advanced settings', open=False):
            gr.Markdown("""
            This can take a long time depending on the number of checkpoints!
            See the help tab for more information
            """)
            add_model_version_checkbox = gr.components.Checkbox(label="Add model version to checkpoint names", interactive=False
                                                        , info="Not working in current webui versions")

        # Actions

        # Wire each button to its callback; inputs/outputs are the
        # components defined above.
        fill_checkpoints_button.click(
            fn=self.get_checkpoints, outputs=[checkpoints_input])
        save_button.click(fn=self.save_inputs, inputs=[
            save_name, checkpoints_input, checkpoints_prompt, test], outputs=[save_status])
        load_button.click(fn=self.save.read_value, inputs=[saved_inputs_dropdown], outputs=[
            checkpoints_input, checkpoints_prompt])
        civitai_prompt_fill_button.click(fn=self.civitai_helper.createCivitaiPromptString, inputs=[
            checkpoints_input], outputs=[checkpoints_prompt])
        add_index_button.click(fn=self.getCheckpoints_and_prompt_with_index_and_version, inputs=[
            checkpoints_input, checkpoints_prompt, add_model_version_checkbox], outputs=[checkpoints_input, checkpoints_prompt])

        refresh_button.click(fn=self.refresh_saved, outputs=[saved_inputs_dropdown])

        rm_model_prompt_at_indexes_button.click(fn=self.remove_checkpoints_prompt_at_index, inputs=[
            checkpoints_input, checkpoints_prompt, rm_model_prompt_at_indexes_textbox], outputs=[checkpoints_input, checkpoints_prompt])

        with gr.Tab("help"):
            gr.Markdown(self.utils.get_help_md())

        # Only these three are passed to run() as script arguments.
        return [checkpoints_input, checkpoints_prompt, margin_size]
244 |
245 | def show(self, is_img2img: bool) -> bool:
246 | """Show the UI in text2img and img2img mode
247 |
248 | Args:
249 | is_img2img (bool): not used
250 |
251 | Returns:
252 | bool: True
253 | """
254 | return True
255 |
256 |
257 | def _generate_images_with_SD(self,p: Union[modules.processing.StableDiffusionProcessingTxt2Img, modules.processing.StableDiffusionProcessingImg2Img],
258 | batch_params: BatchParams, orginal_size: Tuple[int, int]) -> modules.processing.Processed:
259 | """ manipulates the StableDiffusionProcessing Obect
260 | to generate images with the new checkpoint and prompt
261 | and other parameters
262 |
263 | Args:
264 | p (Union[modules.processing.StableDiffusionProcessingTxt2Img, modules.processing.StableDiffusionProcessingImg2Img]): the processing object
265 | batch_params (BatchParams): the batch parameters
266 | orginal_size (Tuple[int, int]): the original size specified in the UI
267 |
268 | Returns:
269 | modules.processing.Processed: the processed object
270 | """
271 | self.logger.debug_log(str(batch_params), False)
272 |
273 | info = None
274 | info = modules.sd_models.get_closet_checkpoint_match(batch_params.checkpoint)
275 | modules.sd_models.reload_model_weights(shared.sd_model, info)
276 | p.override_settings['sd_model_checkpoint'] = info.name
277 | p.prompt = batch_params.prompt
278 | p.negative_prompt = batch_params.neg_prompt
279 | if len(batch_params.style) > 0:
280 | p.styles = batch_params.style
281 | p.n_iter = batch_params.batch_count
282 | shared.opts.data["CLIP_stop_at_last_layers"] = batch_params.clip_skip
283 | if batch_params.width > 0 and batch_params.height > 0:
284 | self.logger.debug_print_attributes(p, False)
285 | p.height = batch_params.height
286 | p.width = batch_params.width
287 | else:
288 | p.width, p.height = orginal_size
289 | p.hr_prompt = batch_params.hr_prompt
290 | p.hr_negative_prompt = p.negative_prompt
291 | self.logger.debug_log(f"batch count {p.n_iter}")
292 |
293 | processed = process_images(p)
294 |
295 | return processed
296 |
297 |
298 | def _generate_infotexts(self, pc: Union[modules.processing.StableDiffusionProcessingTxt2Img, modules.processing.StableDiffusionProcessingImg2Img],
299 | all_infotexts: List[str], n_iter: int) -> List[str]:
300 | """Generate the infotexts for the images
301 |
302 | Args:
303 | pc (Union[modules.processing.StableDiffusionProcessingTxt2Img, modules.processing.StableDiffusionProcessingImg2Img]): the processing object
304 | all_infotexts (List[str]): the infotexts created by A1111
305 | n_iter (int): the number of iterations
306 |
307 | Returns:
308 | List[str]: the infotexts
309 | """
310 |
311 | def _a1111_infotext_caller(i: int = 0) -> str:
312 | """Call A1111 to create a infotext. This is a helper function.
313 |
314 | Args:
315 | i (int, optional): the index. Defaults to 0. Used to get the correct seed and subseed.
316 |
317 | Returns:
318 | str: the infotext
319 | """
320 | return processing.create_infotext(pc, pc.all_prompts, pc.all_seeds, pc.all_subseeds, position_in_batch=i)
321 |
322 | self.logger.pretty_debug_log(all_infotexts)
323 |
324 |
325 | self.logger.debug_print_attributes(pc)
326 |
327 | if n_iter == 1:
328 | all_infotexts.append(_a1111_infotext_caller())
329 | else:
330 | all_infotexts.append(self.base_prompt)
331 | for i in range(n_iter * pc.batch_size):
332 | all_infotexts.append(_a1111_infotext_caller(i))
333 |
334 | return all_infotexts
335 |
336 |
    def run(self, p: Union[modules.processing.StableDiffusionProcessingTxt2Img, modules.processing.StableDiffusionProcessingImg2Img], checkpoints_text: str, checkpoints_prompt: str, margin_size: int) -> modules.processing.Processed:
        """The main function to generate the images

        Args:
            p (Union[modules.processing.StableDiffusionProcessingTxt2Img, modules.processing.StableDiffusionProcessingImg2Img]): the processing object
            checkpoints_text (str): the checkpoints
            checkpoints_prompt (str): the prompts
            margin_size (int): the margin size for the grid

        Returns:
            modules.processing.Processed: the processed object
        """
        image_processed = []
        self.margin_size = margin_size

        def _get_total_batch_count(batchParams: List[BatchParams]) -> int:
            """Get the total batch count to update the progress bar

            Args:
                batchParams (List[BatchParams]): the batch parameters

            Returns:
                int: the total batch count
            """
            summe = 0
            for param in batchParams:
                summe += param.batch_count
            return summe

        # Remember the unmodified UI prompt; it becomes the first infotext.
        self.base_prompt: str = p.prompt

        all_batchParams = get_all_batch_params(p, checkpoints_text, checkpoints_prompt)

        # Pre-compute totals so the webui progress bar spans the whole run.
        total_batch_count = _get_total_batch_count(all_batchParams)
        total_steps = p.steps * total_batch_count
        self.logger.debug_log(f"total steps: {total_steps}")

        shared.state.job_count = total_batch_count
        shared.total_tqdm.updateTotal(total_steps)

        all_infotexts = [self.base_prompt]

        p.extra_generation_params['Script'] = self.title()

        self.logger.log_info(f'will generate {total_batch_count} images over {len(all_batchParams)} checkpoints)')

        original_size = p.width, p.height


        # Generate one batch per checkpoint/prompt pair.
        for i, checkpoint in enumerate(all_batchParams):


            self.logger.log_info(f"checkpoint: {i+1}/{len(all_batchParams)} ({checkpoint.checkpoint})")


            self.logger.debug_log(
                f"Propmpt with replace: {all_batchParams[i].prompt}, neg prompt: {all_batchParams[i].neg_prompt}")


            processed_sd_object = self._generate_images_with_SD(p, all_batchParams[i], original_size)

            image_processed.append(processed_sd_object)


            # copy(p) so the infotexts reflect this iteration's parameters.
            all_infotexts = self._generate_infotexts(copy(p), all_infotexts, all_batchParams[i].batch_count)


            # Stop early when the user interrupts generation in the UI.
            if shared.state.interrupted or shared.state.stopping_generation:
                break

        img_grid = self._create_grid(image_processed, all_batchParams)

        # Merge everything into the first Processed object: the grid goes
        # first, followed by the images of all remaining checkpoints.
        image_processed[0].images.insert(0, img_grid)
        image_processed[0].index_of_first_image = 1
        for i, image in enumerate(image_processed):
            if i > 0:
                for j in range(len(image_processed[i].images)):
                    image_processed[0].images.append(
                        image_processed[i].images[j])

        image_processed[0].infotexts = all_infotexts


        return image_processed[0]
421 |
422 |
423 |
    def _create_grid(self, image_processed: List[modules.processing.Processed], all_batch_params: List[BatchParams]) -> PIL.Image.Image:
        """Create the grid with the images

        Lays the first image of every checkpoint side by side (with the
        configured margin) and saves the result to the grid output folder.

        Args:
            image_processed (List[modules.processing.Processed]): the images
            all_batch_params (List[BatchParams]): the batch parameters

        Returns:
            PIL.Image.Image: the grid
        """
        self.logger.log_info(
            "creating the grid. This can take a while, depending on the amount of images")

        def _getFileName(save_path: str) -> str:
            """Get the file name for the grid.
            The files are ascending numbered (img_0001.png, img_0002.png, ...).

            Args:
                save_path (str): the save path

            Returns:
                str: the file name
            """
            save_path = os.path.join(save_path, "Checkpoint-Prompt-Loop")
            self.logger.debug_log(f"save path: {save_path}")
            if not os.path.exists(save_path):
                os.makedirs(save_path)

            files = os.listdir(save_path)
            pattern = r"img_(\d{4})"

            matching_files = [f for f in files if re.match(pattern, f)]

            if matching_files:

                # Lexicographic sort works because the numbers are zero-padded.
                matching_files.sort()
                last_file = matching_files[-1]
                match = re.search(r"\d{4}", last_file)
                number = int(match.group()) if match else 0
            else:
                number = 0

            new_number = number + 1

            return os.path.join(save_path, f"img_{new_number:04d}.png")

        total_width = 0
        max_height = 0
        min_height = 0

        spacing = self.margin_size


        # Total grid width: one column per checkpoint plus margins.
        for img in image_processed:
            total_width += img.images[0].size[0] + spacing

        # Caption each first image with its checkpoint name.
        img_with_legend = []
        for i, img in enumerate(image_processed):
            img_with_legend.append(self._add_legend(
                img.images[0], all_batch_params[i].checkpoint))

        for img in img_with_legend:
            max_height = max(max_height, img.size[1])
            min_height = min(min_height, img.size[1])

        result_img = Image.new('RGB', (total_width, max_height), "white")

        # Paste each image at its x offset; shorter images are bottom-aligned
        # with black/white filler rectangles above them.
        x_offset = -spacing
        for i, img in enumerate(img_with_legend):
            y_offset = max_height - img.size[1]
            result_img.paste(((0, 0, 0)), (x_offset, 0, x_offset +
                             img.size[0] + spacing, max_height + spacing))
            result_img.paste(((255, 255, 255)), (x_offset, 0,
                             x_offset + img.size[0], max_height - min_height))
            result_img.paste(img, (x_offset + spacing, y_offset))

            x_offset += img.size[0] + spacing

        # Save next to the normal webui grids for the active mode.
        if self.is_img2img:
            result_img.save(_getFileName(self.outdir_img2img_grids))
        else:
            result_img.save(_getFileName(self.outdir_txt2img_grids))

        return result_img
508 |
509 | def _add_legend(self, img: Image, checkpoint_name: str) -> Image:
510 | """Add the checkpoint name to the image
511 |
512 | Args:
513 | img (Image): the image
514 | checkpoint_name (str): the checkpoint name
515 |
516 | Returns:
517 | Image: the image with the checkpoint name as legend
518 | """
519 |
520 | def _find_available_font() -> str: #TODO: make this method more efficient
521 | """Find an available font
522 |
523 | Returns:
524 | str: the font
525 | """
526 |
527 | if self.font is None:
528 |
529 | self.font = fm.findfont(
530 | fm.FontProperties(family='DejaVu Sans'))
531 |
532 | if self.font is None:
533 | font_list = fm.findSystemFonts(
534 | fontpaths=None, fontext='ttf')
535 |
536 | for font_file in font_list:
537 | self.font = os.path.abspath(font_file)
538 | if os.path.isfile(self.font): # type: ignore
539 | self.logger.debug_log("font list font")
540 | return self.font # type: ignore
541 |
542 | self.logger.debug_log("default font")
543 | return ImageFont.load_default()
544 | self.logger.debug_log("DejaVu font")
545 |
546 | return self.font
547 |
548 | def _strip_checkpoint_name(checkpoint_name: str) -> str:
549 | """Remove the path from the checkpoint name
550 |
551 | Args:
552 | checkpoint_name (str): the checkpoint with path
553 |
554 | Returns:
555 | str: the checkpoint name
556 | """
557 | checkpoint_name = os.path.basename(checkpoint_name)
558 | return self.utils.get_clean_checkpoint_path(checkpoint_name)
559 |
560 | def _calculate_font(draw: ImageDraw, text: str, width: int) -> Tuple[int, int]:
561 | """Calculate the font size for the text according to the image width
562 |
563 | Args:
564 | draw (ImageDraw): the draw object
565 | text (str): the text
566 | width (int): the image width
567 |
568 | Returns:
569 | Tuple[int, int]: the font and the text height
570 | """
571 | width -= self.text_margin_left_and_right
572 | default_font_path = _find_available_font()
573 | font_size = 1
574 | font = ImageFont.truetype(
575 | default_font_path, font_size) if default_font_path else ImageFont.load_default()
576 | text_width, text_height = draw.textsize(text, font)
577 |
578 | while text_width < width:
579 | self.logger.debug_log(
580 | f"text width: {text_width}, img width: {width}")
581 | font_size += 1
582 | font = ImageFont.truetype(
583 | default_font_path, font_size) if default_font_path else ImageFont.load_default()
584 | text_width, text_height = draw.textsize(text, font)
585 |
586 | return font, text_height
587 |
588 | checkpoint_name = _strip_checkpoint_name(checkpoint_name)
589 |
590 | width, height = img.size
591 |
592 | draw = ImageDraw.Draw(img)
593 |
594 | font, text_height = _calculate_font(draw, checkpoint_name, width)
595 |
596 | new_image = Image.new("RGB", (width, height + text_height), "white")
597 | new_image.paste(img, (0, text_height))
598 |
599 | new_draw = ImageDraw.Draw(new_image)
600 |
601 | new_draw.text((self.text_margin_left_and_right/4, 0),
602 | checkpoint_name, fill="black", font=font)
603 |
604 | return new_image
605 |
--------------------------------------------------------------------------------
/scripts/settings.py:
--------------------------------------------------------------------------------
1 | """This module adds options to the settings window."""
2 | from modules import shared
3 | from modules import script_callbacks
4 |
def on_ui_settings() -> None:
    """Adds this extension's options to the WebUI settings window."""
    # (section id, section label). NOTE(review): the id keeps its trailing
    # space on purpose — changing it would detach already-saved user settings.
    section = ("batchCP ", "batch checkpoint prompt")
    shared.opts.add_option(
        key="promptRegex",
        info=shared.OptionInfo(
            "{prompt}",
            "Prompt placeholder",
            section=section)
    )

    # BUGFIX: the regex defaults below used plain strings containing invalid
    # escape sequences (\{, \d), which emit DeprecationWarning/SyntaxWarning
    # on modern Python. Raw strings produce byte-identical values.
    shared.opts.add_option(
        key="widthHeightRegex",
        info=shared.OptionInfo(
            r"\{\{size:(\d+)x(\d+)\}\}",
            "Width and height Regex",
            section=section)
    )

    shared.opts.add_option(
        key="batchCountRegex",
        info=shared.OptionInfo(
            r"\{\{count:[0-9]+\}\}",
            "Batch count Regex",
            section=section)
    )

    shared.opts.add_option(
        key="clipSkipRegex",
        info=shared.OptionInfo(
            r"\{\{clip_skip:[0-9]+\}\}",
            "Clip skip Regex",
            section=section)
    )

    shared.opts.add_option(
        key="negPromptRegex",
        info=shared.OptionInfo(
            r"\{\{neg:(.*?)\}\}",
            "negative Prompt Regex",
            section=section)
    )

    shared.opts.add_option(
        key="styleRegex",
        info=shared.OptionInfo(
            r"\{\{style:(.*?)\}\}",
            "style Regex",
            section=section)
    )
55 |
56 | script_callbacks.on_ui_settings(on_ui_settings)
57 |
--------------------------------------------------------------------------------