├── blacklist.txt ├── .gitignore ├── .github └── ISSUE_TEMPLATE │ ├── custom.md │ ├── feature_request.md │ └── bug_report.md ├── install.py ├── models.json ├── licence ├── javascript └── prompt_generator_hints.js ├── README.md └── scripts └── prompt_generator.py /blacklist.txt: -------------------------------------------------------------------------------- 1 | -------------------------------------------------------------------------------- /.gitignore: -------------------------------------------------------------------------------- 1 | /style.css -------------------------------------------------------------------------------- /.github/ISSUE_TEMPLATE/custom.md: -------------------------------------------------------------------------------- 1 | --- 2 | name: Custom issue template 3 | about: Describe this issue template's purpose here. 4 | title: '' 5 | labels: '' 6 | assignees: '' 7 | 8 | --- 9 | 10 | 11 | -------------------------------------------------------------------------------- /install.py: -------------------------------------------------------------------------------- 1 | import launch 2 | if not launch.is_installed("transformers"): 3 | launch.run_pip("install --upgrade transformers", "Requirement of Prompt-Maker") 4 | if not launch.is_installed("torch"): 5 | launch.run_pip("install --upgrade torch", "Requirement of Prompt-Maker") -------------------------------------------------------------------------------- /models.json: -------------------------------------------------------------------------------- 1 | [ 2 | { 3 | "Title":"Gustavosta", 4 | "Tokenizer":"gpt2", 5 | "Model":"Gustavosta/MagicPrompt-Dalle" 6 | }, 7 | { 8 | "Title":"FredZhang7", 9 | "Tokenizer":"distilgpt2", 10 | "Model":"FredZhang7/distilgpt2-stable-diffusion-v2" 11 | } 12 | ] -------------------------------------------------------------------------------- /.github/ISSUE_TEMPLATE/feature_request.md: -------------------------------------------------------------------------------- 1 | --- 2 | name: Feature request 3 | about: Suggest an idea for this project 4 | title: '' 5 | labels: '' 6 | assignees: '' 7 | 8 | --- 9 | 10 | **Is your feature request related to a problem? Please describe.** 11 | A clear and concise description of what the problem is. Ex. I'm always frustrated when [...] 12 | 13 | **Describe the solution you'd like** 14 | A clear and concise description of what you want to happen. 15 | 16 | **Describe alternatives you've considered** 17 | A clear and concise description of any alternative solutions or features you've considered. 18 | 19 | **Additional context** 20 | Add any other context or screenshots about the feature request here. 21 | -------------------------------------------------------------------------------- /.github/ISSUE_TEMPLATE/bug_report.md: -------------------------------------------------------------------------------- 1 | --- 2 | name: Bug report 3 | about: Create a report to help us improve 4 | title: '' 5 | labels: '' 6 | assignees: '' 7 | 8 | --- 9 | 10 | **Describe the bug** 11 | A clear and concise description of what the bug is. 12 | 13 | **To Reproduce** 14 | Steps to reproduce the behavior: 15 | 1. Go to '...' 16 | 2. Click on '....' 17 | 3. Scroll down to '....' 18 | 4. See error 19 | 20 | **Expected behavior** 21 | A clear and concise description of what you expected to happen. 22 | 23 | **Screenshots** 24 | If applicable, add screenshots to help explain your problem. 25 | 26 | **What fork of Webui are you using (Eg: Automatic1111, vladmandic):** 27 | 28 | 29 | **Additional context** 30 | Add any other context about the problem here. 31 | -------------------------------------------------------------------------------- /licence: -------------------------------------------------------------------------------- 1 | MIT License 2 | 3 | Copyright (c) 2023 imrayya 4 | 5 | Permission is hereby granted, free of charge, to any person obtaining a copy 6 | of this software and associated documentation files (the "Software"), to deal 7 | in the Software without restriction, including without limitation the rights 8 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell 9 | copies of the Software, and to permit persons to whom the Software is 10 | furnished to do so, subject to the following conditions: 11 | 12 | The above copyright notice and this permission notice shall be included in all 13 | copies or substantial portions of the Software. 14 | 15 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 16 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 17 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE 18 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 19 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, 20 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE 21 | SOFTWARE. -------------------------------------------------------------------------------- /javascript/prompt_generator_hints.js: -------------------------------------------------------------------------------- 1 | //Basically copied and adapted from AUTOMATIC1111 implementation of the main UI 2 | // mouseover tooltips for various UI elements in the form of "UI element label"="Tooltip text". 3 | 4 | prompt_generator_titles = { 5 | "Temperature": "A higher temperature will produce more diverse results, but with a higher risk of less coherent text", 6 | "Max Length": "The maximum number of tokens for the output of the model", 7 | "Top K": "Strategy is to sample from a shortlist of the top K tokens. This approach allows the other high-scoring tokens a chance of being picked.", 8 | "Repetition Penalty": "The parameter for repetition penalty. 1.0 means no penalty. Default setting is 1.2. Paper explaining it is linked to Github's readme", 9 | "How Many To Generate":"The number of results to generate. Not guaranteed if models fails to create them", 10 | "Generate Using Magic Prompt":"Be aware that sometimes the model fails to produce anything or less than the wanted amount, either try again or use a new prompt in that case" 11 | } 12 | 13 | onUiUpdate(function(){ 14 | gradioApp().querySelectorAll('span, button, select, p').forEach(function(span){ 15 | tooltip = prompt_generator_titles[span.textContent]; 16 | 17 | if(!tooltip){ 18 | tooltip = prompt_generator_titles[span.value]; 19 | } 20 | 21 | if(!tooltip){ 22 | for (const c of span.classList) { 23 | if (c in prompt_generator_titles) { 24 | tooltip = prompt_generator_titles[c]; 25 | break; 26 | } 27 | } 28 | } 29 | 30 | if(tooltip){ 31 | span.title = tooltip; 32 | } 33 | }) 34 | 35 | gradioApp().querySelectorAll('select').forEach(function(select){ 36 | if (select.onchange != null) return; 37 | 38 | select.onchange = function(){ 39 | select.title = prompt_generator_titles[select.value] || ""; 40 | } 41 | }) 42 | }) 43 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | # Prompt Generator 2 | 3 | Adds a tab to the webui that allows the user to generate a prompt from a small base prompt. Based on [FredZhang7/distilgpt2-stable-diffusion-v2](https://huggingface.co/FredZhang7/distilgpt2-stable-diffusion-v2) and [Gustavosta/MagicPrompt-Stable-Diffusion](https://huggingface.co/Gustavosta/MagicPrompt-Stable-Diffusion). I did nothing apart from porting it to [AUTOMATIC1111 WebUI](https://github.com/AUTOMATIC1111/stable-diffusion-webui) 4 | 5 | 6 | 7 | ![Screenshot 2023-04-29 000027](https://user-images.githubusercontent.com/8998556/235261664-2c92689d-9915-4543-8d6a-57a8ecd0f484.png) 8 | 9 | 10 | ## Installation 11 | 12 | 1. Install [AUTOMATIC1111's Stable Diffusion Webui](https://github.com/AUTOMATIC1111/stable-diffusion-webui) 13 | 2. Clone this repository into the `extensions` folder inside the webui 14 | 15 | ## Usage 16 | 17 | 1. Write in the prompt in the *Start of the prompt* text box 18 | 2. Select which model you want to use 19 | 3. Click Generate and wait 20 | 21 | The initial use of the model may take longer as it needs to be downloaded to your machine for offline use. The model will be used on your device and will be stored in the default location of `*username*/.cache/huggingface/hub/models`. The entire process of generating results will be done on your local machine and not require internet access. 22 | 23 | ## Parameters Explanation 24 | 25 | - **Start of the prompt**: As the name suggests, the start of the prompt that the generator should start with 26 | - **Temperature**: A higher temperature will produce more diverse results, but with a higher risk of less coherent text 27 | - **Top K**: Strategy is to sample from a shortlist of the top K tokens. This approach allows the other high-scoring tokens a chance of being picked. 28 | - **Max Length**: the maximum number of tokens for the output of the model 29 | - **Repetition Penalty**: The parameter for repetition penalty. 1.0 means no penalty. See [this paper](https://arxiv.org/pdf/1909.05858.pdf) for more details. Default setting is 1.2 30 | - **How Many To Generate**: The number of results to generate 31 | - **Use blacklist?**: Using `.\extensions\stable-diffusion-webui-Prompt_Generator\blacklist.txt`. It will delete any matches to the generated result (case insensitive). Each item to be filtered out should be on a new line. *Be aware that it simply deletes it and doesn't generate more to make up for the lost words* 32 | - **Use punctuation**: Allows the use of commas in the output 33 | 34 | ## Models 35 | 36 | There are two 'default' models provided: 37 | 38 | ### FredZhang7 39 | 40 | Made by [FredZhang7](https://huggingface.co/FredZhang7) under creativeml-openrail-m license. 41 | 42 | Useful to get styles for a prompt. Eg: "A cat sitting" -> "A cat sitting on a chair, digital art. The room is made of clay and metal with the sun shining through in front trending at Artstation 4k uhd..." 43 | 44 | ### MagicPrompt 45 | 46 | Made by [Gustavosta](https://huggingface.co/Gustavosta) under the MIT license. 47 | 48 | Useful to get more natural language prompts. Eg: "A cat sitting" -> "A cat sitting in a chair, wearing pair of sunglasses" 49 | 50 | *Be aware that sometimes the model fails to produce anything or less than the wanted amount, either try again or use a new prompt in that case* 51 | 52 | ## Install more models 53 | 54 | To install more model to use, ensure that the models are hosted on [huggingface.co](https://huggingface.co) and edit the json file at `.\extensions\stable-diffusion-webui-Prompt_Generator\models.json` with the relevant information. Use the models in the file as an example 55 | 56 | You might need to restart the extension/reload the UI if new items are added onto the list 57 | 58 | ## Credits 59 | 60 | Credits to both [FredZhang7](https://huggingface.co/FredZhang7) and [Gustavosta](https://huggingface.co/Gustavosta) 61 | -------------------------------------------------------------------------------- /scripts/prompt_generator.py: -------------------------------------------------------------------------------- 1 | """ 2 | Copyright 2023 Imrayya 3 | 4 | Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: 5 | 6 | The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. 7 | 8 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 9 | 10 | """ 11 | 12 | 13 | import json 14 | import re 15 | 16 | import gradio as gr 17 | import modules 18 | from pathlib import Path 19 | from modules import script_callbacks 20 | import modules.scripts as scripts 21 | from transformers import GPT2LMHeadModel, GPT2Tokenizer 22 | 23 | result_prompt = "" 24 | models = {} 25 | max_no_results = 20 # TODO move to setting panel 26 | base_dir = scripts.basedir() 27 | model_file = Path(base_dir, "models.json") 28 | 29 | 30 | class Model: 31 | ''' 32 | Small strut to hold data for the text generator 33 | ''' 34 | 35 | def __init__(self, name, model, tokenizer) -> None: 36 | self.name = name 37 | self.model = model 38 | self.tokenizer = tokenizer 39 | pass 40 | 41 | 42 | def populate_models(): 43 | """Get the models that this extension can use via models.json 44 | """ 45 | # TODO add button to refresh and update model list 46 | path = model_file 47 | with open(path, 'r') as f: 48 | data = json.load(f) 49 | for item in data: 50 | name = item["Title"] 51 | model = item["Model"] 52 | tokenizer = item["Tokenizer"] 53 | models[name] = Model(name, model, tokenizer) 54 | 55 | 56 | def add_to_prompt(prompt): # A holder TODO figure out how to get rid of it 57 | return prompt 58 | 59 | 60 | def get_list_blacklist(): 61 | # Set the directory you want to start from 62 | file_path = './extensions/stable-diffusion-webui-Prompt_Generator/blacklist.txt' 63 | things_to_black_list = [] 64 | with open(file_path, 'r') as f: 65 | # Read each line in the file and append it to the list 66 | for line in f: 67 | things_to_black_list.append(line.rstrip()) 68 | 69 | return things_to_black_list 70 | 71 | 72 | def on_ui_tabs(): 73 | # Method to create the extended prompt 74 | def generate_longer_generic(prompt, temperature, top_k, 75 | max_length, repetition_penalty, 76 | num_return_sequences, name, use_punctuation=False, 77 | use_blacklist=False): # TODO make the progress bar work 78 | """Generates a longer string from the input 79 | 80 | Args: 81 | prompt (str): As the name suggests, the start of the prompt that the generator should start with. 82 | 83 | temperature (float): A higher temperature will produce more diverse results, but with a higher risk of less coherent text 84 | 85 | top_k (float): Strategy is to sample from a shortlist of the top K tokens. This approach allows the other high-scoring tokens a chance of being picked. 86 | 87 | max_length (int): the maximum number of tokens for the output of the model 88 | 89 | repetition_penalty (float): The parameter for repetition penalty. 1.0 means no penalty. Default setting is 1.2 90 | 91 | num_return_sequences (int): The number of results to generate 92 | 93 | name (str): Which Model to use 94 | 95 | use_punctuation (bool): Allows the use of commas in the output. Defaults to False. 96 | 97 | use_blacklist (bool): It will delete any matches to the generated result (case insensitive). Each item to be filtered out should be on a new line. Defaults to False. 98 | 99 | Returns: 100 | Returns only an error otherwise saves it in result_prompt 101 | """ 102 | try: 103 | print("[Prompt_Generator]:","Loading Tokenizer") 104 | tokenizer = GPT2Tokenizer.from_pretrained(models[name].tokenizer) 105 | tokenizer.add_special_tokens({'pad_token': '[PAD]'}) 106 | print("[Prompt_Generator]:","Loading Model") 107 | model = GPT2LMHeadModel.from_pretrained(models[name].model) 108 | except Exception as e: 109 | print("[Prompt_Generator]:",f"Exception encountered while attempting to install tokenizer") 110 | return gr.update(), f"Error: {e}" 111 | try: 112 | print("[Prompt_Generator]:",f"Generate new prompt from: \"{prompt}\" with {name}") 113 | input_ids = tokenizer(prompt, return_tensors='pt').input_ids 114 | if (use_punctuation): 115 | output = model.generate(input_ids, do_sample=True, temperature=temperature, 116 | top_k=round(top_k), max_length=max_length, 117 | num_return_sequences=num_return_sequences, 118 | repetition_penalty=float( 119 | repetition_penalty), 120 | early_stopping=True) 121 | else: 122 | output = model.generate(input_ids, do_sample=True, temperature=temperature, 123 | top_k=round(top_k), max_length=max_length, 124 | num_return_sequences=num_return_sequences, 125 | repetition_penalty=float( 126 | repetition_penalty), 127 | penalty_alpha=0.6, no_repeat_ngram_size=1, 128 | early_stopping=True) 129 | print("[Prompt_Generator]:","Generation complete!") 130 | tempString = "" 131 | if (use_blacklist): 132 | blacklist = get_list_blacklist() 133 | for i in range(len(output)): 134 | 135 | tempString += tokenizer.decode( 136 | output[i], skip_special_tokens=True) + "\n" 137 | 138 | if (use_blacklist): 139 | for to_check in blacklist: 140 | tempString = re.sub( 141 | to_check, "", tempString, flags=re.IGNORECASE) 142 | if (i == 0): 143 | global result_prompt 144 | 145 | result_prompt = tempString 146 | # print(result_prompt) 147 | except Exception as e: 148 | print("[Prompt_Generator]:", 149 | f"Exception encountered while attempting to generate prompt: {e}") 150 | return gr.update(), f"Error: {e}" 151 | 152 | def ui_dynamic_result_visible(num): 153 | """Makes the results visible""" 154 | k = int(num) 155 | return [gr.Row.update(visible=True)]*k + [gr.Row.update(visible=False)]*(max_no_results-k) 156 | 157 | def ui_dynamic_result_prompts(): 158 | """Populates the results with the prompts""" 159 | 160 | lines = result_prompt.splitlines() 161 | num = len(lines) 162 | result_list = [] 163 | for i in range(int(max_no_results)): 164 | if (i < num): 165 | result_list.append(lines[i]) 166 | else: 167 | result_list.append("") 168 | return result_list 169 | 170 | def ui_dynamic_result_batch(): 171 | return result_prompt 172 | 173 | def save_prompt_to_file(path, append: bool): 174 | if len(result_prompt) == 0: 175 | print("[Prompt_Generator]:","Prompt is empty") 176 | return 177 | with open(path, encoding="utf-8", mode="a" if append else "w") as f: 178 | f.write(result_prompt) 179 | print("[Prompt_Generator]:","Prompt written to: ", path) 180 | 181 | # ---------------------------------------------------------------------------- 182 | # UI structure 183 | txt2img_prompt = modules.ui.txt2img_paste_fields[0][0] 184 | img2img_prompt = modules.ui.img2img_paste_fields[0][0] 185 | 186 | with gr.Blocks(analytics_enabled=False) as prompt_generator: 187 | # Handles UI for prompt creation 188 | with gr.Column(): 189 | with gr.Row(): 190 | prompt_textbox = gr.Textbox( 191 | lines=2, elem_id="promptTxt", label="Start of the prompt") 192 | with gr.Column(): 193 | gr.HTML( 194 | "Mouse over the labels to access tooltips that provide explanations for the parameters.") 195 | with gr.Row(): 196 | temp_slider = gr.Slider( 197 | elem_id="temp_slider", label="Temperature", interactive=True, minimum=0, maximum=1, value=0.9) 198 | maxLength_slider = gr.Slider( 199 | elem_id="max_length_slider", label="Max Length", interactive=True, minimum=1, maximum=200, step=1, value=90) 200 | topK_slider = gr.Slider( 201 | elem_id="top_k_slider", label="Top K", value=8, minimum=1, maximum=20, step=1, interactive=True) 202 | with gr.Column(): 203 | with gr.Row(): 204 | repetitionPenalty_slider = gr.Slider( 205 | elem_id="repetition_penalty_slider", label="Repetition Penalty", value=1.2, minimum=0.1, maximum=10, interactive=True) 206 | numReturnSequences_slider = gr.Slider( 207 | elem_id="num_return_sequences_slider", label="How Many To Generate", value=5, minimum=1, maximum=max_no_results, interactive=True, step=1) 208 | with gr.Column(): 209 | with gr.Row(): 210 | useBlacklist_checkbox = gr.Checkbox(label="Use blacklist?") 211 | gr.HTML(value="
Using \".\extensions\stable-diffusion-webui-Prompt_Generator\\blacklist.txt\".
It will delete any matches to the generated result (case insensitive).
") 212 | with gr.Column(): 213 | with gr.Row(): 214 | populate_models() 215 | generate_dropdown = gr.Dropdown(choices=list(models.keys()), value=list(models.keys())[ 216 | 1 if len(models) > 0 else 0], label="Which model to use?", show_label=True) # TODO Add default to setting page 217 | use_punctuation_check = gr.Checkbox(label="Use punctuation?") 218 | generate_button = gr.Button( 219 | value="Generate", elem_id="generate_button") # TODO Add element to show that it is working in the background so users don't think nothing is happening 220 | 221 | # Handles UI for results 222 | results_vis = [] 223 | results_txt_list = [] 224 | with gr.Tab("Results"): 225 | with gr.Column(): 226 | for i in range(max_no_results): 227 | with gr.Row(visible=False) as row: 228 | with gr.Column(scale=3): # Guessing at the scale 229 | textBox = gr.Textbox(label="", lines=3) 230 | with gr.Column(scale=1): 231 | txt2img = gr.Button("send to txt2img") 232 | img2img = gr.Button("send to img2img") 233 | # Handles ___2img buttons 234 | txt2img.click(add_to_prompt, inputs=[ 235 | textBox], outputs=[txt2img_prompt]).then(None, _js='switch_to_txt2img', 236 | inputs=None, outputs=None) 237 | img2img.click(add_to_prompt, inputs=[ 238 | textBox], outputs=[img2img_prompt]).then(None, _js='switch_to_img2img', 239 | inputs=None, outputs=None) 240 | results_txt_list.append(textBox) 241 | results_vis.append(row) 242 | with gr.Tab("Batch"): 243 | with gr.Column(): 244 | batch_texbox = gr.Textbox("", label="Results") 245 | with gr.Row(): 246 | with gr.Column(scale=4): 247 | savePathText = gr.Textbox( 248 | Path(base_dir, "batch_prompt.txt"), label="Path", interactive=True) 249 | with gr.Column(scale=1): 250 | append_checkBox = gr.Checkbox(label="Append") 251 | save_button = gr.Button("Save To file") 252 | 253 | # ---------------------------------------------------------------------------------- 254 | # Handle buttons 255 | save_button.click(fn=save_prompt_to_file, inputs=[ 256 | savePathText, append_checkBox]) 257 | # Please note that we use `.then()` to run other ui elements after the generation is done 258 | generate_button.click(fn=generate_longer_generic, inputs=[ 259 | prompt_textbox, temp_slider, topK_slider, maxLength_slider, 260 | repetitionPenalty_slider, numReturnSequences_slider, 261 | generate_dropdown, use_punctuation_check, useBlacklist_checkbox]).then( 262 | fn=ui_dynamic_result_visible, inputs=numReturnSequences_slider, 263 | outputs=results_vis).then( 264 | fn=ui_dynamic_result_prompts, outputs=results_txt_list).then(fn=ui_dynamic_result_batch, outputs=batch_texbox) 265 | return (prompt_generator, "Prompt Generator", "Prompt Generator"), 266 | 267 | 268 | script_callbacks.on_ui_tabs(on_ui_tabs) 269 | --------------------------------------------------------------------------------