"""prompt-generator-stable-diffusion.

Markov-chain based prompt generator for Stable Diffusion prompts.

- Hugging Face space: https://huggingface.co/spaces/thefcraft/prompt-generator-stable-diffusion
- Hugging Face model: https://huggingface.co/thefcraft/prompt-generator-stable-diffusion

Usage: ensure ``models.pickle`` is present (``download_model()`` fetches and
extracts it), then call ``generateText(models['modelname'])``.
"""
import os
import pickle
import random
from zipfile import ZipFile

import numpy as np

MODEL_URL = ("https://huggingface.co/thefcraft/prompt-generator-stable-diffusion"
             "/resolve/main/models.pickle.zip")
MODEL_ZIP = "models.pickle.zip"
MODEL_FILE = "models.pickle"


def download_model(force=False):
    """Download and extract ``models.pickle`` (~108 MB archive) into the CWD.

    A failed first attempt retries once with ``force=True`` (re-downloading
    the archive); a second failure prints a manual-download link.

    Returns True on success, None on final failure (preserves the original
    best-effort contract).
    """
    import wget  # third-party; imported lazily so the module imports without it

    if force:
        print('downloading model file size is 108 MB so it may take some time to complete...')
    try:
        # Original truncated the zip with open(..., 'w') then downloaded twice;
        # a single guarded download is equivalent and avoids the extra request.
        if force or not os.path.exists(MODEL_ZIP):
            wget.download(MODEL_URL, MODEL_ZIP)
        print('Download zip file now extracting model')
        with ZipFile(MODEL_ZIP, 'r') as archive:
            archive.extractall()
        print('extracted model .. now all done')
        return True
    except Exception:  # was a bare except:; narrowed so KeyboardInterrupt escapes
        if not force:
            return download_model(force=True)
        print('Something went wrong\ndownload model via link: `https://huggingface.co/thefcraft/prompt-generator-stable-diffusion/tree/main`')


# Run from the script's own directory so the relative model path resolves.
try:
    os.chdir(os.path.dirname(os.path.abspath(__file__)))
except (OSError, NameError):
    pass  # e.g. interactive session without __file__; stay in the current CWD

if not os.path.exists(MODEL_FILE):
    download_model()

with open(MODEL_FILE, 'rb') as f:
    models = pickle.load(f)

LORA_TOKEN = ''  # '<|>LORA_TOKEN<|>'
# WEIGHT_TOKEN = '<|>WEIGHT_TOKEN<|>'
NOT_SPLIT_TOKEN = '<|>NOT_SPLIT_TOKEN<|>'


def sample_next(ctx: str, model, k):
    """Sample the next comma-separated token given the last ``k`` tokens.

    ``model`` maps a context string to {token: probability}. Returns " "
    when the context is unknown (dead end in the chain).
    """
    ctx = ', '.join(ctx.split(', ')[-k:])
    if model.get(ctx) is None:
        return " "
    choices = list(model[ctx].keys())
    probs = list(model[ctx].values())
    return np.random.choice(choices, p=probs)


def generateText(model, minLen=100, size=5):
    """Generate one prompt of at least ``minLen`` characters.

    Walks the Markov chain from a random starting key until a newline is
    produced, then keeps the text before the newline. ``size`` is kept for
    interface compatibility (used only by commented-out multi-prompt code
    in the original).
    """
    keys = list(model.keys())
    # All keys share the same context width; measure it from any key.
    k = len(random.choice(keys).split(', '))

    while True:
        sentence = random.choice(keys)
        ctx = ', '.join(sentence.split(', ')[-k:])
        while '\n' not in sentence:
            sentence += f", {sample_next(ctx, model, k)}"
            ctx = ', '.join(sentence.split(', ')[-k:])
        sentence = sentence.replace(NOT_SPLIT_TOKEN, ', ')
        prompt = sentence.split('\n')[0]
        # NOTE(review): source text is truncated right at `if len(prompt)`;
        # assumed the original regenerates until the prompt reaches minLen —
        # confirm against the upstream repository.
        if len(prompt) >= minLen:
            return prompt
# Duplicate of the sampling code in main.py (three near-identical copies
# exist in this dump: README example, main.py, main_ui.py); kept in sync.
LORA_TOKEN = ''  # '<|>LORA_TOKEN<|>'
# WEIGHT_TOKEN = '<|>WEIGHT_TOKEN<|>'
NOT_SPLIT_TOKEN = '<|>NOT_SPLIT_TOKEN<|>'


def sample_next(ctx: str, model, k):
    """Sample the next comma-separated token for the last-``k``-token context.

    ``model`` maps a context string to {token: probability}. Returns " "
    when the context is not present in the chain (dead end).
    """
    ctx = ', '.join(ctx.split(', ')[-k:])
    if model.get(ctx) is None:
        return " "
    choices = list(model[ctx].keys())
    probs = list(model[ctx].values())
    # Probabilities in the pickled model sum to 1 per context.
    return np.random.choice(choices, p=probs)


def generateText(model, minLen=100, size=5):
    """Generate a single prompt of at least ``minLen`` characters.

    Starts from a random model key and extends it token by token until the
    chain emits a newline; the text before the newline is the prompt.
    ``size`` is retained for interface compatibility (only referenced by
    commented-out multi-prompt code in the original).
    """
    keys = list(model.keys())
    k = len(random.choice(keys).split(', '))  # context width shared by all keys

    while True:
        sentence = random.choice(keys)
        ctx = ', '.join(sentence.split(', ')[-k:])
        while '\n' not in sentence:
            sentence += f", {sample_next(ctx, model, k)}"
            ctx = ', '.join(sentence.split(', ')[-k:])
        sentence = sentence.replace(NOT_SPLIT_TOKEN, ', ')
        prompt = sentence.split('\n')[0]
        # NOTE(review): the source is truncated at `if len(prompt)`; assumed
        # the original retries until the prompt reaches minLen — confirm
        # against the upstream repository.
        if len(prompt) >= minLen:
            return prompt