├── README.md ├── .gitignore ├── prompt_executive_summary.txt ├── prompt_story_details.txt ├── prompt_salience.txt ├── prompt_anticipate.txt ├── LICENSE └── chat.py /README.md: -------------------------------------------------------------------------------- 1 | # AutoMuse_ChatGPT 2 | Making a version of AutoMuse but for the ChatGPT API 3 | -------------------------------------------------------------------------------- /.gitignore: -------------------------------------------------------------------------------- 1 | key_openai.txt 2 | key_pinecone.txt 3 | chat_logs/* 4 | chunk_summaries/* 5 | kg_topics/* 6 | gpt3_logs/* -------------------------------------------------------------------------------- /prompt_executive_summary.txt: -------------------------------------------------------------------------------- 1 | Write an executive summary of the following chat log. 2 | 3 | 4 | 5 | 6 | <> 7 | 8 | 9 | 10 | 11 | EXECUTIVE SUMMARY: -------------------------------------------------------------------------------- /prompt_story_details.txt: -------------------------------------------------------------------------------- 1 | Write a summary of what we learn about the story from the following chat log. 2 | 3 | 4 | 5 | 6 | <> 7 | 8 | 9 | 10 | 11 | STORY DETAILS: -------------------------------------------------------------------------------- /prompt_salience.txt: -------------------------------------------------------------------------------- 1 | Given the following chat log, write a brief summary of only the most salient points of the conversation. 2 | 3 | 4 | 5 | CHAT LOG: 6 | <> 7 | 8 | 9 | 10 | 11 | SALIENT POINTS: -------------------------------------------------------------------------------- /prompt_anticipate.txt: -------------------------------------------------------------------------------- 1 | Given the following chat log, infer the user's actual information needs. 
Attempt to anticipate what the user truly needs even if the user does not fully understand it yet themselves, or is asking the wrong questions. 2 | 3 | 4 | 5 | CHAT LOG: 6 | <> 7 | 8 | 9 | 10 | 11 | ANTICIPATE USER NEEDS: -------------------------------------------------------------------------------- /LICENSE: -------------------------------------------------------------------------------- 1 | MIT License 2 | 3 | Copyright (c) 2023 David Shapiro 4 | 5 | Permission is hereby granted, free of charge, to any person obtaining a copy 6 | of this software and associated documentation files (the "Software"), to deal 7 | in the Software without restriction, including without limitation the rights 8 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell 9 | copies of the Software, and to permit persons to whom the Software is 10 | furnished to do so, subject to the following conditions: 11 | 12 | The above copyright notice and this permission notice shall be included in all 13 | copies or substantial portions of the Software. 14 | 15 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 16 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 17 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE 18 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 19 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, 20 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE 21 | SOFTWARE. 
# --------------------------------------------------------------------------------
# /chat.py
# --------------------------------------------------------------------------------
import os
import openai
import json
import numpy as np
from numpy.linalg import norm
import re
from time import time, sleep
from uuid import uuid4
import datetime


def open_file(filepath):
    """Read and return the entire contents of a UTF-8 text file."""
    with open(filepath, 'r', encoding='utf-8') as infile:
        return infile.read()


def save_file(filepath, content):
    """Write content to filepath as UTF-8, overwriting any existing file."""
    with open(filepath, 'w', encoding='utf-8') as outfile:
        outfile.write(content)


def load_json(filepath):
    """Load and return the JSON payload stored in filepath."""
    with open(filepath, 'r', encoding='utf-8') as infile:
        return json.load(infile)


def save_json(filepath, payload):
    """Serialize payload to filepath as pretty-printed, key-sorted JSON."""
    with open(filepath, 'w', encoding='utf-8') as outfile:
        json.dump(payload, outfile, ensure_ascii=False, sort_keys=True, indent=2)


def timestamp_to_datetime(unix_time):
    """Render a unix timestamp as a human-readable local date/time string."""
    return datetime.datetime.fromtimestamp(unix_time).strftime("%A, %B %d, %Y at %I:%M%p %Z")


def gpt3_embedding(content, engine='text-embedding-ada-002'):
    """Return the embedding vector (a plain list of floats) for content."""
    # Strip non-ASCII characters first; the original noted this avoids
    # unicode-related API errors.
    content = content.encode(encoding='ASCII', errors='ignore').decode()
    response = openai.Embedding.create(input=content, engine=engine)
    return response['data'][0]['embedding']


def chatgpt_completion(messages, model="gpt-3.5-turbo"):
    """Send messages to the ChatCompletion API, log the reply, return its text."""
    response = openai.ChatCompletion.create(model=model, messages=messages)
    text = response['choices'][0]['message']['content']
    # exist_ok avoids the check-then-create race of the original LBYL pattern.
    os.makedirs('chat_logs', exist_ok=True)
    save_file('chat_logs/chat_%s_muse.txt' % time(), text)
    return text


def gpt3_completion(prompt, engine='text-davinci-003', temp=0.0, top_p=1.0, tokens=400, freq_pen=0.0, pres_pen=0.0, stop=['USER:', 'RAVEN:']):
    """Run a GPT-3 Completion with retries; log prompt and reply; return the text.

    Transient API failures are retried up to max_retry times; after that an
    error string is returned (callers treat the return value as plain text
    either way, so this never raises).
    """
    max_retry = 5
    retry = 0
    prompt = prompt.encode(encoding='ASCII', errors='ignore').decode()
    while True:
        try:
            response = openai.Completion.create(
                engine=engine,
                prompt=prompt,
                temperature=temp,
                max_tokens=tokens,
                top_p=top_p,
                frequency_penalty=freq_pen,
                presence_penalty=pres_pen,
                stop=stop)
            text = response['choices'][0]['text'].strip()
            os.makedirs('gpt3_logs', exist_ok=True)
            save_file('gpt3_logs/%s_gpt3.txt' % time(), prompt + '\n\n==========\n\n' + text)
            return text
        except Exception as oops:
            retry += 1
            if retry >= max_retry:
                return "GPT3 error: %s" % oops
            print('Error communicating with OpenAI:', oops)
            sleep(1)


def flatten_convo(conversation):
    """Flatten a list of {'role', 'content'} messages into 'ROLE: content' lines."""
    convo = ''
    for i in conversation:
        convo += '%s: %s\n' % (i['role'].upper(), i['content'])
    return convo.strip()


if __name__ == '__main__':
    convo_length = 30  # NOTE(review): unused here; kept from the original script.
    # .strip() guards against a trailing newline in the key file, which would
    # otherwise silently produce an invalid API key.
    openai.api_key = open_file('key_openai.txt').strip()
    default_system = 'I am an AI named Muse. My primary goal is to help the user plan, brainstorm, outline, and otherwise construct their work of fiction.'
    conversation = list()
    conversation.append({'role': 'system', 'content': default_system})
    counter = 0
    while True:
        # get user input, save to file
        a = input('\n\nUSER: ')
        conversation.append({'role': 'user', 'content': a})
        os.makedirs('chat_logs', exist_ok=True)
        save_file('chat_logs/chat_%s_user.txt' % time(), a)
        flat = flatten_convo(conversation)
        # infer user intent, disposition, valence, needs
        prompt = open_file('prompt_anticipate.txt').replace('<>', flat)
        anticipation = gpt3_completion(prompt)
        print('\n\nANTICIPATION: %s' % anticipation)
        # summarize the conversation to the most salient points
        prompt = open_file('prompt_salience.txt').replace('<>', flat)
        salience = gpt3_completion(prompt)
        print('\n\nSALIENCE: %s' % salience)
        # update SYSTEM based upon user needs and salience
        conversation[0]['content'] = default_system + ''' Here's a brief summary of the conversation: %s - And here's what I expect the user's needs are: %s''' % (salience, anticipation)
        # generate a response
        response = chatgpt_completion(conversation)
        conversation.append({'role': 'assistant', 'content': response})
        print('\n\nMUSE: %s' % response)
        # increment counter and consolidate memories every 5 exchanges
        counter += 2
        if counter >= 10:
            # reset conversation to just the system message
            conversation = [{'role': 'system', 'content': default_system}]
            # BUG FIX: reset the counter too. The original left counter >= 10,
            # so after the first reset the conversation was wiped on EVERY
            # subsequent turn instead of every fifth exchange.
            counter = 0