├── .gitignore ├── reasoners ├── internal_monologue.png ├── README.md ├── internal_monologue.py ├── objective_oriented.py ├── structured.py └── structured2.py ├── code_gen ├── README.md └── code_gen.py ├── LICENSE ├── context_management ├── context_management.py ├── memory.py └── README.md └── README.md /.gitignore: -------------------------------------------------------------------------------- 1 | __pycache__ 2 | .chatgpt_history -------------------------------------------------------------------------------- /reasoners/internal_monologue.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/alexfazio/globe-agents/main/reasoners/internal_monologue.png -------------------------------------------------------------------------------- /code_gen/README.md: -------------------------------------------------------------------------------- 1 | # Code Generation (🚧🦺 UNDER CONSTRUCTION 🦺🚧) 2 | Function calling is great in theory, but the JSON formatting is unreliable and more importantly, the model doesn't reason in JSON space! On the other hand, LLMs are very good at reasoning about code, and reliably adhering to syntax. The other benefit of code generation is error handling. Unlike with function calling, we can automatically run the code, check for errors, and feed the errors back to ChatGPT so it can fix the original code. 
3 | 4 | 🏗️ 🏗️ 🏗️ 5 | 6 | We're currently working on some more advanced applications of code generation, so we'll be updating this section often -------------------------------------------------------------------------------- /LICENSE: -------------------------------------------------------------------------------- 1 | Copyright 2023 Ivan Yevenko 2 | 3 | Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the “Software”), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: 4 | 5 | The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. 6 | 7 | THE SOFTWARE IS PROVIDED “AS IS”, WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
-------------------------------------------------------------------------------- /context_management/context_management.py: -------------------------------------------------------------------------------- 1 | import chatgpt 2 | 3 | class Context(): 4 | def __init__(self, messages=None): 5 | self.messages = messages or [] 6 | 7 | def add_message(self, role, content, name=None, idx=None): 8 | message = {'role': role, 'content': content} 9 | if name: 10 | message['name'] = name 11 | if idx is None: 12 | self.messages.append(message) 13 | else: 14 | self.messages.insert(idx, message) 15 | 16 | def clear(self): 17 | self.messages = [] 18 | 19 | def branch(self) -> 'ContextBranch': 20 | return ContextBranch(self) 21 | 22 | 23 | class ContextBranch(Context): 24 | def __init__(self, context: Context): 25 | self.context = context 26 | 27 | def __enter__(self): 28 | self.old_messages = self.context.messages 29 | self.context.messages = self.context.messages.copy() 30 | 31 | def __exit__(self, exc_type, exc_value, traceback): 32 | self.context.messages = self.old_messages 33 | 34 | 35 | if __name__ == '__main__': 36 | context = Context() 37 | context.add_message('user', "What should I do with my life?") 38 | 39 | for sys_msg in [ 40 | "You are a based twitter user. Your influences are Paul Graham, Peter Thiel, and Elon Musk.", 41 | "You are a typical engineering student. 
You are not too intelligent and mostly follow the herd.", 42 | ]: 43 | with context.branch(): 44 | context.add_message('system', sys_msg, idx=0) 45 | response = chatgpt.complete(context.messages, model='gpt-4', use_cache=True) 46 | print(response) 47 | print('\n***\n') -------------------------------------------------------------------------------- /context_management/memory.py: -------------------------------------------------------------------------------- 1 | import chatgpt 2 | from context_management import Context 3 | 4 | 5 | class MemoryManager: 6 | def __init__(self, context): 7 | self.context = context 8 | self.memories = {} 9 | 10 | def add_memory(self, name, memory): 11 | memory = memory if callable(memory) else (lambda m=memory: m) 12 | self.memories[name] = memory 13 | 14 | def remove_memory(self, name): 15 | if name in self.memories: 16 | del self.memories[name] 17 | 18 | def load_memories(self, *names): 19 | if len(names) == 0: 20 | names = self.memories.keys() 21 | 22 | mem_idx = int(self.context.messages[0]['role'] =='system') 23 | for name in names: 24 | if name not in self.memories: 25 | continue 26 | memory = self.memories[name]() 27 | 28 | found = False 29 | prefix = f'[Loaded Memory "{name}"]: ' 30 | for m in self.context.messages: 31 | if m['content'].startswith(prefix): 32 | m['content'] = prefix + memory 33 | found = True 34 | 35 | if not found: 36 | self.context.add_message('system', content=f'[Loaded Memory "{name}"]: {memory}', name='load_memory', idx=mem_idx) 37 | mem_idx += 1 38 | 39 | 40 | if __name__ == '__main__': 41 | context = Context() 42 | context.add_message('system', "You are regular citizen walking down the street.\nYou use your memory of citizens in your neighborhood to inform your actions.\nJohn has just approached your and you must respond.") 43 | context.add_message('user', "Hey 👋, it's John!") 44 | 45 | memory_manager = MemoryManager(context) 46 | memory_manager.add_memory('who is john', 'John murdered your family.') 47 | 
48 | with context.branch(): 49 | response = chatgpt.complete(context.messages, model='gpt-4', use_cache=True) 50 | print('Without memory loaded:\n') 51 | print(response) 52 | 53 | with context.branch(): 54 | memory_manager.load_memories('who is john') 55 | response = chatgpt.complete(context.messages, model='gpt-4', use_cache=True) 56 | print('\nWith memory loaded:\n') 57 | print(response) 58 | -------------------------------------------------------------------------------- /reasoners/README.md: -------------------------------------------------------------------------------- 1 | # Reasoners 2 | Here we showcase a few tricks that leverage **internal monologue** to give us more direct control over the thinking process of LLMs. Heavily inspired by [SocialAGI](https://github.com/opensouls/SocialAGI). 3 | 4 | ## Internal Monologue 5 | Internal monologue provides a workspace for LLMs to reason before responding. This allows the LLM to construct explicit plans, consider more options than one, follow explicitly defined reasoning steps instead of simple inductive reasoning, etc... 6 | 7 | ![Internal Monologue](internal_monologue.png) 8 | 9 | ## Objective-oriented Programming 10 | Objective-oriented programming is a direct consequence of internal monologue, since it allows the LLM to explicitly reflect on its state. If we combine fuzzy reasoning abilities with discrete reasoning via function calling, we can unlock an entirely new state-based programming paradigm. The core idea is you can write code like this: 11 | 12 | ```python 13 | reasoner.set_objective("do some task") 14 | while not reasoner.objective_complete: 15 | # Insert planning step here! 16 | do_something() 17 | reasoner.evaluate_objective() 18 | do_something_else() 19 | ``` 20 | 21 | Given this framework, as long as you can break down a complex task into a list of simpler tasks (objectives), an LLM has a much higher chance of completing the complex task. 
22 | 23 | ## Structured Outputs 24 | One of the biggest problems we ran into when constructing complex plans or trying to discretely control the action space of agents, was the lack of structured outputs. Reasoning in language is nice, but how do you convert the LLM's decisions into executable code? 25 | 26 | One way to "force" structured outputs from LLM's is OpenAI's [function calling API](https://platform.openai.com/docs/guides/gpt/function-calling). However, instead of using the API for its intended purpose of outputting functions to call, we can leverage the trained JSON-formatted output abilities to output arbitrary data structures. 27 | 28 | `structured.py` shows how to directly use function calling to output a response according to a specific type, like a list of strings. `structured2.py` generalizes this to outputting an arbitrary pydantic BaseModel instance using some advanced psyoping strats. These ideas are further extended in [marvin](https://github.com/PrefectHQ/marvin) and [instructor](https://github.com/jxnl/instructor), but good luck figuring those out and making them work for your specific usecase. 29 | -------------------------------------------------------------------------------- /context_management/README.md: -------------------------------------------------------------------------------- 1 | # Context Management 2 | One of the biggest problems with creating prompting abstractions is it's very easy to lose track of what's being put into the language model's context. This is very bad! If you don't carefully manage what the LLM sees on a given completion, you'll quickly run out of context length, dilute the model's reasoning or confuse it to the point that it outputs complete garbage. 3 | 4 | One piece of code we found ourselves writing over and over again was copying message histories, doing some operations, then restoring the old copied message history. This is usually simple enough, but when you require more complex scoping, this is a PITA. 
5 | 6 | Our attempt at a pythonic solution to prompt scoping looks like this: 7 | 8 | ```python 9 | context = Context() 10 | with context.branch(): 11 | context.add_message('user', 'Relevant context for tasks 1-3') 12 | result = do_task1() 13 | context.add_message('assistant', result) 14 | with context.branch(): 15 | context.add_message('user', 'Relevant context for task 2') 16 | result = do_task2() 17 | # task2 relevant context not included 18 | result = do_task3() 19 | 20 | # Context is empty outside the branch 21 | ``` 22 | One of the most powerful applications of context branching is *parallelization*. For example, for a given input, you might want 10 different LLM's with different system prompts to answer independently (and in parallel), then compare the results. `context_management.py` gives a simpler (synchronous) example of this. 23 | 24 | ## Memory 25 | The natural extension of context management is *memory*. It's often the case that you have a set of reusable information that is useful to give to the LLM. For example, you might want to store an explanation of some rules, a list of facts known about the user, semantic search results, a list of previous actions, etc... 26 | 27 | We implemented this with a simple dictionary of memories, that you can selectively load. This looks like this: 28 | 29 | ```python 30 | context = Context() 31 | 32 | memory_manager = MemoryManager(context) 33 | memory_manager.add_memory("date", lambda: datetime.datetime.now().strftime("%B %d, %Y")) 34 | memory_manager.add_memory("user's birthday", "January 1st") 35 | memory_manager.add_memory("gift recommendations", "A, B, C, D...") 36 | 37 | with context.branch(): 38 | memory_manager.load_memories("date", "user's birthday", "gift recommendations") 39 | plan = plan_birthday() 40 | ``` 41 | 42 | See `memory.py` for the implementation. 
-------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | # Globe Agent Frameworks 2 | 3 | This repo is a collection of useful LLM prompting/programming techniques that we've empirically found to be very useful. 4 | All the examples use the OpenAI ChatGPT API using a [custom wrapper](https://github.com/Globe-Knowledge-Solutions/chatgpt-wrapper) that you can install with: 5 | 6 | ```bash 7 | pip install chatgpt-wrapper 8 | ``` 9 | 10 | --- 11 | ***RANT:*** 12 | 13 | We made this because open source LLM agent programming frameworks are becoming absurdly complicated and over-abstracted. 14 | Libraries seem to be designed with virality as the primary objective rather than functionality, so they are extremely feature dense and bloated. 15 | They are so abstracted away from the actual prompts being sent to LLMs that you can't possibly optimize for your usecase. 16 | 17 | --- 18 | 19 | This repo is NOT a library in the sense that you can install it and use it via an API. Instead, we distill the most useful ideas we've found into minimal reproducible code examples that showcase useful concepts in the space of agent programming. 20 | 21 | The goal is to encourage **forking** and **copy-pasting** of both code and ideas. 22 | 23 | If you find this code useful or interesting, tag @ivan_yevenko and/or @sincethestudy on twitter and share your agent code. You should also join our [discord](https://discord.gg/79WH83sS3M) to share what you're working on. We run AI agent hackathons like [this one](https://colab.research.google.com/drive/1qxemv5_hCLxNu5NJUG4NuwE5_2Kh365Z?usp=sharing&authuser=1#scrollTo=wpiW9JIhoXpL) and discord is the best way to find out about them. 24 | 25 | ## Reasoners 26 | In the `reasoners/` folder, we introduce the concepts of internal monologue, objective-oriented programming and structured outputs. 
It lets us write code that looks like this: 27 | 28 | ```python 29 | reasoner.set_objective("do some task") 30 | while not reasoner.objective_complete: 31 | thought = reasoner.internal_monologue("I need to come up with a plan") 32 | plan = reasoner.extract_info("Here is my plan written in list form:\n{plan}", List[str]) 33 | for step in plan: 34 | execute_step(step) 35 | reasoner.evaluate_objective() 36 | ``` 37 | 38 | ## Context Management 39 | The `context_management/` folder shows some examples of more advanced context management for LLMs. 40 | 41 | ```python 42 | context = Context() 43 | 44 | memory_manager = MemoryManager(context) 45 | memory_manager.add_memory("date", lambda: datetime.datetime.now().strftime("%B %d, %Y")) 46 | memory_manager.add_memory("user's birthday", "January 1st") 47 | memory_manager.add_memory("gift recommendations", "A, B, C, D...") 48 | 49 | with context.branch(): 50 | memory_manager.load_memories("date", "user's birthday", "gift recommendations") 51 | plan = plan_birthday() 52 | ``` 53 | 54 | ## Code Generation 55 | Code generation is the new function calling. Check out the `code_gen/` folder for an example of function generation. We'll be adding to this folder over time. 56 | 57 | --- 58 | made by Ivan Yevenko 59 | -------------------------------------------------------------------------------- /code_gen/code_gen.py: -------------------------------------------------------------------------------- 1 | import re 2 | import traceback 3 | 4 | import chatgpt 5 | 6 | 7 | 8 | def extract_markdown_code_blocks(s): 9 | return re.findall(r'```(?:python)?\n(.*?)\n```', s, re.DOTALL) 10 | 11 | 12 | def generate_function(function_description, func_name, max_retries=3, debug=False): 13 | messages =[ 14 | {'role': 'system', 'content': "Start by stating your assumptions and explaining your approach. Only write the `rev` function, no other code allowed. Include a single code block. 
Do not call `rev`."}, 15 | {'role': 'user', 'content': f"Write a python function called `{func_name}`. Here's the description of the function:\n\n" + function_description} 16 | ] 17 | 18 | retries = 0 19 | while retries < max_retries: 20 | response = chatgpt.complete(messages=messages, model='gpt-4', use_cache=True) 21 | messages.append({'role': 'assistant', 'content': response}) 22 | 23 | code = extract_markdown_code_blocks(response) 24 | if not code: 25 | messages.append({'role': 'user', 'content': "I couldn't find any executable code in your response, can you make sure to include a code block?"}) 26 | retries += 1 27 | continue 28 | code = '\n\n'.join(code) 29 | if debug: 30 | print('#'*120) 31 | print(code) 32 | print('#'*120) 33 | 34 | namespace = {} 35 | try: 36 | exec(code, namespace) 37 | except Exception as e: 38 | error_message = traceback.format_exc() 39 | messages.append({'role': 'user', 'content': f"Error: {e}\n\nI got an error running your code. Here is the full error message:\n{error_message}\nCan you rewrite the entire code you wrote and try again?"}) 40 | retries += 1 41 | continue 42 | break 43 | 44 | if retries >= max_retries: 45 | raise Exception("Failed to generate valid code after 3 retries") 46 | return namespace[func_name] 47 | 48 | 49 | if __name__ == '__main__': 50 | ### Linked List Example Implementation ### 51 | class Node: 52 | def __init__(self, data=None): 53 | self.data = data 54 | self.next = None 55 | self.prev = None 56 | 57 | def __str__(self): 58 | return self.data 59 | 60 | node1 = Node('1') 61 | node2 = Node('2') 62 | node3 = Node('3') 63 | 64 | node1.next = node2 65 | node2.prev = node1 66 | node2.next = node3 67 | node3.prev = node2 68 | 69 | def print_list(node): 70 | while node is not None: 71 | print(node, end=" ") 72 | node = node.next 73 | print() 74 | ### Linked List Example Implementation ### 75 | 76 | print("Original List: ") 77 | print_list(node1) 78 | 79 | func = generate_function("This function reverses a linked 
list. The list will consist of nodes which have `next` and `prev` attributes. You will be given the head of the list. ", 'rev', debug=True) 80 | reversed_list = func(node1) 81 | 82 | print("Reversed List: ") 83 | print_list(reversed_list) 84 | -------------------------------------------------------------------------------- /reasoners/internal_monologue.py: -------------------------------------------------------------------------------- 1 | import chatgpt 2 | 3 | 4 | class Reasoner: 5 | def __init__(self, system_prompt=None, model='gpt-4'): 6 | self.model = model 7 | self.messages = [] 8 | if system_prompt: 9 | self.messages.append({'role': 'system', 'content': system_prompt}) 10 | self._is_internal = False 11 | 12 | def add_message(self, role, message, name=None): 13 | msg = {'role': role, 'content': message} 14 | if name: 15 | msg['name'] = name 16 | self.messages.append(msg) 17 | 18 | def external_dialogue(self, thought): 19 | # thought should describe how to respond, e.g. "I should respond to the user with the joke I came up with." 20 | self.add_message('assistant', '[Internal Monologue]: ' + thought) 21 | if self._is_internal: 22 | self._is_internal = False 23 | self.add_message('assistant', '[Internal Monologue]: I am now entering the external dialogue state. Everything I say there will be seen.') 24 | self.add_message('function', '[Exited Internal Monologue]', 'exit_monologue') 25 | response = chatgpt.complete(messages=self.messages, model=self.model) 26 | self.add_message('assistant', response) 27 | return response 28 | 29 | def internal_monologue(self, thought): 30 | if not self._is_internal: 31 | self._is_internal = True 32 | self.add_message('function', '[Entered Internal Monologue]', 'enter_monologue') 33 | self.add_message('assistant', "[Internal Monologue]: I am now in the internal monologue state. 
I won't be able to respond here, so I'll use this space to think, reflect, and plan.") 34 | self.add_message('assistant', '[Internal Monologue]: ' + thought) 35 | response = chatgpt.complete(messages=self.messages, model=self.model) 36 | response = response.replace('[Internal Monologue]: ', '') 37 | self.add_message('assistant', '[Internal Monologue]: ' + response) 38 | return response 39 | 40 | from colorama import Fore, Style 41 | def printc(*args, color='reset', **kwargs): 42 | color_code = getattr(Fore, color.upper(), Fore.RESET) 43 | text = ' '.join(str(arg) for arg in args) 44 | print(color_code + text + Style.RESET_ALL, **kwargs) 45 | 46 | 47 | if __name__ == '__main__': 48 | system_prompt = ( 49 | "You use your internal monologue to reason before responding to the user. " 50 | "You try to maximize how funny your response is." 51 | ) 52 | reasoner = Reasoner(system_prompt=system_prompt, model='gpt-4') 53 | 54 | while True: 55 | message = input("\nUser: ") 56 | if message == "quit": 57 | break 58 | 59 | reasoner.add_message('user', message) 60 | 61 | thought = reasoner.internal_monologue("I should brainstorm some funny ways to respond.") 62 | printc('\n' + thought, color='blue') 63 | 64 | thought = reasoner.internal_monologue("I need to choose the funniest response. 
I can only choose one.") 65 | printc('\n' + thought, color='blue') 66 | 67 | response = reasoner.external_dialogue(f"I'll respond to the user using the response I chose.") 68 | print('\n' + response) 69 | -------------------------------------------------------------------------------- /reasoners/objective_oriented.py: -------------------------------------------------------------------------------- 1 | import chatgpt 2 | 3 | 4 | class Reasoner: 5 | def __init__(self, system_prompt=None, model='gpt-4'): 6 | self.model = model 7 | self.messages = [] 8 | if system_prompt: 9 | self.messages.append({'role': 'system', 'content': system_prompt}) 10 | self._is_internal = False 11 | 12 | def add_message(self, role, message, name=None): 13 | msg = {'role': role, 'content': message} 14 | if name: 15 | msg['name'] = name 16 | self.messages.append(msg) 17 | 18 | def external_dialogue(self, thought): 19 | # thought should describe how to respond, e.g. "I should respond to the user with the joke I came up with." 20 | self.add_message('assistant', '[Internal Monologue]: ' + thought) 21 | if self._is_internal: 22 | self._is_internal = False 23 | self.add_message('assistant', '[Internal Monologue]: I am now entering the external dialogue state. Everything I say there will be seen.') 24 | self.add_message('function', '[Exited Internal Monologue]', 'exit_monologue') 25 | response = chatgpt.complete(messages=self.messages, model=self.model) 26 | self.add_message('assistant', response) 27 | return response 28 | 29 | def internal_monologue(self, thought): 30 | if not self._is_internal: 31 | self._is_internal = True 32 | self.add_message('function', '[Entered Internal Monologue]', 'enter_monologue') 33 | self.add_message('assistant', "[Internal Monologue]: I am now in the internal monologue state. 
I won't be able to respond here, so I'll use this space to think, reflect, and plan.") 34 | self.add_message('assistant', '[Internal Monologue]: ' + thought) 35 | response = chatgpt.complete(messages=self.messages, model=self.model) 36 | response = response.replace('[Internal Monologue]: ', '') 37 | self.add_message('assistant', '[Internal Monologue]: ' + response) 38 | return response 39 | 40 | 41 | class ObjectiveReasoner(Reasoner): 42 | def __init__(self, objective=None, system_prompt=None, model='gpt-4'): 43 | super().__init__(system_prompt=system_prompt, model=model) 44 | if objective is not None: 45 | self.set_objective(objective) 46 | self.objective_complete = False 47 | 48 | def set_objective(self, objective): 49 | self.objective = objective 50 | objective_prompt = f'Your current objective is to: {objective}' 51 | if self.messages and self.messages[0]['role'] == 'system': 52 | self.messages[0]['content'] = objective_prompt + self.messages[0]['content'] 53 | else: 54 | self.messages.insert(0, {'role': 'system', 'content': objective_prompt}) 55 | 56 | def evaluate_objective(self): 57 | assert self.objective is not None, "Can't evaluate objective, no objective set. Use set_objective() to set an objective before calling evaluate_objective()." 58 | json_schema = { 59 | "name": "set_objective_status", 60 | "description": "Sets the status of the objective by setting the objective_complete flag to True or False.", 61 | "parameters": { 62 | "type": "object", 63 | "properties": { 64 | "objective_complete": { 65 | "description": "The status of the objective. 
True for complete, False for incomplete.", 66 | "type": "boolean", 67 | } 68 | }, 69 | "required": ["objective_complete"] 70 | } 71 | } 72 | response = chatgpt.complete(messages=self.messages, model=self.model, functions=[json_schema], function_call={'name': 'set_objective_status'}) 73 | if response['role'] != 'function': 74 | raise Exception(f"Expected a function call, but got: {response['content']}") 75 | self.objective_complete = response['args']['objective_complete'] 76 | self.add_message(response['role'], f'Set flag: OBJECTIVE_COMPLETE={str(self.objective_complete).upper()}', name=response['name']) 77 | 78 | 79 | from colorama import Fore, Style 80 | def printc(*args, color='reset', **kwargs): 81 | color_code = getattr(Fore, color.upper(), Fore.RESET) 82 | text = ' '.join(str(arg) for arg in args) 83 | print(color_code + text + Style.RESET_ALL, **kwargs) 84 | 85 | 86 | if __name__ == '__main__': 87 | REFLECT = True 88 | system_prompt = ( 89 | "You use your internal monologue to reason before responding to the user. " 90 | "You try to maximize how funny your response is." 91 | ) 92 | objective = "Make the user laugh. The objective is complete when the user expreses laughter using 'haha' or 'lol', or similar." 93 | reasoner = ObjectiveReasoner(objective=objective, system_prompt=system_prompt, model='gpt-4') 94 | 95 | while True: 96 | message = input("\nUser: ") 97 | if message == "quit": 98 | break 99 | 100 | reasoner.add_message('user', message) 101 | 102 | if REFLECT: 103 | thought = reasoner.internal_monologue(f"My current objective is to: {objective}. 
I should reflect on my objective and evaluate my progress.") 104 | 105 | reasoner.evaluate_objective() 106 | if reasoner.objective_complete: 107 | printc('\nObjective complete!', color='green') 108 | break 109 | printc('\nObjective NOT complete.', color='red') 110 | 111 | thought = reasoner.internal_monologue("I should brainstorm some funny ways to respond.") 112 | printc('\n' + thought, color='blue') 113 | 114 | thought = reasoner.internal_monologue("I need to choose the funniest response. I can only choose one.") 115 | printc('\n' + thought, color='blue') 116 | 117 | response = reasoner.external_dialogue(f"I'll respond to the user using the response I chose.") 118 | print('\n' + response) 119 | -------------------------------------------------------------------------------- /reasoners/structured.py: -------------------------------------------------------------------------------- 1 | import chatgpt 2 | 3 | 4 | class Reasoner: 5 | def __init__(self, system_prompt=None, model='gpt-4'): 6 | self.model = model 7 | self.messages = [] 8 | if system_prompt: 9 | self.messages.append({'role': 'system', 'content': system_prompt}) 10 | self._is_internal = False 11 | 12 | def add_message(self, role, message, name=None): 13 | msg = {'role': role, 'content': message} 14 | if name: 15 | msg['name'] = name 16 | self.messages.append(msg) 17 | 18 | def external_dialogue(self, thought): 19 | # thought should describe how to respond, e.g. "I should respond to the user with the joke I came up with." 20 | self.add_message('assistant', '[Internal Monologue]: ' + thought) 21 | if self._is_internal: 22 | self._is_internal = False 23 | self.add_message('assistant', '[Internal Monologue]: I am now entering the external dialogue state. 
Everything I say there will be seen.') 24 | self.add_message('function', '[Exited Internal Monologue]', 'exit_monologue') 25 | response = chatgpt.complete(messages=self.messages, model=self.model) 26 | self.add_message('assistant', response) 27 | return response 28 | 29 | def internal_monologue(self, thought): 30 | if not self._is_internal: 31 | self._is_internal = True 32 | self.add_message('function', '[Entered Internal Monologue]', 'enter_monologue') 33 | self.add_message('assistant', "[Internal Monologue]: I am now in the internal monologue state. I won't be able to respond here, so I'll use this space to think, reflect, and plan.") 34 | self.add_message('assistant', '[Internal Monologue]: ' + thought) 35 | response = chatgpt.complete(messages=self.messages, model=self.model) 36 | response = response.replace('[Internal Monologue]: ', '') 37 | self.add_message('assistant', '[Internal Monologue]: ' + response) 38 | return response 39 | 40 | 41 | class StructuredReasoner(Reasoner): 42 | def __init__(self, system_prompt=None, model='gpt-4'): 43 | super().__init__(system_prompt=system_prompt, model=model) 44 | 45 | def parse_response_options(self): 46 | json_schema = { 47 | "name": "store_response_options", 48 | "description": "Stores a list of possible response options in memory to choose from later. E.g. ['attempt to explain mathematically', 'explain using an analogy', 'list resources to learn more']", 49 | "parameters": { 50 | "type": "object", 51 | "properties": { 52 | "responses": { 53 | "description": "The list of possible response options. 
Each element should be a short summary, not a full response.", 54 | "type": "array", 55 | "items": { 56 | "type": "string" 57 | } 58 | } 59 | }, 60 | "required": ["responses"] 61 | } 62 | } 63 | response = chatgpt.complete(messages=self.messages, model=self.model, functions=[json_schema], function_call={'name': 'store_response_options'}) 64 | if response['role'] != 'function': 65 | raise Exception(f"Expected a function call, but got: {response['content']}") 66 | repsonse_options = response['args']['responses'] 67 | self.add_message(response['role'], 'Stored response options:' + '\n'.join(repsonse_options), name=response['name']) 68 | return repsonse_options 69 | 70 | 71 | def choose(self, options): 72 | self.add_message('assistant', 73 | '[Internal Monologue]: I need to record my choice as one of the following, ' 74 | 'by calling the choose() function with the corresponding choice number:\n' + 75 | "\n".join([f"{i+1}. {option}" for i, option in enumerate(options)]) 76 | ) 77 | json_schema = { 78 | "name": "choose", 79 | "description": "Chooses one of the options.", 80 | "parameters": { 81 | "type": "object", 82 | "properties": { 83 | "choice_index": { 84 | "description": f"The index of the option you chose. 
An integer from 1 to {len(options)}", 85 | "type": "integer", 86 | } 87 | }, 88 | "required": ["options"] 89 | } 90 | } 91 | response = chatgpt.complete(messages=self.messages, model=self.model, functions=[json_schema], function_call={'name': 'choose'}) 92 | if response['role'] != 'function': 93 | raise Exception(f"Expected a function call, but got: {response['content']}") 94 | self.messages.pop() # remove the message that prompted the user to choose 95 | choice = response['args']['choice_index'] - 1 96 | self.add_message(response['role'], f'Chose option: {options}', name=response['name']) 97 | return choice 98 | 99 | 100 | from colorama import Fore, Style 101 | def printc(*args, color='reset', **kwargs): 102 | color_code = getattr(Fore, color.upper(), Fore.RESET) 103 | text = ' '.join(str(arg) for arg in args) 104 | print(color_code + text + Style.RESET_ALL, **kwargs) 105 | 106 | 107 | if __name__ == '__main__': 108 | THINK_FIRST = True 109 | system_prompt = ( 110 | "You use your internal monologue to reason before responding to the user. " 111 | "You try to maximize how funny your response is." 112 | ) 113 | reasoner = StructuredReasoner(system_prompt=system_prompt, model='gpt-4') 114 | 115 | while True: 116 | message = input("\nUser: ") 117 | if message == "quit": 118 | break 119 | 120 | reasoner.add_message('user', message) 121 | 122 | if THINK_FIRST: 123 | thought = reasoner.internal_monologue("I should brainstorm some funny ways to respond.") 124 | printc('\n' + thought, color='blue') 125 | else: 126 | reasoner.add_message('assistant', '[Internal Monologue]: I should brainstorm a list of funny ways to respond.') 127 | options = reasoner.parse_response_options() 128 | printc('\nOptions:\n- ' + '\n- '.join(options), color='yellow') 129 | 130 | if THINK_FIRST: 131 | thought = reasoner.internal_monologue("I need to choose the funniest response, I can only choose one. 
My options are:\n" + '\n'.join(options)) 132 | printc('\n' + thought, color='blue') 133 | else: 134 | reasoner.add_message('assistant', '[Internal Monologue]: I need to choose the funniest response') 135 | choice = reasoner.choose(options) 136 | printc('\nChose response: ' + options[choice], color='yellow') 137 | 138 | response = reasoner.external_dialogue(f"I'll respond to the user using the response I chose.") 139 | print('\n' + response) 140 | -------------------------------------------------------------------------------- /reasoners/structured2.py: -------------------------------------------------------------------------------- 1 | from string import Formatter 2 | from typing import Union, Type 3 | from pydantic import BaseModel 4 | from pydantic.main import create_model 5 | 6 | import chatgpt 7 | 8 | class Reasoner: 9 | def __init__(self, system_prompt=None, model='gpt-4'): 10 | self.model = model 11 | self.messages = [] 12 | if system_prompt: 13 | self.messages.append({'role': 'system', 'content': system_prompt}) 14 | self._is_internal = False 15 | 16 | def add_message(self, role, message, name=None): 17 | msg = {'role': role, 'content': message} 18 | if name: 19 | msg['name'] = name 20 | self.messages.append(msg) 21 | 22 | def external_dialogue(self, thought): 23 | # thought should describe how to respond, e.g. "I should respond to the user with the joke I came up with." 24 | self.add_message('assistant', '[Internal Monologue]: ' + thought) 25 | if self._is_internal: 26 | self._is_internal = False 27 | self.add_message('assistant', '[Internal Monologue]: I am now entering the external dialogue state. 
Everything I say there will be seen.') 28 | self.add_message('function', '[Exited Internal Monologue]', 'exit_monologue') 29 | response = chatgpt.complete(messages=self.messages, model=self.model) 30 | self.add_message('assistant', response) 31 | return response 32 | 33 | def internal_monologue(self, thought): 34 | if not self._is_internal: 35 | self._is_internal = True 36 | self.add_message('function', '[Entered Internal Monologue]', 'enter_monologue') 37 | self.add_message('assistant', "[Internal Monologue]: I am now in the internal monologue state. I won't be able to respond here, so I'll use this space to think, reflect, and plan.") 38 | self.add_message('assistant', '[Internal Monologue]: ' + thought) 39 | response = chatgpt.complete(messages=self.messages, model=self.model, use_cache=True) 40 | response = response.replace('[Internal Monologue]: ', '') 41 | self.add_message('assistant', '[Internal Monologue]: ' + response) 42 | return response 43 | 44 | 45 | class StructuredReasoner(Reasoner): 46 | def __init__(self, system_prompt=None, model='gpt-4'): 47 | super().__init__(system_prompt, model) 48 | 49 | def extract_info(self, info_format, output_type: Union[BaseModel, Type]): 50 | """ 51 | Extracts a piece of information in a specific format. 52 | This is done by using the function calling API to create a remember_{field_name} function and executing it. 53 | 54 | This function is useful when you want to extract the outcome of an internal monologue in a specific format. 55 | It doesn't work so well for reasoning, so stick to the paradigm of internal monologue -> extract_info. 56 | The format string is a python format string that determines the format of the stored information. 57 | 58 | Parameters: 59 | info_format (str): 60 | The format string that determines the format of the stored information. 61 | output_type (Union[BaseModel, Type]): 62 | The type of the field to be extracted. 
63 | If a pydantic BaseModel is provided, the field is extracted as a pydantic model. 64 | If a python Type is provided, the field is extracted as an instance of that type. 65 | 66 | Returns: 67 | The value of the field remembered by the reasoner 68 | 69 | Examples: 70 | -------- 71 | Extracting an integer: 72 | >>> reasoner.add_message('user', "My name's Bill, I'm a 42 y.o. male from New York.") 73 | >>> reasoner.extract_info("The user is {age} years old.", int) 74 | 25 75 | 76 | Extracting an enum: 77 | >>> from enum import Enum 78 | >>> reasoner.add_message("assistant", "I have logically deduced that I am happy.") 79 | >>> reasoner.extract_info("I am {state}", Enum('MentalState', 'HAPPY SAD')) 80 | "HAPPY" 81 | 82 | Extracting a pydantic model: 83 | >>> from pydantic import BaseModel 84 | >>> class Person(BaseModel): 85 | ... name: str 86 | ... twitter_handle: str 87 | ... is_based: bool = False 88 | >>> reasoner.add_message("user", "Add Ivan Yevenko (@ivan_yevenko) to the database, he's pretty based.") 89 | >>> reasoner.extract_info("Added {person} to the database.", Person) 90 | Person(name='Ivan Yevenko', twitter_handle='@ivan_yevenko', is_based=True) 91 | """ 92 | formatter = Formatter() 93 | parsed = [x for x in formatter.parse(info_format) if x[1] is not None] 94 | assert len(parsed) == 1, "Only one format field is allowed." 
95 | 96 | _, field_name, _, _ = parsed[0] 97 | 98 | use_pydantic = type(output_type) is type and issubclass(output_type, BaseModel) 99 | if use_pydantic: 100 | params = output_type.model_json_schema() 101 | else: 102 | SingleFieldModel = create_model("SingleFieldModel", **{field_name: (output_type, ...)}) 103 | params = SingleFieldModel.model_json_schema() 104 | 105 | func_name = "remember_" + field_name 106 | json_schema = { 107 | "name": func_name, 108 | "description": f"This function stores a piece of information in the format: '{info_format}'.", 109 | "parameters": params 110 | } 111 | 112 | response = chatgpt.complete(messages=self.messages, model=self.model, functions=[json_schema], function_call={'name': func_name}, use_cache=True) 113 | if response['role'] != 'function': 114 | raise Exception(f"Expected a function call, but got: {response['content']}") 115 | 116 | value = response['args'] 117 | if use_pydantic: 118 | value = output_type.model_construct(value) 119 | else: 120 | try: 121 | value = value[field_name] 122 | except KeyError: 123 | # Generated JSON schema is sometimes incorrect, so we try to extract the field anyway 124 | value = value.popitem()[1] 125 | 126 | info = info_format.format(**{field_name: value}) 127 | self.add_message('function', f'Stored information: "{info}"', name=response['name']) 128 | return value 129 | 130 | 131 | from colorama import Fore, Style 132 | def printc(*args, color='reset', **kwargs): 133 | color_code = getattr(Fore, color.upper(), Fore.RESET) 134 | text = ' '.join(str(arg) for arg in args) 135 | print(color_code + text + Style.RESET_ALL, **kwargs) 136 | 137 | 138 | if __name__ == '__main__': 139 | from typing import List 140 | 141 | THINK_FIRST = False 142 | system_prompt = ( 143 | "You use your internal monologue to reason before responding to the user. " 144 | "You try to maximize how funny your response is." 
if __name__ == '__main__':
    from typing import List

    THINK_FIRST = False
    system_prompt = (
        "You use your internal monologue to reason before responding to the user. "
        "You try to maximize how funny your response is."
    )
    reasoner = StructuredReasoner(system_prompt=system_prompt, model='gpt-4')

    # Simple REPL: brainstorm options, pick one, then answer the user with it.
    while True:
        user_message = input("\nUser: ")
        if user_message == "quit":
            break

        reasoner.add_message('user', user_message)

        if THINK_FIRST:
            thought = reasoner.internal_monologue("I should brainstorm a list of funny ways to respond.")
            printc('\n' + thought, color='blue')
        else:
            reasoner.add_message('assistant', '[Internal Monologue]: I should brainstorm a list of funny ways to respond.')
        options = reasoner.extract_info("I came up with the following options:\n{options}", List[str])
        printc('\nOptions:\n- ' + '\n- '.join(options), color='yellow')

        if THINK_FIRST:
            thought = reasoner.internal_monologue("I need to choose the funniest response, I can only choose one. My options are:\n" + '\n'.join(options))
            printc('\n' + thought, color='blue')
        else:
            numbered = "\n".join(f"{i+1}. {option}" for i, option in enumerate(options))
            reasoner.add_message('assistant', '[Internal Monologue]: I need to choose the funniest response. My options are:\n' + numbered)
        choice = reasoner.extract_info("I chose Option {choice_index}.", int)
        # choice is 1-based (the model picks "Option N"), hence the -1.
        printc('\nChose response: ' + options[choice-1], color='yellow')

        response = reasoner.external_dialogue("I'll respond to the user using the response I chose.")
        print('\n' + response)