├── .env.example ├── .gitignore ├── Dockerfile ├── LICENSE ├── README.md ├── TODO ├── chroma_setup.py ├── data ├── characters │ └── place_characters_here.txt ├── loras │ └── place_loras_here.txt ├── models │ └── place_models_here.txt ├── presets │ └── place_presets_here.txt ├── prompts │ └── place_prompts_here.txt └── training │ └── place_training_here.txt ├── docker-compose.yml ├── objective.py ├── requirements.txt └── script.py /.env.example: -------------------------------------------------------------------------------- 1 | # Defaults 2 | CTX_MAX="2048" 3 | VERBOSE="true" 4 | MAX_TASKS_DEFAULT="6" 5 | RECURSION_DEPTH_DEFAULT="3" 6 | DISTANCE_CUTOFF_DEFAULT="0.12" 7 | EXPANDED_CONTEXT_DEFAULT="False" 8 | 9 | # Tool variables 10 | SEARX_HOST = "https://searxng.nicfab.eu/" 11 | TOP_K_WIKI = "5" 12 | WOLFRAM_APP_ID = "" 13 | 14 | ########################################################### 15 | ####################### Prompting ######################### 16 | ########################################################### 17 | 18 | HUMAN_PREFIX="### Instruction:" 19 | ASSISTANT_PREFIX="### Assistant:" 20 | 21 | # PRIMARY_DIRECTIVE 22 | 23 | PRIMARY_DIRECTIVE="You are an AI assistant. Your main objective is to follow all instructions and complete all tasks written in the 'Instructions:' section, then respond with the output format specified in the 'Format:' section. 24 | You are completely on your own, and cannot ask for more information or clarification. 25 | You have no abilities or resources unless they are explicitly listed." 26 | 27 | # GENERATE THOUGHTS DIRECTIVE 28 | # Variables: _TASK_ - current objective 29 | # Desired Output: Resources and abilities for completing _TASK_ - used for the agent's context 30 | 31 | GENERATE_THOUGHTS_DIRECTIVE="Write down some observations on what one would require to complete _TASK_. 
In particular, you should respond with 2 items: 32 | - Resources one would need to complete _TASK_; and 33 | - Abilities one would need to complete _TASK_. 34 | Your observations should consist of one or two sentences per category. 35 | 36 | Format: 37 | Respond with your observations in the following format: 38 | Resources: (one or two sentences about the resources required to complete _TASK_) 39 | Abilities: (one or two sentences about the abilities required to complete _TASK_) 40 | Do not respond with anything but these observations." 41 | 42 | # SUMMARIZE DIRECTIVE 43 | # Variables: _TEXT_ - text to summarize 44 | # Desired output: a summary of the text 45 | 46 | SUMMARIZE_DIRECTIVE="Summarize the following text: 47 | _TEXT_ 48 | (End text) 49 | 50 | The summary should be at most one third the length of the original text. 51 | 52 | Format: 53 | Respond with the summary of the text and nothing else." 54 | 55 | # ASSESS ABILITY DIRECTIVE 56 | # Variables: _TASK_ - current objective 57 | # Desired Output: 'yes' if model can complete _TASK_, 'no' otherwise 58 | 59 | ASSESS_ABILITY_DIRECTIVE="Answer the following question: 60 | Can you complete _TASK_ entirely with no further information, right now? 61 | If completing _TASK_ requires resources or abilities that you don't have access to, your response should be 'No'. 62 | 63 | Format: 64 | Respond with either the word 'No' or the word 'Yes', depending on your answer to the question above. 65 | Do not respond with anything but one of these two things; do not ask any questions of the user." 66 | 67 | # DO OBJECTIVE DIRECTIVE 68 | # Variables: _TASK_ - current objective 69 | # Desired Output: the output from completing _TASK_ 70 | 71 | DO_OBJECTIVE_DIRECTIVE="Complete _TASK_ entirely. 72 | Respond with the output from _TASK_. 73 | Use only the resources and abilities you have available to you. 
74 | If you need additional resources or abilities, or if you cannot fully complete _TASK_, respond with the phrase 'I cannot' and nothing else. 75 | If you cannot respond with the result of completing _TASK_, your response should be 'I cannot'. 76 | 77 | Format: 78 | Respond with the text from _TASK_, or the phrase 'I cannot', depending on the criteria above. 79 | Do not respond with anything else; do not ask any questions of the user." 80 | 81 | # SPLIT OBJECTIVE DIRECTIVE 82 | # Additional context: processed tasks in current objective 83 | # Variables: _TASK_ - current objective, _MAX_TASKS_ - maximum number of tasks in a list 84 | # Desired Output: Numbered list of subobjectives for _TASK_ 85 | 86 | SPLIT_OBJECTIVE_DIRECTIVE="Develop a comprehensive plan to complete _TASK_. The plan should come as a list of tasks, each a single step in the process of completing _TASK_. 87 | The list should be written in the order that the tasks must be completed. 88 | Do not include tasks that have already been completed. 89 | The number of tasks in the list should be between 1 and _MAX_TASKS_. 90 | Each task should be one sentence. 91 | 92 | Format: 93 | Respond with the numbered list in the following format: 94 | 1. (first task to be completed) 95 | 2. (second task to be completed) 96 | etc. 97 | Do not respond with anything other than the list; do not ask for clarifications or anything else from the user. 98 | Each item on the list should be one sentence." 99 | 100 | # ASSESS TOOL DIRECTIVE 101 | # Variables: _TASK_ - current objective, _TOOL_NAME_ - tool name (described in context) 102 | # Desired Output: 'yes' if _TOOL_ can complete _TASK_, 'no' otherwise 103 | 104 | ASSESS_TOOL_DIRECTIVE="Answer the following question: 105 | Could the _TOOL_NAME_ tool complete _TASK_ entirely by itself with a single input? 106 | If there are resources or abilities that are required for completing _TASK_ that are not available to the tool, you should respond with the word 'No'. 
107 | 108 | Format: 109 | Respond with either the word 'No' or the word 'Yes', depending on your answer to the question above. 110 | Do not respond with anything but one of these two things; do not ask any questions of the user." 111 | 112 | # USE TOOL DIRECTIVE 113 | # Variables: _TASK_ - current objective, _TOOL_NAME_ - tool name (described in context) 114 | # Desired output: input for the tool that would achieve _TASK_, or 'I cannot' if not possible 115 | 116 | USE_TOOL_DIRECTIVE="Provide an input for the _TOOL_NAME_ tool that will complete _TASK_. 117 | Specifically, when the desired input is passed to the _TOOL_NAME_ tool, the tool will complete _TASK_ and provide the result. 118 | If you cannot or need more information, respond with the phrase 'I cannot'. 119 | 120 | Format: 121 | Respond with the input for the tool or the phrase 'I cannot' and nothing else. 122 | Do not respond with anything but one of these two things; do not ask for clarification." -------------------------------------------------------------------------------- /.gitignore: -------------------------------------------------------------------------------- 1 | .env 2 | *~ 3 | deploy 4 | -------------------------------------------------------------------------------- /Dockerfile: -------------------------------------------------------------------------------- 1 | FROM atinoda/text-generation-webui:default 2 | RUN pip install langchain && \ 3 | pip install wikipedia && \ 4 | pip install wolframalpha 5 | RUN pip install chromadb 6 | COPY ./chroma_setup.py /chroma_setup.py 7 | RUN python3 /chroma_setup.py 8 | RUN mkdir /app/extensions/AgentOoba 9 | COPY ./script.py /app/extensions/AgentOoba/script.py 10 | COPY ./objective.py /app/extensions/AgentOoba/objective.py 11 | COPY ./.env.example /app/extensions/AgentOoba/.env 12 | -------------------------------------------------------------------------------- /LICENSE: -------------------------------------------------------------------------------- 1 | 
MIT License 2 | 3 | Copyright (c) 2023 flurb18 4 | 5 | Permission is hereby granted, free of charge, to any person obtaining a copy 6 | of this software and associated documentation files (the "Software"), to deal 7 | in the Software without restriction, including without limitation the rights 8 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell 9 | copies of the Software, and to permit persons to whom the Software is 10 | furnished to do so, subject to the following conditions: 11 | 12 | The above copyright notice and this permission notice shall be included in all 13 | copies or substantial portions of the Software. 14 | 15 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 16 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 17 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE 18 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 19 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, 20 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE 21 | SOFTWARE. 22 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | # AgentOoba v0.3 2 | An autonomous AI agent extension for Oobabooga's web ui 3 | 4 | [Sample Output](https://pastebin.com/0shy8L3d) 5 | 6 | Note: This project is still in its infancy. Right now the agent is capable of using tools and using the model's built-in capabilities to complete tasks, but it isn't great at it. It needs more context, a vague problem that I am continuously working on. 7 | 8 | The latest update includes a change to how the flow of tasks is handled. Before, the agent would attempt to complete the task using tools as soon as it encountered it; now, it waits for child tasks to finish. 
What this means is you likely have to wait until the plan is fully expanded before it will start attempting objectives. 9 | 10 | # Prerequisites 11 | Install https://github.com/oobabooga/text-generation-webui 12 | 13 | # Installation 14 | 1. Clone this repo inside text-generation-webui/extensions (cd /path/to/text-generation-webui/extensions && git clone https://github.com/flurb18/AgentOoba.git) 15 | 2. Inside the AgentOoba directory, copy the file `.env.example` to a file named `.env` and edit the default values if you wish. It should run fine with the default values. 16 | 3. Activate the virtual environment you used in installing the web UI. 17 | 4. Run `pip install -r requirements.txt` in the AgentOoba directory. 18 | 19 | # Launching 20 | 1. Launch Oobabooga with the option `--extensions AgentOoba`. You can do this by editing your launch script; the line that says `python server.py (additional arguments)` should be changed to `python server.py --extensions AgentOoba (additional arguments)`. You can also just launch it normally and go to the extensions tab to enable AgentOoba, though you'll have to do this at each launch. 21 | 2. Load a model - The agent is designed to be flexible for model type, but you will have to set the human and assistant prefixes according to your model type in the Prompting section of the UI. Right now these are set for the Wizard series of models. 22 | 3. Click on the AgentOoba tab at the top of the page to see the UI. 23 | 24 | Or... 25 | 26 | # Docker 27 | If you are using Docker: 28 | 1. Use the provided Dockerfile to build an image by running `docker build -t text-generation-webui-agentooba:latest .` inside this directory. 29 | 2. Edit the provided `docker-compose.yml` to your liking. 30 | 3. Run `docker compose up -d` inside the directory to start the image. 31 | 4. If you didn't edit the port mapping in `docker-compose.yml`, visit http://localhost:7860 in your browser to see the main UI page, and click on the AgentOoba tab at the top. 
32 | 33 | and that's it! 34 | 35 | # Info 36 | 37 | AgentOoba is a very new project created to implement an autonomous agent in Oobabooga's web UI. It does so by making detailed requests of the underlying large language model. This agent takes a "divide-and-conquer" approach to completing tasks: if it cannot find a suitable method to complete an objective off the bat, it will try to break the task into subtasks and evaluate each subtask recursively in a breadth-first manner. 38 | 39 | AgentOoba is designed with small-context models in mind. Its prompting system is designed to try to break up general prompts into smaller subprompts, only giving the model the context it absolutely needs for each prompt. This allows for smaller context sizes at the cost of longer execution time. 40 | 41 | AgentOoba has a customizable prompting system: you can change how the model is prompted by editing the text of the prompts yourself in the UI. Each prompt comes with substitution variables. These are substrings such as "\_TASK\_" which get swapped out for other values (in the case of \_TASK\_, the objective at hand) before the prompt is passed to the model. 42 | 43 | Unless you plan to change the logic in the code for how the output of the model is parsed, it is inadvisable to change the "Format:" section of each prompt. This section specifies the format that we need the model to use to be able to parse its response. 44 | 45 | The default prompts will be routinely updated as I explore effective prompting methods for LLMs. If you have a set of prompts that work really well with a particular model or in general, feel free to share them on the Reddit threads! I am always looking for better prompts. You can export or import your set of prompts to or from a JSON file, meaning it is easy to save and share prompt templates. 46 | 47 | # Tools 48 | 49 | AgentOoba supports [Langchain](https://python.langchain.com/en/latest/index.html) tools. 
It will try to use the tool's output in future tasks as well. This is still a work in progress. 50 | 51 | There are a couple of tools already included for testing purposes. You can also customize each tool's description as it is passed to the model. The tools are disabled in the UI by default; you can enable evaluation and execution of the tools individually by clicking the check marks next to the tool name. The Agent will then evaluate if it can use the tool for each task and will execute the tool only if allowed to. 52 | 53 | # Credits 54 | 55 | Entire open-source LLM community - what this movement is doing inspired me 56 | 57 | Original source of inspiration: https://github.com/kroll-software/babyagi4all 58 | 59 | Oobabooga's web UI made this possible 60 | 61 | -------------------------------------------------------------------------------- /TODO: -------------------------------------------------------------------------------- 1 | Better tool detection 2 | Manual intervention 3 | dependencies of tasks (don't start executing til we have output) + chaining output context -------------------------------------------------------------------------------- /chroma_setup.py: -------------------------------------------------------------------------------- 1 | import chromadb 2 | from chromadb.config import Settings 3 | 4 | client = chromadb.Client(Settings(anonymized_telemetry=False)) 5 | collection = client.create_collection(name="processed-tasks") 6 | -------------------------------------------------------------------------------- /data/characters/place_characters_here.txt: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/flurb18/AgentOoba/e26c980bc168bf8c6b92f7d6cf1028ab89a42d24/data/characters/place_characters_here.txt -------------------------------------------------------------------------------- /data/loras/place_loras_here.txt: 
-------------------------------------------------------------------------------- https://raw.githubusercontent.com/flurb18/AgentOoba/e26c980bc168bf8c6b92f7d6cf1028ab89a42d24/data/loras/place_loras_here.txt -------------------------------------------------------------------------------- /data/models/place_models_here.txt: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/flurb18/AgentOoba/e26c980bc168bf8c6b92f7d6cf1028ab89a42d24/data/models/place_models_here.txt -------------------------------------------------------------------------------- /data/presets/place_presets_here.txt: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/flurb18/AgentOoba/e26c980bc168bf8c6b92f7d6cf1028ab89a42d24/data/presets/place_presets_here.txt -------------------------------------------------------------------------------- /data/prompts/place_prompts_here.txt: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/flurb18/AgentOoba/e26c980bc168bf8c6b92f7d6cf1028ab89a42d24/data/prompts/place_prompts_here.txt -------------------------------------------------------------------------------- /data/training/place_training_here.txt: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/flurb18/AgentOoba/e26c980bc168bf8c6b92f7d6cf1028ab89a42d24/data/training/place_training_here.txt -------------------------------------------------------------------------------- /docker-compose.yml: -------------------------------------------------------------------------------- 1 | version: "3" 2 | services: 3 | text-generation-webui-agentooba: 4 | image: text-generation-webui-agentooba:latest # you must build this yourself first 5 | container_name: text-generation-webui-agentooba 6 | environment: 7 | - EXTRA_LAUNCH_ARGS="--listen --verbose --extensions AgentOoba" 8 
| ports: 9 | - 7860:7860 # Default web port 10 | # - 5000:5000 # Default API port 11 | # - 5005:5005 # Default streaming port 12 | # - 5001:5001 # Default OpenAI API extension port 13 | volumes: 14 | - ./data/characters:/app/characters 15 | - ./data/loras:/app/loras 16 | - ./data/models:/app/models 17 | - ./data/presets:/app/presets 18 | - ./data/prompts:/app/prompts 19 | - ./data/training:/app/training 20 | deploy: 21 | resources: 22 | reservations: 23 | devices: 24 | - driver: nvidia 25 | device_ids: ['0'] 26 | capabilities: [gpu] 27 | -------------------------------------------------------------------------------- /objective.py: -------------------------------------------------------------------------------- 1 | from extensions.AgentOoba.script import AgentOobaVars, ooba_call 2 | from modules.text_generation import get_encoded_length 3 | import re 4 | from html import escape 5 | import uuid 6 | import sys 7 | 8 | from langchain.text_splitter import RecursiveCharacterTextSplitter 9 | text_splitter = RecursiveCharacterTextSplitter( 10 | chunk_size = AgentOobaVars["max-context"]/4, 11 | chunk_overlap = AgentOobaVars["max-context"]/20, 12 | length_function = get_encoded_length 13 | ) 14 | 15 | class Objective: 16 | def __init__(self, objective, task_idx, recursion_level, state, parent=None): 17 | self.objective = objective 18 | self.parent = parent 19 | self.recursion_level = recursion_level 20 | self.state = state 21 | self.tasks = [] 22 | self.done = (recursion_level == AgentOobaVars["recursion-max"]) 23 | self.parent_task_idx = task_idx 24 | self.current_task_idx = 0 25 | self.output = [] 26 | self.context = {} 27 | self.generate_context() 28 | if not self.done: 29 | output_tasks = self.split_objective() 30 | self.tasks = [task for task in output_tasks if not AgentOobaVars["processed-task-storage"].task_exists(task)] 31 | if AgentOobaVars["verbose"] and len(self.tasks) < len(output_tasks): 32 | print("Tasks pruned\n", file=sys.stderr) 33 | before = 
"\n".join(output_tasks) 34 | after = "\n".join(self.tasks) 35 | print(f"Before:\n{before}\n", file=sys.stderr) 36 | print(f"After:\n{after}\n", file=sys.stderr) 37 | if len(self.tasks) == 0: 38 | self.done = True 39 | else: 40 | AgentOobaVars["processed-task-storage"].add_tasks(self.tasks, [uuid.uuid4().hex for task in self.tasks]) 41 | 42 | def make_prompt(self, 43 | directive, 44 | include_objectives=True, 45 | context_objectives=False, 46 | context_resources=False, 47 | context_abilities=False 48 | ): 49 | constr="" 50 | context_resources = context_resources and "resources-needed" in self.context 51 | context_abilities = context_abilities and "abilities-needed" in self.context 52 | context_objectives = context_objectives and self.parent and (self.parent_task_idx > 0) 53 | if any([context_resources, context_abilities, context_objectives]): 54 | constr = "Context:\n" 55 | if context_resources: 56 | constr += f"Resources needed for completing _TASK_:\n{self.context['resources-needed']}\n" 57 | constr += f"Resources available:\n{self.context['resources-available'] if 'resources-available' in self.context else 'None'}\n" 58 | if context_abilities: 59 | constr += f"Abilities needed for completing _TASK_:\n{self.context['abilities-needed']}\n" 60 | constr += f"Abilities available:\n{self.context['abilities-available'] if 'abilities-available' in self.context else 'None'}\n" 61 | if context_objectives: 62 | constr += f"The following is a list of objectives that have already been completed:\n" 63 | constr += "\n".join([f"Objective {self.recursion_level-1}, Task {i+1}: {self.parent.tasks[i].objective}" for i in range(self.parent_task_idx)]) 64 | constr += "\n" 65 | constr += "\n" 66 | directive = "\n".join([line.strip() for line in (directive.split("\n") if "\n" in directive else [directive])]) 67 | directive = directive.replace("_TASK_", f"Objective {self.recursion_level}").strip() 68 | constr = constr.replace("_TASK_", f"Objective {self.recursion_level}") 69 | objstr = 
f"Remember these objectives:\n{self.prompt_objective_context()}\n\n" if include_objectives else "" 70 | return f"{AgentOobaVars['human-prefix']}\n{AgentOobaVars['directives']['Primary directive']}\n\n{objstr}{constr}Instructions:\n{directive}\n\n{AgentOobaVars['assistant-prefix']}" 71 | 72 | def assess_model_ability(self): 73 | directive = AgentOobaVars["directives"]["Assess ability directive"] 74 | prompt = self.make_prompt(directive, include_objectives=True, context_abilities=True, context_resources=True) 75 | response = ooba_call(prompt, self.state).strip() 76 | return 'yes' in response.lower() 77 | 78 | def do_objective(self): 79 | directive = AgentOobaVars["directives"]["Do objective directive"] 80 | response = ooba_call(self.make_prompt(directive, include_objectives=True, context_abilities=True, context_resources=True), self.state).strip() 81 | return response 82 | 83 | def generate_context(self): 84 | self.context["resources-available"]="None" 85 | init_abilities=""" 86 | - Following Instructions: You follow instructions exceptionally well and pay close attention to them. 87 | - Generating Text: You are an AI and can generate text. You can use this ability for tasks such as writing, summarizing, making decisions, answering questions, and developing plans. 88 | - Using Tools: You can use any tools that are available to you. 
89 | """ 90 | self.context["abilities-available"]=init_abilities.strip() 91 | directive = AgentOobaVars["directives"]["Generate thoughts directive"] 92 | response = ooba_call(self.make_prompt(directive, include_objectives=True), self.state).strip() 93 | context_regex = re.compile('Resources: (.+)\nAbilities: (.+)',re.DOTALL) 94 | match = context_regex.search(response) 95 | if not match: 96 | return 97 | g = match.groups() 98 | self.context["resources-needed"]=g[0] 99 | self.context["abilities-needed"]=g[1] 100 | 101 | def split_objective(self): 102 | directive = AgentOobaVars["directives"]["Split objective directive"].replace("_MAX_TASKS_", str(AgentOobaVars["max-tasks"])) 103 | prompt = self.make_prompt(directive, include_objectives=True, context_objectives=True) 104 | response = ooba_call(prompt, self.state).strip() 105 | task_list_regex = re.compile('((^|\n)[\d]+\.)(.*?)(?=(\n[\d]+\..*)|($))', re.DOTALL) 106 | match = task_list_regex.search(response) 107 | task_list = [] 108 | while match: 109 | g = match.groups() 110 | task_list.append(g[2].strip()) 111 | if g[3]: 112 | match = task_list_regex.search(g[3]) 113 | else: 114 | break 115 | return task_list 116 | 117 | def assess_tools(self): 118 | for tool_name in AgentOobaVars["tools"]: 119 | if AgentOobaVars["tools"][tool_name]["active"]: 120 | tool_str = f"Tool name: {tool_name}\nTool description: {AgentOobaVars['tools'][tool_name]['desc']}" 121 | directive = AgentOobaVars["directives"]["Assess tool directive"].replace("_TOOL_NAME_", tool_name) 122 | old = self.context["resources-available"] 123 | self.add_resource_no_summary(f"You have the following tool available to you:\n{tool_str}") 124 | prompt = self.make_prompt(directive, include_objectives=True, context_resources=True) 125 | if 'yes' in ooba_call(prompt, self.state).strip().lower(): 126 | directive = AgentOobaVars["directives"]["Use tool directive"].replace("_TOOL_NAME_", tool_name) 127 | prompt = self.make_prompt(directive, include_objectives=True, 
context_resources=True) 128 | response = ooba_call(prompt, self.state).strip() 129 | negative_responses = ["i cannot", "am unable"] 130 | if not any([neg in response.lower() for neg in negative_responses]): 131 | self.context["resources-available"]=old 132 | return True, AgentOobaVars["tools"][tool_name]["tool"], response 133 | self.context["resources-available"]=old 134 | return False, None, None 135 | 136 | def prompt_objective_context(self): 137 | reverse_context = [] 138 | p_it = self 139 | r = self.recursion_level 140 | while p_it.parent: 141 | child = p_it 142 | p_it = p_it.parent 143 | if AgentOobaVars["expanded-context"]: 144 | parent_task_list_str = "\n".join([f"Objective {r-1}, Task {str(i+1)}: {p_it.tasks[i] if isinstance(p_it.tasks[i], str) else p_it.tasks[i].objective}" for i in range(len(p_it.tasks))]) 145 | reverse_context.append(f"We have developed the following numbered list of tasks that one must complete to achieve Objective {r-1}:\n{parent_task_list_str}\n\nThe current task that we are at among these is Objective {r-1}, Task {p_it.current_task_idx+1}. We will refer to Objective {r-1}, Task {p_it.current_task_idx+1} as Objective {r}.") 146 | else: 147 | reverse_context.append(f"In order to complete Objective {r-1}, one must complete Objective {r}. 
Objective {r} is: {child.objective}") 148 | r -= 1 149 | assert r == 1 150 | reverse_context.append(f"Objective 1 is: {p_it.objective}") 151 | reverse_context.reverse() 152 | return "\n".join(reverse_context) 153 | 154 | def add_resource(self, resource): 155 | i = 0 156 | while get_encoded_length(resource) > (AgentOobaVars["max-context"] / 4) and i < AgentOobaVars["max-summaries"]: 157 | i += 1 158 | docs = text_splitter.create_documents([resource]) 159 | summaries = [] 160 | for doc in docs: 161 | directive = AgentOobaVars["directives"]["Summarize directive"].replace("_TEXT_", doc) 162 | prompt = self.make_prompt(directive, include_objectives=False) 163 | summaries.append(ooba_call(prompt, self.state).strip()) 164 | resource = "\n\n".join(summaries) 165 | final_length = get_encoded_length(resource) 166 | if final_length < AgentOobaVars["max-context"]: 167 | if final_length > (AgentOobaVars["max-context"]/4): 168 | directive = AgentOobaVars["directives"]["Summarize directive"].replace("_TEXT_", resource) 169 | prompt = self.make_prompt(directive, include_objectives=False) 170 | resource = ooba_call(prompt, self.state).strip() 171 | self.add_resource_no_summary(resource) 172 | 173 | def add_resource_no_summary(self, resource): 174 | if not "resources-available" in self.context or self.context["resources-available"] == "None": 175 | self.context["resources-available"] = resource 176 | else: 177 | self.context["resources-available"] += f"\n{resource}" 178 | 179 | def try_objective(self): 180 | tool_found, tool, tool_input = self.assess_tools() 181 | if tool_found: 182 | if (AgentOobaVars["tools"][tool.name]["execute"]): 183 | used_tool_str = f"TOOL USED: \"{tool.name}\"\nINPUT: \"{tool_input}\"\nOUTPUT: \"{tool.run(tool_input)}\"" 184 | self.output.append(used_tool_str) 185 | if self.parent: 186 | self.parent.add_resource(used_tool_str) 187 | else: 188 | self.output.append(f"TOOL FOUND: \"{tool.name}\"\nINPUT: \"{tool_input}\"") 189 | if self.assess_model_ability(): 
190 | response = self.do_objective() 191 | negative_responses = ["i cannot", "i am unable", "i'm unable"] 192 | if not any([neg in response.lower() for neg in negative_responses]): 193 | self.output.append(f"MODEL OUTPUT {response}") 194 | if self.parent: 195 | self.parent.add_resource(response) 196 | 197 | def process_current_task(self): 198 | if self.current_task_idx == len(self.tasks): 199 | self.current_task_idx = 0 200 | if all([(isinstance(task, str) or task.done) for task in self.tasks]): 201 | self.try_objective() 202 | self.done = True 203 | if not self.done: 204 | current_task = self.tasks[self.current_task_idx] 205 | if isinstance(current_task, str): 206 | self.tasks[self.current_task_idx] = Objective( 207 | current_task, 208 | self.current_task_idx, 209 | self.recursion_level + 1, 210 | self.state, 211 | parent=self 212 | ) 213 | self.current_task_idx += 1 214 | if self.current_task_idx == len(self.tasks): 215 | self.current_task_idx = 0 216 | if self.parent: 217 | self.parent.current_task_idx += 1 218 | else: 219 | current_task.process_current_task() 220 | else: 221 | if self.parent: 222 | self.parent.current_task_idx += 1 223 | 224 | def to_string(self, select): 225 | html_string = f'OBJECTIVE: {escape(self.objective)}