├── .env.example ├── HISTORY.md ├── .gitignore ├── pyproject.toml ├── LICENSE ├── SECURITY.md ├── README.md ├── aea_babyagi.py ├── simple_babyagi.py ├── agent_babyagi.py └── actions.py /.env.example: -------------------------------------------------------------------------------- 1 | OPENAI_API_KEY="YOUR_API_KEY" 2 | PINECONE_API_KEY="YOUR_API_KEY" -------------------------------------------------------------------------------- /HISTORY.md: -------------------------------------------------------------------------------- 1 | ## 0.1.0 (2023-05-11) 2 | 3 | - The first release of aea-babyagi -------------------------------------------------------------------------------- /.gitignore: -------------------------------------------------------------------------------- 1 | *__pycache__ 2 | .env 3 | *.DS_Store 4 | /packages/ 5 | /.ruff_cache/ 6 | ethereum_private_key.txt -------------------------------------------------------------------------------- /pyproject.toml: -------------------------------------------------------------------------------- 1 | [tool.poetry] 2 | name = "aea-babyagi" 3 | version = "0.1.0" 4 | description = "" 5 | authors = ["David Minarsch ", "N0xMare"] 6 | license = "MIT" 7 | readme = "README.md" 8 | 9 | [tool.poetry.dependencies] 10 | python = "^3.10" 11 | openai = "0.27.2" 12 | python-dotenv = "^0.14.0" 13 | pinecone-client = "^2.2.1" 14 | ruff = "^0.0.260" 15 | black = "*" 16 | open-aea = {version = "1.33.0", extras = ["all"]} 17 | open-aea-ledger-ethereum = {version = "1.32.0"} 18 | unstructured = {extras = ["local-inference"], version = "^0.5.12"} 19 | tiktoken = "^0.3.3" 20 | 21 | 22 | [build-system] 23 | requires = ["poetry-core"] 24 | build-backend = "poetry.core.masonry.api" -------------------------------------------------------------------------------- /LICENSE: -------------------------------------------------------------------------------- 1 | MIT License 2 | 3 | Copyright (c) 2023 Valory 4 | 5 | Permission is hereby granted, free of charge, to any person obtaining a copy 6 | of this software and associated documentation files (the "Software"), to deal 7 | in the Software without restriction, including without limitation the rights 8 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell 9 | copies of the Software, and to permit persons to whom the Software is 10 | furnished to do so, subject to the following conditions: 11 | 12 | The above copyright notice and this permission notice shall be included in all 13 | copies or substantial portions of the Software. 14 | 15 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 16 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 17 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE 18 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 19 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, 20 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE 21 | SOFTWARE. 22 | -------------------------------------------------------------------------------- /SECURITY.md: -------------------------------------------------------------------------------- 1 | # Security Policy 2 | 3 | This document outlines security procedures and general policies for the `aea-babyagi` project. 4 | 5 | ## Supported Versions 6 | 7 | The following table shows which versions of `aea-babyagi` are currently being supported with security updates. 
8 | 9 | | Version | Supported | 10 | |------------|--------------------| 11 | | `v0.1.0` | :white_check_mark: | 12 | 13 | ## Reporting a Vulnerability 14 | 15 | The `aea-babyagi` team and community take all security bugs in `aea-babyagi` seriously. Thank you for improving the security of `aea-babyagi`. We appreciate your efforts and responsible disclosure and will make every effort to acknowledge your contributions. 16 | 17 | Report security bugs by emailing `info@valory.xyz`. 18 | 19 | The lead maintainer will acknowledge your email within 48 hours, and will send a more detailed response within 48 hours indicating the next steps in handling your report. After the initial reply to your report, the security team will endeavour to keep you informed of the progress towards a fix and full announcement, and may ask for additional information or guidance. 20 | 21 | Report security bugs in third-party modules to the person or team maintaining the module. 22 | 23 | ## Disclosure Policy 24 | 25 | When the security team receives a security bug report, they will assign it to a primary handler. This person will coordinate the fix and release process, involving the following steps: 26 | 27 | - Confirm the problem and determine the affected versions. 28 | - Audit code to find any potential similar problems. 29 | - Prepare fixes for all releases still under maintenance. These fixes will be released as fast as possible to PyPI. 30 | 31 | ## Comments on this Policy 32 | 33 | If you have suggestions on how this process could be improved please submit a pull request. -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 |

<h1 align="center"> 2 | AEA BabyAGI 3 | </h1>

4 | 5 | ### Adaptation of [babyagi](https://github.com/yoheinakajima/babyagi) in the [Open AEA](https://github.com/valory-xyz/open-aea) framework. 6 | 7 | ## Project 8 | ```ml 9 | ├─ actions — "Contains all the functions used to compose the low- to high-level actions of each agent" 10 | ├─ simple_babyagi — "Adaptation of the babyAGI agent loop/functions using only the OpenAI API without extra tooling" 11 | ├─ agent_babyagi — "Uses Open AEA's Agent class to extend simple_babyagi into an Open AEA agent with Finite State Machine behaviour" 12 | ├─ aea_babyagi — "Inherits from Open AEA's AEA class to extend the babyagi functionality of agent_babyagi into an autonomous economic agent" 13 | ``` 14 | 15 | ## Getting Started 16 | 17 | Create a `.env` file from the `.env.example` provided in this repo: 18 | ```bash 19 | cp .env.example .env 20 | ``` 21 | 22 | Set your OpenAI API key and, optionally, other environment variables you want to use in the `.env` file: 23 | ```bash 24 | OPENAI_API_KEY="YOUR_API_KEY" 25 | PINECONE_API_KEY="YOUR_API_KEY" 26 | ``` 27 | 28 | Install project dependencies (you can find install instructions for Poetry [here](https://python-poetry.org/docs/)): 29 | ```bash 30 | poetry shell 31 | poetry install 32 | ``` 33 | 34 | Import AEA packages: 35 | ```bash 36 | svn export https://github.com/valory-xyz/open-aea/tags/v1.33.0/packages packages 37 | ``` 38 | 39 | Source the environment variables: 40 | ```bash 41 | source .env 42 | ``` 43 | 44 | Run the agents: 45 | ```bash 46 | poetry run python simple_babyagi.py "develop a task list" "solve world hunger" 47 | ``` 48 | ```bash 49 | poetry run python agent_babyagi.py "develop a task list" "solve world hunger" 50 | ``` 51 | ```bash 52 | poetry run python aea_babyagi.py "develop a task list" "solve world hunger" 53 | ``` 54 | -------------------------------------------------------------------------------- /aea_babyagi.py: -------------------------------------------------------------------------------- 1 | import sys 2 | from threading import Thread 3 | 4 | # AEA dependencies 5 | from aea_ledger_ethereum import EthereumCrypto 6 | from aea.aea_builder import AEABuilder 7 | from aea.crypto.helpers import PRIVATE_KEY_PATH_SCHEMA, create_private_key 8 | 9 | # agent_babyagi dependencies 10 | # build_fsm_and_skill builds the skill we add to the AEA 11 | # create_memory creates the shared state used by the AEA to move between actions 12 | from agent_babyagi import build_fsm_and_skill, create_memory 13 | 14 | # Create a dummy private key for the AEA wallet 15 | PRIVATE_KEY_FILE = PRIVATE_KEY_PATH_SCHEMA.format(EthereumCrypto.identifier) 16 | create_private_key(EthereumCrypto.identifier, PRIVATE_KEY_FILE) 17 | 18 | 19 | def build_aea(first_task: str, objective: str): 20 | """Build the AEA with the babyagi skill.
21 | 22 | Args: 23 | first_task (str): the first task to be completed by the agent 24 | objective (str): the objective of the agent 25 | 26 | Returns: 27 | AEA: the AEA with the babyagi skill 28 | """ 29 | # instantiate the aea builder 30 | builder = AEABuilder() 31 | # set the aea name 32 | builder.set_name("baby_agi") 33 | # create the shared state object that serves as memory for the actions of the AEA 34 | memory = create_memory(first_task, objective) 35 | # add the AEA's private key 36 | builder.add_private_key(EthereumCrypto.identifier, PRIVATE_KEY_FILE) 37 | # add the babyagi skill 38 | _, skill = build_fsm_and_skill(memory) 39 | # add the skill to the AEA 40 | builder.add_component_instance(skill) 41 | # Create our AEA 42 | my_aea = builder.build() 43 | 44 | print("\033[89m\033[1m" + "\n====== AEA babyAGI ONLINE ======" + "\033[0m\033[0m") 45 | 46 | # Set the AEA's agent context 47 | skill.skill_context.set_agent_context(my_aea.context) 48 | # update the shared state of the AEA with the memory object we created above 49 | skill.skill_context.shared_state.update(memory) 50 | return my_aea 51 | 52 | 53 | def run(first_task: str, objective: str): 54 | """Run babyAGI. 55 | 56 | Args: 57 | first_task (str): the first task to be completed by the agent 58 | objective (str): the objective of the agent 59 | """ 60 | 61 | # Build the AEA 62 | my_aea = build_aea(first_task, objective) 63 | 64 | # Run the AEA in a separate thread and block on join() so that 65 | # a KeyboardInterrupt is caught here and shuts the AEA down cleanly 66 | t = Thread(target=my_aea.start) 67 | t.start() 68 | try: 69 | t.join() 70 | except KeyboardInterrupt: 71 | my_aea.stop() 72 | t.join() 73 | 74 | 75 | if __name__ == "__main__": 76 | _, first_task, objective = sys.argv 77 | try: 78 | run(first_task, objective) 79 | except KeyboardInterrupt: 80 | print("\033[89m\033[1m" + "\n======== EXIT ========" + "\033[0m\033[0m") 81 | pass 82 | -------------------------------------------------------------------------------- /simple_babyagi.py: -------------------------------------------------------------------------------- 1 | """ 2 | Heavily inspired by: https://github.com/yoheinakajima/babyagi 3 | 4 | python simple_babyagi.py "develop a task list" "solve world hunger" 5 | """ 6 | 7 | import os 8 | import sys 9 | import openai 10 | import time 11 | from collections import deque 12 | from dotenv import load_dotenv 13 | 14 | # import functions used to build the agent's actions 15 | from actions import ( 16 | task_creation_prompt_builder, 17 | task_creation_handler, 18 | task_prioritization_prompt_builder, 19 | task_prioritization_handler, 20 | task_execution_prompt_builder, 21 | task_execution_handler, 22 | task_stop_or_not_prompt_builder, 23 | task_stop_or_not_handler, 24 | ) 25 | 26 | load_dotenv() 27 | 28 | OPENAI_API_KEY = os.getenv("OPENAI_API_KEY") 29 | 30 | # Set up OpenAI API key 31 | openai.api_key = OPENAI_API_KEY 32 | 33 | # flag to stop the procedure 34 | STOP_PROCEDURE = False 35 | 36 | # Definition of the action types for the simple agent 37 | action_types = { 38 | "task_creation": { 39 | "prompt_builder": task_creation_prompt_builder, 40 | "handler": task_creation_handler, 41 | }, 42 | "task_prioritization": { 43 | "prompt_builder": task_prioritization_prompt_builder, 44 | "handler": task_prioritization_handler, 45 | }, 46 | "task_execution": { 47 | "prompt_builder": task_execution_prompt_builder, 48 | "handler": task_execution_handler, 49 | }, 50 | "task_stop_or_not": { 51 | "prompt_builder": task_stop_or_not_prompt_builder, 52 | "handler": task_stop_or_not_handler, 53 | }, 54 |
} 55 | 56 | 57 | def executor(globals_: dict, agent_type: str) -> None: 58 | """ 59 | execute an action using simple agent 60 | 61 | Args: 62 | globals_ (dict): The globals dictionary 63 | agent_type (str): The action type to execute 64 | """ 65 | # load the action type into "agent" 66 | agent = action_types[agent_type] 67 | # build the prompt for the corresponding action type 68 | builder_ = agent["prompt_builder"] 69 | # create the corresponding prompt for GPT to execute the action 70 | # type "agent" and load it into "prompt" 71 | prompt = builder_(globals_) 72 | # call GPT with the corresponding "prompt" to execute the action 73 | # and load the response from the "prompt" into "response" 74 | response = openai_call(prompt) 75 | # handle the response from GPT for the corresponding action type "agent" 76 | handler_ = agent["handler"] 77 | handler_(response, globals_) 78 | 79 | 80 | def main(first_task: str, objective: str): 81 | # initialize the globals dictionary with "objective" 82 | # this is simple_agent's state variable which is used to keep track of 83 | # the task list, current task, and the objective so GPT can reason about them. 84 | globals_ = { 85 | "objective": objective, 86 | "task_list": deque([]), 87 | "current_task": {}, 88 | "result": {"data": ""}, 89 | "keep_going": True, 90 | } 91 | # add the first task to the task list 92 | globals_["task_list"].append({"id": 1, "name": first_task}) 93 | 94 | print("\033[89m\033[1m" + "\n=== Simple Loop babyAGI ONLINE ===" + "\033[0m\033[0m") 95 | 96 | # simple agent loop 97 | while globals_["keep_going"]: 98 | # execution 99 | executor(globals_, "task_execution") 100 | # creation 101 | executor(globals_, "task_creation") 102 | # re-prioritization 103 | executor(globals_, "task_prioritization") 104 | if STOP_PROCEDURE: 105 | executor(globals_, "task_stop_or_not") 106 | time.sleep(1) 107 | 108 | 109 | def openai_call( 110 | prompt: str, use_gpt4: bool = False, temperature: float = 0.5, max_tokens: int = 200 111 | ): 112 | if not use_gpt4: 113 | response = openai.Completion.create( 114 | engine="text-davinci-003", 115 | prompt=prompt, 116 | temperature=temperature, 117 | max_tokens=max_tokens, 118 | top_p=1, 119 | frequency_penalty=0, 120 | presence_penalty=0, 121 | ) 122 | return response.choices[0].text.strip() 123 | else: 124 | messages = [{"role": "user", "content": prompt}] 125 | response = openai.ChatCompletion.create( 126 | model="gpt-4", 127 | messages=messages, 128 | temperature=temperature, 129 | max_tokens=max_tokens, 130 | n=1, 131 | stop=None, 132 | ) 133 | return response.choices[0].message.content.strip() 134 | 135 | 136 | if __name__ == "__main__": 137 | _, first_task, objective = sys.argv 138 | try: 139 | main(first_task, objective) 140 | except KeyboardInterrupt: 141 | print("\033[89m\033[1m" + "\n======== EXIT ========" + "\033[0m\033[0m") 142 | pass 143 | -------------------------------------------------------------------------------- /agent_babyagi.py: -------------------------------------------------------------------------------- 1 | import sys 2 | import os 3 | import openai 4 | from collections import deque 5 | from typing import List 6 | from dotenv import load_dotenv 7 | 8 | # AEA dependencies 9 | from aea.agent import Agent 10 | from aea.configurations.base import SkillConfig 11 | from aea.connections.base import Connection 12 | from aea.identity.base import Identity 13 | from aea.skills.base import Skill, SkillContext 14 | from aea.skills.behaviours import FSMBehaviour, State 15 | from aea.context.base 
import AgentContext 16 | 17 | # import functions used to build the agent's actions 18 | from actions import ( 19 | task_creation_prompt_builder, 20 | task_creation_handler, 21 | task_prioritization_prompt_builder, 22 | task_prioritization_handler, 23 | task_execution_prompt_builder, 24 | task_execution_handler, 25 | task_stop_or_not_prompt_builder, 26 | task_stop_or_not_handler, 27 | ) 28 | 29 | load_dotenv() 30 | 31 | OPENAI_API_KEY = os.getenv("OPENAI_API_KEY") 32 | 33 | # Set up OpenAI API key 34 | openai.api_key = OPENAI_API_KEY 35 | 36 | # flag to stop the procedure 37 | STOP_PROCEDURE = False 38 | 39 | # action types definition, each action type makes two function calls: builder & handler 40 | # the initial action type is execution of the first task 41 | initial = "task_execution_1" 42 | action_types = { 43 | "task_creation": { 44 | "prompt_builder": task_creation_prompt_builder, 45 | "handler": task_creation_handler, 46 | }, 47 | "task_prioritization": { 48 | "prompt_builder": task_prioritization_prompt_builder, 49 | "handler": task_prioritization_handler, 50 | }, 51 | "task_execution_1": { 52 | "prompt_builder": task_execution_prompt_builder, 53 | "handler": task_execution_handler, 54 | }, 55 | "task_execution_2": { 56 | "prompt_builder": task_execution_prompt_builder, 57 | "handler": task_execution_handler, 58 | }, 59 | "task_stop_or_not": { 60 | "prompt_builder": task_stop_or_not_prompt_builder, 61 | "handler": task_stop_or_not_handler, 62 | }, 63 | } 64 | 65 | # State Machine Definition 66 | # Ending states, adds the option to stop the procedure and execute task_stop_or_not 67 | if STOP_PROCEDURE: 68 | transitions = { 69 | "task_execution_1": {"done": "task_creation"}, 70 | "task_creation": {"done": "task_execution_2"}, 71 | "task_execution_2": {"done": "task_prioritization"}, 72 | "task_prioritization": {"done": "task_stop_or_not"}, 73 | "task_stop_or_not": {"done": "task_execution_1", "stop": None}, 74 | } 75 | # runtime state transitions of loop (execution, creation, execution, prioritization) 76 | else: 77 | transitions = { 78 | "task_execution_1": {"done": "task_creation"}, 79 | "task_creation": {"done": "task_execution_2"}, 80 | "task_execution_2": {"done": "task_prioritization"}, 81 | "task_prioritization": {"done": "task_execution_1"}, 82 | } 83 | 84 | 85 | class SimpleStateBehaviour(State): 86 | def act(self) -> None: 87 | """ 88 | Act implementation. 
89 | """ 90 | # get the action type 91 | action_type = action_types[self.name] 92 | # get the prompt builder for the action type 93 | builder_ = action_type["prompt_builder"] 94 | # build the prompt using the shared state from the Agent's context 95 | prompt = builder_(self.context.shared_state) 96 | # use the prompt above to input into GPT to get the response 97 | response = self.openai_call(prompt) 98 | # get the handler for the action type 99 | handler_ = action_type["handler"] 100 | # get the event to trigger from the handler 101 | event_to_trigger = handler_(response, self.context.shared_state) 102 | self.executed = True 103 | self._event = event_to_trigger 104 | 105 | @staticmethod 106 | def openai_call( 107 | prompt: str, 108 | use_gpt4: bool = False, 109 | temperature: float = 0.5, 110 | max_tokens: int = 200, 111 | ): 112 | if use_gpt4: 113 | messages = [{"role": "user", "content": prompt}] 114 | response = openai.ChatCompletion.create( 115 | model="gpt-4", 116 | messages=messages, 117 | temperature=temperature, 118 | max_tokens=max_tokens, 119 | n=1, 120 | stop=None, 121 | ) 122 | return response.choices[0].message.content.strip() 123 | response = openai.Completion.create( 124 | engine="text-davinci-003", 125 | prompt=prompt, 126 | temperature=temperature, 127 | max_tokens=max_tokens, 128 | top_p=1, 129 | frequency_penalty=0, 130 | presence_penalty=0, 131 | ) 132 | return response.choices[0].text.strip() 133 | 134 | def is_done(self) -> bool: 135 | """Get is done.""" 136 | return self._event is not None 137 | 138 | 139 | # instantiate FSMBehaviour class for use in constructing the agent's FSM transitions 140 | class MyFSMBehaviour(FSMBehaviour): 141 | def setup(self): 142 | pass 143 | 144 | def teardown(self): 145 | pass 146 | 147 | 148 | # create the agent's shared state and return it, "memory" is the shared state 149 | # takes in string arguments to set first task and the objective of the agent 150 | def create_memory( 151 | first_task: str, 152 | objective: str, 153 | ) -> dict: 154 | """Create the shared memory.""" 155 | memory = { 156 | "objective": objective, 157 | "task_list": deque([]), 158 | "current_task": {}, 159 | "result": {"data": first_task}, 160 | "keep_going": True, 161 | } 162 | memory["task_list"].append({"id": 1, "name": first_task}) 163 | return memory 164 | 165 | 166 | def build_fsm_and_skill(memory: dict) -> tuple[MyFSMBehaviour, Skill]: 167 | """ 168 | Build the FSM object and the Skill object. The FSM is built by loading 169 | all the Simple state behaviours and their respective transition 170 | functions into the FSM. The Skill object is built by updating the skill 171 | behaviours with the FSM behaviour after the states/transitions have 172 | been loaded into it. 
173 | 174 | fsm, _ = build_fsm_and_skill(memory) ...to get the fsm object 175 | _, skill = build_fsm_and_skill(memory) ...to get the skill object 176 | 177 | Args: 178 | memory (dict): the agent's shared state 179 | 180 | Returns: 181 | tuple[MyFSMBehaviour, Skill]: the FSM object and the Skill object 182 | """ 183 | # create skill and skill context 184 | config = SkillConfig(name="dummy", author="dummy") 185 | skill = Skill(configuration=config) 186 | skill_context = SkillContext(skill=skill) 187 | # create empty agent context to utilize shared state of Agent 188 | agent_context = AgentContext( 189 | identity=None, 190 | connection_status=None, 191 | outbox=None, 192 | decision_maker_message_queue=None, 193 | decision_maker_handler_context=None, 194 | task_manager=None, 195 | default_ledger_id=None, 196 | currency_denominations=None, 197 | default_connection=None, 198 | default_routing=None, 199 | search_service_address=None, 200 | decision_maker_address=None, 201 | data_dir=None, 202 | ) 203 | # set the agent context 204 | skill_context.set_agent_context(agent_context) 205 | skill_context.shared_state.update(memory) 206 | # create the FSM object 207 | fsm = MyFSMBehaviour(name="babyAGI-loop", skill_context=skill_context) 208 | 209 | # load the states and transitions (SimpleStateBehaviour) into the FSM object 210 | for key in action_types.keys(): 211 | if key not in transitions: 212 | continue 213 | behaviour = SimpleStateBehaviour(name=key, skill_context=skill_context) 214 | is_initial = key == initial 215 | fsm.register_state(str(behaviour.name), behaviour, initial=is_initial) 216 | for event, target_behaviour_name in transitions[key].items(): 217 | fsm.register_transition(str(behaviour.name), target_behaviour_name, event) 218 | 219 | # update the skill behaviours with the FSM behaviour to build the skill 220 | skill.behaviours.update({fsm.name: fsm}) 221 | 222 | return fsm, skill 223 | 224 | 225 | class BabyAGI(Agent): 226 | """A re-implementation of the Baby AGI using the Open AEA framework.""" 227 | 228 | def __init__( 229 | self, 230 | identity: Identity, 231 | memory: dict, 232 | connections: List[Connection] = None, 233 | ): 234 | """Initialise the agent.""" 235 | super().__init__(identity, connections) 236 | fsm, _ = build_fsm_and_skill(memory) 237 | self.fsm = fsm 238 | 239 | def act(self): 240 | """Act implementation.""" 241 | if self.fsm.is_done(): 242 | print("done!") 243 | return 244 | self.fsm.act() 245 | 246 | def setup(self): 247 | # empty setup method 248 | pass 249 | 250 | def teardown(self): 251 | # empty teardown method 252 | pass 253 | 254 | 255 | def run(first_task: str, objective: str): 256 | """ 257 | Run babyAGI with the given first task + objective using 258 | open-aea's "Agent" & "FSMBehaviour" classes. 
259 | 260 | Args: 261 | first_task (str): the first task to be completed by the agent 262 | objective (str): the objective of the agent 263 | """ 264 | 265 | # Create the agent's shared state object 266 | memory = create_memory(first_task, objective) 267 | 268 | # Create an identity for the agent 269 | identity = Identity( 270 | name="baby_agi", address="my_address", public_key="my_public_key" 271 | ) 272 | 273 | print("\033[89m\033[1m" + "\n===== Agent babyAGI ONLINE =====" + "\033[0m\033[0m") 274 | 275 | # Create our Agent (without connections) 276 | my_agent = BabyAGI(identity, memory) 277 | 278 | # Start the agent loop (this call blocks until the agent is stopped) 279 | try: 280 | my_agent.start() 281 | except KeyboardInterrupt: 282 | # Shut down the agent 283 | my_agent.stop() 284 | 285 | 286 | if __name__ == "__main__": 287 | _, first_task, objective = sys.argv 288 | try: 289 | run(first_task, objective) 290 | except KeyboardInterrupt: 291 | print("\033[89m\033[1m" + "\n======== EXIT ========" + "\033[0m\033[0m") 292 | pass 293 | -------------------------------------------------------------------------------- /actions.py: -------------------------------------------------------------------------------- 1 | """ 2 | Actions: contains all the functions used to compose the low- to high-level 3 | actions for any agent programs that use these functions. 4 | """ 5 | 6 | import os 7 | import openai 8 | from typing import List 9 | from collections import deque 10 | import pinecone 11 | 12 | # Pinecone setup 13 | USE_PINECONE = False  # flag to turn Pinecone usage on or off (default: False) 14 | PINECONE_API_KEY = os.getenv("PINECONE_API_KEY") 15 | PINECONE_ENVIRONMENT = os.getenv("PINECONE_ENVIRONMENT") 16 | PINECONE_TABLE = os.getenv("PINECONE_TABLE") 17 | 18 | # Init Pinecone, then create the index if it does not exist yet 19 | # (pinecone.init must run before any other pinecone call) 20 | DIMENSION = 1536 21 | METRIC = "cosine" 22 | POD_TYPE = "p1" 23 | if USE_PINECONE: 24 | pinecone.init(api_key=PINECONE_API_KEY, environment=PINECONE_ENVIRONMENT) 25 | if PINECONE_TABLE not in pinecone.list_indexes(): 26 | pinecone.create_index( 27 | PINECONE_TABLE, dimension=DIMENSION, metric=METRIC, pod_type=POD_TYPE 28 | ) 29 | 30 | 31 | task_creation_template = """ 32 | You are a task creation AI that uses the result of an execution agent to 33 | create new tasks with the following objective: {objective}. The last 34 | completed task has the result: {result}. This result was based on this 35 | task description: {task_description}. These are incomplete tasks: 36 | {incomplete_tasks}. Based on the result, create new tasks to be completed 37 | by the AI system that do not overlap with incomplete tasks. Return the 38 | result as a list that is not numbered and simply lists each task on its 39 | own line, like: 40 | 41 | First task 42 | Second task 43 | Third task 44 | 45 | Do not include anything else except the list in your response. 46 | """ 47 | task_prioritization_template = """ 48 | You are a task prioritization AI tasked with cleaning the formatting of 49 | and reprioritizing the following tasks: {task_names}. Consider the 50 | ultimate objective of your team: {objective}. 51 | Do not remove any tasks. Return the result as a numbered list, like: 52 | 53 | #. First task 54 | #. Second task 55 | 56 | Start the task list with number {starting_id} and do not include anything 57 | else except the list in your response.
58 | """ 59 | task_execution_template = """ 60 | You are an AI that performs one task based on the following objective: 61 | {objective}.\nTake into account these previously completed tasks: 62 | {context}, and your assigned task: {task}.\nWhat is your response? 63 | Make sure to respond with a detailed solution to the assigned task you 64 | have been given only, and do not address any other tasks or make any lists. 65 | Your response should be in paragraph form. 66 | """ 67 | task_stop_or_not_template = """ 68 | You are an AI that assesses task completion for the following objective: 69 | {objective}. Take into account these previously completed tasks: {context}. 70 | Has the objective been achieved? Answer with only yes or no. Only answer 71 | with yes if you think this is the best answer possible. 72 | """ 73 | 74 | 75 | def task_execution_prompt_builder(globals_: dict) -> str: 76 | """ 77 | This function builds and returns the execution prompt for GPT to take 78 | in as input when executing a task. It also prints the next task in 79 | the task list. 80 | 81 | Args: 82 | globals_ (dict): The globals dictionary 83 | 84 | Returns: 85 | str: The prompt for GPT task execution 86 | """ 87 | task_list = globals_["task_list"] 88 | task = task_list.popleft() 89 | globals_["current_task"] = task 90 | 91 | print("\033[92m\033[1m" + "\n***** NEXT TASK *****\n" + "\033[0m\033[0m") 92 | print(str(task["id"]) + ": " + task["name"]) 93 | 94 | context = get_context(globals_) 95 | return task_execution_template.format( 96 | objective=globals_["objective"], task=task["name"], context=context 97 | ) 98 | 99 | 100 | def task_execution_handler(response: str, globals_: dict) -> str: 101 | """ 102 | This function handles the GPT response corresponding to the last task 103 | execution, allows for the result to be further enriched, and prints the 104 | resultant GPT response from executing the task. 105 | 106 | Args: 107 | response (str): The GPT response from task execution 108 | globals_ (dict): The globals dictionary 109 | """ 110 | enriched_result = { 111 | "data": response 112 | }  # This is where you should enrich the result if needed 113 | globals_["result"] = enriched_result 114 | 115 | # Store the result in Pinecone, if enabled (not set up by default) 116 | id_ = globals_["current_task"]["id"] 117 | result_id = f"result_{id_}" 118 | vector = enriched_result["data"]  # extract the actual result from the dictionary 119 | if USE_PINECONE: 120 | index = pinecone.Index(index_name=PINECONE_TABLE) 121 | index.upsert( 122 | [ 123 | ( 124 | result_id, 125 | get_ada_embedding(vector), 126 | {"task": globals_["current_task"]["name"], "result": response}, 127 | ) 128 | ] 129 | ) 130 | 131 | print("\033[93m\033[1m" + "\n***** TASK RESULT *****\n" + "\033[0m\033[0m") 132 | print(globals_["result"]["data"]) 133 | 134 | return "done" 135 | 136 | 137 | def task_creation_prompt_builder(globals_: dict) -> str: 138 | """ 139 | This function builds and returns the prompt for GPT task 140 | creation so GPT can create task lists.
141 | 142 | Args: 143 | globals_ (dict): The globals dictionary 144 | 145 | Returns: 146 | str: The prompt for GPT task creation 147 | """ 148 | incomplete_tasks = [t["name"] for t in globals_["task_list"]] 149 | return task_creation_template.format( 150 | objective=globals_["objective"], 151 | result=globals_["result"], 152 | task_description=globals_["current_task"].get("name", "default"), 153 | incomplete_tasks=incomplete_tasks, 154 | ) 155 | 156 | 157 | def task_creation_handler(response: str, globals_: dict) -> str: 158 | """ 159 | This function handles the GPT response corresponding to the task 160 | creation prompt built by the task creation prompt builder and 161 | prints the resultant GPT response that is creating new tasks. 162 | 163 | Args: 164 | response (str): The GPT response from task creation 165 | globals_ (dict): The globals dictionary 166 | 167 | Returns: 168 | str: The status of the task creation handler 169 | 170 | """ 171 | new_tasks = response.split("\n") 172 | if len(globals_["task_list"]) > 0: 173 | id_ = globals_["task_list"][-1]["id"] + 1 174 | else: 175 | id_ = 1 176 | task_list = [ 177 | {"id": id_ + i, "name": task_name} for i, task_name in enumerate(new_tasks) 178 | ] 179 | globals_["task_list"].extend(task_list)  # keep incomplete tasks and append the new ones 180 | 181 | print("\033[89m\033[1m" + "\nTASK LIST:" + "\033[0m\033[0m") 182 | for t in task_list: 183 | print(t["name"]) 184 | 185 | return "done" 186 | 187 | 188 | def task_prioritization_prompt_builder(globals_: dict) -> str: 189 | """ 190 | This function builds and returns the prompt for GPT task prioritization 191 | so existing tasks can be re-prioritized. 192 | 193 | Args: 194 | globals_ (dict): The globals dictionary 195 | 196 | Returns: 197 | str: The prompt for GPT task prioritization 198 | """ 199 | task_list = globals_["task_list"] 200 | current_task = globals_["current_task"] 201 | task_names = [t["name"] for t in task_list] 202 | current_task_id = int(current_task["id"]) + 1 203 | objective = globals_["objective"] 204 | return task_prioritization_template.format( 205 | task_names=task_names, objective=objective, starting_id=current_task_id 206 | ) 207 | 208 | 209 | def task_prioritization_handler(response: str, globals_: dict) -> str: 210 | """ 211 | This function handles the GPT response corresponding to the task 212 | prioritization prompt built by the task prioritization prompt builder and 213 | prints the resultant GPT response that is re-prioritizing existing tasks.
214 | """ 215 | new_tasks = response.split("\n") 216 | task_list = deque([]) 217 | for task_string in new_tasks: 218 | task_parts = task_string.strip().split(".", 1) 219 | if len(task_parts) == 2 and task_parts[0].strip().isdigit(): 220 | task_id = int(task_parts[0].strip()) 221 | task_name = task_parts[1].strip() 222 | task_list.append({"id": task_id, "name": task_name}) 223 | globals_["task_list"] = task_list 224 | globals_["current_task"] = {} 225 | print("\033[94m\033[1m" + "\n***** RE-PRIORITIZED LIST *****\n" + "\033[0m\033[0m") 226 | for t in task_list: 227 | print(str(t["id"]) + ": " + t["name"]) 228 | return "done" 229 | 230 | 231 | def get_ada_embedding(text: str) -> List[float]: 232 | text = text.replace("\n", " ") 233 | return openai.Embedding.create(input=[text], model="text-embedding-ada-002")[ 234 | "data" 235 | ][0]["embedding"] 236 | 237 | 238 | def get_context(globals_: dict) -> List[str]: 239 | """ 240 | Get the current context (a list of task names) from the dictionary state variable, globals_ 241 | """ 242 | # Query Pinecone for context, if enabled (not set up by default) 243 | if USE_PINECONE: 244 | query = globals_["objective"] 245 | query_embedding = get_ada_embedding(query) 246 | index = pinecone.Index(index_name=PINECONE_TABLE) 247 | results = index.query(query_embedding, top_k=5, include_metadata=True) 248 | sorted_results = sorted(results.matches, key=lambda x: x.score, reverse=True) 249 | return [str(item.metadata["task"]) for item in sorted_results] 250 | return [t["name"] for t in globals_["task_list"]] 251 | 252 | 253 | def task_stop_or_not_prompt_builder(globals_: dict) -> str: 254 | """ 255 | This function builds and returns the task stop or not prompt for GPT 256 | in order to reason about the objective completeness when the user 257 | stops the agent loop. 258 | 259 | Args: 260 | globals_ (dict): The globals dictionary 261 | 262 | Returns: 263 | str: The prompt for GPT task stop or not 264 | """ 265 | context = get_context(globals_) 266 | return task_stop_or_not_template.format( 267 | objective=globals_["objective"], context=context 268 | ) 269 | 270 | 271 | def task_stop_or_not_handler(response: str, globals_: dict) -> str: 272 | """ 273 | This function handles the GPT response corresponding to the task stop 274 | or not prompt built by the task stop or not prompt builder and prints 275 | whether the agent should keep going, based on GPT's assessment of the 276 | objective's completeness when the user stops the agent loop. 277 | """ 278 | globals_["keep_going"] = response.strip().lower() != "yes" 279 | print("\033[94m\033[1m" + "\n***** TASK CONTINUATION *****\n" + "\033[0m\033[0m") 280 | print(globals_["keep_going"]) 281 | return "done" if globals_["keep_going"] else "stop" 282 | --------------------------------------------------------------------------------
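
The four files above share one action protocol: a prompt builder turns the shared state dictionary into a prompt, an LLM call turns the prompt into text, and a handler parses the text back into the shared state and returns the event that drives the loop or the FSM. Below is a minimal offline sketch of a single `task_creation` step; the hard-coded `response` string is a stand-in for `openai_call(prompt)`, so it runs without an API key (assuming the project dependencies are installed):

```python
from collections import deque

from actions import task_creation_prompt_builder, task_creation_handler

# Shared state, mirroring create_memory() in agent_babyagi.py
state = {
    "objective": "solve world hunger",
    "task_list": deque([{"id": 1, "name": "develop a task list"}]),
    "current_task": {"id": 1, "name": "develop a task list"},
    "result": {"data": "placeholder result of the previous execution step"},
    "keep_going": True,
}

# builder: shared state -> prompt
prompt = task_creation_prompt_builder(state)
# stand-in for openai_call(prompt); the two task names are invented for illustration
response = "Research current hunger statistics\nIdentify funding sources"
# handler: response -> shared state, returning the event that advances the loop/FSM
event = task_creation_handler(response, state)

assert event == "done"
print([t["name"] for t in state["task_list"]])
```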
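
The FSM wiring in agent_babyagi.py likewise reduces to a four-state cycle when `STOP_PROCEDURE` is `False`. A dry run of just the transition table, copied from the source, makes the execution, creation, execution, prioritization loop visible:

```python
# Transition table from agent_babyagi.py with STOP_PROCEDURE = False
transitions = {
    "task_execution_1": {"done": "task_creation"},
    "task_creation": {"done": "task_execution_2"},
    "task_execution_2": {"done": "task_prioritization"},
    "task_prioritization": {"done": "task_execution_1"},
}

state = "task_execution_1"  # the state registered as initial
for _ in range(8):
    print(state)
    state = transitions[state]["done"]  # every state in this loop emits "done"
```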