├── .gitignore ├── CONTRIBUTING.md ├── LICENSE ├── Makefile ├── README.md ├── agora ├── __init__.py ├── common │ ├── __init__.py │ ├── core.py │ ├── errors.py │ ├── executor.py │ ├── function_schema.py │ ├── interpreters │ │ └── restricted.py │ ├── memory.py │ ├── storage.py │ └── toolformers │ │ ├── __init__.py │ │ ├── base.py │ │ ├── camel.py │ │ └── langchain.py ├── receiver │ ├── __init__.py │ ├── components │ │ ├── negotiator.py │ │ ├── programmer.py │ │ ├── protocol_checker.py │ │ └── responder.py │ ├── core.py │ ├── memory.py │ └── server.py ├── sender │ ├── __init__.py │ ├── components │ │ ├── negotiator.py │ │ ├── programmer.py │ │ ├── protocol_picker.py │ │ ├── querier.py │ │ └── transporter.py │ ├── core.py │ ├── memory.py │ ├── schema_generator.py │ └── task_schema.py └── utils.py ├── assets └── agora_demo.gif ├── docs └── getting-started.md ├── poetry.lock └── pyproject.toml /.gitignore: -------------------------------------------------------------------------------- 1 | # Byte-compiled / optimized / DLL files 2 | __pycache__/ 3 | *.py[cod] 4 | *$py.class 5 | 6 | # C extensions 7 | *.so 8 | 9 | # Distribution / packaging 10 | .Python 11 | build/ 12 | develop-eggs/ 13 | dist/ 14 | downloads/ 15 | eggs/ 16 | .eggs/ 17 | lib/ 18 | lib64/ 19 | parts/ 20 | sdist/ 21 | var/ 22 | wheels/ 23 | share/python-wheels/ 24 | *.egg-info/ 25 | .installed.cfg 26 | *.egg 27 | MANIFEST 28 | 29 | # PyInstaller 30 | # Usually these files are written by a python script from a template 31 | # before PyInstaller builds the exe, so as to inject date/other infos into it. 
32 | *.manifest 33 | *.spec 34 | 35 | # Installer logs 36 | pip-log.txt 37 | pip-delete-this-directory.txt 38 | 39 | # Unit test / coverage reports 40 | htmlcov/ 41 | .tox/ 42 | .nox/ 43 | .coverage 44 | .coverage.* 45 | .cache 46 | nosetests.xml 47 | coverage.xml 48 | *.cover 49 | *.py,cover 50 | .hypothesis/ 51 | .pytest_cache/ 52 | cover/ 53 | 54 | # Translations 55 | *.mo 56 | *.pot 57 | 58 | # Django stuff: 59 | *.log 60 | local_settings.py 61 | db.sqlite3 62 | db.sqlite3-journal 63 | 64 | # Flask stuff: 65 | instance/ 66 | .webassets-cache 67 | 68 | # Scrapy stuff: 69 | .scrapy 70 | 71 | # Sphinx documentation 72 | docs/_build/ 73 | 74 | # PyBuilder 75 | .pybuilder/ 76 | target/ 77 | 78 | # Jupyter Notebook 79 | .ipynb_checkpoints 80 | 81 | # IPython 82 | profile_default/ 83 | ipython_config.py 84 | 85 | # pyenv 86 | # For a library or package, you might want to ignore these files since the code is 87 | # intended to run in multiple environments; otherwise, check them in: 88 | # .python-version 89 | 90 | # pipenv 91 | # According to pypa/pipenv#598, it is recommended to include Pipfile.lock in version control. 92 | # However, in case of collaboration, if having platform-specific dependencies or dependencies 93 | # having no cross-platform support, pipenv may install dependencies that don't work, or not 94 | # install all needed dependencies. 95 | #Pipfile.lock 96 | 97 | # poetry 98 | # Similar to Pipfile.lock, it is generally recommended to include poetry.lock in version control. 99 | # This is especially recommended for binary packages to ensure reproducibility, and is more 100 | # commonly ignored for libraries. 101 | # https://python-poetry.org/docs/basic-usage/#commit-your-poetrylock-file-to-version-control 102 | #poetry.lock 103 | 104 | # pdm 105 | # Similar to Pipfile.lock, it is generally recommended to include pdm.lock in version control. 
106 | #pdm.lock 107 | # pdm stores project-wide configurations in .pdm.toml, but it is recommended to not include it 108 | # in version control. 109 | # https://pdm.fming.dev/latest/usage/project/#working-with-version-control 110 | .pdm.toml 111 | .pdm-python 112 | .pdm-build/ 113 | 114 | # PEP 582; used by e.g. github.com/David-OConnor/pyflow and github.com/pdm-project/pdm 115 | __pypackages__/ 116 | 117 | # Celery stuff 118 | celerybeat-schedule 119 | celerybeat.pid 120 | 121 | # SageMath parsed files 122 | *.sage.py 123 | 124 | # Environments 125 | .env 126 | .venv 127 | env/ 128 | venv/ 129 | ENV/ 130 | env.bak/ 131 | venv.bak/ 132 | 133 | # Spyder project settings 134 | .spyderproject 135 | .spyproject 136 | 137 | # Rope project settings 138 | .ropeproject 139 | 140 | # mkdocs documentation 141 | /site 142 | 143 | # mypy 144 | .mypy_cache/ 145 | .dmypy.json 146 | dmypy.json 147 | 148 | # Pyre type checker 149 | .pyre/ 150 | 151 | # pytype static type analyzer 152 | .pytype/ 153 | 154 | # Cython debug symbols 155 | cython_debug/ 156 | 157 | # PyCharm 158 | # JetBrains specific template is maintained in a separate JetBrains.gitignore that can 159 | # be found at https://github.com/github/gitignore/blob/main/Global/JetBrains.gitignore 160 | # and can be added to the global gitignore or merged into this file. For a more nuclear 161 | # option (not recommended) you can uncomment the following to ignore the entire idea folder. 162 | #.idea/ 163 | -------------------------------------------------------------------------------- /CONTRIBUTING.md: -------------------------------------------------------------------------------- 1 | # Contributing 2 | 3 | If you're interested in contributing, join our [Discord](https://discord.gg/MXmfhwQ4FB) to find out more! 
4 | -------------------------------------------------------------------------------- /LICENSE: -------------------------------------------------------------------------------- 1 | MIT License 2 | 3 | Copyright (c) 2024 Agora Protocol 4 | 5 | Permission is hereby granted, free of charge, to any person obtaining a copy 6 | of this software and associated documentation files (the "Software"), to deal 7 | in the Software without restriction, including without limitation the rights 8 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell 9 | copies of the Software, and to permit persons to whom the Software is 10 | furnished to do so, subject to the following conditions: 11 | 12 | The above copyright notice and this permission notice shall be included in all 13 | copies or substantial portions of the Software. 14 | 15 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 16 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 17 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE 18 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 19 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, 20 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE 21 | SOFTWARE. 22 | -------------------------------------------------------------------------------- /Makefile: -------------------------------------------------------------------------------- 1 | .PHONY: style quality 2 | 3 | # make sure to test the local checkout in scripts and not the pre-installed one (don't use quotes!) 
4 | export PYTHONPATH = agora 5 | 6 | check_dirs := agora 7 | 8 | style: 9 | ruff check --select I --fix $(check_dirs) 10 | ruff format $(check_dirs) 11 | 12 | quality: 13 | ruff check --select I $(check_dirs) 14 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | # python 2 | Python library for the Agora Protocol. 3 | 4 | The Agora Protocol is a protocol for efficient communication between heterogeneous agents. It allows agents of any framework to communicate with agents in any other framework, while maximizing efficiency. 5 | 6 | **Note**: Agora Python is currently in Open Beta! Expect breaking changes from one version to the other. 7 | 8 | ## Demo 9 | 10 | You can test Agora in your browser on [HuggingFace](https://huggingface.co/spaces/agora-protocol/agora-demo). 11 | 12 | ![](./assets/agora_demo.gif) 13 | 14 | ## Installation 15 | 16 | ``` 17 | pip install agora-protocol 18 | ``` 19 | 20 | ## Usage 21 | 22 | There are two ways to use Agora: as a sender agent (i.e. a client) or as a receiver agent (i.e. a server). An agent can also act as both a sender and a receiver. 23 | 24 | This is a quick example where two agents (a LangChain agent and a Camel agent) exchange weather data. 25 | 26 | ### Sender 27 | 28 | ```python 29 | import agora 30 | from langchain_openai import ChatOpenAI # Needs to be installed separately 31 | 32 | model = ChatOpenAI(model="gpt-4o-mini") 33 | toolformer = agora.toolformers.LangChainToolformer(model) 34 | 35 | sender = agora.Sender.make_default(toolformer) 36 | 37 | @sender.task() 38 | def get_temperature(city : str) -> int: 39 | """ 40 | Get the temperature for a given city. 41 | 42 | Parameters: 43 | city: The name of the city for which to retrieve the weather 44 | 45 | Returns: 46 | The temperature in °C for the given city. 
47 | """ 48 | pass 49 | 50 | 51 | response = get_temperature('New York', target='http://localhost:5000') 52 | print(response) # Output: 25 53 | ``` 54 | 55 | ### Receiver 56 | 57 | ```python 58 | import agora 59 | import camel.types # Needs to be installed separately 60 | 61 | toolformer = agora.toolformers.CamelToolformer( 62 | camel.types.ModelPlatformType.OPENAI, 63 | camel.types.ModelType.GPT_4O 64 | ) 65 | 66 | 67 | def weather_db(city: str) -> dict: 68 | """Gets the temperature and precipitation in a city. 69 | 70 | Args: 71 | city: The name of the city for which to retrieve the weather 72 | 73 | Returns: 74 | A dictionary containing the temperature and precipitation in the city (both ints) 75 | 76 | """ 77 | # Put your tool logic here 78 | return { 79 | 'temperature': 25, 80 | 'precipitation': 12 81 | } 82 | 83 | 84 | receiver = agora.Receiver.make_default(toolformer, tools=[weather_db]) 85 | 86 | server = agora.ReceiverServer(receiver) 87 | server.run(port=5000) 88 | ``` 89 | 90 | See [Getting Started](./docs/getting-started.md) for a more complete overview. 91 | 92 | ## Contributing 93 | 94 | If you want to contribute or stay up to date with development, join our [Discord](https://discord.gg/MXmfhwQ4FB) to find out more! 
95 | -------------------------------------------------------------------------------- /agora/__init__.py: -------------------------------------------------------------------------------- 1 | import agora.common as common 2 | import agora.common.core as core 3 | import agora.common.errors as errors 4 | import agora.common.executor as executor 5 | import agora.common.function_schema as function_schema 6 | import agora.common.interpreters as interpreters 7 | import agora.common.memory as memory 8 | import agora.common.storage as storage 9 | import agora.common.toolformers as toolformers 10 | import agora.receiver as receiver 11 | import agora.sender as sender 12 | import agora.utils as utils 13 | from agora.common.core import Conversation, Protocol, Suitability 14 | from agora.common.toolformers.base import Tool, Toolformer, ToolLike 15 | from agora.receiver import Receiver, ReceiverMemory, ReceiverServer 16 | from agora.sender import Sender, SenderMemory, TaskSchemaGenerator 17 | from agora.sender.task_schema import TaskSchema, TaskSchemaLike 18 | -------------------------------------------------------------------------------- /agora/common/__init__.py: -------------------------------------------------------------------------------- 1 | import agora.common.core as core 2 | import agora.common.errors as errors 3 | import agora.common.executor as executor 4 | import agora.common.function_schema as function_schema 5 | import agora.common.interpreters as interpreters 6 | import agora.common.memory as memory 7 | import agora.common.storage as storage 8 | import agora.common.toolformers as toolformers 9 | -------------------------------------------------------------------------------- /agora/common/core.py: -------------------------------------------------------------------------------- 1 | from abc import ABC, abstractmethod 2 | from enum import Enum 3 | from types import TracebackType 4 | from typing import Any, Dict, List, Optional 5 | 6 | from agora.utils import 
class Suitability(str, Enum):
    """Possible verdicts on whether a protocol fits a given task."""

    ADEQUATE = "adequate"
    INADEQUATE = "inadequate"
    PROBABLY_ADEQUATE = "probably_adequate"
    PROBABLY_INADEQUATE = "probably_inadequate"
    UNKNOWN = "unknown"


class Conversation(ABC):
    """Abstract base class for a message-exchange session.

    Subclasses must implement ``__call__``. The class doubles as a context
    manager: leaving the ``with`` block calls :meth:`close`.
    """

    @abstractmethod
    def __call__(self, message: str, print_output: bool = True) -> Any:
        """Process one message and return the reply.

        Args:
            message (str): The message to process.
            print_output (bool): Whether to print the response.

        Returns:
            Any: The response generated by processing the message.
        """
        pass

    def close(self) -> None:
        """Release any resources held by the conversation (no-op by default).

        Returns:
            None
        """
        pass

    def __enter__(self) -> "Conversation":
        """Enter the context manager.

        Returns:
            Conversation: This conversation instance.
        """
        return self

    def __exit__(
        self,
        exc_type: Optional[type],
        exc_value: Optional[BaseException],
        traceback: Optional[TracebackType],
    ) -> None:
        """Exit the context manager, closing the conversation.

        Args:
            exc_type (Optional[type]): The exception type if an error occurred.
            exc_value (Optional[BaseException]): The exception instance if raised.
            traceback (Optional[TracebackType]): The traceback object.

        Returns:
            None
        """
        self.close()


class Protocol:
    """A protocol document together with its sources and metadata."""

    def __init__(
        self,
        protocol_document: str,
        sources: List[str],
        metadata: Optional[Dict[str, str]],
    ) -> None:
        """Initializes a Protocol.

        Args:
            protocol_document (str): The document detailing the protocol.
            sources (List[str]): Sources where the protocol is referenced.
            metadata (Optional[Dict[str, str]]): Additional metadata; when None,
                it is extracted from the document itself.
        """
        self.protocol_document = protocol_document
        self.sources = sources
        # Fall back to parsing the document when no metadata was supplied.
        self.metadata = (
            extract_metadata(protocol_document) if metadata is None else metadata
        )

    @property
    def hash(self) -> str:
        """str: Hash of the protocol document (recomputed on each access)."""
        return compute_hash(self.protocol_document)

    def __str__(self) -> str:
        """Returns a human-readable dump of hash, sources, metadata and document.

        Returns:
            str: The string representation.
        """
        return f"Protocol {self.hash}\nSources: {self.sources}\nMetadata: {self.metadata}\n\n{self.protocol_document}\n\n"
class ProtocolError(Exception):
    """Base class for all protocol-related errors."""

    def __init__(self, message: str = "") -> None:
        """Create the error.

        Args:
            message (str, optional): Human-readable description. Defaults to "".
        """
        super().__init__(message)


class ExecutionError(Exception):
    """Raised when the internal execution of routines or toolformers fails."""

    def __init__(self, message: str = "") -> None:
        """Create the error.

        Args:
            message (str, optional): Human-readable description. Defaults to "".
        """
        super().__init__(message)


class StorageError(Exception):
    """Raised for storage-related issues."""

    def __init__(self, message: str = "") -> None:
        """Create the error.

        Args:
            message (str, optional): Human-readable description. Defaults to "".
        """
        super().__init__(message)


class SchemaError(Exception):
    """Raised when schema validation fails."""

    def __init__(self, message: str = "") -> None:
        """Create the error.

        Args:
            message (str, optional): Human-readable description. Defaults to "".
        """
        super().__init__(message)


class ProtocolRejectedError(ProtocolError):
    """Raised when the counterparty rejects a protocol."""

    def __init__(self, message: str = "") -> None:
        """Create the error.

        Args:
            message (str, optional): Human-readable description. An empty
                message is replaced by 'Protocol rejected'.
        """
        if not message:
            message = "Protocol rejected"
        super().__init__(message)


class ProtocolNotFoundError(ProtocolError):
    """Raised when a protocol cannot be found."""

    def __init__(self, message: str = "") -> None:
        """Create the error.

        Args:
            message (str, optional): Human-readable description. Defaults to "".
        """
        super().__init__(message)


class ProtocolRetrievalError(ProtocolError):
    """Raised when retrieving a protocol fails."""

    def __init__(self, message: str = "") -> None:
        """Create the error.

        Args:
            message (str, optional): Human-readable description. Defaults to "".
        """
        super().__init__(message)


class ProtocolTransportError(ProtocolError):
    """Raised for transport-level protocol failures."""

    def __init__(self, message: str = "") -> None:
        """Create the error.

        Args:
            message (str, optional): Human-readable description. Defaults to "".
        """
        super().__init__(message)
# FIX: `import importlib` alone does not guarantee that the `importlib.util`
# submodule is available as an attribute; it must be imported explicitly.
import importlib.util
from abc import abstractmethod
from typing import Any, List

from agora.common.interpreters.restricted import execute_restricted
from agora.common.toolformers.base import Conversation, Tool, ToolLike


class Executor:
    """Abstract base class for executors that run protocol implementations."""

    @abstractmethod
    def __call__(
        self,
        protocol_id: str,
        code: str,
        tools: List[ToolLike],
        input_args: list,
        input_kwargs: dict,
    ) -> Any:
        """Executes code with provided tools and arguments.

        Args:
            protocol_id (str): The protocol identifier.
            code (str): The code to execute.
            tools (List[ToolLike]): Available tools for the code.
            input_args (list): Positional arguments.
            input_kwargs (dict): Keyword arguments.

        Returns:
            Any: The result of the code execution.
        """
        pass

    def new_conversation(
        self, protocol_id: str, code: str, multiround: bool, tools: List[ToolLike]
    ) -> Conversation:
        """Starts a new conversation backed by this executor.

        Args:
            protocol_id (str): The protocol identifier.
            code (str): The code to execute.
            multiround (bool): Whether multiple rounds are allowed.
            tools (List[ToolLike]): Tools allowed for execution.

        Returns:
            Conversation: A conversation object for execution.
        """
        return ExecutorConversation(self, protocol_id, code, multiround, tools)


class UnsafeExecutor(Executor):
    """Executes code without sandboxing.

    The code is loaded as a regular Python module, so it can perform
    arbitrary operations; only use this with trusted code.
    """

    def __call__(
        self,
        protocol_id: str,
        code: str,
        tools: List[ToolLike],
        input_args: list,
        input_kwargs: dict,
    ) -> Any:
        """Executes code as a dynamically created module and calls its ``run``.

        Args:
            protocol_id (str): The protocol identifier.
            code (str): The code to execute.
            tools (List[ToolLike]): Tools available to the executed code.
            input_args (list): Positional arguments for ``run``.
            input_kwargs (dict): Keyword arguments for ``run``.

        Returns:
            Any: The return value of the module's ``run`` function.
        """
        tools = [Tool.from_toollike(tool) for tool in tools]
        # Build a valid Python module name from the protocol identifier.
        module_name = protocol_id.replace("-", "_").replace(".", "_").replace("/", "_")
        spec = importlib.util.spec_from_loader(module_name, loader=None)
        loaded_module = importlib.util.module_from_spec(spec)

        # Module-level statements in `code` run here. Tools are injected
        # afterwards, so by the time `run` is invoked a tool name overrides
        # any same-named definition made by the code itself.
        exec(code, loaded_module.__dict__)

        for tool in tools:
            loaded_module.__dict__[tool.name] = tool.func

        return loaded_module.run(*input_args, **input_kwargs)


class RestrictedExecutor(Executor):
    """Executes code in a restricted environment to ensure safety."""

    def __call__(
        self,
        protocol_id: str,
        code: str,
        tools: List[ToolLike],
        input_args: list,
        input_kwargs: dict,
    ) -> Any:
        """Executes the code using a restricted interpreter with limited globals.

        Args:
            protocol_id (str): The protocol identifier.
            code (str): The code to execute.
            tools (List[ToolLike]): Tools allowed in the environment.
            input_args (list): Positional arguments for the function.
            input_kwargs (dict): Keyword arguments for the function.

        Returns:
            Any: The result of the execution.
        """
        tools = [Tool.from_toollike(tool) for tool in tools]
        # Expose each tool's callable under its own name inside the sandbox.
        supported_globals = {tool.name: tool.func for tool in tools}
        return execute_restricted(
            code,
            supported_imports=["json", "math", "typing"],
            function_name="run",
            extra_globals=supported_globals,
            input_args=input_args,
            input_kwargs=input_kwargs,
        )


class ExecutorConversation(Conversation):
    """Conversation whose replies are produced by running protocol code."""

    def __init__(
        self,
        executor: Executor,
        protocol_id: str,
        code: str,
        multiround: bool,
        tools: List[ToolLike],
    ) -> None:
        """Initializes ExecutorConversation.

        Args:
            executor (Executor): The executor used for code execution.
            protocol_id (str): The identifier of the protocol.
            code (str): The code to be executed.
            multiround (bool): Whether multiple rounds are allowed.
            tools (List[ToolLike]): Tools allowed for execution.
        """
        self.executor = executor
        self.protocol_id = protocol_id
        self.code = code
        self.multiround = multiround
        self.tools = [Tool.from_toollike(tool) for tool in tools]
        # Multi-round conversations thread a memory dict through each call.
        self.memory = {} if multiround else None

    def __call__(self, message: str, print_output: bool = True) -> Any:
        """Processes a message by executing the implementation code.

        In multi-round mode the executed `run` is expected to return a
        (response, memory) pair; the memory is carried over to the next call.

        Args:
            message (str): The input message for the conversation.
            print_output (bool): Whether to print the result.

        Returns:
            Any: The output from the execution of the code.
        """
        if self.multiround:
            # Pass a copy of the memory so the executed code cannot mutate
            # our state in place; the updated memory is returned explicitly.
            response, self.memory = self.executor(
                self.protocol_id,
                self.code,
                self.tools,
                [message, dict(self.memory)],
                {},
            )
        else:
            response = self.executor(
                self.protocol_id, self.code, self.tools, [message], {}
            )

        if print_output:
            print(response)

        return response
159 | """ 160 | 161 | if self.multiround: 162 | response, self.memory = self.executor( 163 | self.protocol_id, 164 | self.code, 165 | self.tools, 166 | [message, dict(self.memory)], 167 | {}, 168 | ) 169 | else: 170 | response = self.executor( 171 | self.protocol_id, self.code, self.tools, [message], {} 172 | ) 173 | 174 | if print_output: 175 | print(response) 176 | 177 | return response 178 | -------------------------------------------------------------------------------- /agora/common/function_schema.py: -------------------------------------------------------------------------------- 1 | import ast 2 | import copy 3 | import functools 4 | import inspect 5 | import re 6 | import types 7 | from typing import Callable, Dict, Optional, Tuple 8 | 9 | import langchain.tools.base 10 | 11 | DEFAULT_KNOWN_TYPES = { 12 | "int": int, 13 | "str": str, 14 | "bool": bool, 15 | "float": float, 16 | "list": list, 17 | "dict": dict, 18 | } 19 | 20 | PYTHON_TYPE_TO_JSON_SCHEMA_TYPE = { 21 | int: "integer", 22 | str: "string", 23 | bool: "boolean", 24 | float: "number", 25 | list: "array", 26 | dict: "object", 27 | } 28 | 29 | 30 | def copy_func(func: Callable) -> Callable: 31 | """Create a deep copy of a function. 32 | 33 | Args: 34 | func (Callable): The function to be copied. 35 | 36 | Returns: 37 | Callable: A new function that is a deep copy of the original. 38 | """ 39 | return types.FunctionType( 40 | func.__code__, # Code object 41 | copy.copy(func.__globals__), # Global variables 42 | name=func.__name__, 43 | argdefs=copy.copy(func.__defaults__), # Default arguments 44 | closure=copy.copy(func.__closure__), # Closure variables 45 | ) 46 | 47 | 48 | def add_annotations_from_docstring( 49 | func: Callable, known_types: dict = DEFAULT_KNOWN_TYPES 50 | ) -> Callable: 51 | """Add annotations derived from Google-style docstrings to the given function. 52 | 53 | Args: 54 | func (Callable): The function to be processed. 
55 | known_types (dict, optional): A dictionary mapping type names to Python types. 56 | 57 | Returns: 58 | Callable: The function with updated annotations. 59 | """ 60 | known_types = known_types.copy() 61 | 62 | # Get the source code of the function 63 | source = inspect.getsource(func) 64 | 65 | # Count the left whitespace of the first line 66 | left_whitespace = len(source) - len(source.lstrip()) 67 | 68 | # Dedent the source code 69 | source = "\n".join([line[left_whitespace:] for line in source.split("\n")]) 70 | 71 | # Parse it into an AST 72 | tree = ast.parse(source) 73 | 74 | func_def = None 75 | for node in tree.body: 76 | if isinstance(node, ast.FunctionDef) and node.name == func.__name__: 77 | func_def = node 78 | break 79 | 80 | if func_def is None: 81 | raise ValueError(f"Could not find function definition for {func.__name__}") 82 | 83 | # Extract the docstring 84 | docstring = ast.get_docstring(func_def) 85 | if not docstring: 86 | return func # No docstring, nothing to do 87 | 88 | # Parse the docstring for Google-style Args and Returns 89 | # Example format: 90 | # Args: 91 | # param1 (int): Description 92 | # param2 (str): Description 93 | # 94 | # Returns: 95 | # bool: Description 96 | # 97 | args_pattern = r"^\s*(\w+)\s*\(([^)]+)\):" 98 | 99 | lines = docstring.split("\n") 100 | arg_section_found = False 101 | return_section_found = False 102 | doc_args = {} 103 | doc_return_type = None 104 | 105 | for i, line in enumerate(lines): 106 | # Detect start of Args section 107 | if line.strip().lower().startswith("args:"): 108 | arg_section_found = True 109 | continue 110 | # Detect start of Returns section 111 | if line.strip().lower().startswith("returns:"): 112 | return_section_found = True 113 | arg_section_found = False # end args 114 | continue 115 | 116 | if arg_section_found: 117 | # If we reach a blank line or next section, stop args capture 118 | if not line.strip() or line.strip().lower().startswith("returns:"): 119 | arg_section_found = 
False 120 | else: 121 | match = re.match(args_pattern, line) 122 | if match: 123 | param_name, param_type = match.groups() 124 | doc_args[param_name] = param_type.strip() 125 | 126 | if return_section_found: 127 | # Extract the return line 128 | stripped = line.strip() 129 | if stripped: 130 | # If there's a colon, assume the format "Type: description" 131 | colon_pos = stripped.find(":") 132 | if colon_pos != -1: 133 | doc_return_type = stripped[:colon_pos].strip() 134 | else: 135 | # If no colon, assume entire line is the type, but only if the type is among known types 136 | if stripped in known_types: 137 | doc_return_type = stripped 138 | return_section_found = False 139 | 140 | # Update annotations 141 | current_annotations = dict(func.__annotations__) 142 | func_signature = inspect.signature(func) 143 | 144 | def resolve_type(type_str): 145 | # Return a Python type if known, otherwise leave as a string 146 | return known_types.get(type_str, type_str) 147 | 148 | # Update parameter annotations 149 | for param in func_signature.parameters.values(): 150 | if param.name in doc_args and param.name not in current_annotations: 151 | ann_type = resolve_type(doc_args[param.name]) 152 | current_annotations[param.name] = ann_type 153 | 154 | # Update return annotation if missing 155 | if doc_return_type and "return" not in current_annotations: 156 | ann_return_type = resolve_type(doc_return_type) 157 | current_annotations["return"] = ann_return_type 158 | 159 | wrapper = copy_func(func) 160 | wrapper.__annotations__ = current_annotations 161 | 162 | return wrapper 163 | 164 | 165 | def schema_from_function( 166 | func: Callable, strict: bool = False, known_types: dict = DEFAULT_KNOWN_TYPES 167 | ) -> dict: 168 | """Create an OpenAI-like JSON schema from a function's signature and docstring. 169 | 170 | Args: 171 | func (Callable): The function to generate the schema from. 172 | strict (bool, optional): Enforce strict parsing and annotation requirements. 
173 | known_types (dict, optional): A dictionary mapping type names to Python types. 174 | 175 | Returns: 176 | dict: A JSON schema representing the function's parameters and return. 177 | """ 178 | known_types = known_types.copy() 179 | func_name = func.__name__ 180 | 181 | if not strict: 182 | # Try to add annotations from docstring 183 | func = add_annotations_from_docstring(func) 184 | 185 | copied_function = copy_func(func) 186 | copied_function.__annotations__ = func.__annotations__ 187 | copied_function.__doc__ = ( 188 | copied_function.__doc__.replace("Arguments:\n", "Args:\n") 189 | .replace("Parameters:\n", "Args:\n") 190 | .replace("Output:\n", "Returns:\n") 191 | ) 192 | 193 | parsed_schema = langchain.tools.base.create_schema_from_function( 194 | func_name, copied_function, parse_docstring=True 195 | ).model_json_schema() 196 | 197 | parsed_schema = { 198 | "name": func_name, 199 | "description": parsed_schema["description"], 200 | "input_schema": { 201 | "type": "object", 202 | "properties": parsed_schema["properties"], 203 | "required": parsed_schema["required"], 204 | }, 205 | } 206 | 207 | if "Returns:" in func.__doc__: 208 | returns = func.__doc__.split("Returns:")[1].strip() 209 | 210 | if returns: 211 | # If there's a colon, assume the format "Type: description" 212 | colon_pos = returns.find(":") 213 | if colon_pos != -1: 214 | return_description = returns[colon_pos + 1 :].strip() 215 | else: 216 | # If no colon, assume entire line is the description, but only if it's not in the known types 217 | if returns not in known_types: 218 | return_description = returns 219 | 220 | try: 221 | if "return" not in func.__annotations__ and strict: 222 | raise ValueError( 223 | f"Return type not found in annotations for function {func_name}" 224 | ) 225 | 226 | return_type = func.__annotations__.get("return", str) 227 | 228 | if return_type not in PYTHON_TYPE_TO_JSON_SCHEMA_TYPE: 229 | raise ValueError( 230 | f"Return type {return_type} not supported in JSON 
schema" 231 | ) 232 | 233 | # TODO: Is it possible to parse dictionaries? 234 | parsed_schema["output_schema"] = { 235 | "type": PYTHON_TYPE_TO_JSON_SCHEMA_TYPE[return_type], 236 | "description": return_description, 237 | } 238 | except KeyError: 239 | pass 240 | 241 | return parsed_schema 242 | 243 | 244 | def generate_docstring( 245 | description: str, 246 | params: Optional[Dict[str, Tuple[Optional[type], Optional[str]]]], 247 | returns: Optional[Tuple[Optional[type], Optional[str]]], 248 | ) -> str: 249 | """ 250 | Generate a docstring from a description, parameters, and return type. 251 | 252 | Args: 253 | description (str): The description of the function. 254 | params (Optional[Dict[str, Tuple[Optional[type], Optional[str]]]): A mapping of parameter names to type/description tuples. 255 | returns (Optional[Tuple[Optional[type], Optional[str]]]): The return type and description. 256 | 257 | Returns: 258 | str: The generated docstring. 259 | """ 260 | docstring = description 261 | 262 | if params: 263 | docstring += "\n\nArgs:" 264 | for param_name, (param_type, param_description) in params.items(): 265 | docstring += f"\n {param_name}" 266 | 267 | if param_type is not None: 268 | docstring += f" ({param_type.__name__})" 269 | 270 | if param_description: 271 | docstring += f": {param_description}" 272 | 273 | if returns: 274 | return_type, return_description = returns 275 | docstring += f"\n\nReturns:\n " 276 | 277 | if return_type: 278 | docstring += f"{return_type.__name__}" 279 | if return_description: 280 | docstring += f": {return_description}" 281 | 282 | return docstring 283 | 284 | 285 | def set_params_and_annotations( 286 | name: str, 287 | docstring: str, 288 | params: Dict[str, Tuple[Optional[type], Optional[str]]], 289 | return_type: Optional[type], 290 | ) -> Callable: 291 | """Decorator to set parameters and annotations on a function based on the given schema data. 292 | 293 | Args: 294 | name (str): The name of the function. 
def set_params_and_annotations(
    name: str,
    docstring: str,
    params: Dict[str, Tuple[Optional[type], Optional[str]]],
    return_type: Optional[type],
) -> Callable:
    """Decorator to set parameters and annotations on a function based on the given schema data.

    Args:
        name (str): The name of the function.
        docstring (str): The function's docstring.
        params (Dict[str, Tuple[Optional[type], Optional[str]]]): A mapping of parameter
            names to (type, description) tuples.
        return_type (Optional[type]): The function's return type.

    Returns:
        Callable: The wrapped function with updated signature and docstring.
    """

    def decorator(func: Callable):
        # Build parameters from the params dict. Note that each dict value is a
        # (type, description) tuple, so the annotation must be the first element
        # only (previously the whole tuple was used as the annotation, which made
        # the reported signature disagree with __annotations__ below).
        new_params = [
            inspect.Parameter(
                param_name,
                inspect.Parameter.POSITIONAL_OR_KEYWORD,
                annotation=param_type,
            )
            for param_name, (param_type, _) in params.items()
        ]

        # Create a new signature with updated parameters and return annotation.
        new_sig = inspect.Signature(
            parameters=new_params, return_annotation=return_type
        )

        # Thin pass-through wrapper; only the metadata changes.
        @functools.wraps(func)
        def wrapper(*args, **kwargs):
            return func(*args, **kwargs)

        # Override the metadata copied by functools.wraps with the schema data.
        wrapper.__name__ = name
        wrapper.__signature__ = new_sig
        wrapper.__annotations__.update({k: v[0] for k, v in params.items()})
        wrapper.__annotations__["return"] = return_type
        wrapper.__doc__ = docstring

        return wrapper

    return decorator
def execute_restricted(
    code: str,
    extra_globals: Optional[dict] = None,
    supported_imports: Optional[List[str]] = None,
    function_name: str = "run",
    input_args: Optional[List[Any]] = None,
    input_kwargs: Optional[dict] = None,
) -> Any:
    """Executes restricted code with limited globals and supported imports.

    Args:
        code (str): The code to execute.
        extra_globals (Optional[dict]): Additional global variables.
        supported_imports (Optional[List[str]]): List of allowed modules.
        function_name (str): The name of the function to execute.
        input_args (Optional[List[Any]]): Positional arguments for the function.
        input_kwargs (Optional[dict]): Keyword arguments for the function.

    Returns:
        Any: The result of the executed function.

    Raises:
        ExecutionError: If an unsupported import is attempted or multiple results are registered.
    """
    extra_globals = extra_globals or {}
    supported_imports = supported_imports or []
    input_args = input_args or []
    input_kwargs = input_kwargs or {}

    # Randomized hook names make it harder for the untrusted code to guess and
    # call (or shadow) the host-side helper functions injected below.
    register_function_name = "register_" + str(random.randint(0, 1000000))
    get_parameters_name = "get_parameters_" + str(random.randint(0, 1000000))

    def get_parameters():
        # Exposed inside the sandbox so the appended driver code can fetch the
        # call arguments without embedding them in the source string.
        return input_args, input_kwargs

    # Append a small driver that calls the target function and hands its result
    # back to the host through the randomized register hook.
    code += f"""
input_args, input_kwargs = {get_parameters_name}()
{register_function_name}({function_name}(*input_args, **input_kwargs))"""

    restricted_code = compile_restricted(code, "", "exec")

    _SAFE_MODULES = frozenset(supported_imports)

    def _safe_import(name, *args, **kwargs):
        # Whitelist-based replacement for __import__: anything outside
        # supported_imports is rejected with an ExecutionError.
        if name not in _SAFE_MODULES:
            raise ExecutionError(f"Unsupported import {name!r}")
        return __import__(name, *args, **kwargs)

    result = None

    def register_result(x):
        nonlocal result

        # NOTE(review): this guard only detects a second registration when the
        # first registered result was not None; a target function that returns
        # None could be "registered" twice without raising — confirm whether
        # that matters for callers.
        if result is not None:
            raise ExecutionError("Only one result can be registered")

        result = x

    restricted_globals = {
        "__builtins__": {
            **safe_builtins,
            **limited_builtins,
            **utility_builtins,
            "__import__": _safe_import,
        },
        # RestrictedPython compiles iteration, unpacking, printing, attribute
        # and item access into calls to these guard hooks.
        "_iter_unpack_sequence_": guarded_iter_unpack_sequence,
        "_unpack_sequence_": guarded_unpack_sequence,
        "_getiter_": iter,
        "_print_": print,
        "_apply_": lambda f, *args, **kwargs: f(*args, **kwargs),
        "_getitem_": lambda obj, key: obj[key],
        "_write_": full_write_guard,
        # Host-side helpers under their randomized names.
        get_parameters_name: get_parameters,
        register_function_name: register_result,
        "map": map,
        "list": list,
        "dict": dict,
        **extra_globals,
    }
    exec(restricted_code, restricted_globals)

    return result
class ProtocolMemory:
    """Registry of known protocols, holding their documents, implementations and metadata."""

    def __init__(self, storage: Storage, **kwargs):
        """
        Initialize the registry on top of a storage backend.

        Args:
            storage (Storage): The storage backend used to persist protocols.
            **kwargs: Extra top-level storage entries, with their default values.
        """
        self.storage = storage

        self.storage.load_memory()

        if "protocols" not in self.storage:
            self.storage["protocols"] = {}

        for entry_key, default_value in kwargs.items():
            if entry_key not in self.storage:
                self.storage[entry_key] = default_value

        self.storage.save_memory()

    def protocol_ids(self) -> List[str]:
        """
        Return the identifiers of all registered protocols.

        Returns:
            List[str]: All registered protocol identifiers.
        """
        return [pid for pid in self.storage["protocols"]]

    def is_known(self, protocol_id: str) -> bool:
        """
        Check whether a protocol identifier has been registered.

        Args:
            protocol_id (str): The protocol identifier to check.

        Returns:
            bool: True if the protocol is registered, False otherwise.
        """
        return protocol_id in self.storage["protocols"]

    def register_new_protocol(
        self,
        protocol_id: str,
        protocol_document: str,
        sources: List[str],
        metadata: dict,
        implementation: Optional[str] = None,
        **kwargs,
    ):
        """
        Register a protocol that is not yet in memory.

        Args:
            protocol_id (str): The identifier of the new protocol.
            protocol_document (str): The document describing the protocol.
            sources (List[str]): Sources where the protocol is referenced.
            metadata (dict): Additional metadata related to the protocol.
            implementation (Optional[str], optional): Implementation code, if any. Defaults to None.
            **kwargs: Extra fields stored alongside the protocol.

        Raises:
            StorageError: If the protocol is already registered.
        """
        if self.is_known(protocol_id):
            raise StorageError(f"Protocol {protocol_id} already in memory")

        entry = {
            "document": protocol_document,
            "sources": sources,
            "metadata": metadata,
            "implementation": implementation,
        }
        entry.update(kwargs)

        self.storage["protocols"][protocol_id] = entry
        self.storage.save_memory()

    def get_protocol(self, protocol_id: str) -> Optional[Protocol]:
        """
        Retrieve the Protocol object for an identifier.

        Args:
            protocol_id (str): The identifier of the protocol to retrieve.

        Returns:
            Optional[Protocol]: The Protocol object if found, else None.
        """
        if "protocols" not in self.storage:
            return None

        registry = self.storage["protocols"]
        if protocol_id not in registry:
            return None

        entry = registry[protocol_id]
        return Protocol(entry["document"], entry["sources"], entry["metadata"])

    def get_implementation(self, protocol_id: str) -> Optional[str]:
        """
        Return the implementation associated with a protocol.

        Args:
            protocol_id (str): The identifier of the protocol.

        Returns:
            Optional[str]: The implementation code if available, else None.
        """
        if not self.is_known(protocol_id):
            return None
        return self.storage["protocols"][protocol_id]["implementation"]

    def register_implementation(self, protocol_id: str, implementation: str):
        """
        Attach an implementation to a registered protocol.

        Args:
            protocol_id (str): The identifier of the protocol.
            implementation (str): The implementation code to associate with it.

        Raises:
            StorageError: If the protocol is not registered.
        """
        if not self.is_known(protocol_id):
            raise StorageError(f"Protocol {protocol_id} not in memory")
        self.storage["protocols"][protocol_id]["implementation"] = implementation
        self.storage.save_memory()

    def get_extra_field(self, protocol_id: str, field: str, default=None):
        """
        Return an extra field stored with a protocol.

        Args:
            protocol_id (str): The identifier of the protocol.
            field (str): The field name to retrieve.
            default: Value returned when the protocol or field is missing. Defaults to None.

        Returns:
            Any: The value of the field, or the default if not found.
        """
        if not self.is_known(protocol_id):
            return default
        return self.storage["protocols"][protocol_id].get(field, default)

    def set_extra_field(self, protocol_id: str, field: str, value):
        """
        Set an extra field on a registered protocol.

        Args:
            protocol_id (str): The identifier of the protocol.
            field (str): The field name to set.
            value: The value to assign to the field.

        Raises:
            StorageError: If the protocol is not registered.
        """
        if not self.is_known(protocol_id):
            raise StorageError(f"Protocol {protocol_id} not in memory")
        self.storage["protocols"][protocol_id][field] = value
        self.storage.save_memory()
class Storage(ABC, MutableMapping):
    """Abstract key-value store interface.

    Combines ABC machinery with the MutableMapping protocol, so concrete
    subclasses behave like dicts with explicit persistence hooks.
    """

    @abstractmethod
    def save_memory(self) -> None:
        """Persists the current state to the underlying storage mechanism."""
        pass

    @abstractmethod
    def load_memory(self) -> None:
        """Loads state from the underlying storage mechanism."""
        pass


class JSONStorage(Storage):
    """Key-value storage persisted to a single JSON file on disk."""

    def __init__(self, storage_path: str, autosave: bool = True) -> None:
        """Creates a JSON-backed store, loading (or creating) the file immediately.

        Args:
            storage_path (str): Path to the JSON file.
            autosave (bool): If True, every mutation is written to disk at once.
        """
        self.storage_path = Path(storage_path)
        self.data = {}
        self.load_memory()

        self.autosave = autosave

    def save_memory(self) -> None:
        """Writes the in-memory dict to the JSON file, creating parent dirs if needed."""
        parent_dir = self.storage_path.parent
        if not parent_dir.exists():
            parent_dir.mkdir(parents=True)

        with self.storage_path.open("w") as handle:
            json.dump(self.data, handle, indent=2)

    def load_memory(self) -> None:
        """Reads the JSON file into memory, creating an empty file if absent."""
        if not self.storage_path.exists():
            self.save_memory()
        with self.storage_path.open("r") as handle:
            self.data = json.load(handle)

    def __getitem__(self, key: str) -> Any:
        """Returns the value for a key, or None when the key is absent.

        NOTE(review): returning None instead of raising KeyError deviates from
        the MutableMapping contract; inherited helpers that rely on KeyError
        (e.g. pop/setdefault) will not behave as usual — confirm intentional.

        Args:
            key (str): Key to retrieve.

        Returns:
            Any: The stored value or None if not found.
        """
        return self.data.get(key)

    def __setitem__(self, key: str, value: Any) -> None:
        """Stores a value under a key, persisting immediately when autosave is on.

        Args:
            key (str): Key to modify.
            value (Any): The data to store.
        """
        self.data[key] = value

        if self.autosave:
            self.save_memory()

    def __delitem__(self, key: str) -> None:
        """Removes a key, persisting immediately when autosave is on.

        Args:
            key (str): Key to delete.
        """
        del self.data[key]

        if self.autosave:
            self.save_memory()

    def __iter__(self) -> Iterator[str]:
        """Iterates over the stored keys.

        Returns:
            Iterator[str]: An iterator over the keys.
        """
        return iter(self.data)

    def __len__(self) -> int:
        """Returns the number of stored entries.

        Returns:
            int: The count of items.
        """
        return len(self.data)

    def __contains__(self, key: object) -> bool:
        """Reports whether a key is present.

        Args:
            key (object): Key to check.

        Returns:
            bool: True if the key exists, False otherwise.
        """
        return key in self.data

    def __str__(self) -> str:
        """Returns a short description including the backing file path.

        Returns:
            str: String describing the JSONStorage path.
        """
        return f"JSONStorage({self.storage_path})"
class Tool:
    """Represents a tool with a name, description, argument schema, return schema, and a callable function."""

    def __init__(
        self,
        name: str,
        description: str,
        args_schema: dict,
        return_schema: dict,
        func: Callable,
    ) -> None:
        """Initializes the Tool.

        Args:
            name (str): The name of the tool.
            description (str): A brief description of the tool.
            args_schema (dict): JSON schema for input arguments.
            return_schema (dict): JSON schema for the return values.
            func (Callable): The function implementing the tool.
        """
        self.name = name
        self.description = description
        self.args_schema = args_schema
        self.return_schema = return_schema
        self.func = func

    @property
    def openai_schema(self) -> dict:
        """Returns the OpenAI-compatible schema of the tool.

        Returns:
            dict: The OpenAI function-calling schema (type/function wrapper).
        """
        return {
            "type": "function",
            "function": {
                "name": self.name,
                "description": self.description,
                "parameters": self.args_schema,
            },
        }

    @staticmethod
    def from_function(
        func: Callable,
        name: str = None,
        description: str = None,
        args_schema: dict = None,
        return_schema: dict = None,
        infer_schema: bool = True,
        inference_known_types: dict = DEFAULT_KNOWN_TYPES,
        strict_inference: bool = False,
    ) -> "Tool":
        """Create a Tool instance from a given function, optionally inferring schemas.

        Args:
            func (Callable): The function to create a Tool from.
            name (str, optional): The name of the tool. Defaults to the function's name if None.
            description (str, optional): A description of the tool. Defaults to the function's docstring or schema description.
            args_schema (dict, optional): JSON schema for input arguments. If None and infer_schema is True, schema is inferred. Defaults to None.
            return_schema (dict, optional): JSON schema for return values. If None and infer_schema is True, schema is inferred. Defaults to None.
            infer_schema (bool, optional): Whether to infer schemas automatically. Defaults to True.
            inference_known_types (dict, optional): Known types for schema inference. Defaults to DEFAULT_KNOWN_TYPES.
            strict_inference (bool, optional): Whether to enforce strict schema inference. Defaults to False.

        Returns:
            Tool: A new Tool instance based on the provided function.

        Raises:
            ValueError: If required parameters are missing when infer_schema is False.
        """
        if infer_schema:
            schema = schema_from_function(
                func, known_types=inference_known_types, strict=strict_inference
            )

            return Tool(
                name=name or func.__name__,
                description=description or schema.get("description", func.__doc__),
                args_schema=args_schema or schema.get("input_schema", {}),
                # Bug fix: honor an explicitly provided return_schema, mirroring
                # the args_schema handling above (it used to be silently
                # ignored whenever infer_schema was True, contradicting the
                # documented behavior of this parameter).
                return_schema=return_schema or schema.get("output_schema", {}),
                func=func,
            )

        # infer_schema is False: every schema component must be given explicitly.
        if not name:
            raise ValueError("name must be provided if infer_schema is False")
        if not description:
            raise ValueError(
                "description must be provided if infer_schema is False"
            )
        if not args_schema:
            raise ValueError(
                "args_schema must be provided if infer_schema is False"
            )
        if not return_schema:
            raise ValueError(
                "return_schema must be provided if infer_schema is False"
            )

        return Tool(
            name=name,
            description=description,
            args_schema=args_schema,
            return_schema=return_schema,
            func=func,
        )

    @staticmethod
    def from_toollike(
        tool_like: "ToolLike",
        name: Optional[str] = None,
        description: Optional[str] = None,
        args_schema: Optional[dict] = None,
        return_schema: Optional[dict] = None,
        inference_known_types: Optional[dict] = DEFAULT_KNOWN_TYPES,
        strict_inference: Optional[bool] = None,
    ) -> "Tool":
        """Convert a Tool-like object into a Tool instance.

        Args:
            tool_like (ToolLike): The Tool-like object to convert.
            name (Optional[str], optional): The name of the tool. Defaults to None.
            description (Optional[str], optional): A description of the tool. Defaults to None.
            args_schema (Optional[dict], optional): JSON schema for input arguments. Defaults to None.
            return_schema (Optional[dict], optional): JSON schema for return values. Defaults to None.
            inference_known_types (Optional[dict], optional): Known types for schema inference. Defaults to DEFAULT_KNOWN_TYPES.
            strict_inference (Optional[bool], optional): Whether to enforce strict schema inference.
                Defaults to None, which from_function treats the same as False.

        Returns:
            Tool: A new Tool instance based on the Tool-like object.

        Raises:
            ValueError: If the Tool-like object is neither a Tool nor a callable.
        """
        if isinstance(tool_like, Tool):
            return tool_like
        elif callable(tool_like):
            return Tool.from_function(
                tool_like,
                name=name,
                description=description,
                args_schema=args_schema,
                return_schema=return_schema,
                infer_schema=True,
                inference_known_types=inference_known_types,
                strict_inference=strict_inference,
            )
        else:
            raise ValueError("Tool-like object must be either a Tool or a callable")

    @property
    def _args_schema_parsed(self) -> dict:
        """Parse the argument schema into a structured format.

        Returns:
            dict: A dictionary mapping argument names to (python type, description) tuples.
        """
        inverted_types = {v: k for k, v in PYTHON_TYPE_TO_JSON_SCHEMA_TYPE.items()}
        params = {}

        for arg_name, arg_schema in self.args_schema["properties"].items():
            arg_type = inverted_types[arg_schema["type"]]
            arg_description = arg_schema.get("description", "")

            # For object-typed arguments, append the full JSON schema to the
            # description so downstream consumers see the nested structure.
            if arg_schema["type"] == "object":
                arg_description = arg_description.strip()

                if arg_description and not arg_description.endswith("."):
                    arg_description += "."

                arg_description += " Schema:" + json.dumps(arg_schema)
                arg_description = arg_description.strip()

            params[arg_name] = (arg_type, arg_description)

        return params

    @property
    def _return_schema_parsed(self) -> Optional[tuple]:
        """Parse the return schema into a structured format.

        Returns:
            Optional[tuple]: A (python type, description) tuple, or None if no return schema is present.
        """
        inverted_types = {v: k for k, v in PYTHON_TYPE_TO_JSON_SCHEMA_TYPE.items()}
        if self.return_schema:
            return_type = inverted_types[self.return_schema["type"]]

            return_description = self.return_schema.get("description", "")

            # Same treatment as object-typed arguments: embed the nested schema.
            if self.return_schema["type"] == "object":
                return_description = return_description.strip()
                if return_description and not return_description.endswith("."):
                    return_description += "."

                return_description += " Schema: " + json.dumps(self.return_schema)
                return_description = return_description.strip()

            return (return_type, return_description)

        return None

    @property
    def docstring(self) -> str:
        """Generate a docstring for the tool based on its description and schemas.

        Returns:
            str: The generated docstring.
        """
        return generate_docstring(
            self.description, self._args_schema_parsed, self._return_schema_parsed
        )

    def __str__(self) -> str:
        """Return the string representation of the Tool.

        Returns:
            str: The string representation.
        """
        return f"Tool({self.name})\n" + self.docstring

    def as_documented_python(self) -> str:
        """Export the tool as a documented Python function.

        NOTE(review): the emitted text places the docstring unquoted and
        unindented under the def line, so it is prompt-oriented pseudo-Python
        rather than importable code — confirm consumers expect this format.

        Returns:
            str: The Python function code as a string with documentation.
        """
        inverted_types = {v: k for k, v in PYTHON_TYPE_TO_JSON_SCHEMA_TYPE.items()}

        s = f"def {self.name}("

        signature_args = []

        for arg_name, arg_schema in self.args_schema["properties"].items():
            arg_type = inverted_types[arg_schema["type"]].__name__
            signature_args.append(f"{arg_name}: {arg_type}")

        s += ", ".join(signature_args)
        s += "):\n"

        s += self.docstring

        return s

    def as_annotated_function(self) -> Callable:
        """Return the tool as an annotated function.

        Returns:
            Callable: The underlying function with signature, annotations and
                docstring rebuilt from the tool's schemas.
        """
        return_schema_parsed = self._return_schema_parsed

        if return_schema_parsed:
            return_type = return_schema_parsed[0]
        else:
            return_type = None

        return set_params_and_annotations(
            self.name, self.docstring, self._args_schema_parsed, return_type
        )(self.func)
from typing import TYPE_CHECKING, List, Optional

from agora.common.toolformers.base import Conversation, Tool, Toolformer, ToolLike

# The camel modules are imported twice on purpose: once under TYPE_CHECKING so
# the string annotations below resolve for type checkers, and once at runtime
# inside try/except because camel-ai is an optional dependency.
if TYPE_CHECKING:
    import camel.agents
    import camel.messages
    import camel.models
    import camel.toolkits.function_tool
    import camel.types

try:
    import camel.agents
    import camel.messages
    import camel.models
    import camel.toolkits.function_tool
    import camel.types

    CAMEL_IMPORT_ERROR = None
except ImportError as e:
    # Stored and re-raised lazily so that merely importing this module does not
    # fail when camel-ai is absent; only instantiating the classes does.
    CAMEL_IMPORT_ERROR = e


class CamelConversation(Conversation):
    """Handles conversations using the Camel AI Toolformer."""

    def __init__(
        self,
        toolformer: "CamelToolformer",
        agent: "camel.agents.ChatAgent",
        category: Optional[str] = None,
    ) -> None:
        """Initialize the CamelConversation with a Toolformer and ChatAgent.

        Args:
            toolformer (CamelToolformer): The CamelToolformer instance managing the conversation.
            agent (ChatAgent): The ChatAgent handling the conversation logic.
            category (Optional[str], optional): The category of the conversation. Defaults to None.

        Raises:
            ImportError: If camel-ai is not available.
        """

        if CAMEL_IMPORT_ERROR:
            raise CAMEL_IMPORT_ERROR

        self.toolformer = toolformer
        self.agent = agent
        self.category = category

    def __call__(self, message: str, print_output: bool = True) -> str:
        """Process a message within the conversation and return the response.

        Args:
            message (str): The message to process.
            print_output (bool, optional): Whether to print the response. Defaults to True.

        Returns:
            str: The response from the conversation.
        """
        formatted_message = camel.messages.BaseMessage.make_user_message(
            "user", message
        )

        response = self.agent.step(formatted_message)

        reply = response.msg.content

        if print_output:
            print(reply)

        return reply


class CamelToolformer(Toolformer):
    """Toolformer implementation using the Camel AI framework."""

    def __init__(
        self,
        model_platform: "camel.types.ModelPlatformType",
        model_type: "camel.types.ModelType",
        model_config_dict: Optional[dict] = None,
        name: Optional[str] = None,
    ) -> None:
        """Initialize the CamelToolformer with model details.

        Args:
            model_platform (ModelPlatformType): The platform of the model (e.g. "openai").
            model_type (ModelType): The type of the model (e.g. "gpt-4o").
            model_config_dict (dict, optional): Configuration dictionary for the model. Defaults to None (empty dict).
            name (Optional[str], optional): Optional name for the Toolformer. Defaults to None.

        Raises:
            ImportError: If camel-ai is not available.
        """

        if CAMEL_IMPORT_ERROR:
            raise CAMEL_IMPORT_ERROR

        if model_config_dict is None:
            model_config_dict = {}

        self.model_platform = model_platform
        self.model_type = model_type
        self.model_config_dict = model_config_dict
        self._name = name

    @property
    def name(self) -> str:
        """Get the name of the Toolformer.

        Returns:
            str: The explicit name if one was given, otherwise a
                "<platform>_<model>" identifier derived from the enum values.
        """
        if self._name is None:
            return f"{self.model_platform.value}_{self.model_type.value}"
        else:
            return self._name

    def new_conversation(
        self, prompt: str, tools: List[ToolLike], category: Optional[str] = None
    ) -> Conversation:
        """Start a new conversation with the given prompt and tools.

        Args:
            prompt (str): The initial prompt for the conversation.
            tools (List[ToolLike]): A list of tools to be available in the conversation.
            category (Optional[str], optional): The category of the conversation. Defaults to None.

        Returns:
            Conversation: A Conversation instance managing the interaction.
        """
        model = camel.models.ModelFactory.create(
            model_platform=self.model_platform,
            model_type=self.model_type,
            # Copied so the factory cannot mutate this toolformer's stored config.
            model_config_dict=dict(self.model_config_dict),
        )

        tools = [Tool.from_toollike(tool) for tool in tools]

        agent = camel.agents.ChatAgent(
            model=model,
            system_message=camel.messages.BaseMessage.make_assistant_message(
                "system", prompt
            ),
            # Wrap each Tool's callable with its OpenAI schema for camel's tool API.
            tools=[
                camel.toolkits.function_tool.FunctionTool(
                    tool.func, openai_tool_schema=tool.openai_schema
                )
                for tool in tools
            ],
        )

        return CamelConversation(self, agent, category)
class LangChainConversation(Conversation):
    """Conversation backed by a compiled LangGraph ReAct agent."""

    def __init__(
        self, agent: CompiledGraph, messages: List[str], category: Optional[str] = None
    ) -> None:
        """Initializes a LangChainConversation instance.

        Args:
            agent (CompiledGraph): The compiled LangChain agent to process messages.
            messages (List[str]): The conversation history.
            category (Optional[str], optional): An optional category or tag for the conversation.
        """
        self.agent = agent
        self.messages = messages
        self.category = category

    def __call__(self, message: str, print_output: bool = True) -> str:
        """Sends a message to the conversation and returns the AI response.

        Args:
            message (str): The user message or query.
            print_output (bool, optional): Whether to print the AI response as it streams.

        Returns:
            str: The concatenated AI response.
        """
        self.messages.append(HumanMessage(content=message))
        final_message = ""

        # NOTE(review): with stream_mode="values" each chunk carries the full
        # message state, so AIMessage content already seen in an earlier chunk
        # may be appended again when the agent takes multiple steps — confirm
        # against LangGraph streaming semantics.
        for chunk in self.agent.stream(
            {"messages": self.messages}, stream_mode="values"
        ):
            # Renamed from "message" to stop shadowing the method parameter.
            for streamed_message in chunk["messages"]:
                if isinstance(streamed_message, AIMessage):
                    content = streamed_message.content
                    if isinstance(content, str):
                        final_message += content
                    else:
                        # Content can be a list of blocks; keep only textual chunks.
                        for content_chunk in content:
                            if isinstance(content_chunk, str):
                                if print_output:
                                    print(content_chunk, end="")
                                final_message += content_chunk

        if print_output:
            print()

        self.messages.append(AIMessage(content=final_message))

        return final_message


class LangChainToolformer(Toolformer):
    """Toolformer implementation on top of a LangChain chat model."""

    def __init__(self, model: BaseChatModel):
        """Initializes a LangChainToolformer.

        Args:
            model (BaseChatModel): The underlying language model for processing.
        """
        self.model = model

    def new_conversation(
        self, prompt: str, tools: List[ToolLike], category: Optional[str] = None
    ) -> Conversation:
        """Creates a new conversation using the provided prompt and tools.

        Args:
            prompt (str): The initial conversation prompt.
            tools (List[ToolLike]): Tools available to the conversation.
            category (Optional[str], optional): A category or tag for this conversation.

        Returns:
            Conversation: The conversation instance using the specified tools.
        """
        # Normalize Tool-like objects, then convert to LangChain tools via the
        # annotated-function bridge so schemas survive the conversion.
        tools = [Tool.from_toollike(tool) for tool in tools]
        tools = [function_to_tool(tool.as_annotated_function()) for tool in tools]
        agent_executor = create_react_agent(self.model, tools)

        return LangChainConversation(agent_executor, [SystemMessage(prompt)], category)
11 | - The implementation will be written by a programmer that does not have access to the negotiation process, so make sure the protocol is clear and unambiguous. 12 | - The implementation will receive a string and return a string, so structure your protocol accordingly. 13 | - The other party might have a different internal data schema or set of tools, so make sure that the protocol is flexible enough to accommodate that. 14 | - Keep the negotiation short: no need to repeat the same things over and over. 15 | - If the other party has proposed a protocol and you're good with it, there's no reason to keep negotiating or to repeat the protocol to the other party. 16 | - Do not restate parts of the protocols that have already been agreed upon. 17 | And remember: keep the protocol as simple and unequivocal as necessary. The programmer that will implement the protocol can code, but they are not a mind reader. 18 | """ 19 | 20 | TOOLS_NEGOTIATOR_PROMPT = f""" 21 | You are ProtocolNegotiatorGPT. You are negotiating a protocol on behalf of a web service that can perform a task. 22 | The other party is a GPT that is negotiating on behalf of the user. Your goal is to negotiate a protocol that is simple and clear, \ 23 | but also expressive enough to allow the service to perform the task. A protocol is sufficiently expressive if you could write code \ 24 | that, given the query formatted according to the protocol and the tools at the service's disposal, can parse the query according to \ 25 | the protocol's specification, perform the task (if any) and send a reply. 26 | {NEGOTIATION_RULES} 27 | You will receive a list of tools that are available to the programmer that will implement the protocol. 28 | When you are okay with the protocol, don't further repeat everything, just tell to the other party that you are done. 
29 | """ 30 | 31 | 32 | class ReceiverNegotiator: 33 | """Manages protocol negotiations for the Receiver.""" 34 | 35 | def __init__(self, toolformer: Toolformer): 36 | """Initialize the ReceiverNegotiator with a Toolformer. 37 | 38 | Args: 39 | toolformer (Toolformer): The Toolformer instance managing tools. 40 | """ 41 | self.toolformer = toolformer 42 | 43 | def create_conversation( 44 | self, tools: List[ToolLike], additional_info: str = "" 45 | ) -> Conversation: 46 | """Create a new negotiation conversation based on available tools. 47 | 48 | Args: 49 | tools (List[ToolLike]): A list of tools available for negotiation. 50 | additional_info (str, optional): Additional information for the negotiation. Defaults to ''. 51 | 52 | Returns: 53 | Conversation: A Conversation instance managing the negotiation. 54 | """ 55 | prompt = TOOLS_NEGOTIATOR_PROMPT 56 | 57 | if additional_info: 58 | prompt += "\n\n" + additional_info 59 | 60 | prompt += "\n\nThe tools that the implementer will have access to are:\n\n" 61 | 62 | if len(tools) == 0: 63 | prompt += "No additional tools provided" 64 | else: 65 | for tool in tools: 66 | tool = Tool.from_toollike(tool) 67 | prompt += tool.as_documented_python() + "\n\n" 68 | 69 | return self.toolformer.new_conversation(prompt, tools, category="negotiation") 70 | -------------------------------------------------------------------------------- /agora/receiver/components/programmer.py: -------------------------------------------------------------------------------- 1 | from typing import List 2 | 3 | from agora.common.toolformers.base import Tool, Toolformer, ToolLike 4 | from agora.utils import extract_substring 5 | 6 | NO_MULTIROUND_REPLY = """ reply takes a single argument, "query", which is a string, and must return a string. 
7 | """ 8 | 9 | MULTIROUND_REPLY = """reply takes two arguments: 10 | - "query", which is a string 11 | - "memory", which is a dictionary that can be used to store information between rounds (if the protocol requires the receiver to act in multiple rounds). 12 | It must return a tuple of two elements: 13 | - A string, which is the response to the query 14 | - A dictionary, which is the updated memory 15 | """ 16 | 17 | NO_MULTIROUND_EXAMPLE = """ 18 | def reply(query): 19 | ... 20 | return response 21 | """ 22 | 23 | MULTIROUND_EXAMPLE = """ 24 | def reply(query, memory): 25 | ... 26 | return response, updated_memory 27 | """ 28 | 29 | TOOL_PROGRAMMER_PROMPT = """ 30 | You are ProtocolProgrammerGPT. Your task is to write a routine that takes a query formatted according to the protocol and returns a response. 31 | The routine is a Python file that contains a function "reply". {reply_description} 32 | Depending on the protocol, the routine might be need to perform some actions before returning the response. The user might provide you with a list of \ 33 | Python functions you can call to help you with this task. You don't need to worry about importing them, they are already available in the environment. 34 | Rules: 35 | - The implementation must be written in Python. 36 | - You can define any number of helper functions and import any libraries that are part of the Python standard library. 37 | - Do not import libraries that are not part of the Python standard library. 38 | - Remember to import standard libraries if you need them. 39 | - If there is an unexpected error that is not covered by the protocol, throw an exception.\ 40 | If instead the protocol specifies how to handle the error, return the response according to the protocol's specification. 41 | - Do not execute anything (aside from library imports) when the file itself is loaded. I will personally import the file and call the reply function with the task data. 
class ReceiverProgrammer:
    """Generates implementations for protocols based on their specifications."""

    def __init__(self, toolformer: Toolformer, num_attempts: int = 5):
        """Initialize the ReceiverProgrammer with a Toolformer and retry attempts.

        Args:
            toolformer (Toolformer): The Toolformer instance managing tools.
            num_attempts (int, optional): Number of attempts to obtain an
                implementation from the model. Defaults to 5.
        """
        self.toolformer = toolformer
        self.num_attempts = num_attempts

    def __call__(
        self,
        tools: List[ToolLike],
        protocol_document: str,
        multiround: bool,
        additional_info: str = "",
    ) -> str:
        """Generate the implementation code for a given protocol.

        Args:
            tools (List[ToolLike]): A list of tools available to the implementation.
            protocol_document (str): The protocol document outlining requirements.
            multiround (bool): Indicates if the protocol supports multiple rounds
                of interaction.
            additional_info (str, optional): Additional information appended to
                the system prompt. Defaults to ''.

        Returns:
            str: The generated implementation code, with the entry point renamed
                from "reply" to "run" (the executor expects "run").

        Raises:
            ValueError: If the model fails to produce a tagged implementation
                within num_attempts rounds.
        """
        message = (
            "Protocol document:\n\n"
            + protocol_document
            + "\n\n"
            + "Additional functions:\n\n"
        )

        if len(tools) == 0:
            message += "No additional functions provided"
        else:
            for tool in tools:
                tool = Tool.from_toollike(tool)
                message += str(tool) + "\n\n"

        prompt = TOOL_PROGRAMMER_PROMPT.format(
            reply_description=MULTIROUND_REPLY if multiround else NO_MULTIROUND_REPLY,
            example=MULTIROUND_EXAMPLE if multiround else NO_MULTIROUND_EXAMPLE,
        )

        if additional_info:
            prompt += "\n\n" + additional_info

        conversation = self.toolformer.new_conversation(
            prompt, [], category="programming"
        )

        implementation = None

        for _ in range(self.num_attempts):
            reply = conversation(message, print_output=False)

            # The prompt asks the model to wrap its code in these tags; the
            # delimiters were previously empty strings, which made extraction
            # meaningless.
            implementation = extract_substring(
                reply, "<IMPLEMENTATION>", "</IMPLEMENTATION>", include_tags=False
            )

            if implementation is not None:
                break

            message = "You have not provided an implementation yet. Please provide one by surrounding it in the tags <IMPLEMENTATION> and </IMPLEMENTATION>."

        if implementation is None:
            # Previously this fell through and crashed with an AttributeError
            # on None; fail loudly with a clear message instead.
            raise ValueError(
                f"Failed to obtain an implementation after {self.num_attempts} attempts."
            )

        implementation = implementation.strip()

        # Sometimes the LLM leaves the Markdown formatting in the implementation
        implementation = (
            implementation.replace("```python", "").replace("```", "").strip()
        )

        # The executor calls the entry point "run", while the prompt asks for "reply".
        implementation = implementation.replace("def reply(", "def run(")

        return implementation
A protocol is sufficiently expressive if an implementer could write code that, given a query formatted according to the protocol and the tools " 8 | 'at your disposal, can parse the query according to the protocol\'s specification and send a reply. Think about it and at the end of the reply write "YES" if the' 9 | 'protocol is adequate or "NO". Do not attempt to implement the protocol or call the tools: that will be done by the implementer.' 10 | ) 11 | 12 | 13 | class ReceiverProtocolChecker: 14 | """Checks protocol validity and suitability for the Receiver.""" 15 | 16 | def __init__(self, toolformer: Toolformer): 17 | """Initialize the ReceiverProtocolChecker with a Toolformer. 18 | 19 | Args: 20 | toolformer (Toolformer): The Toolformer instance managing tools. 21 | """ 22 | self.toolformer = toolformer 23 | 24 | def __call__( 25 | self, protocol_document: str, tools: List[ToolLike], additional_info: str = "" 26 | ) -> bool: 27 | """Determine if the protocol is suitable based on available tools. 28 | 29 | Args: 30 | protocol_document (str): The protocol document to evaluate. 31 | tools (List[ToolLike]): A list of tools available to implement the protocol. 32 | additional_info (str, optional): Additional information for evaluation. Defaults to ''. 33 | 34 | Returns: 35 | bool: True if the protocol is suitable, False otherwise. 
36 | """ 37 | message = ( 38 | "Protocol document:\n\n" 39 | + protocol_document 40 | + "\n\n" 41 | + "Functions that the implementer will have access to:\n\n" 42 | ) 43 | 44 | if len(tools) == 0: 45 | message += "No additional functions provided" 46 | else: 47 | for tool in tools: 48 | tool = Tool.from_toollike(tool) 49 | message += str(tool) + "\n\n" 50 | 51 | prompt = CHECKER_TOOL_PROMPT 52 | 53 | if additional_info: 54 | prompt += "\n\n" + additional_info 55 | 56 | conversation = self.toolformer.new_conversation( 57 | prompt, [], category="protocolChecking" 58 | ) 59 | 60 | reply = conversation(message, print_output=False) 61 | 62 | # print('Reply:', reply) 63 | # print(reply.lower().strip()[-10:]) 64 | # print('Parsed decision:', 'yes' in reply.lower().strip()[-10:]) 65 | 66 | return "yes" in reply.lower().strip()[-10:] 67 | -------------------------------------------------------------------------------- /agora/receiver/components/responder.py: -------------------------------------------------------------------------------- 1 | # The responder is a special toolformer that replies to a service based on a protocol document. 2 | # It receives the protocol document and writes the response that must be sent to the system. 3 | 4 | from typing import List, Optional 5 | 6 | from agora.common.toolformers.base import Conversation, Toolformer, ToolLike 7 | 8 | PROTOCOL_RESPONDER_PROMPT = ( 9 | "You are ResponderGPT. Below you will find a document describing detailing how to respond to a query. " 10 | "The communication might involve multiple rounds of back-and-forth." 11 | "Use the provided functions to execute what is requested and provide the response according to the protocol's specification. " 12 | "Only reply with the response itself, with no additional information or escaping. Similarly, do not add any additional whitespace or formatting." 13 | ) 14 | 15 | NL_RESPONDER_PROMPT = ( 16 | "You are NaturalLanguageResponderGPT. You will receive a query from a user. 
class Responder:
    """Builds response conversations, either protocol-driven or natural-language."""

    def __init__(self, toolformer: Toolformer) -> None:
        """Initializes a new Responder.

        Args:
            toolformer (Toolformer): The Toolformer used to spawn conversations.
        """
        self.toolformer = toolformer

    def create_protocol_conversation(
        self, protocol_document: str, tools: List[ToolLike], additional_info: str = ""
    ) -> Conversation:
        """Creates a conversation that replies according to a protocol document.

        Args:
            protocol_document (str): The text describing the protocol.
            tools (List[ToolLike]): Tools available to the conversation.
            additional_info (str, optional): Extra context for the conversation.

        Returns:
            Conversation: A conversation bound to the protocol.
        """
        sections = [PROTOCOL_RESPONDER_PROMPT]

        if additional_info:
            sections.append("\n\n" + additional_info)

        sections.append("\n\nThe protocol is the following:\n\n" + protocol_document)

        return self.toolformer.new_conversation(
            "".join(sections), tools, category="conversation"
        )

    def create_nl_conversation(
        self, tools: List[ToolLike], additional_info: str = ""
    ) -> Conversation:
        """Creates a free-form natural language conversation (no protocol).

        Args:
            tools (List[ToolLike]): Tools available during the conversation.
            additional_info (str, optional): Extra context.

        Returns:
            Conversation: The created NL conversation.
        """
        system_prompt = NL_RESPONDER_PROMPT
        if additional_info:
            system_prompt = system_prompt + "\n\n" + additional_info

        return self.toolformer.new_conversation(
            system_prompt, tools, category="conversation"
        )

    def create_conversation(
        self,
        protocol_document: Optional[str],
        tools: List[ToolLike],
        additional_info: str = "",
    ) -> Conversation:
        """Dispatches to a protocol or natural-language conversation.

        Args:
            protocol_document (Optional[str]): The protocol text if available.
                If None, a natural language conversation is created.
            tools (List[ToolLike]): Tools for conversation handling.
            additional_info (str, optional): Additional context or configuration.

        Returns:
            Conversation: The resulting conversation instance.
        """
        if protocol_document is None:
            return self.create_nl_conversation(tools, additional_info)
        return self.create_protocol_conversation(
            protocol_document, tools, additional_info
        )
    def __init__(
        self,
        memory: ReceiverMemory,
        responder: Responder,
        protocol_checker: ReceiverProtocolChecker,
        negotiator: ReceiverNegotiator,
        programmer: ReceiverProgrammer,
        executor: Executor,
        tools: List[ToolLike],
        additional_info: str = "",
        implementation_threshold: int = 5,
    ):
        """
        Initializes the Receiver with needed components and configurations.

        Args:
            memory (ReceiverMemory): Manages protocol memory.
            responder (Responder): Handles responses based on protocols.
            protocol_checker (ReceiverProtocolChecker): Checks protocol validity.
            negotiator (ReceiverNegotiator): Manages protocol negotiations.
            programmer (ReceiverProgrammer): Generates protocol implementations.
            executor (Executor): Executes protocol implementations.
            tools (List[ToolLike]): A list of available tools.
            additional_info (str, optional): Extra info used during operation.
            implementation_threshold (int, optional): Number of conversations a
                protocol must accumulate before an implementation is
                auto-generated for it (see _get_implementation).
        """
        self.memory = memory
        self.responder = responder
        self.protocol_checker = protocol_checker
        self.negotiator = negotiator
        self.programmer = programmer
        self.executor = executor
        self.tools = tools
        self.additional_info = additional_info
        self.implementation_threshold = implementation_threshold
    def create_conversation(
        self, protocol_hash: str, protocol_sources: List[str]
    ) -> Conversation:
        """
        Creates a new conversation based on the protocol hash and sources.

        Dispatch logic:
        - "negotiation" starts a negotiation conversation.
        - None falls through to a natural-language conversation (no protocol).
        - Any other hash is downloaded (if unknown), suitability-checked once,
          and then served either by the LLM responder or by generated code.

        Args:
            protocol_hash (str): Hash identifier for the protocol.
            protocol_sources (List[str]): A list of protocol source URLs.

        Returns:
            Conversation: A new conversation or negotiation session.

        Raises:
            ProtocolRetrievalError: If unable to download the protocol.
            ProtocolRejectedError: If the protocol is deemed inadequate.
        """
        # Sentinel hash: the sender asked to negotiate a new protocol.
        if protocol_hash == "negotiation":
            return self.negotiator.create_conversation(self.tools, self.additional_info)

        protocol_document = None
        implementation = None

        if protocol_hash is not None:
            if not self.memory.is_known(protocol_hash):
                # Try each source in order; the first successful download wins.
                for protocol_source in protocol_sources:
                    protocol_document = download_and_verify_protocol(
                        protocol_hash, protocol_source
                    )
                    if protocol_document is not None:
                        break

                if protocol_document is None:
                    raise ProtocolRetrievalError("Failed to download protocol")

                metadata = extract_metadata(protocol_document)
                self.memory.register_new_protocol(
                    protocol_hash, protocol_sources, protocol_document, metadata
                )

            # Usage counter drives the auto-implementation threshold.
            self.memory.increment_protocol_conversations(protocol_hash)

            protocol = self.memory.get_protocol(protocol_hash)
            protocol_document = protocol.protocol_document
            metadata = protocol.metadata

            # Suitability is evaluated at most once, then cached in memory.
            if self.memory.get_suitability(protocol_hash) == Suitability.UNKNOWN:
                if self.protocol_checker(protocol_document, self.tools):
                    self.memory.set_suitability(protocol_hash, Suitability.ADEQUATE)
                else:
                    self.memory.set_suitability(protocol_hash, Suitability.INADEQUATE)

            if self.memory.get_suitability(protocol_hash) == Suitability.ADEQUATE:
                protocol_document = self.memory.get_protocol(
                    protocol_hash
                ).protocol_document
            else:
                raise ProtocolRejectedError(
                    f"{protocol_hash} is not suitable for execution"
                )

            # May return cached code, or generate it once the conversation
            # count reaches implementation_threshold.
            implementation = self._get_implementation(protocol_hash)

        if implementation is None:
            # No generated code (or no protocol at all): let the LLM respond.
            return self.responder.create_conversation(
                protocol_document, self.tools, self.additional_info
            )
        else:
            # Generated code exists: execute it instead of querying the LLM.
            return self.executor.new_conversation(
                protocol_hash,
                implementation,
                metadata.get("multiround", False),
                self.tools,
            )
class ReceiverServer:
    """Handles and manages HTTP conversations via Flask with a given Receiver.

    This class sets up Flask routes for handling conversation requests
    using the provided Receiver instance.
    """

    def __init__(self, receiver: "Receiver") -> None:
        """Initializes the server with a Receiver instance.

        Args:
            receiver (Receiver): The receiver that creates new conversations.
        """
        self.receiver = receiver
        self.app = Flask(__name__)
        # Maps conversation IDs to live multiround conversations.
        self.conversation_storage = {}

        @self.app.route("/", methods=["POST"])
        def main():
            """Starts a conversation; JSON body must contain protocolHash, protocolSources and body."""
            try:
                data = request.json

                conversation = self.receiver.create_conversation(
                    data["protocolHash"], data["protocolSources"]
                )

                if data.get("multiround", False):
                    # Multiround mode; generate a unique ID for the conversation
                    conversation_id = str(uuid.uuid4())

                    self.conversation_storage[conversation_id] = conversation

                    response = {
                        "status": "success",
                        "conversationId": conversation_id,
                        "body": conversation(data["body"]),
                    }

                    # Automatically delete the conversation after 300 seconds
                    Timer(
                        300,
                        lambda: self.conversation_storage.pop(conversation_id, None),
                    ).start()
                else:
                    response = {"status": "success", "body": conversation(data["body"])}

                return jsonify(response)
            except Exception as e:
                import traceback

                traceback.print_exc()
                # NOTE(review): errors are reported in the JSON payload but the
                # HTTP status code is still 200 — confirm clients rely on this.
                return jsonify({"status": "error", "message": str(e)})

        # Fix: the route must declare the <conversation_id> URL parameter,
        # otherwise Flask cannot dispatch to continue_conversation (which
        # takes that argument).
        @self.app.route("/conversations/<conversation_id>", methods=["POST", "DELETE"])
        def continue_conversation(conversation_id):
            """Continues (POST) or deletes (DELETE) a stored multiround conversation."""
            if request.method == "DELETE":
                # The deletion will succeed even if the conversation does not exist
                self.conversation_storage.pop(conversation_id, None)
                return jsonify({"status": "success"})

            data = request.json

            conversation = self.conversation_storage.get(conversation_id)

            if conversation is None:
                return jsonify(
                    {"status": "error", "message": "Conversation not found."}
                )

            response = {"status": "success", "body": conversation(data["body"])}

            return jsonify(response)

    def run(self, *args, **kwargs) -> None:
        """Runs the Flask application.

        Args:
            *args: Positional arguments for Flask's run method.
            **kwargs: Keyword arguments for Flask's run method.
        """
        self.app.run(*args, **kwargs)
14 | - The implementation will receive a string and return a string, so structure your protocol accordingly. 15 | - The other party might have a different internal data schema or set of tools, so make sure that the protocol is flexible enough to accommodate that. 16 | - There will only be one message sent by the sender and one message sent by the receiver. Design the protocol accordingly. 17 | - Keep the negotiation short: no need to repeat the same things over and over. 18 | - If the other party has proposed a protocol and you're good with it, there's no reason to keep negotiating or to repeat the protocol to the other party. 19 | - Do not restate parts of the protocols that have already been agreed upon. 20 | And remember: keep the protocol as simple and unequivocal as necessary. The programmer that will implement the protocol can code, but they are not a mind reader. 21 | """ 22 | 23 | TASK_NEGOTIATOR_PROMPT = f""" 24 | You are ProtocolNegotiatorGPT. Your task is to negotiate a protocol that can be used to query a service. 25 | You will receive a JSON schema of the task that the service must perform. Negotiate with the service to determine a protocol that can be used to query it. 26 | To do so, you will chat with another GPT (role: user) that will negotiate on behalf of the service. 27 | {NEGOTIATION_RULES} 28 | Once you are ready to save the protocol, reply wrapping the final version of the protocol, as agreed in your negotiation, between the tags and . 29 | Within the body of the tag, before everything else, add a section (between ---) that contains the name, the description of the protocol, and whether the protocol requires multiple rounds of communication. For instance: 30 | 31 | --- 32 | name: MyProtocol 33 | description: This protocol is for... 34 | multiround: false 35 | --- 36 | 37 | Body of the protocol... 
class SenderNegotiator:
    """Negotiates a protocol with a remote service on behalf of the sender.

    The negotiation is a chat between a local LLM conversation (primed with
    TASK_NEGOTIATOR_PROMPT) and the remote party, reached through a callback.
    """

    def __init__(self, toolformer: Toolformer, max_rounds: int = 10) -> None:
        """Initializes the SenderNegotiator.

        Args:
            toolformer (Toolformer): The Toolformer instance used to run the
                negotiation conversation.
            max_rounds (int): Maximum number of negotiation rounds before
                giving up.
        """
        self.toolformer = toolformer
        self.max_rounds = max_rounds

    def __call__(
        self,
        task_schema: TaskSchemaLike,
        callback: Callable[[str], dict],
        additional_info: str = "",
    ) -> Protocol:
        """Negotiates and finalizes a protocol based on the task schema.

        Args:
            task_schema (TaskSchemaLike): The schema of the task.
            callback (Callable[[str], dict]): Delivers a message to the other
                party and returns its reply as a dict with a "status" key and
                either "body" (on success) or "message" (on error).
                Note: the annotation previously said the callback returns a
                str, but the code below indexes the result as a dict.
            additional_info (str): Additional information appended to the
                negotiation prompt.

        Returns:
            Protocol: The finalized Protocol object, or None if no protocol
            was agreed upon within max_rounds.
        """
        task_schema = TaskSchema.from_taskschemalike(task_schema)
        found_protocol = None

        prompt = (
            TASK_NEGOTIATOR_PROMPT
            + "\nThe JSON schema of the task is the following:\n\n"
            + str(task_schema)
        )

        if additional_info:
            prompt += "\n\n" + additional_info

        conversation = self.toolformer.new_conversation(
            prompt, [], category="negotiation"
        )

        other_message = "Hello! How may I help you?"

        for _ in range(self.max_rounds):
            message = conversation(other_message, print_output=False)

            # NOTE(review): these delimiters were empty strings in the
            # reviewed copy (apparently stripped by an HTML-unaware export),
            # which would make extract_substring match trivially. Restored to
            # the <FINALPROTOCOL> markers the negotiator prompt asks the
            # model to emit -- confirm against upstream.
            protocol = extract_substring(
                message, "<FINALPROTOCOL>", "</FINALPROTOCOL>", include_tags=False
            )

            if protocol is None:
                # No final protocol yet: forward the negotiator's message to
                # the other party and feed its reply back into the chat.
                response = callback(message)

                if response["status"] == "success":
                    other_message = response["body"]
                else:
                    other_message = (
                        "Error interacting with the other party: "
                        + response["message"]
                    )
            else:
                # The model committed to a final protocol: parse the metadata
                # header (name/description/multiround) and stop negotiating.
                metadata = extract_metadata(protocol)
                found_protocol = Protocol(protocol, [], metadata)
                break

        return found_protocol
send_query takes a single argument, "task_data", which is a dictionary, and must return \ 11 | one of (dict, str, float, int, None), which is the response to the query formatted according to the output schema. 12 | In order to communicate with the remote server, you can use the function "send_to_server" that is already available in the environment. 13 | send_to_server takes a single argument, "query" (which is a string formatted according to the protocol), and returns a string (again formatted according \ 14 | to the protocol). Do not worry about managing communication, everything is already set up for you. Just focus on preparing the right query. 15 | 16 | Rules: 17 | - The implementation must be written in Python. 18 | - You can define any number of helper functions and import any libraries that are part of the Python standard library. 19 | - Do not import libraries that are not part of the Python standard library. 20 | - send_to_server will be already available in the environment. There is no need to import it. 21 | - Your task is to prepare the query, send it and parse the response. 22 | - Remember to import standard libraries if you need them. 23 | - If there is an unexpected error that is not covered by the protocol, throw an exception.\ 24 | If instead the protocol specifies how to handle the error, return the response according to the protocol's specification. 25 | - Do not execute anything (aside from library imports) when the file itself is loaded. I will personally import the file and call the send_query function with the task data. 26 | Begin by thinking about the implementation and how you would structure the code. \ 27 | Then, write your implementation by writing a code block that contains the tags and . For example: 28 | ```python 29 | 30 | 31 | def send_query(task_data): 32 | ... 
class SenderProgrammer:
    """Generates sender-side implementations from task schemas and protocol documents."""

    def __init__(self, toolformer: Toolformer, num_attempts: int = 5):
        """Initializes the SenderProgrammer.

        Args:
            toolformer (Toolformer): The Toolformer instance used to run the
                code-generation conversation.
            num_attempts (int): Maximum number of attempts to obtain an
                implementation from the model.
        """
        self.toolformer = toolformer
        self.num_attempts = num_attempts

    def __call__(self, task_schema: TaskSchemaLike, protocol_document: str) -> str:
        """Generates implementation code for a given schema and protocol.

        Args:
            task_schema (TaskSchemaLike): The schema of the task.
            protocol_document (str): The protocol specifications.

        Returns:
            str: The generated implementation code, with the entry point
            renamed from send_query to run.

        Raises:
            ValueError: If no implementation could be extracted after
                num_attempts replies.
        """
        task_schema = TaskSchema.from_taskschemalike(task_schema)
        conversation = self.toolformer.new_conversation(
            TASK_PROGRAMMER_PROMPT, [], category="programming"
        )
        message = (
            "JSON schema:\n\n"
            + str(task_schema)
            + "\n\n"
            + "Protocol document:\n\n"
            + protocol_document
        )

        implementation = None

        for _ in range(self.num_attempts):
            reply = conversation(message, print_output=False)

            # NOTE(review): these delimiters were empty strings in the
            # reviewed copy (apparently stripped by an HTML-unaware export);
            # restored to the <IMPLEMENTATION> markers the programmer prompt
            # asks for -- confirm against upstream.
            implementation = extract_substring(
                reply, "<IMPLEMENTATION>", "</IMPLEMENTATION>", include_tags=False
            )

            if implementation is not None:
                break

            message = (
                "You have not provided an implementation yet. Please provide "
                "one by surrounding it in the tags <IMPLEMENTATION> and "
                "</IMPLEMENTATION>."
            )

        # Fix: the original dereferenced implementation unconditionally after
        # the loop, raising an opaque AttributeError on None when every
        # attempt failed. Fail with a clear error instead.
        if implementation is None:
            raise ValueError(
                "Could not extract an implementation after "
                f"{self.num_attempts} attempt(s)."
            )

        implementation = implementation.strip()

        # Sometimes the LLM leaves the Markdown formatting in the implementation
        implementation = (
            implementation.replace("```python", "").replace("```", "").strip()
        )

        # The executor expects the entry point to be named run.
        implementation = implementation.replace("def send_query(", "def run(")

        return implementation
34 | 35 | Returns: 36 | bool: True if the protocol is adequate, otherwise False. 37 | """ 38 | task_schema = TaskSchema.from_taskschemalike(task_schema) 39 | conversation = self.toolformer.new_conversation( 40 | CHECKER_TASK_PROMPT, [], category="protocolChecking" 41 | ) 42 | 43 | message = ( 44 | "The protocol is the following:\n\n" 45 | + protocol_document 46 | + "\n\nThe task is the following:\n\n" 47 | + str(task_schema) 48 | ) 49 | 50 | reply = conversation(message, print_output=False) 51 | 52 | return "yes" in reply.lower().strip()[-10:] 53 | 54 | def pick_protocol( 55 | self, task_schema: TaskSchemaLike, *protocol_lists: List[Protocol] 56 | ) -> Tuple[Optional[Protocol], dict]: 57 | """Selects the first adequate protocol from provided lists. 58 | 59 | Args: 60 | task_schema (TaskSchemaLike): The schema of the task. 61 | *protocol_lists (List[Protocol]): One or more lists of Protocol objects. 62 | 63 | Returns: 64 | (Optional[Protocol], dict): A tuple of the chosen protocol (if any) 65 | and a dictionary of hash evaluations. 66 | """ 67 | protocol_evaluations = {} 68 | 69 | for protocol_list in protocol_lists: 70 | for protocol in protocol_list: 71 | if self.check_protocol_for_task( 72 | protocol.protocol_document, task_schema 73 | ): 74 | protocol_evaluations[protocol.hash] = Suitability.ADEQUATE 75 | return protocol, protocol_evaluations 76 | else: 77 | protocol_evaluations[protocol.hash] = Suitability.INADEQUATE 78 | 79 | return None, protocol_evaluations 80 | -------------------------------------------------------------------------------- /agora/sender/components/querier.py: -------------------------------------------------------------------------------- 1 | # The querier queries a service based on a protocol document. 2 | # It receives the protocol document and writes the query that must be performed to the system. 
def construct_query_description(
    protocol_document: str, task_schema: TaskSchemaLike, task_data: Any
) -> str:
    """Builds the textual task description shown to the querying model.

    The description contains the protocol document (when one is given),
    the input/output JSON schemas of the task, and the task data itself.

    Args:
        protocol_document (str): The protocol document text, or None when
            querying without a protocol.
        task_schema (TaskSchemaLike): The schema for the task.
        task_data (Any): The data for the task.

    Returns:
        str: The constructed query description.
    """
    schema_json = TaskSchema.from_taskschemalike(task_schema).to_json()

    parts = []
    if protocol_document is not None:
        parts.append("Protocol document:\n\n")
        parts.append(protocol_document + "\n\n")

    parts.append("JSON schema of the task:\n\n")
    parts.append("Input (i.e. what the machine will provide you):\n")
    parts.append(json.dumps(schema_json["input_schema"], indent=2) + "\n\n")
    parts.append("Output (i.e. what you have to provide to the machine):\n")
    parts.append(json.dumps(schema_json["output_schema"], indent=2) + "\n\n")
    parts.append("JSON data of the task:\n\n")
    parts.append(json.dumps(task_data, indent=2) + "\n\n")

    return "".join(parts)
def parse_and_handle_query(query: str, callback: Callable[[str], Dict]) -> str:
    """Runs *query* through *callback* and normalizes the outcome to a string.

    A successful response yields its body. A failure response (or any
    unexpected exception) is converted into an "Error calling the tool: ..."
    string so the calling LLM can react to it, with one exception: a
    "protocol rejected" failure is escalated as ProtocolRejectedError.

    Args:
        query (str): The query to be processed.
        callback (Callable[[str], Dict]): The function that processes the query.

    Returns:
        str: The response body or a textual error description.

    Raises:
        ProtocolRejectedError: If the service rejected the protocol.
    """
    try:
        reply = callback(query)

        if reply["status"] != "success":
            if reply.get("message", "").lower() == "protocol rejected":
                raise ProtocolRejectedError("Protocol was rejected by the service")
            return "Error calling the tool: " + reply["message"]

        return reply["body"]
    except ProtocolRejectedError:
        # Rejections must reach the caller untouched.
        raise
    except Exception as e:
        return "Error calling the tool: " + str(e)
112 | """ 113 | self.toolformer = toolformer 114 | self.max_queries = max_queries 115 | 116 | if max_messages is None: 117 | max_messages = max_queries * 2 118 | 119 | self.max_messages = max_messages 120 | self.force_query = force_query 121 | 122 | def handle_conversation( 123 | self, 124 | prompt: str, 125 | message: str, 126 | output_schema: dict, 127 | callback: Callable[[str], Dict], 128 | ) -> str: 129 | """ 130 | Manages the conversation flow for handling queries and delivering outputs. 131 | 132 | Args: 133 | prompt (str): The initial prompt for the conversation. 134 | message (str): The message to process in the conversation. 135 | output_schema (dict): The schema defining the structure of the expected output. 136 | callback (Callable[[str], Dict]): A callback function to handle query responses. 137 | 138 | Returns: 139 | str: The structured output produced by the conversation. 140 | """ 141 | query_counter = 0 142 | 143 | def send_query_internal(query): 144 | # print('Sending query:', query) 145 | nonlocal query_counter 146 | query_counter += 1 147 | 148 | if query_counter > self.max_queries: 149 | # LLM is not listening, issue a warning 150 | return "You have attempted to send too many queries. Finish the message and allow the user to speak, or the system will crash." 151 | 152 | return parse_and_handle_query(query, callback) 153 | 154 | def send_query(query: str) -> str: 155 | """ 156 | Send a query to the other service based on a protocol document. 
157 | 158 | Args: 159 | query: The query to send to the service 160 | 161 | Returns: 162 | The response from the service 163 | """ 164 | return send_query_internal(query) 165 | 166 | send_query_tool = Tool.from_function(send_query) 167 | 168 | found_output = None 169 | found_error = None 170 | 171 | def register_output(**kwargs) -> str: 172 | # print('Registering output:', kwargs) 173 | 174 | nonlocal found_output 175 | 176 | if self.force_query and query_counter == 0: 177 | return "You must send a query before delivering the structured output." 178 | 179 | if found_output is not None: 180 | return "You have already registered an output. You cannot register another one." 181 | 182 | found_output = kwargs 183 | return "Done" 184 | 185 | register_output_tool = Tool( 186 | "deliverStructuredOutput", 187 | "Deliver the structured output to the machine.", 188 | output_schema, 189 | { 190 | "type": "string", 191 | "description": "The sytem response to the structured output.", 192 | }, 193 | register_output, 194 | ) 195 | 196 | def register_error(error: str) -> str: 197 | """ 198 | Return an error message to the machine. 199 | 200 | Args: 201 | error: The error message to return to the machine 202 | 203 | Returns: 204 | A message to the machine saying that an error has been registered. 205 | """ 206 | 207 | nonlocal found_error 208 | found_error = error 209 | # We do not raise immediately because this would be caught by some models 210 | return "Error registered. Finish the message and allow the user to speak." 
211 | 212 | error_tool = Tool.from_function(register_error) 213 | 214 | prompt = prompt.format(max_queries=self.max_queries) 215 | 216 | conversation = self.toolformer.new_conversation( 217 | prompt, 218 | [send_query_tool, register_output_tool, error_tool], 219 | category="conversation", 220 | ) 221 | 222 | for _ in range(self.max_messages): 223 | conversation(message, print_output=False) 224 | 225 | if found_error is not None: 226 | raise ExecutionError(found_error) 227 | 228 | if found_output is not None: 229 | break 230 | 231 | # If we haven't sent a query yet, we can't proceed 232 | if query_counter == 0 and self.force_query: 233 | message = ( 234 | "You must send a query before delivering the structured output." 235 | ) 236 | elif found_output is None: 237 | message = "You must deliver the structured output." 238 | 239 | return found_output 240 | 241 | def __call__( 242 | self, 243 | task_schema: TaskSchemaLike, 244 | task_data: Any, 245 | protocol_document: str, 246 | callback: Callable[[str], Dict], 247 | ) -> str: 248 | """ 249 | Executes the querying process based on task schema and protocol document. 250 | 251 | Args: 252 | task_schema (TaskSchemaLike): The schema of the task to be performed. 253 | task_data (Any): The data associated with the task. 254 | protocol_document (str): The document defining the protocol for querying. 255 | callback: A callback function to handle query responses. 256 | 257 | Returns: 258 | str: The structured output resulting from the querying process. 259 | """ 260 | query_description = construct_query_description( 261 | protocol_document, task_schema, task_data 262 | ) 263 | task_schema = TaskSchema.from_taskschemalike(task_schema) 264 | output_schema = task_schema.output_schema 265 | 266 | if output_schema is None: 267 | raise ValueError( 268 | "Task schema must have an output schema to deliver structured output." 
class SimpleSenderTransporter(SenderTransporter):
    """Default HTTP transporter: talks to the receiver via plain POST/DELETE requests."""

    class SimpleExternalConversation(Conversation):
        """A single conversation with a remote receiver over HTTP."""

        def __init__(
            self,
            target: str,
            multiround: bool,
            protocol_hash: str,
            protocol_sources: List[str],
        ):
            """
            Initializes a simple external conversation.

            Args:
                target (str): The target URL or endpoint.
                multiround (bool): Whether multi-round communication is enabled.
                protocol_hash (str): The protocol hash.
                protocol_sources (List[str]): Protocol sources.
            """
            self.target = target
            self.multiround = multiround
            self.protocol_hash = protocol_hash
            self.protocol_sources = protocol_sources
            # Assigned by the server on the first multiround exchange.
            self._conversation_id = None

        def __call__(self, message: str):
            """
            Sends a message in the current conversation.

            The first message goes to the base endpoint; once the server has
            assigned a conversation ID, later messages go to
            "<target>/conversations/<id>".

            Args:
                message (str): The message to send.

            Returns:
                dict: The response containing 'status' and 'body'.

            Raises:
                ProtocolTransportError: If the server replies with a non-200 status.
            """
            if self._conversation_id is None:
                target_url = self.target
            else:
                target_url = f"{self.target}/conversations/{self._conversation_id}"

            raw_query = {
                "protocolHash": self.protocol_hash,
                "protocolSources": self.protocol_sources,
                "body": message,
            }

            if self.multiround:
                raw_query["multiround"] = True

            # NOTE(review): no timeout is set, so a stalled server blocks
            # forever; consider exposing a configurable timeout.
            raw_response = requests.post(target_url, json=raw_query)

            if raw_response.status_code != 200:
                raise ProtocolTransportError(
                    "Error in external conversation: " + raw_response.text
                )

            response = raw_response.json()

            if self.multiround and self._conversation_id is None:
                if "conversationId" not in response:
                    raise Exception(
                        "Multiround conversation did not return conversationId:",
                        response,
                    )
                self._conversation_id = response["conversationId"]

            return {"status": response["status"], "body": response["body"]}

        def close(self) -> None:
            """
            Closes the conversation by deleting it from the remote service.

            Raises:
                ProtocolTransportError: If the deletion request fails.
            """
            if self._conversation_id is not None:
                raw_response = requests.delete(
                    f"{self.target}/conversations/{self._conversation_id}"
                )
                if raw_response.status_code != 200:
                    # Fix: raise the transport-specific error (as __call__
                    # does) instead of a bare Exception with tuple args.
                    raise ProtocolTransportError(
                        "Error in closing external conversation: "
                        + raw_response.text
                    )

    def new_conversation(
        self,
        target: str,
        multiround: bool,
        protocol_hash: str,
        protocol_sources: List[str],
    ) -> SimpleExternalConversation:
        """
        Creates a new SimpleExternalConversation instance.

        Args:
            target (str): The target URL or endpoint.
            multiround (bool): Whether the conversation is multi-round.
            protocol_hash (str): The protocol's hash identifier.
            protocol_sources (List[str]): Protocol sources.

        Returns:
            SimpleExternalConversation: A new conversation instance.
        """
        return self.SimpleExternalConversation(
            target, multiround, protocol_hash, protocol_sources
        )
    def __init__(
        self,
        memory: SenderMemory,
        protocol_picker: ProtocolPicker,
        negotiator: SenderNegotiator,
        programmer: SenderProgrammer,
        executor: Executor,
        querier: Querier,
        transporter: SenderTransporter,
        protocol_threshold: int = 5,
        negotiation_threshold: int = 10,
        implementation_threshold: int = 5,
    ):
        """Initialize the Sender with the necessary components and thresholds.

        Args:
            memory (SenderMemory): Memory component for storing protocols and task conversations.
            protocol_picker (ProtocolPicker): Component responsible for selecting protocols.
            negotiator (SenderNegotiator): Handles negotiation of protocols.
            programmer (SenderProgrammer): Generates protocol implementations.
            executor (Executor): Executes protocol implementations.
            querier (Querier): Manages querying external services.
            transporter (SenderTransporter): Handles the transportation of messages.
            protocol_threshold (int, optional): Minimum number of conversations to check existing protocols and see if one is suitable. Defaults to 5.
            negotiation_threshold (int, optional): Minimum number of conversations to negotiate a new protocol. Defaults to 10.
            implementation_threshold (int, optional): Minimum number of conversations using a protocol to write an implementation. Defaults to 5.
        """
        # Components are stored as-is; the Sender only orchestrates them.
        self.memory = memory
        self.protocol_picker = protocol_picker
        self.negotiator = negotiator
        self.programmer = programmer
        self.executor = executor
        self.querier = querier
        self.transporter = transporter
        # Conversation-count thresholds at which the sender escalates from
        # ad-hoc querying to protocol selection, then negotiation, then code
        # generation (see the *_threshold parameter docs above).
        self.protocol_threshold = protocol_threshold
        self.negotiation_threshold = negotiation_threshold
        self.implementation_threshold = implementation_threshold
95 | implementation_threshold (int, optional): Minimum number of conversations using a protocol to write an implementation. Defaults to 5. 96 | 97 | Returns: 98 | Sender: A configured Sender instance. 99 | """ 100 | if storage is None: 101 | storage = JSONStorage(storage_path) 102 | memory = SenderMemory(storage) 103 | 104 | if protocol_picker is None: 105 | protocol_picker = ProtocolPicker(toolformer) 106 | if negotiator is None: 107 | negotiator = SenderNegotiator(toolformer) 108 | if programmer is None: 109 | programmer = SenderProgrammer(toolformer) 110 | if executor is None: 111 | executor = RestrictedExecutor() 112 | if querier is None: 113 | querier = Querier(toolformer) 114 | if transporter is None: 115 | transporter = SimpleSenderTransporter() 116 | 117 | return Sender( 118 | memory, 119 | protocol_picker, 120 | negotiator, 121 | programmer, 122 | executor, 123 | querier, 124 | transporter, 125 | protocol_threshold, 126 | negotiation_threshold, 127 | implementation_threshold, 128 | ) 129 | 130 | def _negotiate_protocol( 131 | self, task_id: str, task_schema: TaskSchemaLike, target: str 132 | ) -> Optional[Protocol]: 133 | """Negotiate a protocol based on the task schema and target. 134 | 135 | Args: 136 | task_id (str): The identifier of the task. 137 | task_schema (TaskSchemaLike): The schema of the task to be performed. 138 | target (str): The target for which the protocol is being negotiated. 139 | 140 | Returns: 141 | Optional[Protocol]: The negotiated Protocol object if successful, else None. 
142 | """ 143 | with self.transporter.new_conversation( 144 | target, True, "negotiation", None 145 | ) as external_conversation: 146 | 147 | def send_query(query): 148 | response = external_conversation(query) 149 | # print('Response to negotiator:', response) 150 | return response 151 | 152 | protocol = self.negotiator(task_schema, send_query) 153 | 154 | if protocol is not None: 155 | self.memory.register_new_protocol( 156 | protocol.hash, 157 | protocol.protocol_document, 158 | protocol.sources, 159 | protocol.metadata, 160 | ) 161 | self.memory.set_default_suitability( 162 | protocol.hash, task_id, protocol.metadata.get("suitability", "unknown") 163 | ) 164 | self.memory.set_suitability_override( 165 | protocol.hash, 166 | task_id, 167 | target, 168 | protocol.metadata.get("suitability", "adequate"), 169 | ) 170 | 171 | return protocol 172 | 173 | def _get_suitable_protocol( 174 | self, task_id: str, task_schema: TaskSchemaLike, target: str 175 | ) -> Optional[Protocol]: 176 | """Retrieve a suitable protocol for the given task and target. 177 | 178 | Args: 179 | task_id (str): The identifier of the task. 180 | task_schema (TaskSchemaLike): The schema of the task to be performed. 181 | target (str): The target for which a suitable protocol is needed. 182 | 183 | Returns: 184 | Optional[Protocol]: A suitable Protocol object if found, else None. 
185 | """ 186 | # Look in the memory 187 | suitable_protocol = self.memory.get_suitable_protocol(task_id, target) 188 | 189 | if ( 190 | suitable_protocol is None 191 | and self.memory.get_task_conversations(task_id, target) 192 | > self.protocol_threshold 193 | ): 194 | protocol_ids = self.memory.get_unclassified_protocols(task_id) 195 | protocols = [ 196 | self.memory.get_protocol(protocol_id) for protocol_id in protocol_ids 197 | ] 198 | suitable_protocol, protocol_evaluations = ( 199 | self.protocol_picker.pick_protocol(task_schema, protocols) 200 | ) 201 | 202 | for protocol_id, evaluation in protocol_evaluations.items(): 203 | self.memory.set_default_suitability(protocol_id, task_id, evaluation) 204 | 205 | if ( 206 | suitable_protocol is None 207 | and self.memory.get_task_conversations(task_id, target) 208 | > self.negotiation_threshold 209 | ): 210 | suitable_protocol = self._negotiate_protocol(task_id, task_schema, target) 211 | 212 | return suitable_protocol 213 | 214 | def _get_implementation(self, protocol_id: str, task_schema): 215 | """Obtain the implementation for a specific protocol and task schema. 216 | 217 | Args: 218 | protocol_id (str): The identifier of the protocol. 219 | task_schema: The schema of the task to be performed. 220 | 221 | Returns: 222 | str: The implementation code for the protocol. 
223 | """ 224 | # Check if a routine exists and eventually create it 225 | implementation = self.memory.get_implementation(protocol_id) 226 | 227 | if ( 228 | implementation is None 229 | and self.memory.get_protocol_conversations(protocol_id) 230 | > self.implementation_threshold 231 | ): 232 | protocol = self.memory.get_protocol(protocol_id) 233 | implementation = self.programmer(task_schema, protocol.protocol_document) 234 | self.memory.register_implementation(protocol_id, implementation) 235 | 236 | return implementation 237 | 238 | def _run_routine(self, protocol_id: str, implementation: str, task_data, callback): 239 | """Run the routine associated with a protocol using the provided implementation and task data. 240 | 241 | Args: 242 | protocol_id (str): The identifier of the protocol. 243 | implementation (str): The implementation code to execute. 244 | task_data: The data required for the task. 245 | callback: The callback function to send queries to the external service. 246 | 247 | Returns: 248 | Any: The result of the routine execution. 249 | """ 250 | 251 | def send_to_server(query: str): 252 | """Send a query to the other service based on a protocol document. 253 | 254 | Args: 255 | query (str): The query to send to the service 256 | 257 | Returns: 258 | str: The response from the service 259 | """ 260 | 261 | response = callback(query) 262 | # print('Tool run_routine responded with:', response) 263 | return response["body"] 264 | 265 | send_query_tool = Tool.from_function(send_to_server) # TODO: Handle errors 266 | 267 | return self.executor( 268 | protocol_id, implementation, [send_query_tool], [task_data], {} 269 | ) 270 | 271 | def execute_task( 272 | self, 273 | task_id: str, 274 | task_schema: TaskSchemaLike, 275 | task_data: dict, 276 | target: str, 277 | force_no_protocol: bool = False, 278 | force_llm: bool = False, 279 | ) -> Any: 280 | """Execute a task by selecting and running an appropriate protocol or falling back to querying. 
281 | 282 | Args: 283 | task_id (str): The identifier of the task. 284 | task_schema (TaskSchemaLike): The schema of the task to be performed. 285 | task_data: The data required for the task. 286 | target (str): The target for which the task is being executed. 287 | force_no_protocol (bool, optional): If True, forces execution without a protocol. Defaults to False. 288 | force_llm (bool, optional): If True, forces execution using a language model. Defaults to False. 289 | 290 | Returns: 291 | Any: The result of the task execution. 292 | """ 293 | self.memory.increment_task_conversations(task_id, target) 294 | 295 | if force_no_protocol: 296 | protocol = None 297 | else: 298 | protocol = self._get_suitable_protocol(task_id, task_schema, target) 299 | 300 | sources = [] 301 | 302 | if protocol is not None: 303 | self.memory.increment_protocol_conversations(protocol.hash) 304 | sources = protocol.sources 305 | 306 | if len(sources) == 0: 307 | # If there are no sources, use a data URI as source 308 | sources = [encode_as_data_uri(protocol.protocol_document)] 309 | 310 | with self.transporter.new_conversation( 311 | target, 312 | protocol.metadata.get("multiround", True) if protocol else True, 313 | protocol.hash if protocol else None, 314 | sources, 315 | ) as external_conversation: 316 | 317 | def send_query(query): 318 | response = external_conversation(query) 319 | # print('Response to sender:', response) 320 | return response 321 | 322 | implementation = None 323 | 324 | if protocol is not None and not force_llm: 325 | implementation = self._get_implementation(protocol.hash, task_schema) 326 | 327 | if implementation is None: 328 | response = self.querier( 329 | task_schema, 330 | task_data, 331 | protocol.protocol_document if protocol else None, 332 | send_query, 333 | ) 334 | else: 335 | try: 336 | response = self._run_routine( 337 | protocol.hash, implementation, task_data, send_query 338 | ) 339 | except ExecutionError as e: 340 | # print('Error running 
routine:', e) 341 | # print('Fallback to querier') 342 | 343 | response = self.querier( 344 | task_schema, 345 | task_data, 346 | protocol.protocol_document if protocol else None, 347 | send_query, 348 | ) 349 | 350 | return response 351 | 352 | def task( 353 | self, 354 | task_id: Optional[str] = None, 355 | description: Optional[str] = None, 356 | input_schema: Optional[dict] = None, 357 | output_schema: Optional[dict] = None, 358 | schema_generator: Optional[TaskSchemaGenerator] = None, 359 | ): 360 | """Decorator to define a task with optional schemas and description. 361 | 362 | Args: 363 | task_id (str, optional): The identifier of the task. Defaults to None. 364 | description (str, optional): A brief description of the task. Defaults to None. 365 | input_schema (dict, optional): The input schema for the task. Defaults to None. 366 | output_schema (dict, optional): The output schema for the task. Defaults to None. 367 | schema_generator (TaskSchemaGenerator, optional): A generator to fill in missing schema fields. Defaults to None. 368 | 369 | Returns: 370 | Callable: The decorated function. 
371 | """ 372 | 373 | def wrapper(func): 374 | nonlocal task_id 375 | 376 | if task_id is None: 377 | task_id = func.__name__ 378 | 379 | try: 380 | task_schema = TaskSchema.from_function( 381 | func, 382 | description=description, 383 | input_schema=input_schema, 384 | output_schema=output_schema, 385 | ) 386 | except Exception as e: 387 | if schema_generator is None: 388 | raise e 389 | 390 | task_schema = schema_generator.from_function(func) 391 | 392 | def wrapped(*args, target=None, **kwargs): 393 | # Figure out from the function signature what the input data should be 394 | signature = inspect.signature(func) 395 | task_data = signature.bind(*args, **kwargs) 396 | task_data.apply_defaults() 397 | task_data = task_data.arguments 398 | 399 | return self.execute_task(task_id, task_schema, task_data, target) 400 | 401 | if "target" in task_schema.input_schema["required"]: 402 | raise ValueError("The task schema should not require a target field") 403 | 404 | tool_input_schema = dict(task_schema.input_schema) 405 | tool_input_schema["properties"]["target"] = { 406 | "type": "string", 407 | "description": "The URL of the target system or service for the task", 408 | } 409 | 410 | tool = Tool( 411 | wrapped.__name__, 412 | task_schema.description, 413 | tool_input_schema, 414 | task_schema.output_schema, 415 | wrapped, 416 | ) 417 | 418 | return tool.as_annotated_function() 419 | 420 | return wrapper 421 | -------------------------------------------------------------------------------- /agora/sender/memory.py: -------------------------------------------------------------------------------- 1 | from typing import Optional 2 | 3 | from agora.common.core import Protocol, Suitability 4 | from agora.common.errors import StorageError 5 | from agora.common.memory import ProtocolMemory 6 | from agora.common.storage import Storage 7 | 8 | 9 | class SenderMemory(ProtocolMemory): 10 | """ 11 | Manages the memory for the Sender, including protocol suitability and task 
conversations. 12 | """ 13 | 14 | def __init__(self, storage: Storage): 15 | """ 16 | Initializes SenderMemory with a storage backend. 17 | 18 | Args: 19 | storage (Storage): The storage backend for memory. 20 | """ 21 | super().__init__(storage, num_conversations={}) 22 | 23 | def get_suitability( 24 | self, protocol_id: str, task_id: str, target: Optional[str] 25 | ) -> Suitability: 26 | """ 27 | Retrieves the suitability status for a given protocol ID and task ID. 28 | 29 | Args: 30 | protocol_id (str): The protocol identifier. 31 | task_id (str): The task identifier. 32 | target (Optional[str]): The target system or service. 33 | 34 | Returns: 35 | Suitability: The stored suitability status. 36 | """ 37 | suitability_info = super().get_extra_field(protocol_id, "suitability", {}) 38 | 39 | if task_id not in suitability_info: 40 | return Suitability.UNKNOWN 41 | 42 | if target is not None and target in suitability_info[task_id]["overrides"]: 43 | return suitability_info[task_id]["overrides"][target] 44 | 45 | return suitability_info[task_id]["default"] 46 | 47 | def get_known_suitable_protocol_ids(self, task_id, target): 48 | """ 49 | Returns known suitable protocol IDs for the given task and target. 50 | 51 | Args: 52 | task_id: The task identifier. 53 | target: The target system or service. 54 | 55 | Returns: 56 | list: A list of known suitable protocol IDs. 57 | """ 58 | suitable_protocols = [] 59 | for protocol_id in self.protocol_ids(): 60 | if ( 61 | self.get_suitability(protocol_id, task_id, target) 62 | == Suitability.ADEQUATE 63 | ): 64 | suitable_protocols.append(protocol_id) 65 | 66 | return suitable_protocols 67 | 68 | def get_suitable_protocol(self, task_id, target) -> Optional[Protocol]: 69 | """ 70 | Retrieves a suitable protocol object for the given task and target if available. 71 | 72 | Args: 73 | task_id: The task identifier. 74 | target: The target system or service. 
75 | 76 | Returns: 77 | Optional[Protocol]: The first suitable protocol found or None if none available. 78 | """ 79 | suitable_protocols = self.get_known_suitable_protocol_ids(task_id, target) 80 | if len(suitable_protocols) == 0: 81 | return None 82 | return self.get_protocol(suitable_protocols[0]) 83 | 84 | def increment_task_conversations(self, task_id, target): 85 | """ 86 | Increments the conversation counter for a given task and target. 87 | 88 | Args: 89 | task_id: The task identifier. 90 | target: The target system or service. 91 | """ 92 | if "num_conversations" not in self.storage: 93 | self.storage["num_conversations"] = {} 94 | if task_id not in self.storage["num_conversations"]: 95 | self.storage["num_conversations"][task_id] = {} 96 | if target not in self.storage["num_conversations"][task_id]: 97 | self.storage["num_conversations"][task_id][target] = 0 98 | self.storage["num_conversations"][task_id][target] += 1 99 | 100 | self.storage.save_memory() 101 | 102 | def get_task_conversations(self, task_id, target): 103 | """ 104 | Retrieves the number of stored conversations for a task and target. 105 | 106 | Args: 107 | task_id: The task identifier. 108 | target: The target system or service. 109 | 110 | Returns: 111 | int: The number of conversations. 112 | """ 113 | if "num_conversations" not in self.storage: 114 | return 0 115 | if task_id not in self.storage["num_conversations"]: 116 | return 0 117 | if target not in self.storage["num_conversations"][task_id]: 118 | return 0 119 | return self.storage["num_conversations"][task_id][target] 120 | 121 | def increment_protocol_conversations(self, protocol_id): 122 | """ 123 | Increments the conversation counter for a given protocol. 124 | 125 | Args: 126 | protocol_id: The protocol identifier. 
127 | """ 128 | num_conversations = self.get_protocol_conversations(protocol_id) 129 | self.set_extra_field(protocol_id, "conversations", num_conversations + 1) 130 | 131 | def get_protocol_conversations(self, protocol_id): 132 | """ 133 | Retrieves the number of stored conversations for a protocol. 134 | 135 | Args: 136 | protocol_id: The protocol identifier. 137 | 138 | Returns: 139 | int: The number of conversations. 140 | """ 141 | return self.get_extra_field(protocol_id, "conversations", 0) 142 | 143 | def has_suitable_protocol(self, task_id, target): 144 | """ 145 | Checks whether a suitable protocol exists for a given task and target. 146 | 147 | Args: 148 | task_id: The task identifier. 149 | target: The target system or service. 150 | 151 | Returns: 152 | bool: True if a suitable protocol exists, otherwise False. 153 | """ 154 | return len(self.get_known_suitable_protocol_ids(task_id, target)) > 0 155 | 156 | def get_unclassified_protocols(self, task_id): 157 | """Get protocols that have not been classified for a specific task. 158 | 159 | Args: 160 | task_id: The identifier of the task. 161 | 162 | Returns: 163 | List[str]: A list of unclassified protocol IDs. 164 | """ 165 | unclassified_protocols = [] 166 | for protocol_id in self.protocol_ids(): 167 | if self.get_suitability(protocol_id, task_id, None) == Suitability.UNKNOWN: 168 | unclassified_protocols.append(protocol_id) 169 | 170 | return unclassified_protocols 171 | 172 | def set_default_suitability( 173 | self, protocol_id: str, task_id: str, suitability: Suitability 174 | ): 175 | """Set the default suitability for a protocol and task. 176 | 177 | Args: 178 | protocol_id (str): The identifier of the protocol. 179 | task_id (str): The identifier of the task. 180 | suitability (Suitability): The default suitability status to set. 
181 | """ 182 | suitability_info = self.get_extra_field(protocol_id, "suitability", {}) 183 | 184 | if task_id not in suitability_info: 185 | suitability_info[task_id] = { 186 | "default": Suitability.UNKNOWN, 187 | "overrides": {}, 188 | } 189 | 190 | suitability_info[task_id]["default"] = suitability 191 | 192 | self.set_extra_field(protocol_id, "suitability", suitability_info) 193 | 194 | def set_suitability_override( 195 | self, protocol_id: str, task_id: str, target: str, suitability: Suitability 196 | ): 197 | """Override the suitability of a protocol for a specific task and target. 198 | 199 | Args: 200 | protocol_id (str): The identifier of the protocol. 201 | task_id (str): The identifier of the task. 202 | target (str): The target for which the suitability is overridden. 203 | suitability (Suitability): The overridden suitability status. 204 | """ 205 | suitability_info = self.get_extra_field(protocol_id, "suitability", {}) 206 | 207 | if task_id not in suitability_info: 208 | suitability_info[task_id] = { 209 | "default": Suitability.UNKNOWN, 210 | "overrides": {}, 211 | } 212 | 213 | suitability_info[task_id]["overrides"][target] = suitability 214 | self.set_extra_field(protocol_id, "suitability", suitability_info) 215 | 216 | def register_new_protocol( 217 | self, protocol_id: str, protocol_document: str, sources: list, metadata: dict 218 | ): 219 | """Register a new protocol with the given sources, document, and metadata. 220 | 221 | Args: 222 | protocol_id (str): The identifier of the new protocol. 223 | protocol_document (str): The document describing the protocol. 224 | sources (list): A list of sources where the protocol is referenced. 225 | metadata (dict): Additional metadata related to the protocol. 
226 | """ 227 | if protocol_id in self.storage["protocols"]: 228 | raise StorageError("Protocol already in memory:", protocol_id) 229 | 230 | super().register_new_protocol( 231 | protocol_id, protocol_document, sources, metadata, None, suitability={} 232 | ) 233 | -------------------------------------------------------------------------------- /agora/sender/schema_generator.py: -------------------------------------------------------------------------------- 1 | import inspect 2 | import json 3 | from typing import Callable 4 | 5 | from agora.common.toolformers.base import Toolformer 6 | from agora.sender.task_schema import TaskSchema 7 | 8 | SCHEMA_GENERATOR_PROMPT = """ 9 | You are TaskSchemaGeneratorGPT. Your task is to convert a description of a task into a standardized schema. 10 | The final schema is a JSON object that describes the input and output of the task. 11 | It has the following fields: 12 | - description (string): A description of the task. 13 | - input (object): A JSON object that describes the input of the task as a classic JSON schema object (i.e. it has the fields type, properties, etc.). 14 | - output (object): A JSON object that describes the output of the task as a classic JSON schema object (i.e. it has the fields type, properties, etc.). 15 | 16 | Some rules: 17 | - All fields are required. Do not add any additional fields. 18 | - If the description is not clear, instead of asking for more information, make educated guesses. 19 | - Never ask for additional information. 20 | {EXTRA_RULES} 21 | 22 | Reply with the schema and nothing else. 23 | """ 24 | 25 | FROM_FUNCTION_EXTRA_RULES = """ 26 | - If the function has type hints, use them and do not override them. 27 | - Do not add any new input parameters.""" 28 | 29 | 30 | class TaskSchemaGenerator: 31 | """Toolformer-based task schema generation.""" 32 | 33 | def __init__(self, toolformer: Toolformer): 34 | """Initialize the SchemaGenerator. 
35 | 36 | Args: 37 | toolformer (Toolformer): The toolformer to use for schema generation. 38 | """ 39 | self.toolformer = toolformer 40 | 41 | def _generate( 42 | self, 43 | prompt: str, 44 | message: str, 45 | description: str = None, 46 | input_schema: dict = None, 47 | output_schema: dict = None, 48 | ) -> TaskSchema: 49 | if description is not None and input_schema is None and output_schema is None: 50 | # We can generate the schema directly 51 | return TaskSchema(description, input_schema, output_schema) 52 | 53 | # We inform the toolformer of the overrides, since they can be useful to generate the rest of the schema 54 | if description is not None: 55 | message += "\n\n" + "Description override:\n\n" + description 56 | 57 | if input_schema is not None: 58 | message += ( 59 | "\n\n" 60 | + "Input schema override:\n\n" 61 | + json.dumps(input_schema, indent=2) 62 | ) 63 | 64 | if output_schema is not None: 65 | message += ( 66 | "\n\n" 67 | + "Output schema override:\n\n" 68 | + json.dumps(output_schema, indent=2) 69 | ) 70 | 71 | conversation = self.toolformer.new_conversation(prompt, [], category="schema") 72 | 73 | reply = conversation(message, print_output=False) 74 | 75 | # Extract the schema from the reply 76 | schema = reply[reply.find("{") : reply.rfind("}") + 1] 77 | 78 | schema = json.loads(schema) 79 | 80 | if description is not None: 81 | schema["description"] = description 82 | 83 | if input_schema is not None: 84 | schema["input_schema"] = input_schema 85 | 86 | if output_schema is not None: 87 | schema["output_schema"] = output_schema 88 | 89 | return TaskSchema.from_json(schema) 90 | 91 | def from_function( 92 | self, 93 | func: Callable, 94 | description: str = None, 95 | input_schema: dict = None, 96 | output_schema: dict = None, 97 | ) -> TaskSchema: 98 | """Generate a TaskSchema schema from a function. 99 | Unlike TaskSchema.from_function, this method supports generating schemas from functions without type hints. 
100 | 101 | Args: 102 | func (Callable): The function to generate the schema from. 103 | description (str, optional): If not None, overrides the generated description. Defaults to None. 104 | input_schema (dict, optional): If not None, overrides the generated input schema. Defaults to None. 105 | output_schema (dict, optional): If not None, overrides the generated output schema. Defaults to None. 106 | 107 | Returns: 108 | TaskSchema: The generated schema. 109 | """ 110 | prompt = SCHEMA_GENERATOR_PROMPT.format(EXTRA_RULES=FROM_FUNCTION_EXTRA_RULES) 111 | 112 | message = "Function code:\n\n" + inspect.getsource(func) 113 | 114 | return self._generate( 115 | prompt, 116 | message, 117 | description=description, 118 | input_schema=input_schema, 119 | output_schema=output_schema, 120 | ) 121 | 122 | def from_text( 123 | self, 124 | text: str, 125 | description: str = None, 126 | input_schema: dict = None, 127 | output_schema: dict = None, 128 | ) -> TaskSchema: 129 | """Generate a JSON schema from a textual description. 130 | 131 | Args: 132 | text (str): The description of the task. 133 | description (str, optional): If not None, overrides the generated description. Defaults to None. 134 | input_schema (dict, optional): If not None, overrides the generated input schema. Defaults to None. 135 | output_schema (dict, optional): If not None, overrides the generated output schema. Defaults to None. 136 | 137 | Returns: 138 | TaskSchema: The generated schema. 
139 | """ 140 | prompt = SCHEMA_GENERATOR_PROMPT.format(EXTRA_RULES="") 141 | 142 | message = "Description of the task:\n\n" + text 143 | 144 | return self._generate( 145 | prompt, 146 | message, 147 | description=description, 148 | input_schema=input_schema, 149 | output_schema=output_schema, 150 | ) 151 | -------------------------------------------------------------------------------- /agora/sender/task_schema.py: -------------------------------------------------------------------------------- 1 | import json 2 | from collections.abc import Mapping 3 | from typing import TYPE_CHECKING, Callable, Optional, TypeAlias 4 | 5 | from agora.common.errors import SchemaError 6 | from agora.common.function_schema import schema_from_function 7 | 8 | if TYPE_CHECKING: 9 | from agora.sender.schema_generator import TaskSchemaGenerator 10 | 11 | 12 | class TaskSchema(Mapping): 13 | """Defines the schema for a task, including description and input/output schemas.""" 14 | 15 | def __init__( 16 | self, 17 | description: Optional[str], 18 | input_schema: Optional[dict], 19 | output_schema: Optional[dict], 20 | ): 21 | """Initializes the TaskSchema. 22 | 23 | Args: 24 | description (Optional[str]): A description of the task. 25 | input_schema (Optional[dict]): The JSON schema of the input data. 26 | output_schema (Optional[dict]): The JSON schema of the output data. 
27 | """ 28 | self.description = description 29 | self.input_schema = input_schema 30 | self.output_schema = output_schema 31 | 32 | @property 33 | def fields(self) -> dict: 34 | return { 35 | "description": self.description, 36 | "input_schema": self.input_schema, 37 | "output_schema": self.output_schema, 38 | } 39 | 40 | def __len__(self): 41 | return len(self.fields) 42 | 43 | def __iter__(self): 44 | return iter(self.fields) 45 | 46 | def __getitem__(self, key): 47 | return self.fields[key] 48 | 49 | @staticmethod 50 | def from_json(json_dict: dict) -> "TaskSchema": 51 | """ 52 | Creates a TaskSchema from a JSON dictionary. 53 | 54 | Args: 55 | json_dict (dict): The JSON dictionary containing task schema details. 56 | 57 | Returns: 58 | TaskSchema: An instance of TaskSchema based on the provided JSON. 59 | 60 | Raises: 61 | SchemaError: If required fields are missing in the JSON dictionary. 62 | """ 63 | for field in ["description", "input_schema", "output_schema"]: 64 | if field not in json_dict: 65 | raise SchemaError(f'"{field}" field is required in TaskSchema') 66 | 67 | return TaskSchema( 68 | json_dict["description"], 69 | json_dict["input_schema"], 70 | json_dict["output_schema"], 71 | ) 72 | 73 | def to_json(self) -> dict: 74 | """ 75 | Converts the TaskSchema to a JSON dictionary. 76 | 77 | Returns: 78 | dict: The JSON representation of the TaskSchema. 79 | """ 80 | return self.fields 81 | 82 | @staticmethod 83 | def from_function( 84 | func: Callable, 85 | description: Optional[str] = None, 86 | input_schema: Optional[dict] = None, 87 | output_schema: Optional[dict] = None, 88 | generator: Optional["TaskSchemaGenerator"] = None, 89 | ) -> "TaskSchema": 90 | """ 91 | Creates a TaskSchema from a function, inferring schemas if necessary. 92 | 93 | Args: 94 | func (Callable): The function to infer the schema from. 95 | description (Optional[str], optional): Overrides the task description. Defaults to None. 
96 | input_schema (Optional[dict], optional): Overrides the input schema. Defaults to None. 97 | output_schema (Optional[dict], optional): Overrides the output schema. Defaults to None. 98 | generator (Optional[TaskSchemaGenerator], optional): Used to fill the fields that could not be parsed from function introspection. Defaults to None. 99 | 100 | Returns: 101 | TaskSchema: An instance of TaskSchema based on the function. 102 | """ 103 | 104 | if description is not None and input_schema is None and output_schema is None: 105 | # We can generate the schema directly 106 | return TaskSchema(description, input_schema, output_schema) 107 | 108 | try: 109 | schema = schema_from_function(func) 110 | except Exception as e: 111 | if generator is None: 112 | raise e 113 | schema = generator.from_function( 114 | func, description, input_schema, output_schema 115 | ).fields 116 | 117 | if description is None: 118 | description = schema.get("description", None) 119 | 120 | if input_schema is None: 121 | input_schema = schema.get("input_schema", None) 122 | 123 | if output_schema is None: 124 | output_schema = schema.get("output_schema", None) 125 | 126 | # TODO: Throw an error if any of the fields are still None 127 | 128 | return TaskSchema(description, input_schema, output_schema) 129 | 130 | @staticmethod 131 | def from_taskschemalike(task_schema_like: "TaskSchemaLike") -> "TaskSchema": 132 | """ 133 | Converts a TaskSchema-like object into a TaskSchema instance. 134 | 135 | Args: 136 | task_schema_like (TaskSchemaLike): The TaskSchema-like object to convert. 137 | 138 | Returns: 139 | TaskSchema: An instance of TaskSchema. 140 | 141 | Raises: 142 | SchemaError: If the input is neither a TaskSchema nor a dictionary. 
143 | """ 144 | if isinstance(task_schema_like, TaskSchema): 145 | return task_schema_like 146 | elif isinstance(task_schema_like, dict): 147 | return TaskSchema.from_json(task_schema_like) 148 | else: 149 | raise SchemaError("TaskSchemaLike must be either a TaskSchema or a dict") 150 | 151 | def __str__(self) -> str: 152 | """ 153 | Returns the JSON string representation of the TaskSchema. 154 | 155 | Returns: 156 | str: The JSON-formatted string of the TaskSchema. 157 | """ 158 | return json.dumps(self.to_json(), indent=2) 159 | 160 | 161 | TaskSchemaLike: TypeAlias = TaskSchema | dict 162 | -------------------------------------------------------------------------------- /agora/utils.py: -------------------------------------------------------------------------------- 1 | import base64 2 | import hashlib 3 | import urllib.parse 4 | from typing import Optional 5 | 6 | import requests 7 | import yaml 8 | 9 | 10 | def extract_substring( 11 | text: str, start_tag: str, end_tag: str, include_tags=True 12 | ) -> Optional[str]: 13 | """Extracts a substring from the given text, bounded by start_tag and end_tag. 14 | Case insensitive. 15 | 16 | Args: 17 | text (str): The source string. 18 | start_tag (str): The beginning delimiter. 19 | end_tag (str): The ending delimiter. 20 | include_tags (bool): Whether to include the tags in the result. Defaults to True. 21 | 22 | Returns: 23 | Optional[str]: The extracted substring or None if not found. 24 | """ 25 | start_position = text.lower().find(start_tag.lower()) 26 | end_position = text.lower().find(end_tag.lower(), start_position + len(start_tag)) 27 | 28 | if start_position == -1 or end_position == -1: 29 | return None 30 | 31 | if include_tags: 32 | return text[start_position : end_position + len(end_tag)].strip() 33 | return text[start_position + len(start_tag) : end_position].strip() 34 | 35 | 36 | def compute_hash(s: str) -> str: 37 | """Computes a hash of the given string. 
def encode_as_data_uri(text: str) -> str:
    """Encode the given text as a ``data:`` URI with percent-escaping.

    Args:
        text (str): The text to encode.

    Returns:
        str: A ``data:text/plain;charset=utf-8,`` URI carrying the text.
    """
    payload = urllib.parse.quote(text)
    return f"data:text/plain;charset=utf-8,{payload}"
def download_and_verify_protocol(
    protocol_hash: str, protocol_source: str, timeout: int = 10000
) -> Optional[str]:
    """Downloads a protocol from a source or decodes it if it's a data URI, then verifies its hash.

    Args:
        protocol_hash (str): The expected hash of the protocol.
        protocol_source (str): The protocol's location (URL or data URI).
        timeout (int): The request timeout in milliseconds. Defaults to 10000 (10 s).

    Returns:
        Optional[str]: The protocol text if hash verification passes, otherwise None.
    """
    base64_prefix = "data:text/plain;charset=utf-8;base64,"
    plain_prefix = "data:text/plain;charset=utf-8,"

    if protocol_source.startswith("data:"):
        # Inline protocols: support base64 and percent-encoded plain text.
        if protocol_source.startswith(base64_prefix):
            protocol = base64.b64decode(
                protocol_source[len(base64_prefix) :]
            ).decode("utf-8")
        elif protocol_source.startswith(plain_prefix):
            protocol = urllib.parse.unquote(protocol_source[len(plain_prefix) :])
        else:
            # Unsupported data URI media type / encoding.
            return None
    else:
        # BUG FIX: requests measures timeouts in *seconds*, but this API is
        # documented in milliseconds; convert so the default of 10000 means
        # 10 seconds rather than ~2.8 hours.
        response = requests.get(protocol_source, timeout=timeout / 1000)
        # The protocol is served as a plain text file.
        if response.status_code != 200:
            return None
        protocol = response.text

    # Accept the protocol only if its content matches the advertised hash.
    if compute_hash(protocol) == protocol_hash:
        return protocol

    return None
2 | 3 | In this quick tutorial, we'll establish a channel between two agents: 4 | - A LangChain agent that wants to retrieve temperature data 5 | - A Camel agent that has access to weather data 6 | 7 | ### Installation 8 | 9 | ``` 10 | pip install agora-protocol 11 | ``` 12 | 13 | For this tutorial, you'll also want to install two agent frameworks: 14 | ``` 15 | pip install langchain_openai 16 | pip install Pillow requests_oauthlib # Dependencies for camel-ai 17 | pip install camel-ai 18 | ``` 19 | 20 | We'll use an OpenAI model as base. You can set the API key via the `OPENAI_API_KEY=` environmental variable. 21 | 22 | 23 | ### Sender 24 | 25 | The Sender is an agent designed to execute **tasks**. Defining a task is as simple as taking a documented Python function and adding the `@sender.task` decorator. 26 | 27 | ```python 28 | import agora 29 | from langchain_openai import ChatOpenAI 30 | 31 | model = ChatOpenAI(model="gpt-4o-mini") 32 | toolformer = agora.toolformers.LangChainToolformer(model) 33 | 34 | sender = agora.Sender.make_default(toolformer) 35 | 36 | @sender.task() 37 | def get_temperature(city : str) -> int: 38 | """ 39 | Get the temperature for a given city. 40 | 41 | Parameters: 42 | city: The name of the city for which to retrieve the weather 43 | 44 | Returns: 45 | The temperature in °C for the given city. 46 | """ 47 | pass 48 | ``` 49 | 50 | Note: any properly annotated function with Google-style docstrings can be automatically converted to a task. Refer to this page for other ways to describe tasks. 51 | 52 | The function is automatically converted to a task function. A task function takes exactly the same arguments, in addition to a keyword-only argument `target` which represents the address of the remote agent. 
53 | 54 | ```python 55 | response = get_temperature('New York', target='http://localhost:5000') 56 | print(response) # Output: 25 57 | ``` 58 | 59 | When running this code, the Sender agent will begin a conversation with the Receiver agent on `localhost:5000`. The two will exchange information first using natural language and then, depending on the need, with structured data and automatic routines (see [specification]). All of this is abstracted away and happens under the hood. 60 | 61 | ### Receiver 62 | 63 | Let's now setup a Receiver instance on port 5000. This time, we'll use a Camel agent with one tool, `weather_db`: 64 | 65 | ```python 66 | import agora 67 | import camel.types # Needs to be installed separately 68 | 69 | toolformer = agora.toolformers.CamelToolformer( 70 | camel.types.ModelPlatformType.OPENAI, 71 | camel.types.ModelType.GPT_4O 72 | ) 73 | 74 | 75 | def weather_db(city: str) -> dict: 76 | """Gets the temperature and precipitation in a city. 77 | 78 | Args: 79 | city: The name of the city for which to retrieve the weather 80 | 81 | Returns: 82 | A dictionary containing the temperature and precipitation in the city (both ints) 83 | 84 | """ 85 | # Put your tool logic here 86 | return { 87 | 'temperature': 25, 88 | 'precipitation': 12 89 | } 90 | 91 | 92 | receiver = agora.Receiver.make_default(toolformer, tools=[weather_db]) 93 | ``` 94 | 95 | A receiver needs to be wrapped in a server capable of handling HTTP queries. For convinience's sake, the Agora client provides a Flask server that does that out of the box: 96 | 97 | ``` 98 | server = agora.ReceiverServer(receiver) 99 | server.run(port=5000) 100 | ``` 101 | 102 | We're done! The `get_temperature` task can now be used seamlessly in your custom workflow, or even by another agent. 
-------------------------------------------------------------------------------- /pyproject.toml: -------------------------------------------------------------------------------- 1 | [tool.poetry] 2 | name = "agora-protocol" 3 | version = "0.2.0" 4 | description = "Python library for the Agora Protocol." 5 | authors = ["Samuele Marro "] 6 | license = "MIT" 7 | readme = "README.md" 8 | packages = [ 9 | { include = "agora" }, 10 | ] 11 | 12 | [tool.poetry.dependencies] 13 | python = ">=3.10,<3.13" 14 | langchain = "^0.3.12" 15 | langchain-core = "^0.3.20" 16 | langgraph = "^0.2.60" 17 | flask = "^3.1" 18 | requests = "^2.32" 19 | PyYAML = "^6.0" 20 | RestrictedPython = "^7.4" 21 | camel-ai = {version="^0.2.6", optional=true} 22 | 23 | [tool.poetry.extras] 24 | camel-ai = ["camel-ai"] 25 | 26 | [build-system] 27 | requires = ["poetry-core"] 28 | build-backend = "poetry.core.masonry.api" 29 | --------------------------------------------------------------------------------