├── .DS_Store
├── .gitignore
├── agents_manager
│   ├── __init__.py
│   ├── models
│   │   ├── __init__.py
│   │   ├── Grok.py
│   │   ├── Llama.py
│   │   ├── DeepSeek.py
│   │   ├── GenUi.py
│   │   ├── OpenAi.py
│   │   ├── Anthropic.py
│   │   └── Genai.py
│   ├── Container.py
│   ├── Model.py
│   ├── Agent.py
│   ├── utils.py
│   └── AgentManager.py
├── .github
│   └── workflows
│       └── publish-to-pypi.yml
├── pyproject.toml
├── LICENCE
├── requirements.txt
├── tests
│   ├── chain_agents.py
│   ├── tests.py
│   ├── share_context.py
│   └── tree_agents.py
└── README.md

--------------------------------------------------------------------------------
/.DS_Store:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/sandeshnaroju/agents_manager/HEAD/.DS_Store

--------------------------------------------------------------------------------
/.gitignore:
--------------------------------------------------------------------------------
 1 | 
 2 | .idea/*
 3 | **/__pycache__/*
 4 | .pytest_cache/*
 5 | *.pyc
 6 | 
 7 | env/*
 8 | 
 9 | main.py
10 | .env
11 | 
12 | dist/*
13 | 
14 | *.egg-info/*
15 | main*

--------------------------------------------------------------------------------
/agents_manager/__init__.py:
--------------------------------------------------------------------------------
1 | 
2 | from agents_manager.Agent import Agent
3 | from agents_manager.AgentManager import AgentManager
4 | from agents_manager.Model import Model
5 | from agents_manager.Container import Container
6 | 
7 | __all__ = ["Agent", "AgentManager", "Model", "Container"]

--------------------------------------------------------------------------------
/agents_manager/models/__init__.py:
--------------------------------------------------------------------------------
 1 | from agents_manager.models.OpenAi import OpenAi
 2 | from agents_manager.models.Grok import Grok
 3 | from agents_manager.models.DeepSeek import DeepSeek
 4 | from agents_manager.models.Anthropic import Anthropic
 5 | from agents_manager.models.Llama import Llama
 6 | from agents_manager.models.Genai import Genai
 7 | from agents_manager.models.GenUi import GenUi
 8 | 
 9 | __all__ = ["OpenAi", "Grok", "DeepSeek", "Anthropic", "Llama", "Genai", "GenUi"]
10 | 

--------------------------------------------------------------------------------
/.github/workflows/publish-to-pypi.yml:
--------------------------------------------------------------------------------
 1 | name: Publish to PyPI
 2 | 
 3 | on:
 4 |   push:
 5 |     tags:
 6 |       - "v*.*.*"
 7 | 
 8 | jobs:
 9 |   build-and-publish:
10 |     runs-on: ubuntu-latest
11 |     permissions:
12 |       id-token: write
13 | 
14 |     steps:
15 |       - name: Checkout code
16 |         uses: actions/checkout@v4
17 | 
18 |       - name: Set up Python
19 |         uses: actions/setup-python@v5
20 |         with:
21 |           python-version: "3.x"
22 | 
23 |       - name: Install dependencies
24 |         run: |
25 |           python -m pip install --upgrade pip
26 |           pip install build twine
27 | 
28 |       - name: Build package
29 |         run: python -m build
30 | 
31 |       - name: Publish to PyPI
32 |         uses: pypa/gh-action-pypi-publish@release/v1

--------------------------------------------------------------------------------
/agents_manager/models/Grok.py:
--------------------------------------------------------------------------------
 1 | from typing import Any
 2 | 
 3 | from openai import OpenAI
 4 | 
 5 | from agents_manager.models import OpenAi
 6 | 
 7 | 
 8 | class Grok(OpenAi):
 9 |     def __init__(self, name: str, **kwargs: Any) -> None:
10 |         """
11 |         Initialize the Grok model with a name and optional keyword arguments.
12 | 
13 |         Args:
14 |             name (str): The name of the Grok model (e.g., "grok-2-latest").
15 |             **kwargs (Any): Additional arguments, including optional "api_key".
16 |         """
17 |         super().__init__(name, **kwargs)
18 | 
19 |         if name is None:
20 |             raise ValueError("A valid Grok model name is required")
21 | 
22 |         self.client = OpenAI(
23 |             api_key=kwargs.get("api_key"),
24 |             base_url="https://api.x.ai/v1"
25 |         )
26 | 

--------------------------------------------------------------------------------
/agents_manager/models/Llama.py:
--------------------------------------------------------------------------------
 1 | from typing import Any
 2 | 
 3 | from openai import OpenAI
 4 | 
 5 | from agents_manager.models import OpenAi
 6 | 
 7 | 
 8 | class Llama(OpenAi):
 9 |     def __init__(self, name: str, **kwargs: Any) -> None:
10 |         """
11 |         Initialize the Llama model with a name and optional keyword arguments.
12 | 
13 |         Args:
14 |             name (str): The name of the Llama model (e.g., "llama3.1-70b").
15 |             **kwargs (Any): Additional arguments, including optional "api_key".
16 |         """
17 |         super().__init__(name, **kwargs)
18 | 
19 |         if name is None:
20 |             raise ValueError("A valid Llama model name is required")
21 | 
22 |         self.client = OpenAI(
23 |             api_key=kwargs.get("api_key"),
24 |             base_url="https://api.llama-api.com"
25 |         )
26 | 

--------------------------------------------------------------------------------
/agents_manager/models/DeepSeek.py:
--------------------------------------------------------------------------------
 1 | from typing import Any
 2 | 
 3 | from openai import OpenAI
 4 | 
 5 | from agents_manager.models import OpenAi
 6 | 
 7 | 
 8 | class DeepSeek(OpenAi):
 9 |     def __init__(self, name: str, **kwargs: Any) -> None:
10 |         """
11 |         Initialize the DeepSeek model with a name and optional keyword arguments.
12 | 
13 |         Args:
14 |             name (str): The name of the DeepSeek model (e.g., "deepseek-chat").
15 |             **kwargs (Any): Additional arguments, including optional "api_key".
16 |         """
17 |         super().__init__(name, **kwargs)
18 | 
19 |         if name is None:
20 |             raise ValueError("A valid DeepSeek model name is required")
21 | 
22 |         self.client = OpenAI(
23 |             api_key=kwargs.get("api_key"),
24 |             base_url="https://api.deepseek.com"
25 |         )
26 | 

--------------------------------------------------------------------------------
/agents_manager/models/GenUi.py:
--------------------------------------------------------------------------------
 1 | from typing import Any
 2 | 
 3 | from openai import OpenAI
 4 | 
 5 | from agents_manager.models import OpenAi
 6 | import os
 7 | 
 8 | 
 9 | class GenUi(OpenAi):
10 |     def __init__(self, name: str, **kwargs: Any) -> None:
11 |         """
12 |         Initialize the GenUi model with a name and optional keyword arguments.
13 | 
14 |         Args:
15 |             name (str): The name of the GenUi model.
16 |             **kwargs (Any): Additional arguments, including optional "api_key".
17 | """ 18 | super().__init__(name, **kwargs) 19 | 20 | if name is None: 21 | raise ValueError("A valid GenUi model name is required") 22 | 23 | self.client = OpenAI( 24 | api_key=os.getenv("PROCHAT_API_KEY", kwargs.get("api_key")), 25 | base_url="https://www.prochat.dev/apps/api/v1", 26 | ) 27 | -------------------------------------------------------------------------------- /pyproject.toml: -------------------------------------------------------------------------------- 1 | [build-system] 2 | requires = ["setuptools>=61.0", "wheel"] 3 | build-backend = "setuptools.build_meta" 4 | 5 | [project] 6 | name = "agents-manager" 7 | version = "1.4.6" 8 | authors = [ 9 | { name = "Naroju Sandesh", email = "sandeshnaroju@gmail.com" }, 10 | ] 11 | description = "A lightweight Python package for managing multi-agent orchestration. Easily define agents with custom instructions, tools, containers, and models, and orchestrate their interactions seamlessly. Perfect for building modular, collaborative AI systems." 12 | readme = "README.md" 13 | requires-python = ">=3.7" 14 | dependencies = [ 15 | "docker>=7.1.0", 16 | "google-genai>=1.4.0", 17 | "anthropic>=0.47.1", 18 | "openai>=1.0.0", 19 | "python-dotenv>=0.21.0", 20 | ] 21 | license = { text = "MIT" } 22 | keywords = ["agents", "multi-agent", "orchestration", "AI"] 23 | 24 | [project.urls] 25 | "Homepage" = "https://github.com/sandeshnaroju/agents_manager" 26 | "Repository" = "https://github.com/sandeshnaroju/agents_manager" 27 | -------------------------------------------------------------------------------- /LICENCE: -------------------------------------------------------------------------------- 1 | MIT License 2 | 3 | Copyright (c) 2025 https://github.com/sandeshnaroju 4 | 5 | Permission is hereby granted, free of charge, to any person obtaining a copy 6 | of this software and associated documentation files (the "Software"), to deal 7 | in the Software without restriction, including without limitation the rights 8 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell 9 | copies of the Software, and to permit persons to whom the Software is 10 | furnished to do so, subject to the following conditions: 11 | 12 | The above copyright notice and this permission notice shall be included in all 13 | copies or substantial portions of the Software. 14 | 15 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 16 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 17 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE 18 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 19 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, 20 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE 21 | SOFTWARE. 
--------------------------------------------------------------------------------
/requirements.txt:
--------------------------------------------------------------------------------
 1 | annotated-types==0.7.0
 2 | anthropic==0.47.1
 3 | anyio==4.8.0
 4 | backports.tarfile==1.2.0
 5 | build==1.2.2.post1
 6 | cachetools==5.5.2
 7 | certifi==2025.1.31
 8 | charset-normalizer==3.4.1
 9 | distro==1.9.0
10 | docker==7.1.0
11 | docutils==0.21.2
12 | dotenv==0.9.9
13 | exceptiongroup==1.2.2
14 | google-auth==2.38.0
15 | google-genai==1.4.0
16 | h11==0.14.0
17 | httpcore==1.0.7
18 | httpx==0.28.1
19 | id==1.5.0
20 | idna==3.10
21 | importlib_metadata==8.6.1
22 | jaraco.classes==3.4.0
23 | jaraco.context==6.0.1
24 | jaraco.functools==4.1.0
25 | jiter==0.8.2
26 | keyring==25.6.0
27 | markdown-it-py==3.0.0
28 | mdurl==0.1.2
29 | more-itertools==10.6.0
30 | nh3==0.2.20
31 | openai==1.63.2
32 | packaging==24.2
33 | pyasn1==0.6.1
34 | pyasn1_modules==0.4.1
35 | pydantic==2.10.6
36 | pydantic_core==2.27.2
37 | Pygments==2.19.1
38 | pyproject_hooks==1.2.0
39 | pytest==8.2.2
40 | python-dotenv==1.0.1
41 | readme_renderer==44.0
42 | requests==2.32.3
43 | requests-toolbelt==1.0.0
44 | rfc3986==2.0.0
45 | rich==13.9.4
46 | rsa==4.9
47 | sniffio==1.3.1
48 | tomli==2.2.1
49 | tqdm==4.67.1
50 | twine==6.1.0
51 | typing_extensions==4.12.2
52 | urllib3==2.3.0
53 | websockets==14.2
54 | zipp==3.21.0
55 | 

--------------------------------------------------------------------------------
/tests/chain_agents.py:
--------------------------------------------------------------------------------
 1 | from pydantic import BaseModel
 2 | from agents_manager import Agent, AgentManager
 3 | from agents_manager.utils import handover
 4 | 
 5 | 
 6 | class Format(BaseModel):
 7 |     secret: str
 8 |     tool_name: str
 9 | 
10 | 
11 | def chain_setup(model):
12 |     def transfer_to_agent5() -> Agent:
13 |         """Follow me for success"""
14 |         return agent5
15 | 
16 |     handover6 = handover("agent6", "Has some secrets", share_context=False)
17 | 
18 |     agent4 = Agent(
19 |         name="agent4",
20 |         instruction='Your only task is giving the secret key to the user using proper tools. Response will just be this dict nothing else {"secret": <secret>, "tool_name": <tool_name>}',
21 |         model=model,
22 |         output_format=Format,
23 |     )
24 | 
25 |     agent5 = Agent(
26 |         name="agent5",
27 |         instruction="Use tools to find the secret key for the user. After you find it just say `here is the secret key: <secret>. I got it from tool named <tool_name>` and nothing else",
28 |         model=model,
29 |     )
30 | 
31 |     agent6 = Agent(
32 |         name="agent6",
33 |         instruction="If someone asks for the secret key give them this `chaining_agents_works`",
34 |         model=model,
35 |     )
36 | 
37 |     agent4.tools = [transfer_to_agent5]
38 |     agent5.tools = [handover6]
39 | 
40 |     manager = AgentManager()
41 | 
42 |     manager.add_agent(agent4)
43 |     manager.add_agent(agent5)
44 |     manager.add_agent(agent6)
45 | 
46 |     return manager
47 | 

--------------------------------------------------------------------------------
/tests/tests.py:
--------------------------------------------------------------------------------
 1 | import json
 2 | from agents_manager.models import OpenAi, Anthropic, Genai
 3 | 
 4 | from tree_agents import tree_setup
 5 | from chain_agents import chain_setup
 6 | from share_context import share_context_setup
 7 | 
 8 | STORY = """
 9 | A quiet seed fell into rich soil.
10 | Rain came gently, and the sun followed.
11 | Days passed. A sprout emerged, green and hopeful.
12 | It grew tall, touched by breeze and birdsong.
13 | In time, it became a tree, offering shade and shelter.
14 | Life continued, simple and still, beneath its patient branches. 15 | """ 16 | 17 | openai_model = OpenAi(name="gpt-4o-mini") 18 | genai_model = Genai(name="gemini-2.0-flash") 19 | anthropic_model = Anthropic(name="claude-sonnet-4-20250514", max_tokens=1024) 20 | 21 | 22 | def test_tree_handover(): 23 | manager = tree_setup(openai_model) 24 | 25 | resp = manager.run_agent( 26 | "agent1", 27 | [{"role": "user", "content": f"Summarize it and then extend it {STORY}"}], 28 | ) 29 | 30 | resp = json.loads(resp["content"]) 31 | 32 | assert resp["summarize"]["pos"] == 1 33 | assert resp["extend"]["pos"] == 2 34 | 35 | 36 | def test_chain_handover(): 37 | manager = chain_setup(openai_model) 38 | 39 | resp = manager.run_agent( 40 | "agent4", 41 | [{"role": "user", "content": "Give me the secret"}], 42 | ) 43 | 44 | resp = json.loads(resp["content"]) 45 | 46 | assert resp["secret"] == "chaining_agents_works" 47 | assert resp["tool_name"] == "handover_agent6" 48 | 49 | 50 | def test_share_context(): 51 | manager = share_context_setup(anthropic_model, True) 52 | 53 | resp = manager.run_agent( 54 | "master", {"role": "user", "content": "Do as the system prompt says"} 55 | ) 56 | 57 | assert "489346111" in resp["content"] 58 | -------------------------------------------------------------------------------- /tests/share_context.py: -------------------------------------------------------------------------------- 1 | from agents_manager.utils import handover 2 | from agents_manager import Agent, AgentManager 3 | 4 | 5 | def share_context_setup(model, share_context): 6 | handover_agent1 = handover( 7 | "agent1", 8 | "This is the first tool", 9 | ) 10 | 11 | handover_agent2 = handover( 12 | "agent2", 13 | "This is the second tool", 14 | share_context=share_context, 15 | ) 16 | 17 | handover_agent3 = handover( 18 | "agent3", 19 | "This is the third tool", 20 | share_context=share_context, 21 | ) 22 | 23 | agent1 = Agent( 24 | name="agent1", 25 | instruction="""Whatever the user asks just respond with "489".""", 26 | model=model, 27 | ) 28 | 29 | agent2 = Agent( 30 | name="agent2", 31 | instruction="""Whatever the user asks look into latest tool call result and append "346" to it""", 32 | model=model, 33 | ) 34 | 35 | agent3 = Agent( 36 | name="agent3", 37 | instruction="""Whatever the user asks look into latest tool call result and append "111" to it""", 38 | model=model, 39 | ) 40 | 41 | master = Agent( 42 | name="master", 43 | instruction="""Whatever the user asks just run the given 3 tools. All tools have in description which to run 44 | when. Blindly run it all and then you need to give the result of the third tool back. Only return what 45 | third tool returns nothing else. 
46 |         """,
47 |         model=model,
48 |         tools=[handover_agent1, handover_agent2, handover_agent3],
49 |     )
50 | 
51 |     agent_manager = AgentManager()
52 |     agent_manager.add_agent(master)
53 |     agent_manager.add_agent(agent1)
54 |     agent_manager.add_agent(agent2)
55 |     agent_manager.add_agent(agent3)
56 | 
57 |     return agent_manager
58 | 

--------------------------------------------------------------------------------
/tests/tree_agents.py:
--------------------------------------------------------------------------------
 1 | from pydantic import BaseModel
 2 | from agents_manager import Agent
 3 | from agents_manager import AgentManager
 4 | from agents_manager.utils import handover
 5 | 
 6 | 
 7 | class OtherAgentFormat(BaseModel):
 8 |     content: str
 9 |     pos: int
10 | 
11 | 
12 | class AgentOneFormat(BaseModel):
13 |     summarize: OtherAgentFormat
14 |     extend: OtherAgentFormat
15 | 
16 | 
17 | def tree_setup(model):
18 |     def transfer_to_agent2() -> Agent:
19 |         """Extends length of any content"""
20 |         return agent2
21 | 
22 |     handover3 = handover("agent3", "Summarizes any given content", share_context=False)
23 | 
24 |     agent1 = Agent(
25 |         name="agent1",
26 |         instruction="""
27 |         Your responsibility is to properly forward the user query to the respective agents and then, at last,
28 |         properly format the response.
29 | 
30 |         You should always reply in this format:
31 |         {
32 |             "summarize": {
33 |                 "content": <content>,
34 |                 "pos": <position>
35 |             },
36 |             "extend": {
37 |                 "content": <content>,
38 |                 "pos": <position>
39 |             }
40 |         }
41 |         """,
42 |         model=model,
43 |         output_format=AgentOneFormat,
44 |     )
45 | 
46 |     agent2 = Agent(
47 |         name="agent2",
48 |         instruction="""
49 |         Your job is to extend the length of anything the user provides; you should reply with
50 |         at least 100 words. Add whatever you want. You can't do anything more than that.
51 |         """,
52 |         model=model,
53 |     )
54 | 
55 |     agent3 = Agent(
56 |         name="agent3",
57 |         instruction="""
58 |         Summarize any content with less than 20 words. You can't do anything more than that.
59 | """, 60 | model=model, 61 | ) 62 | 63 | agent1.tools = [transfer_to_agent2, handover3] 64 | 65 | manager = AgentManager() 66 | 67 | manager.add_agent(agent1) 68 | manager.add_agent(agent2) 69 | manager.add_agent(agent3) 70 | 71 | return manager 72 | -------------------------------------------------------------------------------- /agents_manager/Container.py: -------------------------------------------------------------------------------- 1 | from typing import Any, Dict 2 | 3 | import docker 4 | from agents_manager.utils import replace_placeholder 5 | 6 | 7 | class Container: 8 | def __init__(self, name: str, description: str, **kwargs: Any): 9 | self.name = name 10 | self.description = description 11 | self.environment = kwargs.get("environment", {}) 12 | self.auth_credentials = kwargs.pop("authenticate", {}) 13 | self.return_to = kwargs.get("return_to", None) 14 | self.kwargs = kwargs 15 | self.client = None 16 | self.initialize() 17 | self._authenticate() 18 | 19 | def initialize(self): 20 | try: 21 | self.client = docker.from_env() 22 | except Exception as e: 23 | print(f"Error: {e}") 24 | 25 | def _authenticate(self): 26 | if self.auth_credentials: 27 | self.client.login( 28 | username=self.auth_credentials.get("username"), 29 | password=self.auth_credentials.get("password"), 30 | registry=self.auth_credentials.get("registry"), 31 | ) 32 | 33 | def pull_image(self): 34 | """Pull the specified image from the registry.""" 35 | image = self.kwargs.get("image") 36 | if not image: 37 | raise ValueError("No image specified in kwargs") 38 | self.client.images.pull(image) 39 | 40 | def run(self, arguments: Dict[str, Any]): 41 | """Run the container with provided arguments.""" 42 | if "image" not in self.kwargs: 43 | raise ValueError("Image must be specified in kwargs") 44 | self.pull_image() 45 | self.kwargs["detach"] = False 46 | self.kwargs["remove"] = True 47 | self.kwargs["environment"] = arguments 48 | return_to = self.kwargs.pop("return_to", None) 49 | 50 | result = self.client.containers.run( 51 | **self.kwargs, 52 | ) 53 | if return_to and "agent" in return_to: 54 | if "instruction" in return_to: 55 | instruction = replace_placeholder(return_to["instruction"], result) 56 | return_to["agent"].set_instruction(instruction=instruction) 57 | return return_to["agent"] 58 | return result 59 | -------------------------------------------------------------------------------- /agents_manager/Model.py: -------------------------------------------------------------------------------- 1 | import json 2 | from abc import ABC, abstractmethod 3 | from typing import List, Dict, Any, Optional, Generator, Callable 4 | 5 | 6 | class Model(ABC): 7 | def __init__(self, name: str, **kwargs: Any) -> None: 8 | """ 9 | Initialize the Model with a name and optional keyword arguments. 10 | 11 | Args: 12 | name (str): The name of the model. 13 | **kwargs (Any): Additional keyword arguments. 14 | """ 15 | self.messages: str = "" # Messages can be None initially 16 | self.name: str = name 17 | self.kwargs: Dict[str, Any] = kwargs 18 | 19 | def set_messages(self, messages: List[Dict[str, str]]) -> None: 20 | """ 21 | Set the messages for the model. 22 | 23 | Args: 24 | messages (List[Dict[str, str]]): A list of message dictionaries with "role" and "content". 25 | """ 26 | self.messages = json.dumps(messages) 27 | 28 | def get_messages(self) -> Optional[List[Dict[str, str]]]: 29 | """ 30 | Get the messages for the model. 
 31 | 
 32 |         Returns:
 33 |             Optional[List[Dict[str, str]]]: The list of message dictionaries if set, else None.
 34 |         """
 35 |         return json.loads(self.messages) if len(self.messages) > 0 else None
 36 | 
 37 |     def clear_messages(self) -> None:
 38 |         """
 39 |         Clear the messages for the model.
 40 |         """
 41 |         self.messages = ""  # reset to an empty string so get_messages() can still call len()
 42 | 
 43 |     def set_kwargs(self, kwargs: Dict[str, Any]) -> None:
 44 |         """
 45 |         Update the model's keyword arguments by merging with existing ones.
 46 | 
 47 |         Args:
 48 |             kwargs (Dict[str, Any]): New keyword arguments to merge with existing ones.
 49 |         """
 50 |         self.kwargs = {**self.kwargs, **kwargs}
 51 | 
 52 |     @abstractmethod
 53 |     def generate_response(self) -> Dict:
 54 |         """
 55 |         Generate a non-streaming response based on the model's implementation.
 56 | 
 57 |         Returns:
 58 |             Dict: The response; the exact contents depend on the concrete implementation.
 59 |         """
 60 |         return {
 61 |             "tool_calls": [],
 62 |             "content": "",
 63 |         }
 64 | 
 65 |     @abstractmethod
 66 |     def generate_stream_response(self) -> Generator[Dict, None, None]:
 67 |         """
 68 |         Generate a streaming response based on the model's implementation.
 69 | 
 70 |         Yields:
 71 |             Dict: Response chunks; the exact contents depend on the concrete implementation.
 72 |         """
 73 |         yield {
 74 |             "tool_calls": [],
 75 |             "content": "",
 76 |         }
 77 | 
 78 |     @abstractmethod
 79 |     def get_tool_format(self) -> Dict[str, Any]:
 80 |         """
 81 |         Get the format for the tool call.
 82 | 
 83 |         Returns:
 84 |             Dict[str, Any]: The tool call format.
 85 |         """
 86 |         return {}
 87 | 
 88 |     @abstractmethod
 89 |     def get_keys_in_tool_output(self, tool_call: Dict[str, Any]) -> Dict[str, Any]:
 90 |         """
 91 |         Get the parsed tool call data.
 92 | 
 93 |         Args:
 94 |             tool_call (Dict[str, Any]): The tool call data.
 95 | 
 96 |         Returns:
 97 |             Dict[str, Any]: The parsed tool call data.
 98 |         """
 99 |         return {}
100 | 
101 |     @abstractmethod
102 |     def get_assistant_message(self, response: Any) -> Dict[str, Any]:
103 |         """
104 |         Get the assistant message for prepending to the response.
105 | 
106 |         Args:
107 |             response (Any): The response from the model.
108 | 
109 |         Returns:
110 |             Dict[str, Any]: The assistant message.
111 |         """
112 |         return {}
113 | 
114 |     @abstractmethod
115 |     def get_tool_message(self, tool_responses: List[Dict[str, Any]]) -> Any:
116 |         """
117 |         Get the tool message for appending to the response.
118 | 
119 |         Args:
120 |             tool_responses (List[Dict[str, Any]]): The tool responses.
121 |         Returns:
122 |             Dict[str, Any]: The tool message.
123 |         """
124 |         return {}
125 | 
126 |     @abstractmethod
127 |     def set_system_message(self, message: str) -> None:
128 |         """
129 |         Set the system message for the model.
130 | 
131 |         Args:
132 |             message (str): The system message.
133 |         """
134 |         pass
135 | 
136 |     @abstractmethod
137 |     def set_user_message(self, message: str) -> None:
138 |         """
139 |         Set the user message for the model.
140 | 
141 |         Args:
142 |             message (str): The user message.
143 |         """
144 |         pass
145 | 
146 |     @abstractmethod
147 |     def set_tools(self, tools: List[Callable]) -> None:
148 |         """
149 |         Set the tools for the model.
150 | 
151 |         Args:
152 |             tools (List[Callable]): The tools.
153 |         """
154 |         pass
155 | 
156 |     @abstractmethod
157 |     def set_output_format(self, output_format: Callable) -> None:
158 |         """
159 |         Set the output format for the model.
160 | 
161 |         Args:
162 |             output_format (Callable): The output format.
163 | """ 164 | pass -------------------------------------------------------------------------------- /agents_manager/Agent.py: -------------------------------------------------------------------------------- 1 | from typing import List, Optional, Callable, Dict, Union, Generator 2 | 3 | from agents_manager.Container import Container 4 | from agents_manager.Model import Model 5 | from agents_manager.utils import function_to_json 6 | 7 | 8 | class Agent: 9 | def __init__(self, name: Optional[str] = None, 10 | instruction: Optional[str] = None, 11 | model: Optional[Model] = None, 12 | tools: Optional[List[Union[Callable, Container]]] = None, 13 | tool_choice: Optional[Dict] = None, 14 | output_format: Optional[Callable] = None 15 | ) -> None: 16 | """ 17 | Initialize the Agent with a name, instruction, model, tools, and tool choice function. 18 | Args: 19 | name : 20 | instruction: 21 | model: 22 | tools: 23 | tool_choice: 24 | output_format 25 | """ 26 | 27 | self.name: Optional[str] = name 28 | self.instruction: str = instruction or "" 29 | self.tools: List[Union[Callable, Container]] = tools or [] 30 | if model is None or not isinstance(model, Model): 31 | raise ValueError("A valid instance of a Model subclass is required") 32 | self.model: Model = model 33 | self.tool_choice = tool_choice 34 | self.output_format = output_format 35 | 36 | def set_instruction(self, instruction: str) -> None: 37 | """ 38 | Set the system instruction and update the model's messages. 39 | 40 | Args: 41 | instruction (str): The system instruction for the agent. 42 | """ 43 | self.instruction = instruction 44 | 45 | def get_instruction(self) -> str: 46 | """ 47 | Get the system instruction for the agent. 48 | 49 | Returns: 50 | str: The system instruction. 51 | """ 52 | return self.instruction 53 | 54 | def get_messages(self) -> Optional[List[Dict[str, str]]]: 55 | """ 56 | Get the messages for the model. 57 | 58 | Returns: 59 | Optional[List[Dict[str, str]]]: The list of message dictionaries if set, else None. 60 | """ 61 | return self.model.get_messages() 62 | 63 | def set_messages(self, messages: List[Dict[str, str]]) -> None: 64 | """ 65 | Set the messages for the model. 66 | 67 | Args: 68 | messages (List[Dict[str, str]]): A list of message dictionaries with "role" and "content". 69 | """ 70 | self.model.set_messages(messages) 71 | 72 | def set_tools(self, tools: List[Callable]) -> None: 73 | """ 74 | Set the tools for the agent and update the model's kwargs. 75 | 76 | Args: 77 | tools (List[Callable]): List of callable tools to be used by the agent. 78 | """ 79 | self.tools = tools 80 | self.model.set_tools(tools) 81 | 82 | def get_tools(self) -> List[Callable]: 83 | """ 84 | Get the tools for the agent. 85 | 86 | Returns: 87 | List[Callable]: The list of callable tools. 88 | """ 89 | return self.tools 90 | 91 | def get_model(self) -> Model: 92 | """ 93 | Get the model instance for the agent. 94 | 95 | Returns: 96 | Model: The model instance. 97 | """ 98 | return self.model 99 | 100 | def set_model(self, model: Model) -> None: 101 | """ 102 | Set the model instance for the agent. 103 | 104 | Args: 105 | model (Model): An instance of a concrete Model subclass. 106 | """ 107 | if model is None or not isinstance(model, Model): 108 | raise ValueError("A valid instance of a Model subclass is required") 109 | self.model = model 110 | 111 | def set_tool_choice(self, tool_choice: Callable) -> None: 112 | """ 113 | Set the tool choice function for the agent. 
114 | 
115 |         Args:
116 |             tool_choice (Callable): The function that selects a tool from the list of tools.
117 |         """
118 |         self.tool_choice = tool_choice
119 |         self.model.set_kwargs({
120 |             "tool_choice": function_to_json(tool_choice)
121 |         })
122 | 
123 |     def get_response(self) -> Dict:
124 |         """
125 |         Generate a non-streaming response from the model.
126 | 
127 |         Returns:
128 |             Dict: The response; contents depend on the model's implementation.
129 |         """
130 |         if not hasattr(self.model, 'messages') or self.model.messages is None:
131 |             raise ValueError("Messages must be set before generating a response")
132 |         return self.model.generate_response()
133 | 
134 |     def get_stream_response(self) -> Generator[Dict, None, None]:
135 |         """
136 |         Generate a streaming response from the model.
137 | 
138 |         Yields:
139 |             Dict: Response chunks; contents depend on the model's implementation.
140 |         """
141 |         if not hasattr(self.model, 'messages') or self.model.messages is None:
142 |             raise ValueError("Messages must be set before generating a response")
143 |         yield from self.model.generate_stream_response()
144 | 
145 |     def set_system_message(self, message: str) -> None:
146 |         """
147 |         Set the system message for the agent.
148 | 
149 |         Args:
150 |             message (str): The system message.
151 |         """
152 |         self.model.set_system_message(message)
153 | 
154 |     def set_user_message(self, message: str) -> None:
155 |         """
156 |         Set the user message for the agent.
157 | 
158 |         Args:
159 |             message (str): The user message.
160 |         """
161 |         self.model.set_user_message(message)
162 | 
163 |     def set_output_format(self) -> None:
164 |         """
165 |         Set the output format function for the agent.
166 |         Passes the agent's output_format through to the underlying model.
167 |         """
168 |         self.model.set_output_format(self.output_format)

--------------------------------------------------------------------------------
/agents_manager/utils.py:
--------------------------------------------------------------------------------
 1 | import inspect
 2 | 
 3 | 
 4 | def populate_template(template, data):
 5 |     if isinstance(template, dict):
 6 |         result = {}
 7 |         for key, value in template.items():
 8 |             if isinstance(value, str) and value.startswith("{") and value.endswith("}"):
 9 |                 key_in_data = value[1:-1]
10 |                 result[key] = data.get(key_in_data, value)
11 |             else:
12 |                 result[key] = populate_template(value, data)
13 |         return result
14 |     elif isinstance(template, list):
15 |         return [populate_template(item, data) for item in template]
16 |     else:
17 |         return template
18 | 
19 | 
20 | def function_to_json(func, format_template: dict = None) -> dict:
21 |     """
22 |     Converts a Python function into a JSON-serializable dictionary based on a custom format template.
23 | 
24 |     Args:
25 |         func: The function to be converted.
26 |         format_template: A dictionary specifying the desired output structure.
27 |                          Use placeholders like '{name}', '{description}', '{parameters}', '{required}'
28 |                          as keys or values to indicate where function data should be inserted.
29 |                          If None, a default format is used.
30 | 
31 |     Returns:
32 |         A dictionary representing the function's signature in the specified format.
33 | """ 34 | # Default type mapping for annotations 35 | type_map = { 36 | str: "string", 37 | int: "integer", 38 | float: "number", 39 | bool: "boolean", 40 | list: "array", 41 | dict: "object", 42 | type(None): "null", 43 | } 44 | 45 | # Get function signature 46 | try: 47 | signature = inspect.signature(func) 48 | except ValueError as e: 49 | raise ValueError( 50 | f"Failed to get signature for function {func.__name__}: {str(e)}" 51 | ) 52 | 53 | # Build parameters dynamically 54 | parameters = {} 55 | for param in signature.parameters.values(): 56 | param_type = ( 57 | type_map.get(param.annotation, "string") 58 | if param.annotation != inspect.Parameter.empty 59 | else "string" 60 | ) 61 | param_details = {"type": param_type} 62 | if param.default != inspect.Parameter.empty: 63 | param_details["default"] = param.default 64 | parameters[param.name] = param_details 65 | 66 | # Identify required parameters 67 | required = [ 68 | param.name 69 | for param in signature.parameters.values() 70 | if param.default == inspect.Parameter.empty 71 | ] 72 | 73 | # Default format if none provided 74 | if format_template is None: 75 | format_template = { 76 | "type": "function", 77 | "function": { 78 | "name": "{name}", 79 | "description": "{description}", 80 | "parameters": { 81 | "type": "object", 82 | "properties": "{parameters}", 83 | "required": "{required}", 84 | "additionalProperties": False, 85 | }, 86 | }, 87 | "strict": True, 88 | } 89 | 90 | # Extract function metadata 91 | func_data = { 92 | "name": func.__name__, 93 | "description": (func.__doc__ or "").strip(), 94 | "parameters": parameters, 95 | "required": required if required else [], 96 | } 97 | 98 | return populate_template(format_template, func_data) 99 | 100 | 101 | def container_to_json(container, format_template: dict = None) -> dict: 102 | """ 103 | Converts a Container instance into a JSON-serializable dictionary based on a custom format template. 104 | 105 | Args: 106 | container: The Container instance to be converted. 107 | format_template: A dictionary specifying the desired output structure. 108 | Use placeholders like '{name}', '{description}', '{parameters}', '{required}' 109 | as keys or values to indicate where container data should be inserted. 110 | If None, a default format is used. 111 | 112 | Returns: 113 | A dictionary representing the container's attributes in the specified format. 
114 | """ 115 | # Default type mapping for annotations 116 | type_map = { 117 | "string": "string", 118 | "integer": "integer", 119 | "number": "number", 120 | "boolean": "boolean", 121 | "array": "array", 122 | "object": "object", 123 | "null": "null", 124 | } 125 | 126 | # Build parameters dynamically from environment variables 127 | parameters = {} 128 | required = [] 129 | 130 | for env_var in container.environment: 131 | param_type = type_map.get(env_var.get("type", "string"), "string") 132 | param_details = {"type": param_type} 133 | parameters[env_var["name"]] = param_details 134 | required.append(env_var["name"]) 135 | 136 | # Default format if none provided 137 | if format_template is None: 138 | format_template = { 139 | "type": "container", 140 | "container": { 141 | "name": "{name}", 142 | "description": "{description}", 143 | "parameters": { 144 | "type": "object", 145 | "properties": "{parameters}", 146 | "required": "{required}", 147 | "additionalProperties": False, 148 | }, 149 | }, 150 | "strict": True, 151 | } 152 | 153 | # Extract container metadata 154 | container_data = { 155 | "name": container.name, 156 | "description": container.description, 157 | "parameters": parameters, 158 | "required": required, 159 | } 160 | 161 | return populate_template(format_template, container_data) 162 | 163 | 164 | def extract_key_values(tool_call_output: dict, keys_to_find: list) -> dict: 165 | """ 166 | Extracts values for specified keys from a tool_call output dictionary. 167 | 168 | Args: 169 | tool_call_output: The dictionary representing the populated tool_call output. 170 | keys_to_find: A list of key names to search for (e.g., ["id", "name", "arguments"]). 171 | 172 | Returns: 173 | A dictionary mapping each specified key to its value(s) from the output. 174 | """ 175 | result = { 176 | key: [] for key in keys_to_find 177 | } # Initialize with empty lists for each key 178 | 179 | # Helper function to recursively search the dictionary 180 | def search_dict(data, target_keys): 181 | if isinstance(data, dict): 182 | for key, value in data.items(): 183 | if key in target_keys: 184 | result[key].append(value) 185 | search_dict(value, target_keys) 186 | elif isinstance(data, list): 187 | for item in data: 188 | search_dict(item, target_keys) 189 | 190 | # Start the search 191 | search_dict(tool_call_output, keys_to_find) 192 | 193 | # Clean up the result: single value if found once, list if multiple, omit if not found 194 | cleaned_result = {} 195 | for key, values in result.items(): 196 | if values: # Only include keys that were found 197 | cleaned_result[key] = values[0] if len(values) == 1 else values 198 | 199 | return cleaned_result 200 | 201 | 202 | def replace_placeholder(instruction: str, result: bytes) -> str: 203 | return instruction.replace("{result}", result.decode("utf-8")) 204 | 205 | 206 | def handover(agent_name: str, description: str, share_context: bool = False): 207 | """ 208 | Hands over the task to the given agent. 
209 | 210 | Args: 211 | agent_name: name of the agent you want to hand over to 212 | description: why do you want to handover 213 | """ 214 | 215 | def handover_inner() -> str: 216 | return agent_name 217 | 218 | handover_inner.__name__ = f"handover_{agent_name}" 219 | handover_inner.__doc__ = description 220 | handover_inner.share_context = share_context 221 | 222 | return handover_inner 223 | 224 | 225 | def write_log(log, logger, message, level="INFO"): 226 | if log: 227 | if level == "INFO": 228 | logger.info(message) 229 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | # Agents Manager 2 | 3 | [![PyPI version](https://badge.fury.io/py/agents-manager.svg)](https://badge.fury.io/py/agents-manager) 4 | [![License: MIT](https://img.shields.io/badge/License-MIT-yellow.svg)](https://opensource.org/licenses/MIT) 5 | ![Python Version](https://img.shields.io/badge/python-3.7%2B-blue) 6 | [![Downloads](https://img.shields.io/pypi/dm/agents-manager.svg)](https://pypi.org/project/agents-manager/) 7 | 8 | A lightweight Python package for managing multi-agent orchestration. Easily define agents with custom instructions, tools, and models, and orchestrate their interactions seamlessly. Perfect for building modular, collaborative AI systems. 9 | 10 | ## Features 11 | 12 | - Define agents with specific roles and instructions 13 | - Assign models to agents (e.g., OpenAI models) 14 | - Equip agents with tools and containers for performing tasks 15 | - Seamlessly orchestrate interactions between multiple agents 16 | 17 | ## Supported Models 18 | 19 | - OpenAI 20 | - Grok 21 | - DeepSeek 22 | - Anthropic 23 | - Llama 24 | - Genai 25 | 26 | ## Installation 27 | 28 | Install the package via pip: 29 | 30 | ```sh 31 | pip install agents-manager 32 | ``` 33 | 34 | ## Quick Start 35 | 36 | ```python 37 | from agents_manager.utils import handover 38 | from agents_manager import Agent, AgentManager 39 | from agents_manager.models import OpenAi, Anthropic, Genai 40 | 41 | from dotenv import load_dotenv 42 | 43 | load_dotenv() 44 | 45 | # Define the OpenAi model 46 | openaiModel = OpenAi(name="gpt-4o-mini") 47 | 48 | #Define the Anthropic model 49 | anthropicModel = Anthropic( 50 | name="claude-3-5-sonnet-20241022", 51 | max_tokens= 1024, 52 | ) 53 | 54 | #Define the Genai model 55 | genaiModel = Genai(name="gemini-2.0-flash-001") 56 | 57 | def multiply(a: int, b: int) -> int: 58 | """ 59 | Multiply two numbers. 60 | """ 61 | return a * b 62 | 63 | 64 | def transfer_to_agent_3_for_math_calculation() -> Agent: 65 | """ 66 | Transfer to agent 3 for math calculation. 67 | """ 68 | return agent3 69 | 70 | # The 'handover' function allows for transferring tasks to specific agents by name instead of instance. 71 | # When `share_context` is set to True, the receiving agent will receive the 72 | # chat history of the agent that is invoking the handover. 
73 | handover_to_agent2 = handover(agent_name="agent2", description="This is a calculator", share_context=False)
74 | 
75 | # Define agents
76 | agent3 = Agent(
77 |     name="agent3",
78 |     instruction="You are a maths teacher, explain properly how you calculated the answer.",
79 |     model=genaiModel,
80 |     tools=[multiply]
81 | )
82 | 
83 | agent2 = Agent(
84 |     name="agent2",
85 |     instruction="You are a maths calculator bro",
86 |     model=anthropicModel,
87 |     tools=[transfer_to_agent_3_for_math_calculation]
88 | )
89 | 
90 | agent1 = Agent(
91 |     name="agent1",
92 |     instruction="You are a helpful assistant",
93 |     model=openaiModel,
94 |     tools=[handover_to_agent2]
95 | )
96 | 
97 | # Initialize the Agent Manager and run an agent
98 | agent_manager = AgentManager()
99 | agent_manager.add_agent(agent1)
100 | 
101 | # Using transfer doesn't require pre-adding the agent, but with handover, the agent must be added to
102 | # the agent_manager beforehand.
103 | agent_manager.add_agent(agent2)
104 | 
105 | response = agent_manager.run_agent("agent1", "What is 2 multiplied by 3?")
106 | print(response["content"])
107 | ```
108 | 
109 | You can also stream the response.
110 | ```python
111 | response_stream = agent_manager.run_agent_stream("agent1", [
112 |     {"role": "user", "content": "What is 2 multiplied by 3?"},
113 | ])
114 | for chunk in response_stream:
115 |     print(chunk["content"], end="")
116 | ```
117 | 
118 | You can also pass a container as a tool to the agent.
119 | ```python
120 | from agents_manager import Agent, AgentManager, Container
121 | 
122 | ...
123 | 
124 | agent4 = Agent(
125 |     name="agent4",
126 |     instruction="You are a helpful assistant",
127 |     model=model,
128 |     tools=[Container(
129 |         name="hello",
130 |         description="A simple hello world container",
131 |         image="hello-world:latest",
132 |     )]
133 | )
134 | ```
135 | 
136 | You can also pass the result of the container to the next agent via the `{result}` variable.
137 | ```python
138 | from agents_manager import Agent, Container
139 | 
140 | ...
141 | 
142 | agent5 = Agent(
143 |     name="agent5",
144 |     instruction="You are a helpful assistant",
145 |     model=model,
146 |     tools=[Container(
147 |         name="processing",
148 |         description="Container to do some processing...",
149 |         image="docker/xxxx:latest",
150 |         environment=[
151 |             {"name": "input1", "type": "integer"},
152 |             {"name": "input2", "type": "integer"}
153 |         ],
154 |         authenticate={
155 |             "username": "xxxxx",
156 |             "password": "xxxxx",
157 |             "registry": "xxxxx"
158 |         },
159 |         return_to={
160 |             "agent": agent6,
161 |             "instruction": "The result is: {result}"  # {result} will be replaced with the result of the container
162 |         },
163 |     )]
164 | )
165 | ```
166 | 
167 | You can also pass an `output_format` to an agent to structure the output.
168 | 
169 | ```python
170 | from pydantic import BaseModel
171 | 
172 | from agents_manager import Agent
173 | 
174 | 
175 | class Answer(BaseModel):
176 |     value: str
177 | 
178 | agent1 = Agent(
179 |     name="agent1",
180 |     instruction="You are a helpful assistant",
181 |     model=model,
182 |     output_format=Answer
183 | )
184 | ```
185 | Note 1: The `output_format` should be a Pydantic model.
186 | 
187 | Note 2: The Anthropic model does not support `output_format`; you can use a tool to format the output instead (see the sketch below).
188 | 
189 | Note 3: `handover` with `share_context` does not work correctly for Genai.
190 | 
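For Note 2, a minimal sketch of the tool-based workaround for Anthropic follows. Here `format_answer` is a hypothetical helper written for this example (it is not part of the package), and `anthropicModel` is the Anthropic model defined in the Quick Start above.

```python
import json

from agents_manager import Agent


def format_answer(value: str) -> str:
    """Wrap the final answer in a JSON object with a single "value" field."""
    return json.dumps({"value": value})


# Instead of output_format, the instruction routes the final answer through
# the formatting tool, so the last tool result carries the structured output.
structured_agent = Agent(
    name="structured_agent",
    instruction="Answer the question, then call format_answer with your final answer.",
    model=anthropicModel,
    tools=[format_answer],
)
```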
191 | You can also run the agent with a dictionary as the input content.
192 | ```python
193 | 
194 | response = agent_manager.run_agent("agent1", {"role": "user", "content": "What is 2 multiplied by 3?"})
195 | 
196 | ```
197 | 
198 | You can also run the agent with a list of messages as the input history.
199 | ```python
200 | response = agent_manager.run_agent("agent1", [
201 |     {"role": "user", "content": "What is 2 multiplied by 3?"},
202 | ])
203 | ```
204 | 
205 | 
206 | 
207 | ## More models
208 | ```python
209 | from agents_manager.models import Grok, DeepSeek, Llama
210 | 
211 | # Define the Grok model
212 | modelGrok = Grok(name="grok-2-latest")
213 | 
214 | 
215 | # Define the DeepSeek model
216 | modelDeepSeek = DeepSeek(name="deepseek-chat")
217 | 
218 | 
219 | # Define the Llama model
220 | modelLlama = Llama(name="llama3.1-70b")
221 | 
222 | ```
223 | 
224 | 
225 | ## Troubleshooting
226 | 
227 | 1. While using the Genai model with functions, if you get the following error:
228 | 
229 | ```python
230 | google.genai.errors.ClientError: 400 INVALID_ARGUMENT. {'error': {'code': 400, 'message': '* GenerateContentRequest.tools[0].function_declarations[0].parameters.properties: should be non-empty for OBJECT type\n', 'status': 'INVALID_ARGUMENT'}}
231 | 
232 | ```
233 | This is because Google Genai does not support functions without parameters. You can fix it by adding a dummy parameter. Please let me know if you have a better solution for this.
234 | 
235 | 2. If you get the following error while running the container tool:
236 | ```python
237 | Error: Error while fetching server API version: ('Connection aborted.', FileNotFoundError(2, 'No such file or directory'))
238 | ```
239 | 
240 | This is because the Docker daemon is not running. You can fix it by starting the Docker daemon
241 | and exporting the following environment variable:
242 | 
243 | ```bash
244 | # linux
245 | export DOCKER_HOST=unix:///var/run/docker.sock
246 | 
247 | # colima
248 | export DOCKER_HOST=unix://$HOME/.colima/default/docker.sock
249 | ```
250 | 
251 | 
252 | ## How It Works
253 | 
254 | 1. **Define Agents**: Each agent has a name, a specific role (instruction), and a model.
255 | 2. **Assign Tools**: Agents can be assigned tools (functions and containers) to perform tasks.
256 | 3. **Create an Agent Manager**: The `AgentManager` manages the orchestration of agents.
257 | 4. **Run an Agent**: Start an agent to process a request and interact with other agents as needed.
258 | 
259 | 
260 | 
261 | 
262 | ## Use Cases
263 | 
264 | - AI-powered automation systems
265 | - Multi-agent chatbots
266 | - Complex workflow orchestration
267 | - Research on AI agent collaboration
268 | 
269 | ## Contributing
270 | 
271 | Contributions are welcome! Feel free to submit issues and pull requests.
272 | 
273 | ## License
274 | 
275 | MIT License
276 | 
277 | 

--------------------------------------------------------------------------------
/agents_manager/models/OpenAi.py:
--------------------------------------------------------------------------------
  1 | from typing import List, Dict, Any, Union, Optional, Generator, Callable
  2 | import json, re
  3 | 
  4 | from openai import OpenAI
  5 | from openai.types.chat import ChatCompletion
  6 | 
  7 | from agents_manager.Container import Container
  8 | from agents_manager.Model import Model
  9 | from agents_manager.utils import populate_template, function_to_json, container_to_json
 10 | 
 11 | 
 12 | class OpenAi(Model):
 13 |     def __init__(self, name: str, **kwargs: Any) -> None:
 14 |         """
 15 |         Initialize the OpenAi model with a name and optional keyword arguments.
16 | 17 | Args: 18 | name (str): The name of the OpenAI model (e.g., "gpt-3.5-turbo"). 19 | **kwargs (Any): Additional arguments, including optional "api_key". 20 | """ 21 | super().__init__(name, **kwargs) 22 | 23 | if name is None: 24 | raise ValueError("A valid OpenAI model name is required") 25 | 26 | self.client = OpenAI( 27 | api_key=self.kwargs.pop("api_key", None), # type: Optional[str] 28 | ) 29 | 30 | def generate_response(self) -> Dict: 31 | """ 32 | Generate a non-streaming response from the OpenAI model. 33 | 34 | Returns: 35 | Union[ChatCompletion, str]: The raw ChatCompletion object if stream=False, 36 | or a string if further processed. 37 | """ 38 | kwargs = self.kwargs.copy() 39 | output_format = kwargs.pop("output_format") 40 | if not self.kwargs.get("output_format", None): 41 | response = self.client.chat.completions.create( 42 | model=self.name, # type: str 43 | messages=self.get_messages(), # type: List[Dict[str, str]] 44 | **kwargs, # type: Dict[str, Any], 45 | stream=False, 46 | ) 47 | else: 48 | response = self.client.beta.chat.completions.parse( 49 | model=self.name, # type: str 50 | messages=self.get_messages(), # type: List[Dict[str, str]] 51 | response_format=output_format, 52 | **kwargs, # type: Dict[str, Any] 53 | ) 54 | 55 | message = response.choices[0].message 56 | 57 | return { 58 | "tool_calls": message.tool_calls or [], 59 | "content": message.content, 60 | } 61 | 62 | def generate_stream_response(self) -> Generator[Dict, None, None]: 63 | """ 64 | Generate a streaming response from the OpenAI model. 65 | 66 | Returns: 67 | Union[ChatCompletion, str]: The raw ChatCompletion object if stream=False, 68 | or a string if further processed. 69 | """ 70 | 71 | kwargs = self.kwargs.copy() 72 | output_format = kwargs.pop("output_format") 73 | 74 | if not self.kwargs.get("output_format", None): 75 | response = self.client.chat.completions.create( 76 | model=self.name, # type: str 77 | messages=self.get_messages(), # type: List[Dict[str, str]] 78 | **kwargs, # type: Dict[str, Any] 79 | stream=True, 80 | ) 81 | final_tool_calls = {} 82 | result = { 83 | "tool_calls": [], 84 | "content": "", 85 | } 86 | for chunk in response: 87 | for tool_call in chunk.choices[0].delta.tool_calls or []: 88 | index = tool_call.index 89 | if index not in final_tool_calls: 90 | final_tool_calls[index] = tool_call 91 | final_tool_calls[ 92 | index 93 | ].function.arguments += tool_call.function.arguments 94 | result["tool_calls"] = [v for _, v in final_tool_calls.items()] 95 | if chunk.choices[0].delta.content is not None: 96 | result["content"] = chunk.choices[0].delta.content 97 | yield result 98 | return 99 | else: 100 | with self.client.beta.chat.completions.stream( 101 | model=self.name, # type: str 102 | messages=self.get_messages(), # type: List[Dict[str, str]] 103 | response_format=output_format, 104 | **kwargs, # type: Dict[str, Any] 105 | ) as response: 106 | result = { 107 | "tool_calls": [], 108 | "content": "", 109 | } 110 | final_tool_calls = {} 111 | for event in response: 112 | if event.type == "content.delta": 113 | if event.parsed is not None: 114 | result["content"] = event.parsed 115 | 116 | elif event.type == "chunk": 117 | for tool_call in event.chunk.choices[0].delta.tool_calls or []: 118 | index = tool_call.index 119 | if index not in final_tool_calls: 120 | final_tool_calls[index] = tool_call 121 | 122 | final_tool_calls[ 123 | index 124 | ].function.arguments += tool_call.function.arguments 125 | result["tool_calls"] = [ 126 | v for _, v in 
final_tool_calls.items()
127 |                         ]
128 | 
129 |                     yield result
130 | 
131 |             return
132 | 
133 |     def get_tool_format(self) -> Dict[str, Any]:
134 |         return {
135 |             "type": "function",
136 |             "function": {
137 |                 "name": "{name}",
138 |                 "description": "{description}",
139 |                 "parameters": {
140 |                     "type": "object",
141 |                     "properties": "{parameters}",
142 |                     "required": "{required}",
143 |                     "additionalProperties": False,
144 |                 },
145 |                 "strict": True,
146 |             },
147 |         }
148 | 
149 |     @staticmethod
150 |     def _get_tool_call_format() -> Dict[str, Any]:
151 |         return {
152 |             "id": "{id}",
153 |             "type": "function",
154 |             "function": {"name": "{name}", "arguments": "{arguments}"},
155 |         }
156 | 
157 |     def _merge_unique_json_objects(self, s):
158 |         # Find all JSON objects using a regex that matches {...}
159 |         json_objects = re.findall(r"\{.*?\}", s)
160 | 
161 |         merged = {}
162 |         for obj_str in json_objects:
163 |             obj = json.loads(obj_str)
164 |             for key, value in obj.items():
165 |                 if key not in merged:
166 |                     merged[key] = value
167 |         return merged
168 | 
169 |     def get_keys_in_tool_output(self, tool_call: Any) -> Dict[str, Any]:
170 |         return {
171 |             "id": tool_call.id,
172 |             "name": tool_call.function.name,
173 |             "arguments": json.dumps(
174 |                 self._merge_unique_json_objects(tool_call.function.arguments)
175 |             ),
176 |         }
177 | 
178 |     def get_assistant_message(self, response: Any):
179 | 
180 |         tool_calls = response["tool_calls"]
181 |         output_tool_calls = []
182 |         for tool_call in tool_calls:
183 |             output = self.get_keys_in_tool_output(tool_call)
184 |             populated_data = populate_template(self._get_tool_call_format(), output)
185 |             output_tool_calls.append(
186 |                 {
187 |                     "role": "assistant",
188 |                     "content": response["content"] or "",
189 |                     "tool_calls": (
190 |                         [populated_data]
191 |                         if type(populated_data) != list
192 |                         else populated_data
193 |                     ),
194 |                 }
195 |             )
196 | 
197 |         if tool_calls:
198 |             return output_tool_calls
199 |         else:
200 |             return [
201 |                 {
202 |                     "role": "assistant",
203 |                     "content": response["content"] or "",
204 |                     "tool_calls": [],
205 |                 }
206 |             ]
207 | 
208 |     def get_tool_message(self, tool_responses: List[Dict[str, Any]]) -> Any:
209 |         tool_results = []
210 |         for tool_response in tool_responses:
211 |             tool_results.append(
212 |                 {
213 |                     "role": "tool",
214 |                     "content": tool_response["tool_result"],
215 |                     "tool_call_id": tool_response["id"],
216 |                 }
217 |             )
218 | 
219 |         return tool_results
220 | 
221 |     def set_system_message(self, message: str) -> None:
222 |         self.set_messages(
223 |             [
224 |                 {
225 |                     "role": "system",
226 |                     "content": message,
227 |                 }
228 |             ]
229 |         )
230 | 
231 |     def set_user_message(self, message: str) -> None:
232 |         current_messages = self.get_messages() or []
233 |         if isinstance(message, str):
234 |             user_input = {"role": "user", "content": message}
235 |             current_messages.append(user_input)
236 |         if isinstance(message, dict):
237 |             user_input = [message]
238 |             current_messages.extend(user_input)
239 |         if isinstance(message, list):
240 |             current_messages.extend(message)
241 |         self.set_messages(current_messages)
242 | 
243 |     def set_tools(self, tools: List[Callable]) -> None:
244 | 
245 |         json_tools: List[Dict[str, Any]] = []
246 |         for tool in tools:
247 |             if isinstance(tool, Callable):
248 |                 json_tools.append(function_to_json(tool, self.get_tool_format()))
249 |             if isinstance(tool, Container):
250 |                 json_tools.append(container_to_json(tool, self.get_tool_format()))
251 |         self.kwargs.update({"tools": json_tools})
252 | 
253 |     def set_output_format(self, output_format: Callable) -> None:
254 |         self.kwargs.update({"output_format": output_format})
255 | 

--------------------------------------------------------------------------------
/agents_manager/models/Anthropic.py:
--------------------------------------------------------------------------------
  1 | import json
  2 | from typing import List, Dict, Any, Union, Optional, Generator, Callable
  3 | 
  4 | from anthropic import Anthropic as Ap
  5 | 
  6 | from agents_manager.Container import Container
  7 | from agents_manager.Model import Model
  8 | from agents_manager.utils import populate_template, function_to_json, container_to_json
  9 | 
 10 | 
 11 | class Anthropic(Model):
 12 |     def __init__(self, name: str, **kwargs: Any) -> None:
 13 |         """
 14 |         Initialize the Anthropic model with a name and optional keyword arguments.
 15 | 
 16 |         Args:
 17 |             name (str): The name of the Anthropic model (e.g., "claude-3-5-sonnet-20241022").
 18 |             **kwargs (Any): Additional arguments, including optional "api_key".
 19 |         """
 20 |         super().__init__(name, **kwargs)
 21 | 
 22 |         if name is None:
 23 |             raise ValueError("A valid Anthropic model name is required")
 24 | 
 25 |         self.instruction = ""
 26 |         self.client = Ap(
 27 |             api_key=kwargs.pop("api_key", None),  # type: Optional[str]
 28 |         )
 29 | 
 30 |     def generate_response(self) -> Dict:
 31 |         """
 32 |         Generate a non-streaming response from the Anthropic model.
 33 | 
 34 |         Returns:
 35 |             Dict: A dictionary with two keys, "tool_calls" (the extracted tool-use blocks)
 36 |             and "content" (the text of the first text block, or "").
 37 |         """
 38 |         message = self.client.messages.create(
 39 |             model=self.name,
 40 |             system=self.instruction,
 41 |             messages=self.get_messages(),
 42 |             **self.kwargs,
 43 |         )
 44 | 
 45 |         con = self.extract_content(message, "text")
 46 | 
 47 |         return {
 48 |             "tool_calls": self.extract_content(message, "tool_use"),
 49 |             "content": con[0].text if con else "",
 50 |         }
 51 | 
 52 |     def generate_stream_response(self) -> Generator[Dict, None, None]:
 53 |         """
 54 |         Generate a streaming response from the Anthropic model with tool_calls and content.
 55 |         Yields dictionaries containing accumulated tool_calls and content.
56 | """ 57 | 58 | with self.client.messages.stream( 59 | model=self.name, 60 | system=self.instruction, 61 | messages=self.get_messages(), 62 | **self.kwargs, 63 | ) as stream: 64 | current_content_blocks = {} 65 | accumulated_json = {} 66 | result = {"tool_calls": [], "content": ""} 67 | 68 | current_tool = None # Track tool call metadata, but don't accumulate input 69 | 70 | for event in stream: 71 | result = { 72 | "content": None, 73 | "tool_calls": None, 74 | } # Fresh result dict each iteration 75 | 76 | # Handle text tokens as they arrive 77 | if ( 78 | event.type == "content_block_delta" 79 | and event.delta.type == "text_delta" 80 | ): 81 | result["content"] = ( 82 | event.delta.text 83 | ) # Yield only the current text token 84 | 85 | # Handle tool call start 86 | elif ( 87 | event.type == "content_block_start" 88 | and event.content_block.type == "tool_use" 89 | ): 90 | current_tool = { 91 | "id": event.content_block.id, 92 | "name": event.content_block.name, 93 | "input": None, 94 | } 95 | result["tool_calls"] = ( 96 | current_tool # Yield tool metadata without input yet 97 | ) 98 | 99 | # Handle tool call input tokens 100 | elif ( 101 | event.type == "content_block_delta" 102 | and event.delta.type == "input_json_delta" 103 | and current_tool 104 | ): 105 | # Yield the raw partial_json token as it arrives 106 | result["tool_calls"] = { 107 | "id": current_tool["id"], 108 | "name": current_tool["name"], 109 | "input": event.delta.partial_json, 110 | } 111 | 112 | # Handle block completion 113 | elif event.type == "content_block_stop": 114 | if current_tool: 115 | # No input to finalize since we're not appending; just clear the tool 116 | current_tool = None 117 | # No content to yield here since we're not accumulating 118 | 119 | elif event.type == "message_stop": 120 | con = self.extract_content(event.message, "text") 121 | 122 | result["content"] = con[0].text if con else "" 123 | result["tool_calls"] = self.extract_content( 124 | event.message, "tool_use" 125 | ) 126 | 127 | # Yield the result with the current token (if any) 128 | if result["content"] or result["tool_calls"]: 129 | yield result 130 | 131 | @staticmethod 132 | def parse_stream(stream): 133 | current_content_blocks = {} 134 | accumulated_json = {} 135 | 136 | for event in stream: 137 | # Handle different event types 138 | if event.type == "message_start": 139 | pass 140 | 141 | elif event.type == "content_block_start": 142 | # Initialize a new content block 143 | index = event.index 144 | content_block = event.content_block 145 | current_content_blocks[index] = content_block 146 | 147 | if content_block.type == "tool_use": 148 | accumulated_json[index] = "" 149 | 150 | elif event.type == "content_block_delta": 151 | index = event.index 152 | delta = event.delta 153 | 154 | # Handle text deltas 155 | if delta.type == "text_delta": 156 | if ( 157 | index in current_content_blocks 158 | and current_content_blocks[index].type == "text" 159 | ): 160 | if not hasattr(current_content_blocks[index], "text"): 161 | current_content_blocks[index].text = "" 162 | current_content_blocks[index].text += delta.text 163 | 164 | # Handle tool use input deltas 165 | elif delta.type == "input_json_delta": 166 | if index in accumulated_json: 167 | accumulated_json[index] += delta.partial_json 168 | if accumulated_json[index].endswith("}"): 169 | try: 170 | parsed_json = json.loads(accumulated_json[index]) 171 | except json.JSONDecodeError: 172 | pass 173 | 174 | elif event.type == "content_block_stop": 175 | index = event.index 
176 | if index in current_content_blocks: 177 | block_type = current_content_blocks[index].type 178 | if block_type == "tool_use" and index in accumulated_json: 179 | # Final parse of the complete JSON 180 | try: 181 | parsed_json = json.loads(accumulated_json[index]) 182 | except json.JSONDecodeError as e: 183 | pass 184 | 185 | elif event.type == "message_delta": 186 | # Handle updates to the message metadata 187 | if event.delta.stop_reason: 188 | pass 189 | 190 | elif event.type == "message_stop": 191 | pass 192 | # Get the final message after streaming completes 193 | return stream.get_final_message() 194 | 195 | @staticmethod 196 | def extract_content(response, type_filter="tool_use"): 197 | """ 198 | Extract items of a specific type from a Claude API response object. 199 | 200 | Args: 201 | response: The response object from Claude API 202 | type_filter (str): The type of items to extract (default: "tool_use") 203 | 204 | Returns: 205 | list: A list of filtered items 206 | """ 207 | items = [] 208 | if hasattr(response, "content") and isinstance(response.content, list): 209 | for item in response.content: 210 | if hasattr(item, "type") and item.type == type_filter: 211 | items.append(item) 212 | return items 213 | 214 | def get_tool_format(self) -> Dict[str, Any]: 215 | return { 216 | "name": "{name}", 217 | "description": "{description}", 218 | "input_schema": { 219 | "type": "object", 220 | "properties": "{parameters}", 221 | "required": "{required}", 222 | }, 223 | } 224 | 225 | def get_keys_in_tool_output(self, tool_call: Any) -> Dict[str, Any]: 226 | return { 227 | "id": tool_call.id, 228 | "name": tool_call.name, 229 | "arguments": tool_call.input, 230 | } 231 | 232 | @staticmethod 233 | def _get_tool_call_format() -> Dict[str, Any]: 234 | return { 235 | "type": "tool_use", 236 | "id": "{id}", 237 | "name": "{name}", 238 | "input": "{arguments}", 239 | } 240 | 241 | def get_assistant_message(self, response: Any): 242 | 243 | tool_calls = response["tool_calls"] 244 | output_tool_calls = [] 245 | for tool_call in tool_calls: 246 | output = self.get_keys_in_tool_output(tool_call) 247 | populated_data = populate_template(self._get_tool_call_format(), output) 248 | output_tool_calls.append( 249 | { 250 | "role": "assistant", 251 | "content": ( 252 | [populated_data] 253 | if type(populated_data) != list 254 | else populated_data 255 | ), 256 | } 257 | ) 258 | 259 | if tool_calls: 260 | return output_tool_calls 261 | else: 262 | return [ 263 | { 264 | "role": "assistant", 265 | "content": [], 266 | } 267 | ] 268 | 269 | def get_tool_message(self, tool_responses: List[Dict[str, Any]]) -> Any: 270 | 271 | tool_results = [] 272 | for tool_response in tool_responses: 273 | tool_results.append( 274 | { 275 | "type": "tool_result", 276 | "tool_use_id": tool_response["id"], 277 | "content": tool_response["tool_result"], 278 | } 279 | ) 280 | 281 | return {"role": "user", "content": tool_results} 282 | 283 | def set_system_message(self, message: str) -> None: 284 | self.instruction = message 285 | 286 | def set_user_message(self, message: str) -> None: 287 | current_messages = self.get_messages() or [] 288 | if isinstance(message, str): 289 | user_input = {"role": "user", "content": message} 290 | current_messages.append(user_input) 291 | if isinstance(message, dict): 292 | user_input = [message] 293 | current_messages.extend(user_input) 294 | if isinstance(message, list): 295 | current_messages.extend(message) 296 | self.set_messages(current_messages) 297 | 298 | def set_tools(self, tools: 
List[Callable]) -> None:
299 |         json_tools: List[Dict[str, Any]] = []
300 |         for tool in tools:
301 |             if isinstance(tool, Callable):
302 |                 json_tools.append(function_to_json(tool, self.get_tool_format()))
303 |             if isinstance(tool, Container):
304 |                 json_tools.append(container_to_json(tool, self.get_tool_format()))
305 |         self.kwargs.update({"tools": json_tools})
306 |
307 |     def set_output_format(self, output_format: Callable) -> None:
308 |         pass
309 |
--------------------------------------------------------------------------------
/agents_manager/models/Genai.py:
--------------------------------------------------------------------------------
1 | from typing import List, Dict, Any, Union, Callable, Generator
2 |
3 | from google import genai
4 | from google.genai import types
5 |
6 |
7 | from agents_manager.Container import Container
8 | from agents_manager.Model import Model
9 | from agents_manager.utils import function_to_json, container_to_json
10 |
11 |
12 | class Genai(Model):
13 |     def __init__(self, name: str, **kwargs: Any) -> None:
14 |         """
15 |         Initialize the Genai model with a name and optional keyword arguments.
16 |
17 |         Args:
18 |             name (str): The name of the Genai model (e.g., "gemini-2.0-flash").
19 |             **kwargs (Any): Additional arguments, including optional "api_key".
20 |         """
21 |         super().__init__(name, **kwargs)
22 |
23 |         if name is None:
24 |             raise ValueError("A valid Genai model name is required")
25 |
26 |         args = {}
27 |         if "api_key" in self.kwargs:
28 |             args["api_key"] = self.kwargs.pop("api_key")
29 |         if "api_version" in self.kwargs:
30 |             args["http_options"] = types.HttpOptions(  # Client takes no api_version kwarg
31 |                 api_version=self.kwargs.pop("api_version")
32 |             )
33 |         if "project" in self.kwargs:
34 |             args["project"] = self.kwargs.pop("project")
35 |         if "location" in self.kwargs:
36 |             args["location"] = self.kwargs.pop("location")
37 |         if "vertexai" in self.kwargs:
38 |             args["vertexai"] = self.kwargs.pop("vertexai")
39 |
40 |         self.instructions = ""
41 |         self.tools = []
42 |
43 |         self.client = genai.Client(**args)
44 |
45 |     def generate_response(self) -> Dict:
46 |         """
47 |         Generate a non-streaming response from the Genai model.
48 |
49 |         Returns:
50 |             Dict: A dictionary with "tool_calls", "content", and "candidates"
51 |                 keys extracted from the model response.
52 |         """
53 |
54 |         kwargs = self.kwargs.copy()
55 |         tools = kwargs.pop("tools", None)
56 |         output_format = kwargs.pop("output_format", None)
57 |         config = {
58 |             "system_instruction": self.instructions,
59 |         }
60 |         if not self.has_tool_function_response(self.get_messages()):
61 |             if tools:
62 |                 config.update(
63 |                     {
64 |                         "tools": [{"function_declarations": tools}],
65 |                         "automatic_function_calling": {"disable": True},
66 |                     }
67 |                 )
68 |
69 |             if output_format:
70 |                 config.update(
71 |                     {
72 |                         "response_mime_type": "application/json",
73 |                         "response_schema": output_format,
74 |                     }
75 |                 )
76 |
77 |         config.update(kwargs)
78 |
79 |         response = self.client.models.generate_content(
80 |             model=self.name,
81 |             contents=self._convert_to_contents(self.get_messages()),
82 |             config=config,
83 |         )
84 |         return {
85 |             "tool_calls": response.function_calls,
86 |             "content": response.text if not response.function_calls else "",
87 |             "candidates": response.candidates or "",
88 |         }
89 |
90 |     def generate_stream_response(self) -> Generator[Dict, None, None]:
91 |         """
92 |         Generate a streaming response from the Genai model.
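        Example (an illustrative consumption sketch; the chunk keys mirror the
        dict built below):
            for chunk in model.generate_stream_response():
                if chunk["content"]:
                    print(chunk["content"], end="")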
93 |
94 |         Yields:
95 |             Dict: A dictionary with "tool_calls", "content", and "candidates"
96 |                 keys for each streamed chunk.
97 |         """
98 |
99 |         kwargs = self.kwargs.copy()
100 |         tools = kwargs.pop("tools", None)
101 |         output_format = kwargs.pop("output_format", None)
102 |         config = {
103 |             "system_instruction": self.instructions,
104 |         }
105 |         if not self.has_tool_function_response(self.get_messages()):
106 |             if tools:
107 |                 config.update(
108 |                     {
109 |                         "tools": [{"function_declarations": tools}],
110 |                         "automatic_function_calling": {"disable": True},
111 |                     }
112 |                 )
113 |
114 |             if output_format:
115 |                 config.update(
116 |                     {
117 |                         "response_mime_type": "application/json",
118 |                         "response_schema": output_format,
119 |                     }
120 |                 )
121 |
122 |         config.update(kwargs)
123 |         response = self.client.models.generate_content_stream(
124 |             model=self.name,
125 |             contents=self._convert_to_contents(self.get_messages()),
126 |             config=config,
127 |         )
128 |
129 |         for chunk in response:  # fresh result per chunk, so yielded dicts are never mutated later
130 |             result = {
131 |                 "tool_calls": [],
132 |                 "content": "",
133 |                 "candidates": "",
134 |             }
135 |             if chunk.function_calls:
136 |                 final_tool_calls = chunk.function_calls
137 |                 result["tool_calls"] = final_tool_calls
138 |             if chunk.text is not None:
139 |                 result["content"] = chunk.text
140 |             if chunk.candidates:
141 |                 result["candidates"] = chunk.candidates
142 |             yield result
143 |         return
144 |
145 |     @staticmethod
146 |     def has_tool_function_response(messages):
147 |         if not messages:
148 |             return False
149 |
150 |         last_message = messages[-1]
151 |
152 |         return (
153 |             last_message.get("role") == "tool"
154 |             and isinstance(last_message.get("content"), list)
155 |             and any("function_response" in item for item in last_message["content"])
156 |         )
157 |
158 |     @staticmethod
159 |     def _convert_to_contents(messages):
160 |         """
161 |         Convert a list of dictionaries with 'role' and 'content' keys
162 |         into a list of google-genai `types.Content` objects.
163 |
164 |         Args:
165 |             messages (list): List of dicts with 'role' and 'content' keys.
166 |
167 |         Returns:
168 |             list: List of `types.Content` objects.
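        Example input (an illustrative sketch; the URI, tool name, and result
        values are placeholders):
            [{"role": "user", "content": "Describe this file."},
             {"role": "user", "content": [{"file_data": {
                 "file_uri": "gs://bucket/doc.pdf", "mime_type": "application/pdf"}}]},
             {"role": "tool", "content": [{"function_response": {
                 "name": "get_weather", "response": {"result": "sunny"}}}]}]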
169 | """ 170 | 171 | contents = [] 172 | for message in messages: 173 | parts = message.get("content", None) 174 | if isinstance(parts, str): 175 | parts = [types.Part.from_text(text=parts)] 176 | if isinstance(parts, list): 177 | for part in message.get("parts", []): # Safely get the "parts" list 178 | if "text" in part: 179 | parts.append(types.Part.from_text(text=part["text"])) 180 | elif "file_data" in part: 181 | file_data = part["file_data"] 182 | parts.append( 183 | types.Part.from_uri( 184 | uri=file_data["file_uri"], 185 | mime_type=file_data["mime_type"], 186 | ) 187 | ) 188 | elif "inline_data" in part: 189 | inline_data = part["inline_data"] 190 | parts.append( 191 | types.Part.from_data( 192 | data=inline_data[ 193 | "data" 194 | ], # Base64-encoded string or similar 195 | mime_type=inline_data["mime_type"], 196 | ) 197 | ) 198 | elif "function_response" in part: 199 | function_response = part["function_response"] 200 | name = function_response["name"] 201 | response = function_response["response"] 202 | parts.append( 203 | types.Part.from_function_response( 204 | name=name, response=response 205 | ) 206 | ) 207 | elif "function_call" in part: 208 | function_call = part["function_call"] 209 | name = function_call["name"] 210 | args = function_call["args"] 211 | parts.append( 212 | types.Part.from_function_call( 213 | name=name, 214 | args=args, 215 | ) 216 | ) 217 | contents.append(types.Content(parts=parts, role=message["role"])) 218 | return contents 219 | 220 | def get_tool_format(self) -> Dict[str, Any]: 221 | return { 222 | "name": "{name}", 223 | "description": "{description}", 224 | "parameters": { 225 | "type": "object", 226 | "properties": "{parameters}", 227 | "required": "{required}", 228 | }, 229 | } 230 | 231 | @staticmethod 232 | def _get_tool_call_format() -> Dict[str, Any]: 233 | return { 234 | "id": "{id}", 235 | "type": "function", 236 | "function": {"name": "{name}", "arguments": "{arguments}"}, 237 | } 238 | 239 | def get_keys_in_tool_output(self, tool_call: Any) -> Dict[str, Any]: 240 | return {"id": tool_call.id, "name": tool_call.name, "arguments": tool_call.args} 241 | 242 | @staticmethod 243 | def _content_to_json(content): 244 | parts_list = [] 245 | for part in content.parts: 246 | part_dict = {} 247 | if part.function_call: 248 | function_call_dict = { 249 | "name": part.function_call.name, 250 | "args": part.function_call.args, 251 | } 252 | part_dict["function_call"] = function_call_dict 253 | if part_dict: 254 | parts_list.append({"role": content.role, "content": [part_dict]}) 255 | 256 | if parts_list: 257 | return parts_list 258 | else: 259 | return [{"role": content.role, "content": []}] 260 | 261 | def get_assistant_message(self, response: Any): 262 | return self._content_to_json(response["candidates"][0].content) 263 | 264 | def get_tool_message(self, tool_responses: List[Dict[str, Any]]) -> Any: 265 | tool_results = {} 266 | content = [] 267 | for tool_response in tool_responses: 268 | content.append( 269 | { 270 | "function_response": { 271 | "name": tool_response["name"], 272 | "response": { 273 | "result": tool_response["tool_result"], 274 | }, 275 | } 276 | } 277 | ) 278 | tool_results["role"] = "tool" 279 | tool_results["content"] = content 280 | 281 | return tool_results 282 | 283 | def set_system_message(self, message: str) -> None: 284 | self.instructions = message 285 | 286 | def set_user_message(self, message: str) -> None: 287 | current_messages = self.get_messages() or [] 288 | if isinstance(message, str): 289 | user_input = 
{"role": "user", "content": message} 290 | current_messages.append(user_input) 291 | if isinstance(message, dict): 292 | user_input = [message] 293 | current_messages.extend(user_input) 294 | if isinstance(message, list): 295 | for i, msg in enumerate(message): 296 | if isinstance(msg, dict): 297 | content = msg["content"] 298 | if isinstance(content, list): 299 | for j, con in enumerate(content): 300 | if isinstance(con, types.File): 301 | parts_dict = { 302 | "file_data": { 303 | "file_uri": con.uri, 304 | "mime_type": con.mime_type, 305 | } 306 | } 307 | message[i]["content"][j] = parts_dict 308 | if isinstance(con, str): 309 | message[i]["content"][j] = {"text": con} 310 | 311 | current_messages.extend(message) 312 | self.set_messages(current_messages) 313 | 314 | def set_tools(self, tools: List[Callable]) -> None: 315 | json_tools: List[Dict[str, Any]] = [] 316 | for tool in tools: 317 | if isinstance(tool, Callable): 318 | json_tools.append(function_to_json(tool, self.get_tool_format())) 319 | if isinstance(tool, Container): 320 | json_tools.append(container_to_json(tool, self.get_tool_format())) 321 | self.kwargs.update({"tools": json_tools}) 322 | 323 | def set_output_format(self, output_format: Callable) -> None: 324 | self.kwargs.update({"output_format": output_format}) 325 | -------------------------------------------------------------------------------- /agents_manager/AgentManager.py: -------------------------------------------------------------------------------- 1 | import json 2 | import logging 3 | from typing import List, Optional, Any, Generator, Dict, Callable 4 | 5 | from agents_manager.models import Genai 6 | from agents_manager.Container import Container 7 | from agents_manager.utils import write_log 8 | from agents_manager.Agent import Agent 9 | 10 | 11 | class AgentManager: 12 | def __init__(self, log: bool = True) -> None: 13 | """ 14 | Initialize the AgentManager with an empty list of agents. 15 | """ 16 | 17 | self.log = log 18 | self.tool_logger = logging.getLogger("agents_manager.Tool") 19 | self.logger = logging.getLogger("agents_manager.AgentManager") 20 | self.container_logger = logging.getLogger("agents_manager.Container") 21 | 22 | write_log(self.log, self.logger, "AgentManager log setup") 23 | 24 | self.agents: List[Agent] = [] 25 | 26 | def add_agent(self, agent: Agent) -> None: 27 | """ 28 | Add an agent to the manager's list. 29 | 30 | Args: 31 | agent (Agent): The agent instance to add. 32 | """ 33 | if not isinstance(agent, Agent): 34 | raise ValueError("Only Agent instances can be added") 35 | _, existing_agent = self.get_agent(agent.name) 36 | if not existing_agent: 37 | self.agents.append(agent) 38 | 39 | def get_agent(self, name: str) -> tuple[Optional[int], Optional[Agent]]: 40 | """ 41 | Retrieve an agent by name. 42 | Args: 43 | name (str): The name of the agent to find. 44 | Returns: 45 | tuple[Optional[int], Optional[Agent]]: A tuple containing the index and agent if found, else (None, None). 
46 | """ 47 | 48 | for _, agent in enumerate(self.agents): 49 | if agent.name == name: 50 | return _, agent 51 | return None, None 52 | 53 | def _initialize_user_input( 54 | self, name: str, user_input: Optional[Any] = None 55 | ) -> tuple[Optional[int], Optional[Agent]]: 56 | 57 | _, agent = self.get_agent(name) 58 | 59 | if agent is None: 60 | raise ValueError(f"No agent found with name: {name}") 61 | agent.set_messages([]) 62 | agent.set_system_message(agent.instruction) 63 | agent.set_tools(agent.tools) 64 | agent.set_output_format() 65 | if user_input: 66 | agent.set_user_message(user_input) 67 | return _, agent 68 | 69 | @staticmethod 70 | def get_model_current_messages(agent: Agent, current_messages: list): 71 | if type(agent.get_model()) == Genai: 72 | return current_messages 73 | else: 74 | new_current_messages = [] 75 | 76 | for curr in current_messages: 77 | if curr["role"] != "system": 78 | new_current_messages.append(curr) 79 | 80 | return new_current_messages 81 | 82 | @staticmethod 83 | def _update_current_message( 84 | agent: Agent, current_messages: list, tool_responses: list, tool_call: Dict 85 | ): 86 | current_messages.append(tool_call) 87 | 88 | tool_response = agent.get_model().get_tool_message(tool_responses) 89 | if isinstance(tool_response, dict): 90 | current_messages.append(tool_response) 91 | if isinstance(tool_response, list): 92 | current_messages.extend(tool_response) 93 | 94 | agent.set_messages(current_messages) 95 | 96 | def _handle_agent_tool_call( 97 | self, 98 | tool_result: Agent, 99 | function_name: str, 100 | id: Any, 101 | user_input: Optional[Any], 102 | ): 103 | if not self.get_agent(tool_result.name)[1]: 104 | self.add_agent(tool_result) 105 | 106 | write_log( 107 | self.log, 108 | self.logger, 109 | f"Delegating execution to nested agent from container: {tool_result.name}", 110 | ) 111 | 112 | child_response = self.run_agent(tool_result.name, user_input) 113 | 114 | return { 115 | "id": id, 116 | "tool_result": str(child_response.get("content", child_response)), 117 | "name": function_name, 118 | } 119 | 120 | def _handle_tool_call( 121 | self, 122 | agent: Agent, 123 | tool: Callable, 124 | function_name: str, 125 | arguments: dict, 126 | index: int, 127 | user_input: Optional[Any], 128 | id: Any, 129 | current_messages: list, 130 | assistant_message: list, 131 | ): 132 | write_log( 133 | self.log, 134 | self.logger, 135 | f"Invoking callable tool: {tool.__name__}", 136 | ) 137 | 138 | tool_result = tool(**arguments) 139 | 140 | if isinstance(tool_result, Agent): 141 | write_log( 142 | self.log, 143 | self.tool_logger, 144 | f"{{tool_name: {tool.__name__}, arguments: {arguments}, result: {tool_result}}}", 145 | ) 146 | tool_response = self._handle_agent_tool_call( 147 | tool_result, function_name, id, user_input 148 | ) 149 | 150 | else: 151 | write_log( 152 | self.log, 153 | self.tool_logger, 154 | f"{{tool_name: {tool.__name__}, arguments: {arguments}, result: {tool_result}}}", 155 | ) 156 | tool_response = { 157 | "id": id, 158 | "tool_result": str(tool_result), 159 | "name": function_name, 160 | } 161 | 162 | self._update_current_message( 163 | agent, current_messages, [tool_response], assistant_message[index] 164 | ) 165 | 166 | write_log( 167 | self.log, 168 | self.logger, 169 | f"Tool '{tool.__name__}' completed successfully.", 170 | ) 171 | 172 | def _handle_handover_tool_call( 173 | self, 174 | agent: Agent, 175 | tool: Callable, 176 | function_name: str, 177 | index: int, 178 | user_input: Optional[Any], 179 | id: Any, 180 | 
current_messages: list, 181 | assistant_message: list, 182 | ): 183 | write_log( 184 | self.log, 185 | self.logger, 186 | f"Invoking handover tool: {tool.__name__}", 187 | ) 188 | 189 | tool_result = tool() 190 | 191 | write_log( 192 | self.log, 193 | self.tool_logger, 194 | f"{{tool_name: {tool.__name__}, arguments: {{}}, result: {tool_result}}}", 195 | ) 196 | 197 | write_log( 198 | self.log, 199 | self.logger, 200 | f"Delegating execution to agent: {tool_result}", 201 | ) 202 | 203 | if tool.share_context: 204 | child_response = self.run_agent( 205 | tool_result, 206 | self.get_model_current_messages( 207 | self.get_agent(tool_result)[1], current_messages 208 | ), 209 | ) 210 | else: 211 | child_response = self.run_agent(tool_result, user_input) 212 | 213 | tool_response = { 214 | "id": id, 215 | "tool_result": str(child_response.get("content", child_response)), 216 | "name": function_name, 217 | } 218 | 219 | self._update_current_message( 220 | agent, current_messages, [tool_response], assistant_message[index] 221 | ) 222 | 223 | write_log( 224 | self.log, 225 | self.logger, 226 | f"Handover tool '{tool.__name__}' completed successfully.", 227 | ) 228 | 229 | def _handle_container_tool_call( 230 | self, 231 | agent: Agent, 232 | tool: Callable, 233 | function_name: str, 234 | arguments: dict, 235 | index: int, 236 | user_input: Optional[Any], 237 | id: Any, 238 | current_messages: list, 239 | assistant_message: list, 240 | ): 241 | write_log( 242 | self.log, 243 | self.logger, 244 | f"Invoking container tool: {tool.name} with arguments: {arguments}", 245 | ) 246 | 247 | tool_result = tool.run(arguments) 248 | 249 | if isinstance(tool_result, Agent): 250 | write_log( 251 | self.log, 252 | self.container_logger, 253 | f"{{tool_name: {tool.name}, arguments: {arguments}, result: {tool_result}}}", 254 | ) 255 | 256 | tool_response = self._handle_agent_tool_call( 257 | tool_result, function_name, id, user_input 258 | ) 259 | 260 | else: 261 | write_log( 262 | self.log, 263 | self.container_logger, 264 | f"{{tool_name: {tool.name}, arguments: {arguments}, result: {tool_result}}}", 265 | ) 266 | 267 | tool_response = { 268 | "id": id, 269 | "tool_result": str(tool_result), 270 | "name": function_name, 271 | } 272 | 273 | self._update_current_message( 274 | agent, current_messages, [tool_response], assistant_message[index] 275 | ) 276 | 277 | write_log( 278 | self.log, 279 | self.logger, 280 | f"Container tool '{tool.name}' completed successfully.", 281 | ) 282 | 283 | def process_tools( 284 | self, 285 | agent: Agent, 286 | tool_calls: list, 287 | user_input: Any, 288 | current_messages: list, 289 | assistant_message: list, 290 | ): 291 | for i, tool_call in enumerate(tool_calls): 292 | output = agent.get_model().get_keys_in_tool_output(tool_call) 293 | id, function_name = output["id"], output["name"] 294 | arguments = ( 295 | json.loads(output["arguments"]) 296 | if isinstance(output["arguments"], str) 297 | else output["arguments"] 298 | ) 299 | 300 | write_log( 301 | self.log, 302 | self.logger, 303 | f"Preparing to invoke tool '{function_name}' with arguments: {arguments}", 304 | ) 305 | 306 | for tool in agent.tools: 307 | if isinstance(tool, Callable) and ( 308 | tool.__name__ == function_name 309 | and not tool.__name__.startswith("handover_") 310 | ): 311 | self._handle_tool_call( 312 | agent, 313 | tool, 314 | function_name, 315 | arguments, 316 | i, 317 | user_input, 318 | id, 319 | current_messages, 320 | assistant_message, 321 | ) 322 | 323 | elif isinstance(tool, Callable) and ( 
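                    # Handover tools follow the "handover_" naming convention and
                    # return the name of another agent to delegate to, rather than
                    # a plain tool result.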
324 | tool.__name__.startswith("handover_") 325 | and tool.__name__ == function_name 326 | ): 327 | self._handle_handover_tool_call( 328 | agent, 329 | tool, 330 | function_name, 331 | i, 332 | user_input, 333 | id, 334 | current_messages, 335 | assistant_message, 336 | ) 337 | 338 | elif isinstance(tool, Container) and ( 339 | tool.name == function_name and not tool.name.startswith("handover_") 340 | ): 341 | self._handle_container_tool_call( 342 | agent, 343 | tool, 344 | function_name, 345 | arguments, 346 | i, 347 | user_input, 348 | id, 349 | current_messages, 350 | assistant_message, 351 | ) 352 | 353 | def run_agent(self, name: str, user_input: Optional[Any] = None) -> Dict: 354 | """ 355 | Run a specific agent's non-streaming response. 356 | 357 | Args: 358 | name (str): The name of the agent to run. 359 | user_input (str, optional): Additional user input to append to messages. 360 | 361 | Returns: 362 | Any: The agent's response. 363 | """ 364 | _, agent = self._initialize_user_input(name, user_input) 365 | 366 | write_log(self.log, self.logger, f"Executing agent: {agent.name}") 367 | response = agent.get_response() 368 | 369 | if not response["tool_calls"]: 370 | write_log( 371 | self.log, 372 | self.logger, 373 | f"Agent {agent.name} returned a response without tool calls.", 374 | ) 375 | return response 376 | 377 | tool_calls = response["tool_calls"] 378 | current_messages = agent.get_messages() 379 | assistant_message = agent.get_model().get_assistant_message(response) 380 | 381 | self.process_tools( 382 | agent, tool_calls, user_input, current_messages, assistant_message 383 | ) 384 | 385 | return self.run_agent( 386 | agent.name, self.get_model_current_messages(agent, current_messages) 387 | ) 388 | 389 | def run_agent_stream( 390 | self, 391 | name: str, 392 | user_input: Optional[Any] = None, 393 | ) -> Generator[Dict, None, None]: 394 | """ 395 | Run a specific agent's streaming response. 396 | 397 | Args: 398 | name (str): The name of the agent to run. 399 | user_input (str, optional): Additional user input to append to messages. 400 | 401 | Returns: 402 | Any: The agent's response. 403 | """ 404 | _, agent = self._initialize_user_input(name, user_input) 405 | 406 | write_log(self.log, self.logger, f"Executing agent (streaming): {agent.name}") 407 | 408 | result = agent.get_stream_response() 409 | response = "" 410 | 411 | for resp in result: 412 | if resp["tool_calls"]: 413 | response = resp 414 | else: 415 | yield resp 416 | 417 | if not response: 418 | write_log( 419 | self.log, self.logger, f"{agent.name} returned without any tool calls" 420 | ) 421 | return 422 | 423 | tool_calls = response["tool_calls"] 424 | current_messages = agent.get_messages() 425 | assistant_message = agent.get_model().get_assistant_message(response) 426 | 427 | self.process_tools( 428 | agent, tool_calls, user_input, current_messages, assistant_message 429 | ) 430 | 431 | write_log( 432 | self.log, self.logger, f"Streaming final response from agent: {agent.name}" 433 | ) 434 | 435 | yield from self.run_agent_stream( 436 | agent.name, self.get_model_current_messages(agent, current_messages) 437 | ) 438 | return 439 | --------------------------------------------------------------------------------
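Usage sketch (added for illustration, not part of the repository): the Agent
constructor below is an assumption inferred from the attributes AgentManager
reads (agent.name, agent.instruction, agent.tools, agent.get_model()); the
model name, API key, and get_weather tool are placeholders.

from agents_manager import Agent, AgentManager
from agents_manager.models import OpenAi


def get_weather(city: str) -> str:
    """Toy tool: returns a canned weather report."""
    return f"It is sunny in {city}."


# Placeholder model name and API key.
model = OpenAi(name="gpt-4o-mini", api_key="sk-placeholder")

# Assumed Agent signature (Agent.py is not included in this dump).
agent = Agent(
    name="assistant",
    instruction="You are a helpful assistant.",
    model=model,
    tools=[get_weather],
)

manager = AgentManager(log=False)
manager.add_agent(agent)

# Non-streaming: run_agent resolves tool calls, appends their results to the
# conversation, and re-runs the agent until no tool calls remain.
result = manager.run_agent("assistant", "What is the weather in Paris?")
print(result["content"])

# Streaming: chunks without tool calls are yielded as they arrive.
for chunk in manager.run_agent_stream("assistant", "Say hello."):
    if chunk.get("content"):
        print(chunk["content"], end="")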