├── .gitignore ├── LICENSE ├── README.md ├── assets └── microagent.png ├── examples ├── basic │ ├── agent_handoff_anthropic.py │ ├── agent_handoff_groq.py │ ├── agent_handoff_openai.py │ ├── anthropic_example.py │ ├── docs_example.py │ ├── groq_example.py │ └── minimum.py └── triage_agent │ └── groq_triage_example.py ├── microagent ├── __init__.py ├── core.py ├── llm │ ├── __init__.py │ ├── anthropic_client.py │ ├── base.py │ ├── factory.py │ ├── groq_client.py │ └── openai_client.py ├── repl │ ├── __init__.py │ └── repl.py ├── types.py └── util.py ├── pyproject.toml └── tests ├── __init__.py ├── conftest.py ├── mock_client.py ├── test_core.py ├── test_llm ├── test_anthropic_client.py ├── test_groq_client.py └── test_openai_client.py └── test_util.py /.gitignore: -------------------------------------------------------------------------------- 1 | # Byte-compiled / optimized / DLL files 2 | __pycache__/ 3 | *.py[cod] 4 | *$py.class 5 | 6 | # C extensions 7 | *.so 8 | 9 | # Distribution / packaging 10 | .Python 11 | build/ 12 | develop-eggs/ 13 | dist/ 14 | downloads/ 15 | eggs/ 16 | .eggs/ 17 | lib/ 18 | lib64/ 19 | parts/ 20 | sdist/ 21 | var/ 22 | wheels/ 23 | *.egg-info/ 24 | .installed.cfg 25 | *.egg 26 | 27 | # PyInstaller 28 | # Usually these files are written by a python script from a template 29 | *.manifest 30 | *.spec 31 | 32 | # Installer logs 33 | pip-log.txt 34 | pip-delete-this-directory.txt 35 | 36 | # Unit test / coverage reports 37 | htmlcov/ 38 | .tox/ 39 | .nox/ 40 | .coverage 41 | .coverage.* 42 | .cache 43 | nosetests.xml 44 | coverage.xml 45 | *.cover 46 | *.py,cover 47 | .hypothesis/ 48 | 49 | # pytest 50 | .pytest_cache/ 51 | 52 | # MyPy 53 | .mypy_cache/ 54 | .dmypy.json 55 | dmypy.json 56 | 57 | # PEP 582; used by e.g. 
https://github.com/David-OConnor/pyflow 58 | __pypackages__/ 59 | 60 | # Environments 61 | env/ 62 | venv/ 63 | ENV/ 64 | env.bak/ 65 | venv.bak/ 66 | myenv/ 67 | 68 | # Jupyter Notebook 69 | .ipynb_checkpoints/ 70 | 71 | # PyCharm 72 | .idea/ 73 | 74 | # VSCode 75 | .vscode/ 76 | 77 | # Spyder project settings 78 | .spyderproject 79 | .spyproject 80 | 81 | # Rope project settings 82 | .ropeproject 83 | 84 | # MacOS 85 | .DS_Store 86 | 87 | # Windows 88 | Thumbs.db 89 | ehthumbs.db 90 | Desktop.ini 91 | 92 | # Other backups 93 | *.swp 94 | *.swo 95 | *.bak 96 | *~ 97 | 98 | # Add this line to .gitignore 99 | tests/fixtures/vcr_cassettes/*.yaml -------------------------------------------------------------------------------- /LICENSE: -------------------------------------------------------------------------------- 1 | MIT License 2 | 3 | Copyright (c) 2024 Vectorize AI, Inc. 4 | 5 | Permission is hereby granted, free of charge, to any person obtaining a copy 6 | of this software and associated documentation files (the "Software"), to deal 7 | in the Software without restriction, including without limitation the rights 8 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell 9 | copies of the Software, and to permit persons to whom the Software is 10 | furnished to do so, subject to the following conditions: 11 | 12 | The above copyright notice and this permission notice shall be included in all 13 | copies or substantial portions of the Software. 14 | 15 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 16 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 17 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE 18 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 19 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, 20 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE 21 | SOFTWARE. 
22 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | ![Microagent Logo](assets/microagent.png) 2 | 3 | # Microagent Framework 4 | 5 | Microagent is a lightweight framework for orchestrating multi-agent systems, inspired by and forked from OpenAI's Swarm project. 6 | 7 | It adds support for Groq and Anthropic LLMs while retaining the same agent semantics. 8 | 9 | ## Overview 10 | 11 | Microagent focuses on providing a simple yet powerful interface for creating and managing networks of AI agents. It leverages the core concepts introduced in Swarm, such as agent coordination and handoffs, while introducing its own enhancements and modifications. 12 | 13 | > **Note**: Microagent is a separate project from OpenAI's Swarm. Because Swarm is positioned as an experimental framework with no intention to maintain it, Microagent looks to pick up the torch and build on it. While it shares some foundational concepts, it has its own development trajectory and feature set and has already deviated quite a bit. 14 | 15 | ## Key Features 16 | 17 | - **Lightweight Agent Orchestration**: Create and manage networks of AI agents with ease. 18 | - **Flexible Handoffs**: Seamlessly transfer control between agents during execution. 19 | - **Function Integration**: Easily integrate Python functions as tools for your agents. 20 | - **Context Management**: Maintain and update context variables across agent interactions. 21 | - **Streaming Support**: Real-time streaming of agent responses for interactive applications. 
22 | 23 | ## Installation 24 | 25 | ```shell 26 | pip install -r requirements.txt 27 | ``` 28 | 29 | ## Cloning the Repository 30 | 31 | To clone the repository to your local machine, use the following commands: 32 | 33 | ```shell 34 | git clone https://github.com/chrislatimer/microagent.git 35 | cd microagent 36 | ``` 37 | 38 | ## Quick Start 39 | 40 | ```python 41 | from microagent import Microagent, Agent 42 | 43 | client = Microagent(llm_type='openai') 44 | 45 | agent_a = Agent( 46 | name="Agent A", 47 | instructions="You are a helpful agent.", 48 | model="gpt-3.5-turbo", 49 | ) 50 | 51 | agent_b = Agent( 52 | name="Agent B", 53 | instructions="You specialize in concise responses.", 54 | model="gpt-3.5-turbo", 55 | ) 56 | 57 | def transfer_to_concise_agent(): 58 | """Transfer users who need concise answers immediately.""" 59 | return agent_b 60 | 61 | agent_a.functions.append(transfer_to_concise_agent) 62 | 63 | response = client.run( 64 | agent=agent_a, 65 | messages=[{"role": "user", "content": "I need a brief answer."}], 66 | ) 67 | 68 | print(response.messages[-1]["content"]) 69 | ``` 70 | 71 | ## Acknowledgments 72 | 73 | Microagent builds upon the innovative work done by OpenAI in their Swarm project. We are grateful for their contributions to the field of multi-agent systems and open-source AI development. 74 | 75 | ## License 76 | 77 | Microagent is released under the MIT License. See the [LICENSE](LICENSE) file for details. 78 | 79 | --- 80 | 81 | Microagent: Empowering developers to build sophisticated multi-agent systems with ease. 
82 | 83 | # Thanks to the creators of Swarm: 84 | 85 | - Ilan Bigio - [ibigio](https://github.com/ibigio) 86 | - James Hills - [jhills20](https://github.com/jhills20) 87 | - Shyamal Anadkat - [shyamal-anadkat](https://github.com/shyamal-anadkat) 88 | - Charu Jaiswal - [charuj](https://github.com/charuj) 89 | - Colin Jarvis - [colin-openai](https://github.com/colin-openai) 90 | - Katia Gil Guzman - [katia-openai](https://github.com/katia-openai) 91 | -------------------------------------------------------------------------------- /assets/microagent.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/chrislatimer/microagent/06807e38a39337f3fc2a190d5cd75dd79a165bde/assets/microagent.png -------------------------------------------------------------------------------- /examples/basic/agent_handoff_anthropic.py: -------------------------------------------------------------------------------- 1 | # This is an example script demonstrating agent handoff using Anthropic LLM 2 | 3 | from microagent import Microagent, Agent 4 | from microagent.repl import run_demo_loop 5 | 6 | client = Microagent(llm_type='anthropic') 7 | 8 | english_agent = Agent( 9 | name="EnglishAgent", 10 | instructions="You only speak English.", 11 | model="claude-3-sonnet-20240229", # Use an appropriate Claude model 12 | tool_choice="auto" 13 | ) 14 | 15 | spanish_agent = Agent( 16 | name="SpanishAgent", 17 | instructions="You only speak Spanish.", 18 | model="claude-3-sonnet-20240229", # Use an appropriate Claude model 19 | tool_choice="auto" 20 | ) 21 | 22 | def transfer_to_spanish_agent(): 23 | """Transfer spanish speaking users immediately.""" 24 | return spanish_agent 25 | 26 | english_agent.functions.append(transfer_to_spanish_agent) 27 | 28 | def transfer_to_english_agent(): 29 | """Transfer english speaking users immediately.""" 30 | return english_agent 31 | 32 | spanish_agent.functions.append(transfer_to_english_agent) 33 | 34 
| messages = [{"role": "user", "content": "Hola. ¿Como estás?"}] 35 | response = client.run(agent=english_agent, messages=messages) 36 | 37 | print(response.messages[-1]["content"]) 38 | 39 | if __name__ == "__main__": 40 | run_demo_loop(english_agent, llm_type='anthropic', debug=True) 41 | -------------------------------------------------------------------------------- /examples/basic/agent_handoff_groq.py: -------------------------------------------------------------------------------- 1 | # This is an example script demonstrating agent handoff using Groq LLM 2 | 3 | from microagent import Microagent, Agent 4 | from microagent.repl import run_demo_loop 5 | 6 | client = Microagent(llm_type='groq') 7 | 8 | english_agent = Agent( 9 | name="EnglishAgent", 10 | instructions="You only speak English.", 11 | model="llama3-groq-70b-8192-tool-use-preview", # Correct Groq model 12 | tool_choice="auto" 13 | ) 14 | 15 | spanish_agent = Agent( 16 | name="SpanishAgent", 17 | instructions="You only speak Spanish.", 18 | model="llama3-groq-70b-8192-tool-use-preview", # Correct Groq model 19 | tool_choice="auto" 20 | ) 21 | 22 | def transfer_to_spanish_agent(): 23 | """Transfer spanish speaking users immediately.""" 24 | return spanish_agent 25 | 26 | english_agent.functions.append(transfer_to_spanish_agent) 27 | 28 | def transfer_to_english_agent(): 29 | """Transfer english speaking users immediately.""" 30 | return english_agent 31 | 32 | spanish_agent.functions.append(transfer_to_english_agent) 33 | 34 | messages = [{"role": "user", "content": "Hola. 
¿Como estás?"}] 35 | response = client.run(agent=english_agent, messages=messages) 36 | 37 | print(response.messages[-1]["content"]) 38 | 39 | if __name__ == "__main__": 40 | run_demo_loop(english_agent, llm_type='groq') 41 | -------------------------------------------------------------------------------- /examples/basic/agent_handoff_openai.py: -------------------------------------------------------------------------------- 1 | # This is an example script demonstrating agent handoff using OpenAI LLM 2 | 3 | from microagent import Microagent, Agent 4 | from microagent.repl import run_demo_loop 5 | 6 | client = Microagent(llm_type='openai') 7 | 8 | english_agent = Agent( 9 | name="EnglishAgent", 10 | instructions="You only speak English.", 11 | model="gpt-3.5-turbo", # Make sure to use a valid OpenAI model 12 | tool_choice="auto" 13 | ) 14 | 15 | spanish_agent = Agent( 16 | name="SpanishAgent", 17 | instructions="You only speak Spanish.", 18 | model="gpt-3.5-turbo", # Make sure to use a valid OpenAI model 19 | tool_choice="auto" 20 | ) 21 | 22 | def transfer_to_spanish_agent(): 23 | """Transfer spanish speaking users immediately.""" 24 | return spanish_agent 25 | 26 | english_agent.functions.append(transfer_to_spanish_agent) 27 | 28 | def transfer_to_english_agent(): 29 | """Transfer english speaking users immediately.""" 30 | return english_agent 31 | 32 | spanish_agent.functions.append(transfer_to_english_agent) 33 | 34 | messages = [{"role": "user", "content": "Hola. 
¿Como estás?"}] 35 | response = client.run(agent=english_agent, messages=messages) 36 | 37 | print(response.messages[-1]["content"]) 38 | 39 | if __name__ == "__main__": 40 | run_demo_loop(english_agent) 41 | -------------------------------------------------------------------------------- /examples/basic/anthropic_example.py: -------------------------------------------------------------------------------- 1 | # This is an example script demonstrating the usage of the microagent framework with Anthropic LLM 2 | 3 | import os 4 | from microagent.core import Microagent 5 | from microagent.types import Agent 6 | 7 | # Check if ANTHROPIC_API_KEY is set in the environment 8 | if "ANTHROPIC_API_KEY" not in os.environ: 9 | raise ValueError("ANTHROPIC_API_KEY environment variable is not set. Please set it before running this script.") 10 | 11 | # Initialize Microagent with Anthropic as the LLM provider 12 | client = Microagent(llm_type='anthropic') 13 | 14 | agent = Agent( 15 | name="ClaudeHaikuAgent", 16 | instructions="You are Claude 3 Haiku, a helpful and concise AI assistant.", 17 | model="claude-3-haiku-20240307" 18 | ) 19 | 20 | # The system message will be handled separately by the AnthropicClient 21 | messages = [ 22 | {"role": "system", "content": agent.instructions}, 23 | {"role": "user", "content": "Hi Claude! 
Can you briefly explain what makes you unique?"} 24 | ] 25 | 26 | response = client.run(agent=agent, messages=messages) 27 | 28 | print(f"Claude Haiku: {response.messages[-1]['content']}") 29 | -------------------------------------------------------------------------------- /examples/basic/docs_example.py: -------------------------------------------------------------------------------- 1 | # This is an example script demonstrating the usage of the microagent framework 2 | 3 | from microagent import Microagent, Agent 4 | 5 | client = Microagent(llm_type='openai') 6 | 7 | agent_a = Agent( 8 | name="Agent A", 9 | instructions="You are a helpful agent.", 10 | model="gpt-3.5-turbo", 11 | ) 12 | 13 | agent_b = Agent( 14 | name="Agent B", 15 | instructions="You specialize in concise responses.", 16 | model="gpt-3.5-turbo", 17 | ) 18 | 19 | def transfer_to_concise_agent(): 20 | """Transfer spanish speaking users immediately.""" 21 | return agent_b 22 | 23 | agent_a.functions.append(transfer_to_concise_agent) 24 | 25 | response = client.run( 26 | agent=agent_a, 27 | messages=[{"role": "user", "content": "I need a brief answer."}], 28 | ) 29 | 30 | print(response.messages[-1]["content"]) 31 | -------------------------------------------------------------------------------- /examples/basic/groq_example.py: -------------------------------------------------------------------------------- 1 | # This is an example script demonstrating the usage of the microagent framework with Groq LLM 2 | 3 | import os 4 | from microagent.core import Microagent 5 | from microagent.types import Agent 6 | 7 | # Check if GROQ_API_KEY is set in the environment 8 | if "GROQ_API_KEY" not in os.environ: 9 | raise ValueError("GROQ_API_KEY environment variable is not set. 
Please set it before running this script.") 10 | 11 | # Initialize Microagent with Groq as the LLM provider 12 | client = Microagent(llm_type='groq') 13 | 14 | agent = Agent( 15 | name="GroqAgent", 16 | instructions="You are a helpful agent powered by Groq.", 17 | model="llama-3.1-8b-instant", 18 | tool_choice="auto" 19 | ) 20 | 21 | messages = [{"role": "user", "content": "Hi! Tell me about Groq."}] 22 | response = client.run(agent=agent, messages=messages) 23 | 24 | print(response.messages[-1]["content"]) 25 | -------------------------------------------------------------------------------- /examples/basic/minimum.py: -------------------------------------------------------------------------------- 1 | # This is an example script demonstrating the usage of the microagent framework 2 | 3 | from microagent import Agent 4 | from microagent.core import Microagent 5 | 6 | client = Microagent(llm_type='openai') 7 | 8 | agent = Agent( 9 | name="Agent", 10 | instructions="You are a helpful agent.", 11 | model="gpt-3.5-turbo", 12 | ) 13 | 14 | messages = [{"role": "user", "content": "Hi!"}] 15 | response = client.run(agent=agent, messages=messages) 16 | 17 | print(response.messages[-1]["content"]) 18 | -------------------------------------------------------------------------------- /examples/triage_agent/groq_triage_example.py: -------------------------------------------------------------------------------- 1 | from microagent.core import Microagent 2 | from microagent.types import Agent 3 | from microagent.repl import run_demo_loop 4 | 5 | # Initialize Microagent with Groq as the LLM provider 6 | client = Microagent(llm_type='groq') 7 | 8 | # Start of new agent section 9 | def process_refund(item_id, reason="NOT SPECIFIED"): 10 | """Refund an item. Refund an item. Make sure you have the item_id of the form item_... Ask for user confirmation before processing the refund.""" 11 | print(f"[mock] Refunding item {item_id} because {reason}...") 12 | return "Success!" 
13 | 14 | def apply_discount(): 15 | """Apply a discount to the user's cart.""" 16 | print("[mock] Applying discount...") 17 | return "Applied discount of 11%" 18 | 19 | triage_agent = Agent( 20 | model="llama-3.1-70b-versatile", 21 | tool_choice="auto", 22 | name="Triage Agent", 23 | instructions="Determine which agent is best suited to handle the user's request, and transfer the conversation to that agent.", 24 | ) 25 | sales_agent = Agent( 26 | model="llama-3.1-70b-versatile", 27 | tool_choice="auto", 28 | name="Sales Agent", 29 | instructions="Be super enthusiastic about selling bees.", 30 | ) 31 | refunds_agent = Agent( 32 | model="llama-3.1-70b-versatile", 33 | tool_choice="auto", 34 | name="Refunds Agent", 35 | instructions="Help the user with a refund. If the reason is that it was too expensive, offer the user a refund code. If they insist, then process the refund.", 36 | functions=[process_refund, apply_discount], 37 | ) 38 | 39 | def transfer_back_to_triage(): 40 | """Call this function if a user is asking about a topic that is not handled by the current agent.""" 41 | return triage_agent 42 | 43 | def transfer_to_sales(): 44 | return sales_agent 45 | 46 | def transfer_to_refunds(): 47 | return refunds_agent 48 | 49 | triage_agent.functions = [transfer_to_sales, transfer_to_refunds] 50 | sales_agent.functions.append(transfer_back_to_triage) 51 | refunds_agent.functions.append(transfer_back_to_triage) 52 | 53 | run_demo_loop(triage_agent, llm_type='groq', debug=True) -------------------------------------------------------------------------------- /microagent/__init__.py: -------------------------------------------------------------------------------- 1 | from .core import Microagent 2 | from .types import Agent, Response, Result 3 | 4 | __all__ = ['Microagent', 'Agent', 'Response', 'Result'] -------------------------------------------------------------------------------- /microagent/core.py: 
-------------------------------------------------------------------------------- 1 | from typing import List, Dict, Any 2 | from microagent.llm.factory import LLMFactory 3 | from .types import Agent, Response, Result 4 | from .util import function_to_json, debug_print 5 | import json 6 | 7 | 8 | class Microagent: 9 | def __init__(self, llm_type='openai'): 10 | self.client = LLMFactory.create(llm_type) 11 | 12 | def get_chat_completion( 13 | self, 14 | agent: Agent, 15 | history: List[Dict[str, Any]], 16 | context_variables: Dict[str, Any], 17 | model_override: str, 18 | stream: bool, 19 | debug: bool, 20 | ) -> Dict[str, Any]: 21 | messages = self._prepare_messages(agent, history, context_variables, debug) 22 | tools = self._prepare_tools(agent, debug) 23 | 24 | params = { 25 | "model": model_override or agent.model, 26 | "messages": messages, 27 | "tools": tools, 28 | "tool_choice": agent.tool_choice if agent.tool_choice is not None else "auto", 29 | } 30 | 31 | if stream: 32 | return self.client.stream_chat_completion(**params) 33 | else: 34 | return self.client.chat_completion(**params) 35 | 36 | def _prepare_messages(self, agent: Agent, history: List[Dict[str, Any]], context_variables: Dict[str, Any], debug: bool) -> List[Dict[str, Any]]: 37 | instructions = agent.instructions(context_variables) if callable(agent.instructions) else agent.instructions 38 | system_message = self.client.prepare_system_message(instructions) 39 | messages = [system_message] + history 40 | debug_print(debug, "Using instructions:", instructions) 41 | debug_print(debug, "Getting chat completion for:", messages) 42 | 43 | return messages 44 | 45 | def _prepare_tools(self, agent: Agent, debug: bool) -> List[Dict[str, Any]]: 46 | tools = [function_to_json(f) for f in agent.functions] 47 | debug_print(debug, "Tools is set to:", tools) 48 | return tools 49 | 50 | def handle_tool_calls( 51 | self, 52 | tool_calls: Any, 53 | functions: List[Any], 54 | context_variables: Dict[str, Any], 55 | 
debug: bool, 56 | ) -> Response: 57 | function_map = {f.__name__: f for f in functions} 58 | partial_response = Response(messages=[], agent=None, context_variables={}) 59 | 60 | for tool_call in tool_calls: 61 | try: 62 | name = tool_call['function']['name'] 63 | arguments = tool_call['function']['arguments'] 64 | tool_call_id = tool_call['id'] 65 | 66 | if name not in function_map: 67 | raise ValueError(f"Tool {name} not found in function map.") 68 | 69 | debug_print(debug, f"Processing tool call: {name} with arguments {arguments}") 70 | 71 | func = function_map[name] 72 | args = json.loads(arguments) 73 | if "context_variables" in func.__code__.co_varnames: 74 | args["context_variables"] = context_variables 75 | raw_result = func(**args) 76 | result: Result = self._handle_function_result(raw_result, debug) 77 | tool_response = self.client.prepare_tool_response( 78 | tool_call_id=tool_call_id, 79 | tool_name=name, 80 | content=result.value 81 | ) 82 | partial_response.messages.append(tool_response) 83 | partial_response.context_variables.update(result.context_variables) 84 | if result.agent: 85 | partial_response.agent = result.agent 86 | return partial_response 87 | 88 | except Exception as e: 89 | error_message = f"Error processing tool call: {str(e)}" 90 | debug_print(debug, error_message) 91 | partial_response.messages.append({ 92 | "role": "tool", #TODO: OAI lets you use tool, Anthropic needs user 93 | "tool_call_id": tool_call.get('id', 'unknown'), 94 | "tool_name": tool_call['function']['name'], 95 | "content": error_message, 96 | }) 97 | 98 | return partial_response 99 | 100 | def _handle_function_result(self, result: Any, debug: bool) -> Result: 101 | if isinstance(result, Result): 102 | return result 103 | elif isinstance(result, Agent): 104 | return Result(value=json.dumps({"assistant": result.name}), agent=result) 105 | else: 106 | try: 107 | return Result(value=str(result)) 108 | except Exception as e: 109 | error_message = f"Failed to cast response 
to string: {result}. Make sure agent functions return a string or Result object. Error: {str(e)}" 110 | debug_print(debug, error_message) 111 | raise TypeError(error_message) 112 | 113 | def run( 114 | self, 115 | agent: Agent, 116 | messages: List[Dict[str, Any]], 117 | context_variables: Dict[str, Any] = {}, 118 | model_override: str = None, 119 | stream: bool = False, 120 | debug: bool = False, 121 | max_turns: int = float("inf"), 122 | execute_tools: bool = True, 123 | ) -> Response: 124 | active_agent = agent 125 | context_variables = context_variables.copy() 126 | history = messages.copy() 127 | init_len = len(messages) 128 | turn_count = 0 129 | 130 | while turn_count < max_turns and active_agent: 131 | print(f"Turn {turn_count} - Active agent: {active_agent.name}") 132 | 133 | # Get LLM completion 134 | completion = self.get_chat_completion( 135 | agent=active_agent, 136 | history=history, 137 | context_variables=context_variables, 138 | model_override=model_override, 139 | stream=stream, 140 | debug=debug 141 | ) 142 | 143 | # Parse response 144 | message = self.client.parse_response(completion) 145 | message['sender'] = active_agent.name 146 | 147 | # Update history 148 | history.append(message) 149 | 150 | # Handle tool calls if applicable 151 | tool_calls = message.get('tool_calls', []) 152 | 153 | if not tool_calls or not execute_tools: 154 | print("Ending turn. No tool calls or tool execution disabled.") 155 | break 156 | 157 | partial_response = self.handle_tool_calls( 158 | tool_calls, active_agent.functions, context_variables, debug 159 | ) 160 | 161 | # Update history and context variables 162 | history.extend(partial_response.messages) 163 | context_variables.update(partial_response.context_variables) 164 | 165 | # Update agent if applicable 166 | if partial_response.agent: 167 | active_agent = partial_response.agent 168 | print("Agent updated to:", active_agent) 169 | 170 | turn_count += 1 171 | 172 | print("Run method complete. 
Returning response.") 173 | return Response( 174 | messages=history[init_len:], 175 | agent=active_agent, 176 | context_variables=context_variables, 177 | ) -------------------------------------------------------------------------------- /microagent/llm/__init__.py: -------------------------------------------------------------------------------- 1 | from .factory import LLMFactory 2 | from .base import LLMClient 3 | from .openai_client import OpenAIClient 4 | from .anthropic_client import AnthropicClient 5 | from .groq_client import GroqClient 6 | 7 | __all__ = ['LLMFactory', 'LLMClient', 'OpenAIClient', 'AnthropicClient', 'GroqClient'] -------------------------------------------------------------------------------- /microagent/llm/anthropic_client.py: -------------------------------------------------------------------------------- 1 | from typing import Dict, Any, List 2 | from anthropic import Anthropic 3 | from .base import LLMClient 4 | import json 5 | 6 | class AnthropicClient(LLMClient): 7 | def __init__(self): 8 | self.client = Anthropic() 9 | self.default_model = "claude-3-opus-20240229" 10 | self.default_max_tokens = 1000 11 | 12 | def chat_completion(self, messages: List[Dict[str, Any]], **kwargs) -> Dict[str, Any]: 13 | # Prepare parameters for the API call 14 | params = self.prepare_chat_params(messages=messages, **kwargs) 15 | 16 | # Call the LLM API 17 | response = self.client.messages.create(**params) 18 | 19 | return response 20 | 21 | 22 | def stream_chat_completion(self, messages: List[Dict[str, Any]], **kwargs) -> Any: 23 | # Prepare parameters for the API call 24 | params = self.prepare_chat_params(messages=messages, **kwargs) 25 | params['stream'] = True # Enable streaming 26 | 27 | # Call the LLM API 28 | return self.client.messages.create(**params) 29 | 30 | def prepare_messages(self, messages: List[Dict[str, Any]]) -> List[Dict[str, Any]]: 31 | # return [ 32 | # {'role': m['role'], 'content': m['content']} 33 | # for m in messages 34 | # ] 
35 | return messages 36 | 37 | def prepare_tools(self, tools: List[Dict[str, Any]]) -> List[Dict[str, Any]]: 38 | prepared_tools = [] 39 | for tool in tools: 40 | if 'function' in tool: 41 | # This is the initial format 42 | function = tool['function'] 43 | prepared_tool = { 44 | "name": function['name'], 45 | "description": function.get('description', ''), 46 | "input_schema": { 47 | "type": "object", 48 | "properties": function['parameters']['properties'], 49 | "required": function['parameters'].get('required', []) 50 | } 51 | } 52 | else: 53 | # This is the already processed format 54 | prepared_tool = { 55 | "name": tool['name'], 56 | "description": tool.get('description', ''), 57 | "input_schema": tool['input_schema'] 58 | } 59 | prepared_tools.append(prepared_tool) 60 | return prepared_tools 61 | 62 | def parse_response(self, response: Any) -> Dict[str, Any]: 63 | content = [] 64 | tool_calls = [] 65 | 66 | # Loop through response content and differentiate based on the type 67 | if hasattr(response, 'content') and isinstance(response.content, list): 68 | for block in response.content: 69 | if hasattr(block, 'type'): 70 | if block.type == 'text': 71 | # Collect text content from TextBlock 72 | content.append(getattr(block, 'text', '')) 73 | elif block.type == 'tool_use': 74 | # Collect tool usage details from ToolUseBlock 75 | tool_calls.append({ 76 | "id": getattr(block, 'id', 'unknown'), 77 | "function": { 78 | "name": getattr(block, 'name', ''), 79 | "arguments": json.dumps(getattr(block, 'input', {})) 80 | } 81 | }) 82 | 83 | # Return the structured response 84 | return { 85 | "role": "assistant", 86 | "content": "\n".join(content) if content else "None", 87 | "tool_calls": tool_calls if tool_calls else None 88 | } 89 | 90 | def prepare_chat_params(self, **kwargs) -> Dict[str, Any]: 91 | 92 | params = { 93 | "model": kwargs.get('model', self.default_model), 94 | "max_tokens": kwargs.get('max_tokens', self.default_max_tokens), 95 | "messages": [ 96 | {k: v 
for k, v in message.items() if k != 'tool_calls' and k!= 'sender'} 97 | for message in kwargs['messages'] 98 | ], 99 | } 100 | if 'tools' in kwargs and kwargs['tools']: 101 | params["tools"] = self.prepare_tools(kwargs['tools']) 102 | 103 | # Handle system message 104 | if params['messages'] and params['messages'][0]['role'] == 'system': 105 | system_message = params['messages'].pop(0) 106 | params['system'] = [{"type": "text", "text": system_message['content']}] 107 | 108 | 109 | return params 110 | 111 | def prepare_tool_response(self, tool_call_id: str, tool_name: str, content: str) -> Dict[str, Any]: 112 | return { 113 | "role": "user", 114 | "content": f"Tool '{tool_name}' response: {content}" 115 | } 116 | 117 | def prepare_system_message(self, instructions: str) -> Dict[str, Any]: 118 | return {"role": "system", "content": instructions} -------------------------------------------------------------------------------- /microagent/llm/base.py: -------------------------------------------------------------------------------- 1 | from abc import ABC, abstractmethod 2 | from typing import Dict, Any, List 3 | 4 | class LLMClient(ABC): 5 | @abstractmethod 6 | def chat_completion(self, messages: List[Dict[str, Any]] = None, **kwargs) -> Dict[str, Any]: 7 | if messages is None: 8 | messages = [] 9 | pass 10 | 11 | @abstractmethod 12 | def stream_chat_completion(self, messages: List[Dict[str, Any]] = None, **kwargs) -> Any: 13 | if messages is None: 14 | messages = [] 15 | pass 16 | 17 | @abstractmethod 18 | def prepare_messages(self, messages: List[Dict[str, Any]] = None) -> List[Dict[str, Any]]: 19 | if messages is None: 20 | messages = [] 21 | pass 22 | 23 | @abstractmethod 24 | def prepare_tools(self, tools: List[Dict[str, Any]] = None) -> List[Dict[str, Any]]: 25 | if tools is None: 26 | tools = [] 27 | pass 28 | 29 | @abstractmethod 30 | def parse_response(self, response: Any) -> Dict[str, Any]: 31 | pass 32 | 33 | @abstractmethod 34 | def 
prepare_chat_params(self, **kwargs) -> Dict[str, Any]: 35 | pass 36 | 37 | @abstractmethod 38 | def prepare_system_message(self, instructions: str) -> Dict[str, Any]: 39 | pass 40 | 41 | @abstractmethod 42 | def prepare_tool_response(self, tool_call_id: str, tool_name: str, content: str) -> Dict[str, Any]: 43 | pass 44 | -------------------------------------------------------------------------------- /microagent/llm/factory.py: -------------------------------------------------------------------------------- 1 | from .openai_client import OpenAIClient 2 | from .anthropic_client import AnthropicClient 3 | from .groq_client import GroqClient 4 | 5 | class LLMFactory: 6 | @staticmethod 7 | def create(llm_type): 8 | if llm_type == 'openai': 9 | return OpenAIClient() 10 | elif llm_type == 'anthropic': 11 | return AnthropicClient() 12 | elif llm_type == 'groq': 13 | return GroqClient() 14 | else: 15 | raise ValueError(f"Unsupported LLM type: {llm_type}") -------------------------------------------------------------------------------- /microagent/llm/groq_client.py: -------------------------------------------------------------------------------- 1 | from typing import Dict, Any, List 2 | import groq 3 | from .base import LLMClient 4 | import json 5 | 6 | class GroqClient(LLMClient): 7 | def __init__(self): 8 | self.client = groq.Groq() 9 | 10 | def chat_completion(self, messages: List[Dict[str, Any]], **kwargs) -> Dict[str, Any]: 11 | prepared_messages = self.prepare_messages(messages) 12 | chat_params = self.prepare_chat_params(messages=prepared_messages, **kwargs) 13 | 14 | response = self.client.chat.completions.create(**chat_params) 15 | return self.parse_response(response) 16 | 17 | def stream_chat_completion(self, messages: List[Dict[str, Any]], **kwargs) -> Any: 18 | prepared_messages = self.prepare_messages(messages) 19 | chat_params = self.prepare_chat_params(messages=prepared_messages, **kwargs) 20 | return self.client.chat.completions.create(stream=True, 
**chat_params) 21 | 22 | def prepare_messages(self, messages: List[Dict[str, Any]]) -> List[Dict[str, Any]]: 23 | return [ 24 | {k: v for k, v in msg.items() if k not in ['sender', 'tool_name']} 25 | for msg in messages 26 | ] 27 | 28 | def prepare_tools(self, tools: List[Dict[str, Any]]) -> List[Dict[str, Any]]: 29 | return tools 30 | 31 | def parse_response(self, response: Any) -> Dict[str, Any]: 32 | if isinstance(response, dict): 33 | return response 34 | 35 | else: 36 | # Extract the first choice from the response 37 | choice = response.choices[0] if response.choices else None 38 | 39 | if choice and choice.message: 40 | parsed_response = { 41 | "role": choice.message.role, 42 | "content": choice.message.content, 43 | } 44 | 45 | # Handle tool calls 46 | if choice.message.tool_calls: 47 | parsed_response["tool_calls"] = [ 48 | { 49 | "id": tool_call.id, 50 | "type": tool_call.type, 51 | "function": { 52 | "name": tool_call.function.name, 53 | "arguments": tool_call.function.arguments 54 | } 55 | } 56 | for tool_call in choice.message.tool_calls 57 | ] 58 | 59 | return parsed_response 60 | else: 61 | return { 62 | "role": None, 63 | "content": None, 64 | "tool_calls": None 65 | } 66 | 67 | def prepare_chat_params(self, messages: List[Dict[str, Any]], **kwargs) -> Dict[str, Any]: 68 | params = { 69 | "model": kwargs.get('model', 'llama3-groq-70b-8192-tool-use-preview'), # Default model for Groq 70 | "messages": messages 71 | } 72 | if 'tools' in kwargs and kwargs['tools']: 73 | params["tools"] = kwargs['tools'] 74 | if 'tool_choice' in kwargs: 75 | params["tool_choice"] = kwargs['tool_choice'] 76 | return params 77 | 78 | def prepare_system_message(self, instructions: str) -> Dict[str, Any]: 79 | return {"role": "system", "content": instructions} 80 | 81 | def prepare_tool_response(self, tool_call_id: str, tool_name: str, content: str) -> Dict[str, Any]: 82 | return { 83 | "role": "tool", 84 | "tool_call_id": tool_call_id, 85 | "name": tool_name, 86 | "content": 
content, 87 | } -------------------------------------------------------------------------------- /microagent/llm/openai_client.py: -------------------------------------------------------------------------------- 1 | from typing import Dict, Any, List 2 | from openai import OpenAI 3 | 4 | class OpenAIClient: 5 | def __init__(self): 6 | self.client = OpenAI() 7 | 8 | def chat_completion(self, messages: List[Dict[str, Any]], **kwargs) -> Dict[str, Any]: 9 | if 'tools' in kwargs and not kwargs['tools']: 10 | del kwargs['tools'] 11 | if 'model' not in kwargs: 12 | kwargs['model'] = 'gpt-3.5-turbo' # Default model 13 | params = self.prepare_chat_params(messages=messages, **kwargs) 14 | response = self.client.chat.completions.create( **params) 15 | return self.parse_response(response) 16 | 17 | def stream_chat_completion(self, messages: List[Dict[str, Any]], **kwargs) -> Any: 18 | if 'tools' in kwargs and not kwargs['tools']: 19 | del kwargs['tools'] 20 | if 'model' not in kwargs: 21 | kwargs['model'] = 'gpt-3.5-turbo' # Default model 22 | return self.client.chat.completions.create(messages=messages, stream=True, **kwargs) 23 | 24 | def prepare_messages(self, messages: List[Dict[str, Any]]) -> List[Dict[str, Any]]: 25 | return messages 26 | 27 | def prepare_tools(self, tools: List[Dict[str, Any]]) -> List[Dict[str, Any]]: 28 | return tools 29 | 30 | def parse_response(self, response: Any) -> Dict[str, Any]: 31 | if isinstance(response, dict): 32 | return response 33 | if isinstance(response, list): 34 | return { 35 | "role": "assistant", 36 | "content": None, 37 | "tool_calls": [ 38 | { 39 | "id": tool_call['id'], 40 | "type": tool_call['type'], 41 | "function": { 42 | "name": tool_call['function']['name'], 43 | "arguments": tool_call['function']['arguments'] 44 | } 45 | } for tool_call in response 46 | ] 47 | } 48 | message = response.choices[0].message 49 | return { 50 | "role": message.role, 51 | "content": message.content, 52 | "tool_calls": [ 53 | { 54 | "id": 
tool_call.id, 55 | "type": tool_call.type, 56 | "function": { 57 | "name": tool_call.function.name, 58 | "arguments": tool_call.function.arguments 59 | } 60 | } for tool_call in (message.tool_calls or []) 61 | ] 62 | } 63 | 64 | def prepare_chat_params(self, **kwargs) -> Dict[str, Any]: 65 | params = { 66 | "model": kwargs.get('model', 'gpt-3.5-turbo'), 67 | "messages": [ 68 | {k: v for k, v in message.items() if k != 'tool_calls' or v} 69 | for message in kwargs['messages'] 70 | ], 71 | } 72 | if 'tools' in kwargs and kwargs['tools']: # Check if tools exist and are not empty 73 | params["tools"] = kwargs['tools'] 74 | if 'tool_choice' in kwargs: # Only add tool_choice if tools exist 75 | params["tool_choice"] = kwargs['tool_choice'] 76 | 77 | return params 78 | 79 | def prepare_system_message(self, instructions: str) -> Dict[str, Any]: 80 | return {"role": "system", "content": instructions} 81 | 82 | def prepare_tool_response(self, tool_call_id: str, tool_name: str, content: str) -> Dict[str, Any]: 83 | return { 84 | "role": "tool", 85 | "tool_call_id": tool_call_id, 86 | "tool_name": tool_name, 87 | "content": content, 88 | } 89 | -------------------------------------------------------------------------------- /microagent/repl/__init__.py: -------------------------------------------------------------------------------- 1 | from .repl import run_demo_loop 2 | -------------------------------------------------------------------------------- /microagent/repl/repl.py: -------------------------------------------------------------------------------- 1 | import json 2 | from typing import Dict, Any, List 3 | from microagent import Microagent, Agent 4 | 5 | def process_and_print_streaming_response(response): 6 | content = "" 7 | last_sender = "" 8 | 9 | for chunk in response: 10 | if "sender" in chunk: 11 | last_sender = chunk["sender"] 12 | 13 | if "content" in chunk and chunk["content"] is not None: 14 | if not content and last_sender: 15 | 
print(f"\033[94m{last_sender}:\033[0m", end=" ", flush=True) 16 | last_sender = "" 17 | print(chunk["content"], end="", flush=True) 18 | content += chunk["content"] 19 | 20 | if "tool_calls" in chunk and chunk["tool_calls"] is not None: 21 | for tool_call in chunk["tool_calls"]: 22 | f = tool_call["function"] 23 | name = f["name"] 24 | if not name: 25 | continue 26 | print(f"\033[94m{last_sender}: \033[95m{name}\033[0m()") 27 | 28 | if "delim" in chunk and chunk["delim"] == "end" and content: 29 | print() # End of response message 30 | content = "" 31 | 32 | if "response" in chunk: 33 | return chunk["response"] 34 | 35 | def pretty_print_messages(messages: List[Dict[str, Any]]) -> None: 36 | for message in messages: 37 | if message["role"] != "assistant": 38 | continue 39 | 40 | print(f"\033[94m{message['sender']}\033[0m:", end=" ") 41 | 42 | if message["content"]: 43 | print(message["content"]) 44 | 45 | tool_calls = message.get("tool_calls") or [] 46 | if len(tool_calls) > 1: 47 | print() 48 | for tool_call in tool_calls: 49 | f = tool_call["function"] 50 | name, args = f["name"], f["arguments"] 51 | arg_str = json.dumps(json.loads(args)).replace(":", "=") 52 | print(f"\033[95m{name}\033[0m({arg_str[1:-1]})") 53 | 54 | def run_demo_loop( 55 | starting_agent: Agent, context_variables=None, stream=False, debug=False, llm_type='openai' 56 | ) -> None: 57 | client = Microagent(llm_type=llm_type) 58 | print(f"Starting Microagent CLI 🤖 using {llm_type.capitalize()} LLM") 59 | 60 | messages = [] 61 | agent = starting_agent 62 | 63 | while True: 64 | user_input = input("\033[90mUser\033[0m: ") 65 | messages.append({"role": "user", "content": user_input}) 66 | 67 | response = client.run( 68 | agent=agent, 69 | messages=messages, 70 | context_variables=context_variables or {}, 71 | stream=stream, 72 | debug=debug, 73 | ) 74 | 75 | if stream: 76 | response = process_and_print_streaming_response(response) 77 | else: 78 | pretty_print_messages(response.messages) 79 | 80 | 
messages.extend(response.messages) 81 | agent = response.agent 82 | -------------------------------------------------------------------------------- /microagent/types.py: -------------------------------------------------------------------------------- 1 | from typing import List, Callable, Union, Optional, Dict, Any 2 | from pydantic import BaseModel 3 | 4 | # Remove OpenAI-specific imports 5 | # from openai.types.chat import ChatCompletionMessage 6 | # from openai.types.chat.chat_completion_message_tool_call import ( 7 | # ChatCompletionMessageToolCall, 8 | # Function, 9 | # ) 10 | 11 | AgentFunction = Callable[[], Union[str, "Agent", dict]] 12 | 13 | class Function(BaseModel): 14 | arguments: str 15 | name: str 16 | 17 | class ChatCompletionMessageToolCall(BaseModel): 18 | id: str 19 | function: Function 20 | type: str 21 | 22 | class ChatCompletionMessage(BaseModel): 23 | content: Optional[str] 24 | role: str 25 | tool_calls: Optional[List[ChatCompletionMessageToolCall]] 26 | 27 | class Agent(BaseModel): 28 | name: str 29 | instructions: Union[str, Callable[..., str]] 30 | model: str 31 | functions: List[Callable] = [] 32 | tool_choice: Optional[Union[str, Dict[str, Any]]] = None 33 | parallel_tool_calls: bool = True 34 | 35 | class Response(BaseModel): 36 | messages: List[Dict[str, Any]] 37 | agent: Optional[Agent] 38 | context_variables: Dict[str, Any] 39 | class Result(BaseModel): 40 | value: str = "" 41 | agent: Optional[Agent] = None 42 | context_variables: Dict[str, Any] = {} -------------------------------------------------------------------------------- /microagent/util.py: -------------------------------------------------------------------------------- 1 | import inspect 2 | from datetime import datetime 3 | from typing import Dict, Any 4 | 5 | def debug_print(debug: bool, *args: str) -> None: 6 | if not debug: 7 | return 8 | timestamp = datetime.now().strftime("%Y-%m-%d %H:%M:%S") 9 | message = " ".join(map(str, args)) 10 | 
print(f"\033[97m[\033[90m{timestamp}\033[97m]\033[90m {message}\033[0m") 11 | 12 | def merge_fields(target: Dict[str, Any], source: Dict[str, Any]) -> None: 13 | for key, value in source.items(): 14 | if isinstance(value, str): 15 | target[key] += value 16 | elif value is not None and isinstance(value, dict): 17 | merge_fields(target[key], value) 18 | 19 | def merge_chunk(final_response: Dict[str, Any], delta: Dict[str, Any]) -> None: 20 | delta.pop("role", None) 21 | merge_fields(final_response, delta) 22 | 23 | tool_calls = delta.get("tool_calls") 24 | if tool_calls and len(tool_calls) > 0: 25 | index = tool_calls[0].pop("index") 26 | merge_fields(final_response["tool_calls"][index], tool_calls[0]) 27 | 28 | def function_to_json(func) -> dict: 29 | """ 30 | Converts a Python function into a JSON-serializable dictionary 31 | that describes the function's signature, including its name, 32 | description, and parameters. 33 | 34 | Args: 35 | func: The function to be converted. 36 | 37 | Returns: 38 | A dictionary representing the function's signature in JSON format. 
39 | """ 40 | type_map = { 41 | str: "string", 42 | int: "integer", 43 | float: "number", 44 | bool: "boolean", 45 | list: "array", 46 | dict: "object", 47 | type(None): "null", 48 | } 49 | 50 | try: 51 | signature = inspect.signature(func) 52 | except ValueError as e: 53 | raise ValueError( 54 | f"Failed to get signature for function {func.__name__}: {str(e)}" 55 | ) 56 | 57 | parameters = {} 58 | for param in signature.parameters.values(): 59 | try: 60 | param_type = type_map.get(param.annotation, "string") 61 | except KeyError as e: 62 | raise KeyError( 63 | f"Unknown type annotation {param.annotation} for parameter {param.name}: {str(e)}" 64 | ) 65 | parameters[param.name] = {"type": param_type} 66 | 67 | required = [ 68 | param.name 69 | for param in signature.parameters.values() 70 | if param.default == inspect._empty 71 | ] 72 | 73 | return { 74 | "type": "function", 75 | "function": { 76 | "name": func.__name__, 77 | "description": func.__doc__ or "", 78 | "parameters": { 79 | "type": "object", 80 | "properties": parameters, 81 | "required": required, 82 | }, 83 | }, 84 | } 85 | -------------------------------------------------------------------------------- /pyproject.toml: -------------------------------------------------------------------------------- 1 | [build-system] 2 | requires = ["hatchling"] 3 | build-backend = "hatchling.build" 4 | 5 | [project] 6 | name = "microagent" 7 | version = "0.1.0" 8 | description = "A microagent framework for AI interactions" 9 | readme = "README.md" 10 | requires-python = ">=3.7" 11 | license = "MIT" 12 | keywords = ["AI", "agent", "microagent"] 13 | authors = [ 14 | { name = "Chris Latimer", email = "chris.latimer@vectorize.io" }, 15 | ] 16 | classifiers = [ 17 | "Development Status :: 3 - Alpha", 18 | "Intended Audience :: Developers", 19 | "License :: OSI Approved :: MIT License", 20 | "Programming Language :: Python :: 3", 21 | "Programming Language :: Python :: 3.7", 22 | "Programming Language :: Python :: 3.8", 
23 | "Programming Language :: Python :: 3.9", 24 | "Programming Language :: Python :: 3.10", 25 | "Programming Language :: Python :: 3.11", 26 | ] 27 | dependencies = [ 28 | "openai>=1.0.0", 29 | "anthropic>=0.3.0", 30 | "google-generativeai>=0.1.0", 31 | "groq>=0.4.0", 32 | "pydantic>=2.0.0", 33 | ] 34 | 35 | [project.optional-dependencies] 36 | dev = [ 37 | "pytest>=6.0", 38 | "black>=22.0", 39 | "isort>=5.0", 40 | "mypy>=0.900", 41 | "vcrpy>=4.1.1", 42 | ] 43 | 44 | [project.urls] 45 | Homepage = "https://github.com/yourusername/microagent" 46 | Repository = "https://github.com/yourusername/microagent.git" 47 | Issues = "https://github.com/yourusername/microagent/issues" 48 | 49 | [tool.hatch.build.targets.wheel] 50 | packages = ["microagent"] -------------------------------------------------------------------------------- /tests/__init__.py: -------------------------------------------------------------------------------- 1 | # from .core import Microagent 2 | # from .types import Agent 3 | 4 | __all__ = ['Microagent', 'Agent'] -------------------------------------------------------------------------------- /tests/conftest.py: -------------------------------------------------------------------------------- 1 | import sys 2 | from pathlib import Path 3 | import vcr 4 | 5 | def scrub_api_key(request): 6 | if 'Authorization' in request.headers: 7 | request.headers['Authorization'] = 'REDACTED' 8 | return request 9 | 10 | my_vcr = vcr.VCR( 11 | filter_headers=['Authorization'], 12 | before_record_request=scrub_api_key, 13 | ) 14 | 15 | # Add the project root directory to Python's path 16 | project_root = Path(__file__).parent 17 | sys.path.insert(0, str(project_root)) -------------------------------------------------------------------------------- /tests/mock_client.py: -------------------------------------------------------------------------------- 1 | from unittest.mock import Mock 2 | import json 3 | 4 | class MockLLMClient: 5 | def __init__(self, llm_type): 6 | 
self.llm_type = llm_type 7 | self.responses = [] 8 | 9 | def set_response(self, response): 10 | self.responses = [response] 11 | 12 | def set_sequential_responses(self, responses): 13 | self.responses = responses 14 | 15 | def chat_completion(self, **kwargs): 16 | if not self.responses: 17 | raise ValueError("No mock responses set") 18 | response = self.responses.pop(0) 19 | if self.llm_type == 'anthropic': 20 | return response 21 | return {'choices': [{'message': response}]} 22 | 23 | def stream_chat_completion(self, **kwargs): 24 | return self.chat_completion(**kwargs) 25 | 26 | 27 | def create_mock_response(llm_type, message, function_calls=None): 28 | if llm_type == 'anthropic': 29 | response = Mock() 30 | response.content = message['content'] 31 | response.tool_calls = function_calls or [] 32 | return response 33 | else: 34 | response = message 35 | if function_calls: 36 | response['tool_calls'] = function_calls 37 | return response -------------------------------------------------------------------------------- /tests/test_core.py: -------------------------------------------------------------------------------- 1 | import pytest 2 | import vcr 3 | from microagent.core import Microagent 4 | from microagent.types import Agent, Result 5 | 6 | # Configure VCR 7 | my_vcr = vcr.VCR( 8 | cassette_library_dir='tests/fixtures/vcr_cassettes', 9 | record_mode='new_episodes', 10 | match_on=['method', 'scheme', 'host', 'port', 'path', 'query'], 11 | filter_headers=['authorization'], 12 | ) 13 | 14 | LLM_TYPES = ['openai', 'anthropic', 'groq'] 15 | # LLM_TYPES = ['openai'] 16 | 17 | @pytest.fixture(params=LLM_TYPES) 18 | def microagent(request): 19 | llm_type = request.param 20 | model_map = { 21 | 'openai': "gpt-3.5-turbo", 22 | 'anthropic': "claude-3-sonnet-20240229", 23 | 'groq': "llama3-groq-70b-8192-tool-use-preview", 24 | 'gemini': "gemini-pro" 25 | } 26 | return Microagent(llm_type=llm_type), model_map[llm_type], llm_type 27 | 28 | def 
use_vcr_for_llm(test_function): 29 | """Decorator to use VCR with the specific LLM type.""" 30 | def wrapper(microagent, *args, **kwargs): 31 | client, model, llm_type = microagent 32 | cassette_name = f'{test_function.__name__}_{llm_type}.yaml' 33 | with my_vcr.use_cassette(cassette_name): 34 | return test_function(microagent, *args, **kwargs) 35 | return wrapper 36 | 37 | @use_vcr_for_llm 38 | def test_tool_call(microagent): 39 | client, model, _ = microagent 40 | def test_function(arg1, arg2): 41 | """Test function with two args""" 42 | return f"Function called with {arg1} and {arg2}" 43 | agent = Agent(name="Test Agent", instructions="Test instructions", model=model, functions=[test_function]) 44 | messages = [{"role": "user", "content": "Call the test function with arg1=value1 and arg2=value2"}] 45 | response = client.run(agent=agent, messages=messages, max_turns=3) 46 | assert len(response.messages) >= 2 47 | assert any("Function called with value1 and value2" in (msg.get('content') or '') for msg in response.messages) 48 | 49 | @use_vcr_for_llm 50 | def test_multiple_tool_calls(microagent): 51 | client, model, _ = microagent 52 | def function1(arg): 53 | """Function1 with an arg""" 54 | return f"Function 1 called with {arg}" 55 | def function2(arg): 56 | """Function2 with an arg""" 57 | return f"Function 2 called with {arg}" 58 | agent = Agent(name="Test Agent", instructions="Test instructions", model=model, functions=[function1, function2]) 59 | messages = [{"role": "user", "content": "Call both functions with different arguments"}] 60 | response = client.run(agent=agent, messages=messages, max_turns=5) 61 | assert len(response.messages) >= 3 62 | content = ' '.join(msg.get('content') or '' for msg in response.messages) 63 | assert "Function 1 called with" in content 64 | assert "Function 2 called with" in content 65 | 66 | @use_vcr_for_llm 67 | def test_agent_handoff(microagent): 68 | client, model, _ = microagent 69 | agent2 = Agent(name="Agent 2", 
instructions="Agent 2 instructions", model=model) 70 | def handoff_function(): 71 | """handoff Function with no params""" 72 | return agent2 73 | agent1 = Agent(name="Agent 1", instructions="Agent 1 instructions", model=model, functions=[handoff_function]) 74 | messages = [{"role": "user", "content": "call function to handoff to Agent 2"}] 75 | response = client.run(agent=agent1, messages=messages, max_turns=3) 76 | assert response.agent.name == "Agent 2" 77 | 78 | @use_vcr_for_llm 79 | def test_context_variables(microagent): 80 | client, model, _ = microagent 81 | def update_context(key, value): 82 | """Update context function with key and value""" 83 | return Result(value=f"Updated {key} to {value}", context_variables={key: value}) 84 | agent = Agent(name="Test Agent", instructions="Test instructions", model=model, functions=[update_context]) 85 | messages = [{"role": "user", "content": "Update context with key 'test_key' and value 'test_value'"}] 86 | response = client.run(agent=agent, messages=messages) 87 | assert "test_key" in response.context_variables 88 | assert response.context_variables["test_key"] == "test_value" 89 | 90 | @use_vcr_for_llm 91 | def test_max_turns(microagent): 92 | client, model, _ = microagent 93 | def loop_function(): 94 | """Function to start the loop""" 95 | return "Looping" 96 | agent = Agent(name="Test Agent", instructions="Test instructions", model=model, functions=[loop_function]) 97 | messages = [{"role": "user", "content": "Start a loop that calls the loop_function repeatedly"}] 98 | response = client.run(agent=agent, messages=messages, max_turns=3) 99 | assert len(response.messages) <= 7 100 | 101 | @use_vcr_for_llm 102 | def test_execute_tools_false(microagent): 103 | client, model, _ = microagent 104 | def test_function(): 105 | """Function that shouldn't be called""" 106 | return "This should not be called" 107 | agent = Agent(name="Test Agent", instructions="Test instructions", model=model, functions=[test_function]) 108 | 
messages = [{"role": "user", "content": "Call the test_function"}] 109 | response = client.run(agent=agent, messages=messages, execute_tools=False, max_turns=3) 110 | assert len(response.messages) == 1 111 | assert "tool_calls" in response.messages[0] -------------------------------------------------------------------------------- /tests/test_llm/test_anthropic_client.py: -------------------------------------------------------------------------------- 1 | import pytest 2 | import vcr 3 | from anthropic import Anthropic 4 | from microagent.llm.anthropic_client import AnthropicClient 5 | 6 | # VCR configuration 7 | my_vcr = vcr.VCR( 8 | cassette_library_dir='tests/fixtures/vcr_cassettes', 9 | record_mode='once', 10 | match_on=['method', 'scheme', 'host', 'port', 'path', 'query'], 11 | filter_headers=['authorization'], 12 | ) 13 | 14 | @pytest.fixture 15 | def anthropic_client(): 16 | return AnthropicClient() 17 | 18 | @my_vcr.use_cassette('test_anthropic_chat_completion.yaml') 19 | def test_chat_completion(anthropic_client): 20 | messages = [{"role": "user", "content": "Say this is a test"}] 21 | response = anthropic_client.chat_completion(messages) 22 | 23 | assert isinstance(response, dict) 24 | assert "content" in response 25 | assert isinstance(response["content"], list) 26 | assert len(response["content"]) > 0 27 | assert isinstance(response["content"][0].text, str) 28 | 29 | @my_vcr.use_cassette('test_anthropic_stream_chat_completion.yaml') 30 | def test_stream_chat_completion(anthropic_client): 31 | messages = [{"role": "user", "content": "Say this is a test"}] 32 | stream = anthropic_client.stream_chat_completion(messages) 33 | 34 | assert hasattr(stream, '__iter__') 35 | 36 | response_content = "" 37 | for chunk in stream: 38 | if hasattr(chunk, 'delta'): 39 | if hasattr(chunk.delta, 'text'): 40 | response_content += chunk.delta.text 41 | elif hasattr(chunk, 'content'): 42 | for content_block in chunk.content: 43 | if hasattr(content_block, 'text'): 44 | 
response_content += content_block.text 45 | 46 | assert len(response_content) > 0 47 | 48 | 49 | def test_prepare_messages(anthropic_client): 50 | messages = [ 51 | {"role": "user", "content": "Hello"}, 52 | {"role": "assistant", "content": "Hi there!"} 53 | ] 54 | prepared_messages = anthropic_client.prepare_messages(messages) 55 | assert len(prepared_messages) == 2 56 | assert prepared_messages[0]['role'] == 'user' 57 | assert prepared_messages[1]['role'] == 'assistant' 58 | 59 | def test_prepare_tools(anthropic_client): 60 | tools = [{ 61 | "function": { 62 | "name": "test_func", 63 | "description": "A test function", 64 | "parameters": { 65 | "type": "object", 66 | "properties": { 67 | "param1": {"type": "string"} 68 | }, 69 | "required": ["param1"] 70 | } 71 | } 72 | }] 73 | prepared_tools = anthropic_client.prepare_tools(tools) 74 | assert len(prepared_tools) == 1 75 | assert prepared_tools[0]['type'] == 'function' 76 | assert prepared_tools[0]['function']['name'] == 'test_func' 77 | assert 'parameters' in prepared_tools[0]['function'] 78 | 79 | def test_parse_response(anthropic_client): 80 | class MockResponse: 81 | content = "This is a test response" 82 | tool_calls = None 83 | 84 | response = MockResponse() 85 | parsed_response = anthropic_client.parse_response(response) 86 | 87 | assert isinstance(parsed_response, dict) 88 | assert parsed_response['role'] == 'assistant' 89 | assert parsed_response['content'] == "This is a test response" 90 | assert parsed_response['tool_calls'] is None -------------------------------------------------------------------------------- /tests/test_llm/test_groq_client.py: -------------------------------------------------------------------------------- 1 | import pytest 2 | import vcr 3 | from microagent.llm.groq_client import GroqClient 4 | 5 | my_vcr = vcr.VCR( 6 | cassette_library_dir='tests/fixtures/vcr_cassettes', 7 | record_mode='once', 8 | match_on=['method', 'scheme', 'host', 'port', 'path', 'query'], 9 | 
filter_headers=['authorization'], 10 | ) 11 | 12 | @pytest.fixture 13 | def groq_client(): 14 | return GroqClient() 15 | 16 | @my_vcr.use_cassette('test_groq_chat_completion.yaml') 17 | def test_chat_completion(groq_client): 18 | messages = [{"role": "user", "content": "Say this is a test"}] 19 | response = groq_client.chat_completion(messages, model="llama3-groq-70b-8192-tool-use-preview") 20 | 21 | assert isinstance(response, dict) 22 | assert "content" in response 23 | assert isinstance(response["content"], str) 24 | assert len(response["content"]) > 0 25 | 26 | @my_vcr.use_cassette('test_groq_stream_chat_completion.yaml') 27 | def test_stream_chat_completion(groq_client): 28 | messages = [{"role": "user", "content": "Say this is a test"}] 29 | stream = groq_client.stream_chat_completion(messages, model="llama3-groq-70b-8192-tool-use-preview") 30 | 31 | assert hasattr(stream, '__iter__') 32 | 33 | response_content = "" 34 | for chunk in stream: 35 | if chunk.choices[0].delta.content: 36 | response_content += chunk.choices[0].delta.content 37 | 38 | assert len(response_content) > 0 39 | 40 | def test_prepare_messages(groq_client): 41 | messages = [ 42 | {"role": "system", "content": "You are a helpful assistant."}, 43 | {"role": "user", "content": "Hello", "sender": "John", "tool_name": "chat"}, 44 | {"role": "assistant", "content": "Hi there!", "tool_name": "chat"} 45 | ] 46 | prepared_messages = groq_client.prepare_messages(messages) 47 | assert len(prepared_messages) == 3 48 | assert all('sender' not in msg for msg in prepared_messages) 49 | assert all('tool_name' not in msg for msg in prepared_messages) 50 | assert prepared_messages[1]['role'] == 'user' 51 | assert prepared_messages[1]['content'] == 'Hello' 52 | 53 | def test_prepare_tools(groq_client): 54 | tools = [{ 55 | "type": "function", 56 | "function": { 57 | "name": "test_func", 58 | "description": "A test function", 59 | "parameters": { 60 | "type": "object", 61 | "properties": { 62 | "param1": 
{"type": "string"} 63 | }, 64 | "required": ["param1"] 65 | } 66 | } 67 | }] 68 | prepared_tools = groq_client.prepare_tools(tools) 69 | assert prepared_tools == tools 70 | 71 | def test_parse_response(groq_client): 72 | class MockChoice: 73 | class MockMessage: 74 | role = "assistant" 75 | content = "This is a test response" 76 | tool_calls = None 77 | message = MockMessage() 78 | 79 | class MockResponse: 80 | choices = [MockChoice()] 81 | 82 | response = MockResponse() 83 | parsed_response = groq_client.parse_response(response) 84 | 85 | assert isinstance(parsed_response, dict) 86 | assert parsed_response['role'] == 'assistant' 87 | assert parsed_response['content'] == "This is a test response" 88 | assert parsed_response['tool_calls'] is None 89 | 90 | def test_parse_response_with_tool_calls(groq_client): 91 | class MockToolCall: 92 | id = "call_1" 93 | type = "function" 94 | function = {"name": "test_func", "arguments": '{"param1": "value1"}'} 95 | 96 | class MockChoice: 97 | class MockMessage: 98 | role = "assistant" 99 | content = None 100 | tool_calls = [MockToolCall()] 101 | message = MockMessage() 102 | 103 | class MockResponse: 104 | choices = [MockChoice()] 105 | 106 | response = MockResponse() 107 | parsed_response = groq_client.parse_response(response) 108 | 109 | assert isinstance(parsed_response, dict) 110 | assert parsed_response['role'] == 'assistant' 111 | assert parsed_response['content'] is None 112 | assert parsed_response['tool_calls'] is not None 113 | assert parsed_response['tool_calls'][0].id == "call_1" 114 | assert parsed_response['tool_calls'][0].function['name'] == "test_func" 115 | assert parsed_response['tool_calls'][0].function['arguments'] == '{"param1": "value1"}' -------------------------------------------------------------------------------- /tests/test_llm/test_openai_client.py: -------------------------------------------------------------------------------- 1 | import pytest 2 | import vcr 3 | from microagent.llm.openai_client 
import OpenAIClient 4 | 5 | my_vcr = vcr.VCR( 6 | cassette_library_dir='tests/fixtures/vcr_cassettes', 7 | record_mode='once', 8 | match_on=['method', 'scheme', 'host', 'port', 'path', 'query'], 9 | filter_headers=['authorization'], 10 | ) 11 | 12 | @pytest.fixture 13 | def openai_client(): 14 | return OpenAIClient() 15 | 16 | @my_vcr.use_cassette('test_chat_completion.yaml') 17 | def test_chat_completion(openai_client): 18 | messages = [{"role": "user", "content": "Say this is a test"}] 19 | response = openai_client.chat_completion(messages, model="gpt-3.5-turbo") 20 | 21 | assert isinstance(response, dict) 22 | assert "content" in response 23 | assert isinstance(response["content"], str) 24 | assert len(response["content"]) > 0 25 | 26 | @my_vcr.use_cassette('test_stream_chat_completion.yaml') 27 | def test_stream_chat_completion(openai_client): 28 | messages = [{"role": "user", "content": "Say this is a test"}] 29 | stream = openai_client.stream_chat_completion(messages, model="gpt-3.5-turbo") 30 | 31 | assert hasattr(stream, '__iter__') 32 | 33 | response_content = "" 34 | for chunk in stream: 35 | if chunk.choices[0].delta.content is not None: 36 | response_content += chunk.choices[0].delta.content 37 | 38 | assert len(response_content) > 0 39 | 40 | def test_prepare_messages(openai_client): 41 | messages = [{"role": "user", "content": "Hello"}] 42 | assert openai_client.prepare_messages(messages) == messages 43 | 44 | def test_prepare_tools(openai_client): 45 | tools = [{"function": {"name": "test_func"}}] 46 | assert openai_client.prepare_tools(tools) == tools 47 | 48 | @my_vcr.use_cassette('test_parse_response.yaml') 49 | def test_parse_response(openai_client): 50 | messages = [{"role": "user", "content": "Say this is a test"}] 51 | response = openai_client.chat_completion(messages, model="gpt-3.5-turbo") 52 | 53 | assert isinstance(response, dict) 54 | assert "content" in response 55 | assert isinstance(response["content"], str) 56 | assert 
len(response["content"]) > 0 57 | 58 | class MockResponse: 59 | class MockChoice: 60 | class MockMessage: 61 | role = "assistant" 62 | content = "Mocked response" 63 | tool_calls = None 64 | message = MockMessage() 65 | choices = [MockChoice()] 66 | 67 | mock_response = MockResponse() 68 | parsed_response = openai_client.parse_response(mock_response) 69 | assert parsed_response == {"role": "assistant", "content": "Mocked response", "tool_calls": []} -------------------------------------------------------------------------------- /tests/test_util.py: -------------------------------------------------------------------------------- 1 | from microagent.util import function_to_json 2 | 3 | def test_basic_function(): 4 | def basic_function(arg1, arg2): 5 | return arg1 + arg2 6 | result = function_to_json(basic_function) 7 | assert result == { 8 | "type": "function", 9 | "function": { 10 | "name": "basic_function", 11 | "description": "", 12 | "parameters": { 13 | "type": "object", 14 | "properties": { 15 | "arg1": {"type": "string"}, 16 | "arg2": {"type": "string"}, 17 | }, 18 | "required": ["arg1", "arg2"], 19 | }, 20 | }, 21 | } 22 | 23 | def test_complex_function(): 24 | def complex_function_with_types_and_descriptions( 25 | arg1: int, arg2: str, arg3: float = 3.14, arg4: bool = False 26 | ): 27 | """This is a complex function with a docstring.""" 28 | pass 29 | result = function_to_json(complex_function_with_types_and_descriptions) 30 | assert result == { 31 | "type": "function", 32 | "function": { 33 | "name": "complex_function_with_types_and_descriptions", 34 | "description": "This is a complex function with a docstring.", 35 | "parameters": { 36 | "type": "object", 37 | "properties": { 38 | "arg1": {"type": "integer"}, 39 | "arg2": {"type": "string"}, 40 | "arg3": {"type": "number"}, 41 | "arg4": {"type": "boolean"}, 42 | }, 43 | "required": ["arg1", "arg2"], 44 | }, 45 | }, 46 | } --------------------------------------------------------------------------------