├── .gitignore ├── README.md ├── agent.py ├── mcp_client ├── __init__.py ├── agent_tools.py ├── server.py └── util.py └── requirements.txt /.gitignore: -------------------------------------------------------------------------------- 1 | .env 2 | venv/ 3 | __pycache__/ -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | # LiveKit Agent with MCP Tools 2 | 3 | A voice assistant application built using the LiveKit Agents framework, capable of using Multimodal Control Protocol (MCP) tools to interact with external services. 4 | 5 | ## Features 6 | 7 | - Voice-based interaction with a helpful AI assistant 8 | - Integration with MCP tools from external servers 9 | - Speech-to-text using Deepgram 10 | - Natural language processing using OpenAI's GPT-4o 11 | - Text-to-speech using OpenAI 12 | - Voice activity detection using Silero 13 | 14 | ## Prerequisites 15 | 16 | - Python 3.9+ 17 | - API keys for OpenAI and Deepgram 18 | - MCP server endpoint 19 | 20 | ## Installation 21 | 22 | 1. Clone this repository: 23 | ``` 24 | git clone https://github.com/livekit-examples/basic-mcp.git 25 | cd basic-mcp 26 | ``` 27 | 28 | 2. Install the required packages: 29 | ``` 30 | pip install -r requirements.txt 31 | ``` 32 | 33 | 3. Create a `.env` file with your API keys and configuration: 34 | ``` 35 | OPENAI_API_KEY=your_openai_api_key 36 | DEEPGRAM_API_KEY=your_deepgram_api_key 37 | ZAPIER_MCP_URL=your_mcp_server_url 38 | ``` 39 | 40 | ## Usage 41 | 42 | Run the agent with the LiveKit CLI: 43 | 44 | ``` 45 | python agent.py console 46 | ``` 47 | 48 | The agent will connect to the specified LiveKit room and start listening for voice commands. 49 | 50 | ## Project Structure 51 | 52 | - `agent.py`: Main agent implementation and entrypoint 53 | - `mcp_client/`: Package for MCP server integration 54 | - `server.py`: MCP server connection handlers 55 | - `agent_tools.py`: Integration of MCP tools with LiveKit agents 56 | - `util.py`: Utility functions for MCP client 57 | 58 | ## Acknowledgements 59 | 60 | - [LiveKit](https://livekit.io/) for the underlying real-time communication infrastructure 61 | - [OpenAI](https://openai.com/) for GPT-4o and text-to-speech 62 | - [Deepgram](https://deepgram.com/) for speech-to-text 63 | - [Silero](https://github.com/snakers4/silero-vad) for Voice Activity Detection -------------------------------------------------------------------------------- /agent.py: -------------------------------------------------------------------------------- 1 | import os 2 | import logging 3 | from pathlib import Path 4 | from dotenv import load_dotenv 5 | from livekit.agents import JobContext, WorkerOptions, cli 6 | from livekit.agents.llm import function_tool, ChatChunk 7 | from livekit.agents.voice import Agent, AgentSession 8 | from livekit.plugins import deepgram, openai, silero 9 | from livekit.agents import JobContext, WorkerOptions, cli, Agent, AgentSession 10 | from livekit.plugins import deepgram, openai, silero 11 | 12 | from mcp_client import MCPServerSse 13 | from mcp_client.agent_tools import MCPToolsIntegration 14 | 15 | load_dotenv() 16 | 17 | class FunctionAgent(Agent): 18 | """A LiveKit agent that uses MCP tools from one or more MCP servers.""" 19 | 20 | def __init__(self): 21 | super().__init__( 22 | instructions=""" 23 | You are a helpful assistant communicating through voice. 24 | Use the available MCP tools to answer questions. 25 | """, 26 | stt=deepgram.STT(), 27 | llm=openai.LLM(model="gpt-4o"), 28 | tts=openai.TTS(), 29 | vad=silero.VAD.load(), 30 | allow_interruptions=True 31 | ) 32 | 33 | async def llm_node(self, chat_ctx, tools, model_settings): 34 | """Override the llm_node to say a message when a tool call is detected.""" 35 | activity = self._activity 36 | tool_call_detected = False 37 | 38 | # Get the original response from the parent class 39 | async for chunk in super().llm_node(chat_ctx, tools, model_settings): 40 | # Check if this chunk contains a tool call 41 | if isinstance(chunk, ChatChunk) and chunk.delta and chunk.delta.tool_calls and not tool_call_detected: 42 | # Say the checking message only once when we detect the first tool call 43 | tool_call_detected = True 44 | activity.agent.say("Sure, I'll check that for you.") 45 | 46 | yield chunk 47 | 48 | async def entrypoint(ctx: JobContext): 49 | """Main entrypoint for the LiveKit agent application.""" 50 | mcp_server = MCPServerSse( 51 | params={"url": os.environ.get("ZAPIER_MCP_URL")}, 52 | cache_tools_list=True, 53 | name="SSE MCP Server" 54 | ) 55 | 56 | agent = await MCPToolsIntegration.create_agent_with_tools( 57 | agent_class=FunctionAgent, 58 | mcp_servers=[mcp_server] 59 | ) 60 | 61 | await ctx.connect() 62 | 63 | session = AgentSession() 64 | await session.start(agent=agent, room=ctx.room) 65 | 66 | if __name__ == "__main__": 67 | cli.run_app(WorkerOptions(entrypoint_fnc=entrypoint)) 68 | -------------------------------------------------------------------------------- /mcp_client/__init__.py: -------------------------------------------------------------------------------- 1 | from .server import MCPServer, MCPServerSse, MCPServerStdio, MCPServerSseParams, MCPServerStdioParams -------------------------------------------------------------------------------- /mcp_client/agent_tools.py: -------------------------------------------------------------------------------- 1 | import asyncio 2 | import logging 3 | import json 4 | import inspect 5 | import typing 6 | from typing import Any, List, Dict, Callable, Optional, Awaitable, Sequence, Tuple, Type, Union, cast 7 | from uuid import uuid4 8 | 9 | # Import from the MCP module 10 | from .util import MCPUtil, FunctionTool 11 | from .server import MCPServer, MCPServerSse 12 | from livekit.agents import ChatContext, AgentSession, JobContext, FunctionTool as Tool 13 | from mcp import CallToolRequest 14 | 15 | logger = logging.getLogger("mcp-agent-tools") 16 | 17 | class MCPToolsIntegration: 18 | """ 19 | Helper class for integrating MCP tools with LiveKit agents. 20 | Provides utilities for registering dynamic tools from MCP servers. 21 | """ 22 | 23 | @staticmethod 24 | async def prepare_dynamic_tools(mcp_servers: List[MCPServer], 25 | convert_schemas_to_strict: bool = True, 26 | auto_connect: bool = True) -> List[Callable]: 27 | """ 28 | Fetches tools from multiple MCP servers and prepares them for use with LiveKit agents. 29 | 30 | Args: 31 | mcp_servers: List of MCPServer instances 32 | convert_schemas_to_strict: Whether to convert JSON schemas to strict format 33 | auto_connect: Whether to automatically connect to servers if they're not connected 34 | 35 | Returns: 36 | List of decorated tool functions ready to be added to a LiveKit agent 37 | """ 38 | prepared_tools = [] 39 | 40 | # Ensure all servers are connected if auto_connect is True 41 | if auto_connect: 42 | for server in mcp_servers: 43 | if not getattr(server, 'connected', False): 44 | try: 45 | logger.debug(f"Auto-connecting to MCP server: {server.name}") 46 | await server.connect() 47 | except Exception as e: 48 | logger.error(f"Failed to connect to MCP server {server.name}: {e}") 49 | 50 | # Process each server 51 | for server in mcp_servers: 52 | logger.info(f"Fetching tools from MCP server: {server.name}") 53 | try: 54 | mcp_tools = await MCPUtil.get_function_tools( 55 | server, convert_schemas_to_strict=convert_schemas_to_strict 56 | ) 57 | logger.info(f"Received {len(mcp_tools)} tools from {server.name}") 58 | except Exception as e: 59 | logger.error(f"Failed to fetch tools from {server.name}: {e}") 60 | continue 61 | 62 | # Process each tool from this server 63 | for tool_instance in mcp_tools: 64 | try: 65 | decorated_tool = MCPToolsIntegration._create_decorated_tool(tool_instance) 66 | prepared_tools.append(decorated_tool) 67 | logger.debug(f"Successfully prepared tool: {tool_instance.name}") 68 | except Exception as e: 69 | logger.error(f"Failed to prepare tool '{tool_instance.name}': {e}") 70 | 71 | return prepared_tools 72 | 73 | @staticmethod 74 | def _create_decorated_tool(tool: FunctionTool) -> Callable: 75 | """ 76 | Creates a decorated function for a single MCP tool that can be used with LiveKit agents. 77 | 78 | Args: 79 | tool: The FunctionTool instance to convert 80 | 81 | Returns: 82 | A decorated async function that can be added to a LiveKit agent's tools 83 | """ 84 | # Get function_tool decorator from LiveKit 85 | # Import locally to avoid circular imports 86 | from livekit.agents.llm import function_tool 87 | 88 | # Create parameters list from JSON schema 89 | params = [] 90 | annotations = {} 91 | schema_props = tool.params_json_schema.get("properties", {}) 92 | schema_required = set(tool.params_json_schema.get("required", [])) 93 | type_map = { 94 | "string": str, "integer": int, "number": float, 95 | "boolean": bool, "array": list, "object": dict, 96 | } 97 | 98 | # Build parameters from the schema properties 99 | for p_name, p_details in schema_props.items(): 100 | json_type = p_details.get("type", "string") 101 | py_type = type_map.get(json_type, typing.Any) 102 | annotations[p_name] = py_type 103 | 104 | # Use inspect.Parameter.empty for required params, None otherwise 105 | default = inspect.Parameter.empty if p_name in schema_required else p_details.get("default", None) 106 | params.append(inspect.Parameter( 107 | name=p_name, 108 | kind=inspect.Parameter.KEYWORD_ONLY, 109 | annotation=py_type, 110 | default=default 111 | )) 112 | 113 | # Define the actual function that will be called by the agent 114 | async def tool_impl(**kwargs): 115 | input_json = json.dumps(kwargs) 116 | logger.info(f"Invoking tool '{tool.name}' with args: {kwargs}") 117 | result_str = await tool.on_invoke_tool(None, input_json) 118 | logger.info(f"Tool '{tool.name}' result: {result_str}") 119 | return result_str 120 | 121 | # Set function metadata 122 | tool_impl.__signature__ = inspect.Signature(parameters=params) 123 | tool_impl.__name__ = tool.name 124 | tool_impl.__doc__ = tool.description 125 | tool_impl.__annotations__ = {'return': str, **annotations} 126 | 127 | # Apply the decorator and return 128 | return function_tool()(tool_impl) 129 | 130 | @staticmethod 131 | async def register_with_agent(agent, mcp_servers: List[MCPServer], 132 | convert_schemas_to_strict: bool = True, 133 | auto_connect: bool = True) -> List[Callable]: 134 | """ 135 | Helper method to prepare and register MCP tools with a LiveKit agent. 136 | 137 | Args: 138 | agent: The LiveKit agent instance 139 | mcp_servers: List of MCPServer instances 140 | convert_schemas_to_strict: Whether to convert schemas to strict format 141 | auto_connect: Whether to auto-connect to servers 142 | 143 | Returns: 144 | List of tool functions that were registered 145 | """ 146 | # Prepare the dynamic tools 147 | tools = await MCPToolsIntegration.prepare_dynamic_tools( 148 | mcp_servers, 149 | convert_schemas_to_strict=convert_schemas_to_strict, 150 | auto_connect=auto_connect 151 | ) 152 | 153 | # Register with the agent 154 | if hasattr(agent, '_tools') and isinstance(agent._tools, list): 155 | agent._tools.extend(tools) 156 | logger.info(f"Registered {len(tools)} MCP tools with agent") 157 | 158 | # Log the names of registered tools 159 | if tools: 160 | tool_names = [getattr(t, '__name__', 'unknown') for t in tools] 161 | logger.info(f"Registered tool names: {tool_names}") 162 | else: 163 | logger.warning("Agent does not have a '_tools' attribute, tools were not registered") 164 | 165 | return tools 166 | 167 | @staticmethod 168 | async def create_agent_with_tools(agent_class, mcp_servers: List[MCPServer], agent_kwargs: Dict = None, 169 | convert_schemas_to_strict: bool = True) -> Any: 170 | """ 171 | Factory method to create and initialize an agent with MCP tools already loaded. 172 | 173 | Args: 174 | agent_class: Agent class to instantiate 175 | mcp_servers: List of MCP servers to register with the agent 176 | agent_kwargs: Additional keyword arguments to pass to the agent constructor 177 | convert_schemas_to_strict: Whether to convert JSON schemas to strict format 178 | 179 | Returns: 180 | An initialized agent instance with MCP tools registered 181 | """ 182 | # Connect to MCP servers 183 | for server in mcp_servers: 184 | if not getattr(server, 'connected', False): 185 | try: 186 | logger.debug(f"Connecting to MCP server: {server.name}") 187 | await server.connect() 188 | except Exception as e: 189 | logger.error(f"Failed to connect to MCP server {server.name}: {e}") 190 | 191 | # Create agent instance 192 | agent_kwargs = agent_kwargs or {} 193 | agent = agent_class(**agent_kwargs) 194 | 195 | # Prepare tools 196 | tools = await MCPToolsIntegration.prepare_dynamic_tools( 197 | mcp_servers, 198 | convert_schemas_to_strict=convert_schemas_to_strict, 199 | auto_connect=False # Already connected above 200 | ) 201 | 202 | # Register tools with agent 203 | if tools and hasattr(agent, '_tools') and isinstance(agent._tools, list): 204 | agent._tools.extend(tools) 205 | logger.info(f"Registered {len(tools)} MCP tools with agent") 206 | 207 | # Log the names of registered tools 208 | tool_names = [getattr(t, '__name__', 'unknown') for t in tools] 209 | logger.info(f"Registered tool names: {tool_names}") 210 | else: 211 | if not tools: 212 | logger.warning("No tools were found to register with the agent") 213 | else: 214 | logger.warning("Agent does not have a '_tools' attribute, tools were not registered") 215 | 216 | return agent 217 | -------------------------------------------------------------------------------- /mcp_client/server.py: -------------------------------------------------------------------------------- 1 | import asyncio 2 | from contextlib import AbstractAsyncContextManager, AsyncExitStack 3 | from typing import Any, Dict, List, Optional, Tuple 4 | import logging 5 | 6 | # Import from the installed mcp package 7 | from anyio.streams.memory import MemoryObjectReceiveStream, MemoryObjectSendStream 8 | import mcp.types 9 | from mcp.types import CallToolResult, JSONRPCMessage, Tool as MCPTool 10 | from mcp.client.sse import sse_client 11 | from mcp.client.session import ClientSession 12 | 13 | # Base class for MCP servers 14 | class MCPServer: 15 | async def connect(self): 16 | """Connect to the server.""" 17 | raise NotImplementedError 18 | 19 | @property 20 | def name(self) -> str: 21 | """A readable name for the server.""" 22 | raise NotImplementedError 23 | 24 | async def list_tools(self) -> List[MCPTool]: 25 | """List the tools available on the server.""" 26 | raise NotImplementedError 27 | 28 | async def call_tool(self, tool_name: str, arguments: Optional[Dict[str, Any]] = None) -> CallToolResult: 29 | """Invoke a tool on the server.""" 30 | raise NotImplementedError 31 | 32 | async def cleanup(self): 33 | """Cleanup the server.""" 34 | raise NotImplementedError 35 | 36 | # Base class for MCP servers that use a ClientSession 37 | class _MCPServerWithClientSession(MCPServer): 38 | """Base class for MCP servers that use a ClientSession to communicate with the server.""" 39 | 40 | def __init__(self, cache_tools_list: bool): 41 | """ 42 | Args: 43 | cache_tools_list: Whether to cache the tools list. If True, the tools list will be 44 | cached and only fetched from the server once. If False, the tools list will be 45 | fetched from the server on each call to list_tools(). You should set this to True 46 | if you know the server will not change its tools list, because it can drastically 47 | improve latency. 48 | """ 49 | self.session: Optional[ClientSession] = None 50 | self.exit_stack: AsyncExitStack = AsyncExitStack() 51 | self._cleanup_lock: asyncio.Lock = asyncio.Lock() 52 | self.cache_tools_list = cache_tools_list 53 | 54 | # The cache is always dirty at startup, so that we fetch tools at least once 55 | self._cache_dirty = True 56 | self._tools_list: Optional[List[MCPTool]] = None 57 | self.logger = logging.getLogger(__name__) 58 | 59 | def create_streams( 60 | self, 61 | ) -> AbstractAsyncContextManager[ 62 | Tuple[ 63 | MemoryObjectReceiveStream[JSONRPCMessage | Exception], 64 | MemoryObjectSendStream[JSONRPCMessage], 65 | ] 66 | ]: 67 | """Create the streams for the server.""" 68 | raise NotImplementedError 69 | 70 | async def __aenter__(self): 71 | await self.connect() 72 | return self 73 | 74 | async def __aexit__(self, exc_type, exc_value, traceback): 75 | await self.cleanup() 76 | 77 | def invalidate_tools_cache(self): 78 | """Invalidate the tools cache.""" 79 | self._cache_dirty = True 80 | 81 | async def connect(self): 82 | """Connect to the server.""" 83 | try: 84 | transport = await self.exit_stack.enter_async_context(self.create_streams()) 85 | read, write = transport 86 | session = await self.exit_stack.enter_async_context(ClientSession(read, write)) 87 | await session.initialize() 88 | self.session = session 89 | self.logger.info(f"Connected to MCP server: {self.name}") 90 | except Exception as e: 91 | self.logger.error(f"Error initializing MCP server: {e}") 92 | await self.cleanup() 93 | raise 94 | 95 | async def list_tools(self) -> List[MCPTool]: 96 | """List the tools available on the server.""" 97 | if not self.session: 98 | raise RuntimeError("Server not initialized. Make sure you call connect() first.") 99 | 100 | # Return from cache if caching is enabled, we have tools, and the cache is not dirty 101 | if self.cache_tools_list and not self._cache_dirty and self._tools_list: 102 | return self._tools_list 103 | 104 | # Reset the cache dirty to False 105 | self._cache_dirty = False 106 | 107 | try: 108 | # Fetch the tools from the server 109 | result = await self.session.list_tools() 110 | self._tools_list = result.tools 111 | return self._tools_list 112 | except Exception as e: 113 | self.logger.error(f"Error listing tools: {e}") 114 | raise 115 | 116 | async def call_tool(self, tool_name: str, arguments: Optional[Dict[str, Any]] = None) -> CallToolResult: 117 | """Invoke a tool on the server.""" 118 | if not self.session: 119 | raise RuntimeError("Server not initialized. Make sure you call connect() first.") 120 | 121 | arguments = arguments or {} 122 | try: 123 | return await self.session.call_tool(tool_name, arguments) 124 | except Exception as e: 125 | self.logger.error(f"Error calling tool {tool_name}: {e}") 126 | raise 127 | 128 | async def cleanup(self): 129 | """Cleanup the server.""" 130 | async with self._cleanup_lock: 131 | try: 132 | await self.exit_stack.aclose() 133 | self.session = None 134 | self.logger.info(f"Cleaned up MCP server: {self.name}") 135 | except Exception as e: 136 | self.logger.error(f"Error cleaning up server: {e}") 137 | 138 | # Define parameter types for clarity 139 | MCPServerSseParams = Dict[str, Any] 140 | MCPServerStdioParams = Dict[str, Any] 141 | 142 | # SSE server implementation 143 | class MCPServerSse(_MCPServerWithClientSession): 144 | """MCP server implementation that uses the HTTP with SSE transport.""" 145 | 146 | def __init__( 147 | self, 148 | params: MCPServerSseParams, 149 | cache_tools_list: bool = False, 150 | name: Optional[str] = None, 151 | ): 152 | """Create a new MCP server based on the HTTP with SSE transport. 153 | 154 | Args: 155 | params: The params that configure the server including the URL, headers, 156 | timeout, and SSE read timeout. 157 | cache_tools_list: Whether to cache the tools list. 158 | name: A readable name for the server. 159 | """ 160 | super().__init__(cache_tools_list) 161 | self.params = params 162 | self._name = name or f"SSE Server at {self.params.get('url', 'unknown')}" 163 | 164 | def create_streams( 165 | self, 166 | ) -> AbstractAsyncContextManager[ 167 | Tuple[ 168 | MemoryObjectReceiveStream[JSONRPCMessage | Exception], 169 | MemoryObjectSendStream[JSONRPCMessage], 170 | ] 171 | ]: 172 | """Create the streams for the server.""" 173 | return sse_client( 174 | url=self.params["url"], 175 | headers=self.params.get("headers"), 176 | timeout=self.params.get("timeout", 5), 177 | sse_read_timeout=self.params.get("sse_read_timeout", 60 * 5), 178 | ) 179 | 180 | @property 181 | def name(self) -> str: 182 | """A readable name for the server.""" 183 | return self._name 184 | 185 | # Stdio server implementation 186 | class MCPServerStdio(MCPServer): 187 | """An example (minimal) Stdio server implementation.""" 188 | 189 | def __init__(self, params: MCPServerStdioParams, cache_tools_list: bool = False, name: Optional[str] = None): 190 | self.params = params 191 | self.cache_tools_list = cache_tools_list 192 | self._tools_cache: Optional[List[MCPTool]] = None 193 | self._name = name or f"Stdio Server: {self.params.get('command', 'unknown')}" 194 | self.connected = False 195 | self.logger = logging.getLogger(__name__) 196 | 197 | @property 198 | def name(self) -> str: 199 | return self._name 200 | 201 | async def connect(self): 202 | await asyncio.sleep(0.5) 203 | self.connected = True 204 | self.logger.info(f"Connected to MCP Stdio server: {self.name}") 205 | 206 | async def list_tools(self) -> List[MCPTool]: 207 | if self.cache_tools_list and self._tools_cache is not None: 208 | return self._tools_cache 209 | # For demonstration, return an empty list or similar static tools. 210 | tools: List[MCPTool] = [] 211 | if self.cache_tools_list: 212 | self._tools_cache = tools 213 | return tools 214 | 215 | async def call_tool(self, tool_name: str, arguments: Optional[Dict[str, Any]] = None) -> Dict[str, Any]: 216 | return {"content": [f"Called {tool_name} with args {arguments} via Stdio"]} 217 | 218 | async def cleanup(self): 219 | self.connected = False 220 | self.logger.info(f"Cleaned up MCP Stdio server: {self.name}") -------------------------------------------------------------------------------- /mcp_client/util.py: -------------------------------------------------------------------------------- 1 | import asyncio 2 | import json 3 | import functools 4 | from typing import Any, Dict, List 5 | 6 | # Import from mcp libraries 7 | from mcp.types import Tool as MCPTool, CallToolResult 8 | from .server import MCPServer 9 | 10 | # A minimal FunctionTool class used by the agent. 11 | class FunctionTool: 12 | def __init__(self, name: str, description: str, params_json_schema: Dict[str, Any], on_invoke_tool, strict_json_schema: bool = False): 13 | self.name = name 14 | self.description = description 15 | self.params_json_schema = params_json_schema 16 | self.on_invoke_tool = on_invoke_tool # This should be an async function. 17 | self.strict_json_schema = strict_json_schema 18 | 19 | def __repr__(self): 20 | return f"FunctionTool(name={self.name})" 21 | 22 | class MCPUtil: 23 | @classmethod 24 | async def get_function_tools(cls, server, convert_schemas_to_strict: bool) -> List[FunctionTool]: 25 | tools = await server.list_tools() 26 | function_tools = [] 27 | for tool in tools: 28 | ft = cls.to_function_tool(tool, server, convert_schemas_to_strict) 29 | function_tools.append(ft) 30 | return function_tools 31 | 32 | @classmethod 33 | def to_function_tool(cls, tool, server, convert_schemas_to_strict: bool) -> FunctionTool: 34 | # In a more complete implementation, you might convert the JSON schema into a strict version. 35 | schema = tool.inputSchema 36 | 37 | # Use a default argument to capture the current tool correctly in the closure 38 | async def invoke_tool(context: Any, input_json: str, current_tool_name=tool.name) -> str: 39 | try: 40 | arguments = json.loads(input_json) if input_json else {} 41 | except Exception as e: 42 | # Return error message as string 43 | return f"Error parsing input JSON for tool '{current_tool_name}': {e}" 44 | try: 45 | result = await server.call_tool(current_tool_name, arguments) 46 | # Ensure the final return value is a string 47 | if "content" in result and isinstance(result["content"], list) and len(result["content"]) >= 1: 48 | # Handle single or multiple content items - convert to string 49 | if len(result["content"]) == 1: 50 | content_item = result["content"][0] 51 | # Convert simple types explicitly to string 52 | if isinstance(content_item, (str, int, float, bool)): 53 | return str(content_item) 54 | # Convert complex types (like dict, list) to JSON string 55 | else: 56 | try: 57 | return json.dumps(content_item) 58 | except TypeError: 59 | return str(content_item) # Fallback to default string representation 60 | else: 61 | # Multiple content items, return as JSON array string 62 | try: 63 | return json.dumps(result["content"]) 64 | except TypeError: 65 | return str(result["content"]) # Fallback 66 | else: 67 | # If 'content' is missing, not a list, or empty, return string representation of the whole result 68 | try: 69 | return json.dumps(result) 70 | except TypeError: 71 | return str(result) # Fallback 72 | except Exception as e: 73 | # Catch errors during tool call itself 74 | return f"Error calling tool '{current_tool_name}': {e}" 75 | 76 | return FunctionTool( 77 | name=tool.name, 78 | description=tool.description, 79 | params_json_schema=schema, 80 | on_invoke_tool=invoke_tool, 81 | strict_json_schema=convert_schemas_to_strict, 82 | ) -------------------------------------------------------------------------------- /requirements.txt: -------------------------------------------------------------------------------- 1 | livekit-agents>=1.0.0rc7 2 | livekit-plugins-openai>=1.0.0rc7 3 | livekit-plugins-silero>=1.0.0rc7 4 | livekit-plugins-deepgram>=1.0.0rc7 5 | mcp 6 | python-dotenv --------------------------------------------------------------------------------