├── .env.example ├── requirements.txt ├── servers_config.json ├── .gitignore ├── .github └── FUNDING.yml ├── LICENSE ├── README.MD └── main.py /.env.example: -------------------------------------------------------------------------------- 1 | GROQ_API_KEY=gsk_1234567890 2 | GITHUB_API_KEY=github_pat_11223344556677889900 -------------------------------------------------------------------------------- /requirements.txt: -------------------------------------------------------------------------------- 1 | python-dotenv>=1.0.0 2 | requests>=2.31.0 3 | mcp>=1.0.0 4 | uvicorn>=0.32.1 -------------------------------------------------------------------------------- /servers_config.json: -------------------------------------------------------------------------------- 1 | { 2 | "mcpServers": { 3 | "sqlite": { 4 | "command": "uvx", 5 | "args": ["mcp-server-sqlite", "--db-path", "./test.db"] 6 | }, 7 | "puppeteer": { 8 | "command": "npx", 9 | "args": ["-y", "@modelcontextprotocol/server-puppeteer"] 10 | } 11 | } 12 | } -------------------------------------------------------------------------------- /.gitignore: -------------------------------------------------------------------------------- 1 | # Python 2 | __pycache__/ 3 | *.py[cod] 4 | *$py.class 5 | *.so 6 | .Python 7 | build/ 8 | versions/ 9 | docs/ 10 | develop-eggs/ 11 | dist/ 12 | downloads/ 13 | eggs/ 14 | .eggs/ 15 | lib/ 16 | lib64/ 17 | parts/ 18 | sdist/ 19 | var/ 20 | wheels/ 21 | *.egg-info/ 22 | .installed.cfg 23 | *.egg 24 | 25 | # Virtual Environment 26 | venv/ 27 | env/ 28 | ENV/ 29 | .env 30 | .venv 31 | 32 | # IDE 33 | .idea/ 34 | .vscode/ 35 | *.swp 36 | *.swo 37 | .DS_Store 38 | 39 | # Project specific 40 | *.db 41 | *.sqlite3 42 | *.log 43 | -------------------------------------------------------------------------------- /.github/FUNDING.yml: -------------------------------------------------------------------------------- 1 | # These are supported funding model platforms 2 | 3 | github: # Replace with up to 4 GitHub Sponsors-enabled usernames e.g., [user1, user2] 4 | patreon: # Replace with a single Patreon username 5 | open_collective: # Replace with a single Open Collective username 6 | ko_fi: 3choff # Replace with a single Ko-fi username 7 | tidelift: # Replace with a single Tidelift platform-name/package-name e.g., npm/babel 8 | community_bridge: # Replace with a single Community Bridge project-name e.g., cloud-foundry 9 | liberapay: # Replace with a single Liberapay username 10 | issuehunt: # Replace with a single IssueHunt username 11 | lfx_crowdfunding: # Replace with a single LFX Crowdfunding project-name e.g., cloud-foundry 12 | polar: # Replace with a single Polar username 13 | buy_me_a_coffee: # Replace with a single Buy Me a Coffee username 14 | custom: # Replace with up to 4 custom sponsorship URLs e.g., ['link1', 'link2'] -------------------------------------------------------------------------------- /LICENSE: -------------------------------------------------------------------------------- 1 | MIT License 2 | 3 | Copyright (c) 2024 Edoardo Cilia 4 | 5 | Permission is hereby granted, free of charge, to any person obtaining a copy 6 | of this software and associated documentation files (the "Software"), to deal 7 | in the Software without restriction, including without limitation the rights 8 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell 9 | copies of the Software, and to permit persons to whom the Software is 10 | furnished to do so, subject to the following conditions: 11 | 12 | The 
above copyright notice and this permission notice shall be included in all 13 | copies or substantial portions of the Software. 14 | 15 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 16 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 17 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE 18 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 19 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, 20 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE 21 | SOFTWARE. 22 | -------------------------------------------------------------------------------- /README.MD: -------------------------------------------------------------------------------- 1 | # MCP Chatbot 2 | 3 | This example demonstrates how to integrate the Model Context Protocol (MCP) into a simple CLI chatbot. The implementation showcases MCP's flexibility by supporting multiple tools through MCP servers and is compatible with any LLM provider that follows OpenAI API standards. 4 | 5 | If you find this project helpful, don’t forget to ⭐ star the [repository](https://github.com/3choff/mcp-chatbot) or buy me a ☕ [coffee](https://ko-fi.com/3choff). 6 | 7 | ## Key Features 8 | 9 | - **LLM Provider Flexibility**: Works with any LLM that follows OpenAI API standards (tested with Llama 3.2 90b on Groq and GPT-4o mini on GitHub Marketplace). 10 | - **Dynamic Tool Integration**: Tools are declared in the system prompt, ensuring maximum compatibility across different LLMs. 11 | - **Server Configuration**: Supports multiple MCP servers through a simple JSON configuration file in the same format used by the Claude Desktop App. 12 | 13 | ## Requirements 14 | 15 | - Python 3.10 or higher 16 | - `python-dotenv` 17 | - `requests` 18 | - `mcp` 19 | - `uvicorn` 20 | 21 | ## Installation 22 | 23 | 1. **Clone the repository:** 24 | 25 | ```bash 26 | git clone https://github.com/3choff/mcp-chatbot.git 27 | cd mcp-chatbot 28 | ``` 29 | 30 | 2. **Install the dependencies:** 31 | 32 | ```bash 33 | pip install -r requirements.txt 34 | ``` 35 | 36 | 3. **Set up environment variables:** 37 | 38 | Create a `.env` file in the root directory and add the API key for your provider. The variable name must match the one read in `main.py` (`GROQ_API_KEY` by default, or `GITHUB_API_KEY` if you switch to the GitHub Models endpoint): 39 | 40 | ```plaintext 41 | GROQ_API_KEY=your_api_key_here 42 | ``` 43 | 44 | 4. **Configure servers:** 45 | 46 | The `servers_config.json` file follows the same structure as the Claude Desktop App's configuration, allowing for easy integration of multiple servers. 47 | Here's an example: 48 | 49 | ```json 50 | { 51 | "mcpServers": { 52 | "sqlite": { 53 | "command": "uvx", 54 | "args": ["mcp-server-sqlite", "--db-path", "./test.db"] 55 | }, 56 | "puppeteer": { 57 | "command": "npx", 58 | "args": ["-y", "@modelcontextprotocol/server-puppeteer"] 59 | } 60 | } 61 | } 62 | ``` 63 | Per-server environment variables are supported as well. Pass them as you would with the Claude Desktop App. 64 | 65 | Example: 66 | ```json 67 | { 68 | "mcpServers": { 69 | "server_name": { 70 | "command": "uvx", 71 | "args": ["mcp-server-name", "--additional-args"], 72 | "env": { 73 | "API_KEY": "your_api_key_here" 74 | } 75 | } 76 | } 77 | } 78 | ``` 79 | 80 | ## Usage 81 | 82 | 1. **Run the client:** 83 | 84 | ```bash 85 | python main.py 86 | ``` 87 | 88 | 2. **Interact with the assistant:** 89 | 90 | The assistant automatically detects the tools exposed by the configured servers and can answer queries using them (see the example tool call below). 91 | 92 | 3. **Exit the session:** 93 | 94 | Type `quit` or `exit` to end the session. 
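For illustration, when the model decides to use a tool it replies with nothing but a JSON object in the format defined by the system prompt in `main.py`. The tool and argument names below only sketch what an SQLite MCP server might expose; they are not captured from a real session:

```json
{
  "tool": "read_query",
  "arguments": {
    "query": "SELECT name FROM sqlite_master WHERE type='table'"
  }
}
```

The chatbot parses this JSON, routes the call to the first configured server that lists a matching tool, and sends the tool result back to the LLM, which then phrases the final answer for the user.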
95 | 96 | ## Architecture 97 | 98 | - **Tool Discovery**: Tools are automatically discovered from the configured servers. 99 | - **System Prompt**: Tools are dynamically included in the system prompt, allowing the LLM to understand the available capabilities (an example of a formatted tool entry is shown at the end of this README). 100 | - **Server Integration**: Supports any MCP-compatible server; tested with both Python-based servers (launched via `uvx`) and Node.js-based servers (launched via `npx`). 101 | 102 | ### Class Structure 103 | - **Configuration**: Manages environment variables and server configurations 104 | - **Server**: Handles MCP server initialization, tool discovery, and execution 105 | - **Tool**: Represents individual tools with their properties and formatting 106 | - **LLMClient**: Manages communication with the LLM provider 107 | - **ChatSession**: Orchestrates the interaction between user, LLM, and tools 108 | 109 | ### Logic Flow 110 | 111 | ```mermaid 112 | flowchart TD 113 | A[Start] --> B[Load Configuration] 114 | B --> C[Initialize Servers] 115 | C --> D[Discover Tools] 116 | D --> E[Format Tools for LLM] 117 | E --> F[Wait for User Input] 118 | 119 | F --> G{User Input} 120 | G --> H[Send Input to LLM] 121 | H --> I{LLM Decision} 122 | I -->|Tool Call| J[Execute Tool] 123 | I -->|Direct Response| K[Return Response to User] 124 | 125 | J --> L[Return Tool Result] 126 | L --> M[Send Result to LLM] 127 | M --> N[LLM Interprets Result] 128 | N --> O[Present Final Response to User] 129 | 130 | K --> O 131 | O --> F 132 | ``` 133 | 134 | 1. **Initialization**: 135 | - Configuration loads environment variables and server settings 136 | - Servers are initialized with their respective tools 137 | - Tools are discovered and formatted for the LLM 138 | 139 | 2. **Runtime Flow**: 140 | - User input is received 141 | - Input is sent to the LLM along with the available tools as context 142 | - The LLM response is parsed: 143 | - If it's a tool call → execute the tool and return the result 144 | - If it's a direct response → return it to the user 145 | - Tool results are sent back to the LLM for interpretation 146 | - The final response is presented to the user 147 | 148 | 3. **Tool Integration**: 149 | - Tools are dynamically discovered from MCP servers 150 | - Tool descriptions are automatically included in the system prompt 151 | - Tool execution is handled through the standardized MCP protocol 152 | 153 | ## Contributing 154 | 155 | Feedback and contributions are welcome. If you encounter any issues or have suggestions for improvements, please create a new [issue](https://github.com/3choff/mcp-chatbot/issues) on the GitHub repository. 156 | 157 | If you'd like to contribute to the development of the project, feel free to submit a pull request with your changes. 158 | 159 | ## License 160 | 161 | This project is licensed under the [MIT License](https://github.com/3choff/mcp-chatbot/blob/main/LICENSE). 
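As referenced in the Architecture section above, every discovered tool is rendered into the system prompt by `Tool.format_for_llm`. The entry below shows the shape of that text for a hypothetical `read_query` tool; the description and argument text are illustrative rather than taken from a live server:

```plaintext
Tool: read_query
Description: Execute a SELECT query on the SQLite database
Arguments:
- query: The SELECT statement to run (required)
```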
162 | -------------------------------------------------------------------------------- /main.py: -------------------------------------------------------------------------------- 1 | import asyncio 2 | import json 3 | import logging 4 | import os 5 | import shutil 6 | from typing import Dict, List, Optional, Any 7 | 8 | import requests 9 | from dotenv import load_dotenv 10 | from mcp import ClientSession, StdioServerParameters 11 | from mcp.client.stdio import stdio_client 12 | 13 | # Configure logging 14 | logging.basicConfig(level=logging.INFO, format='%(asctime)s - %(levelname)s - %(message)s') 15 | 16 | 17 | class Configuration: 18 | """Manages configuration and environment variables for the MCP client.""" 19 | 20 | def __init__(self) -> None: 21 | """Initialize configuration with environment variables.""" 22 | self.load_env() 23 | self.api_key = os.getenv("GROQ_API_KEY") 24 | # self.api_key = os.getenv("GITHUB_API_KEY") 25 | 26 | @staticmethod 27 | def load_env() -> None: 28 | """Load environment variables from .env file.""" 29 | load_dotenv() 30 | 31 | @staticmethod 32 | def load_config(file_path: str) -> Dict[str, Any]: 33 | """Load server configuration from JSON file. 34 | 35 | Args: 36 | file_path: Path to the JSON configuration file. 37 | 38 | Returns: 39 | Dict containing server configuration. 40 | 41 | Raises: 42 | FileNotFoundError: If configuration file doesn't exist. 43 | JSONDecodeError: If configuration file is invalid JSON. 44 | """ 45 | with open(file_path, 'r') as f: 46 | return json.load(f) 47 | 48 | @property 49 | def llm_api_key(self) -> str: 50 | """Get the LLM API key. 51 | 52 | Returns: 53 | The API key as a string. 54 | 55 | Raises: 56 | ValueError: If the API key is not found in environment variables. 57 | """ 58 | if not self.api_key: 59 | raise ValueError("LLM_API_KEY not found in environment variables") 60 | return self.api_key 61 | 62 | 63 | class Server: 64 | """Manages MCP server connections and tool execution.""" 65 | 66 | def __init__(self, name: str, config: Dict[str, Any]) -> None: 67 | self.name: str = name 68 | self.config: Dict[str, Any] = config 69 | self.stdio_context: Optional[Any] = None 70 | self.session: Optional[ClientSession] = None 71 | self._cleanup_lock: asyncio.Lock = asyncio.Lock() 72 | self.capabilities: Optional[Dict[str, Any]] = None 73 | 74 | async def initialize(self) -> None: 75 | """Initialize the server connection.""" 76 | server_params = StdioServerParameters( 77 | command=shutil.which("npx") if self.config['command'] == "npx" else self.config['command'], 78 | args=self.config['args'], 79 | env={**os.environ, **self.config['env']} if self.config.get('env') else None 80 | ) 81 | try: 82 | self.stdio_context = stdio_client(server_params) 83 | read, write = await self.stdio_context.__aenter__() 84 | self.session = ClientSession(read, write) 85 | await self.session.__aenter__() 86 | self.capabilities = await self.session.initialize() 87 | except Exception as e: 88 | logging.error(f"Error initializing server {self.name}: {e}") 89 | await self.cleanup() 90 | raise 91 | 92 | async def list_tools(self) -> List[Any]: 93 | """List available tools from the server. 94 | 95 | Returns: 96 | A list of available tools. 97 | 98 | Raises: 99 | RuntimeError: If the server is not initialized. 
100 | """ 101 | if not self.session: 102 | raise RuntimeError(f"Server {self.name} not initialized") 103 | 104 | tools_response = await self.session.list_tools() 105 | tools = [] 106 | 107 | supports_progress = ( 108 | self.capabilities 109 | and 'progress' in self.capabilities 110 | ) 111 | 112 | if supports_progress: 113 | logging.info(f"Server {self.name} supports progress tracking") 114 | 115 | for item in tools_response: 116 | if isinstance(item, tuple) and item[0] == 'tools': 117 | for tool in item[1]: 118 | tools.append(Tool(tool.name, tool.description, tool.inputSchema)) 119 | if supports_progress: 120 | logging.info(f"Tool '{tool.name}' will support progress tracking") 121 | 122 | return tools 123 | 124 | async def execute_tool( 125 | self, 126 | tool_name: str, 127 | arguments: Dict[str, Any], 128 | retries: int = 2, 129 | delay: float = 1.0 130 | ) -> Any: 131 | """Execute a tool with retry mechanism. 132 | 133 | Args: 134 | tool_name: Name of the tool to execute. 135 | arguments: Tool arguments. 136 | retries: Number of retry attempts. 137 | delay: Delay between retries in seconds. 138 | 139 | Returns: 140 | Tool execution result. 141 | 142 | Raises: 143 | RuntimeError: If server is not initialized. 144 | Exception: If tool execution fails after all retries. 145 | """ 146 | if not self.session: 147 | raise RuntimeError(f"Server {self.name} not initialized") 148 | 149 | attempt = 0 150 | while attempt < retries: 151 | try: 152 | supports_progress = ( 153 | self.capabilities 154 | and 'progress' in self.capabilities 155 | ) 156 | 157 | if supports_progress: 158 | logging.info(f"Executing {tool_name} with progress tracking...") 159 | result = await self.session.call_tool( 160 | tool_name, 161 | arguments, 162 | progress_token=f"{tool_name}_execution" 163 | ) 164 | else: 165 | logging.info(f"Executing {tool_name}...") 166 | result = await self.session.call_tool(tool_name, arguments) 167 | 168 | return result 169 | 170 | except Exception as e: 171 | attempt += 1 172 | logging.warning(f"Error executing tool: {e}. Attempt {attempt} of {retries}.") 173 | if attempt < retries: 174 | logging.info(f"Retrying in {delay} seconds...") 175 | await asyncio.sleep(delay) 176 | else: 177 | logging.error("Max retries reached. Failing.") 178 | raise 179 | 180 | async def cleanup(self) -> None: 181 | """Clean up server resources.""" 182 | async with self._cleanup_lock: 183 | try: 184 | if self.session: 185 | try: 186 | await self.session.__aexit__(None, None, None) 187 | except Exception as e: 188 | logging.warning(f"Warning during session cleanup for {self.name}: {e}") 189 | finally: 190 | self.session = None 191 | 192 | if self.stdio_context: 193 | try: 194 | await self.stdio_context.__aexit__(None, None, None) 195 | except (RuntimeError, asyncio.CancelledError) as e: 196 | logging.info(f"Note: Normal shutdown message for {self.name}: {e}") 197 | except Exception as e: 198 | logging.warning(f"Warning during stdio cleanup for {self.name}: {e}") 199 | finally: 200 | self.stdio_context = None 201 | except Exception as e: 202 | logging.error(f"Error during cleanup of server {self.name}: {e}") 203 | 204 | 205 | class Tool: 206 | """Represents a tool with its properties and formatting.""" 207 | 208 | def __init__(self, name: str, description: str, input_schema: Dict[str, Any]) -> None: 209 | self.name: str = name 210 | self.description: str = description 211 | self.input_schema: Dict[str, Any] = input_schema 212 | 213 | def format_for_llm(self) -> str: 214 | """Format tool information for LLM. 
215 | 216 | Returns: 217 | A formatted string describing the tool. 218 | """ 219 | args_desc = [] 220 | if 'properties' in self.input_schema: 221 | for param_name, param_info in self.input_schema['properties'].items(): 222 | arg_desc = f"- {param_name}: {param_info.get('description', 'No description')}" 223 | if param_name in self.input_schema.get('required', []): 224 | arg_desc += " (required)" 225 | args_desc.append(arg_desc) 226 | 227 | return f""" 228 | Tool: {self.name} 229 | Description: {self.description} 230 | Arguments: 231 | {chr(10).join(args_desc)} 232 | """ 233 | 234 | 235 | class LLMClient: 236 | """Manages communication with the LLM provider.""" 237 | 238 | def __init__(self, api_key: str) -> None: 239 | self.api_key: str = api_key 240 | 241 | def get_response(self, messages: List[Dict[str, str]]) -> str: 242 | """Get a response from the LLM. 243 | 244 | Args: 245 | messages: A list of message dictionaries. 246 | 247 | Returns: 248 | The LLM's response as a string. 249 | 250 | Raises: 251 | RequestException: If the request to the LLM fails. 252 | """ 253 | url = "https://api.groq.com/openai/v1/chat/completions" 254 | # url = "https://models.inference.ai.azure.com/chat/completions" 255 | 256 | headers = { 257 | "Content-Type": "application/json", 258 | "Authorization": f"Bearer {self.api_key}" 259 | } 260 | payload = { 261 | "messages": messages, 262 | "model": "llama-3.2-90b-vision-preview", 263 | "temperature": 0.7, 264 | "max_tokens": 4096, 265 | "top_p": 1, 266 | "stream": False, 267 | "stop": None 268 | } 269 | # payload = { 270 | # "messages": messages, 271 | # "temperature": 1.0, 272 | # "top_p": 1.0, 273 | # "max_tokens": 4000, 274 | # "model": "gpt-4o-mini" 275 | # } 276 | 277 | try: 278 | response = requests.post(url, headers=headers, json=payload) 279 | response.raise_for_status() 280 | data = response.json() 281 | return data['choices'][0]['message']['content'] 282 | 283 | except requests.exceptions.RequestException as e: 284 | error_message = f"Error getting LLM response: {str(e)}" 285 | logging.error(error_message) 286 | 287 | if e.response is not None: 288 | status_code = e.response.status_code 289 | logging.error(f"Status code: {status_code}") 290 | logging.error(f"Response details: {e.response.text}") 291 | 292 | return f"I encountered an error: {error_message}. Please try again or rephrase your request." 293 | 294 | 295 | class ChatSession: 296 | """Orchestrates the interaction between user, LLM, and tools.""" 297 | 298 | def __init__(self, servers: List[Server], llm_client: LLMClient) -> None: 299 | self.servers: List[Server] = servers 300 | self.llm_client: LLMClient = llm_client 301 | 302 | async def cleanup_servers(self) -> None: 303 | """Clean up all servers properly.""" 304 | cleanup_tasks = [] 305 | for server in self.servers: 306 | cleanup_tasks.append(asyncio.create_task(server.cleanup())) 307 | 308 | if cleanup_tasks: 309 | try: 310 | await asyncio.gather(*cleanup_tasks, return_exceptions=True) 311 | except Exception as e: 312 | logging.warning(f"Warning during final cleanup: {e}") 313 | 314 | async def process_llm_response(self, llm_response: str) -> str: 315 | """Process the LLM response and execute tools if needed. 316 | 317 | Args: 318 | llm_response: The response from the LLM. 319 | 320 | Returns: 321 | The result of tool execution or the original response. 
322 | """ 323 | import json 324 | try: 325 | tool_call = json.loads(llm_response) 326 | if "tool" in tool_call and "arguments" in tool_call: 327 | logging.info(f"Executing tool: {tool_call['tool']}") 328 | logging.info(f"With arguments: {tool_call['arguments']}") 329 | 330 | for server in self.servers: 331 | tools = await server.list_tools() 332 | if any(tool.name == tool_call["tool"] for tool in tools): 333 | try: 334 | result = await server.execute_tool(tool_call["tool"], tool_call["arguments"]) 335 | 336 | if isinstance(result, dict) and 'progress' in result: 337 | progress = result['progress'] 338 | total = result['total'] 339 | logging.info(f"Progress: {progress}/{total} ({(progress/total)*100:.1f}%)") 340 | 341 | return f"Tool execution result: {result}" 342 | except Exception as e: 343 | error_msg = f"Error executing tool: {str(e)}" 344 | logging.error(error_msg) 345 | return error_msg 346 | 347 | return f"No server found with tool: {tool_call['tool']}" 348 | return llm_response 349 | except json.JSONDecodeError: 350 | return llm_response 351 | 352 | async def start(self) -> None: 353 | """Main chat session handler.""" 354 | try: 355 | for server in self.servers: 356 | try: 357 | await server.initialize() 358 | except Exception as e: 359 | logging.error(f"Failed to initialize server: {e}") 360 | await self.cleanup_servers() 361 | return 362 | 363 | all_tools = [] 364 | for server in self.servers: 365 | tools = await server.list_tools() 366 | all_tools.extend(tools) 367 | 368 | tools_description = "\n".join([tool.format_for_llm() for tool in all_tools]) 369 | 370 | system_message = f"""You are a helpful assistant with access to these tools: 371 | 372 | {tools_description} 373 | Choose the appropriate tool based on the user's question. If no tool is needed, reply directly. 374 | 375 | IMPORTANT: When you need to use a tool, you must ONLY respond with the exact JSON object format below, nothing else: 376 | {{ 377 | "tool": "tool-name", 378 | "arguments": {{ 379 | "argument-name": "value" 380 | }} 381 | }} 382 | 383 | After receiving a tool's response: 384 | 1. Transform the raw data into a natural, conversational response 385 | 2. Keep responses concise but informative 386 | 3. Focus on the most relevant information 387 | 4. Use appropriate context from the user's question 388 | 5. 
Avoid simply repeating the raw data 389 | 390 | Please use only the tools that are explicitly defined above.""" 391 | 392 | messages = [ 393 | { 394 | "role": "system", 395 | "content": system_message 396 | } 397 | ] 398 | 399 | while True: 400 | try: 401 | user_input = input("You: ").strip().lower() 402 | if user_input in ['quit', 'exit']: 403 | logging.info("\nExiting...") 404 | break 405 | 406 | messages.append({"role": "user", "content": user_input}) 407 | 408 | llm_response = self.llm_client.get_response(messages) 409 | logging.info("\nAssistant: %s", llm_response) 410 | 411 | result = await self.process_llm_response(llm_response) 412 | 413 | if result != llm_response: 414 | messages.append({"role": "assistant", "content": llm_response}) 415 | messages.append({"role": "system", "content": result}) 416 | 417 | final_response = self.llm_client.get_response(messages) 418 | logging.info("\nFinal response: %s", final_response) 419 | messages.append({"role": "assistant", "content": final_response}) 420 | else: 421 | messages.append({"role": "assistant", "content": llm_response}) 422 | 423 | except KeyboardInterrupt: 424 | logging.info("\nExiting...") 425 | break 426 | 427 | finally: 428 | await self.cleanup_servers() 429 | 430 | 431 | async def main() -> None: 432 | """Initialize and run the chat session.""" 433 | config = Configuration() 434 | server_config = config.load_config('servers_config.json') 435 | servers = [Server(name, srv_config) for name, srv_config in server_config['mcpServers'].items()] 436 | llm_client = LLMClient(config.llm_api_key) 437 | chat_session = ChatSession(servers, llm_client) 438 | await chat_session.start() 439 | 440 | if __name__ == "__main__": 441 | asyncio.run(main()) --------------------------------------------------------------------------------