├── mcp_client
│   ├── __init__.py
│   └── base.py
├── test.db
├── .gitignore
├── requirements.txt
├── pyproject.toml
├── mcp-server-config.json
├── LICENSE
├── README.md
├── cli.py
└── app.py

/mcp_client/__init__.py:
--------------------------------------------------------------------------------
1 | 
--------------------------------------------------------------------------------
/test.db:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/rakesh-eltropy/mcp-client/HEAD/test.db
--------------------------------------------------------------------------------
/.gitignore:
--------------------------------------------------------------------------------
1 | # Python-generated files
2 | __pycache__/
3 | *.py[oc]
4 | build/
5 | dist/
6 | uv.lock
7 | 
8 | # Virtual environments
9 | venv/
10 | .venv
11 | .env
12 | 
13 | # jetbrains files
14 | .idea/
15 | 
--------------------------------------------------------------------------------
/requirements.txt:
--------------------------------------------------------------------------------
1 | langchain-anthropic>=0.3.0
2 | langchain>=0.3.10
3 | langchain-core>=0.3.22
4 | langgraph<=0.2.59
5 | mcp>=1.0.0
6 | python-dotenv>=1.0.1
7 | langchain-openai>=0.2.10
8 | langchain-ollama<=0.2.2rc1
9 | langchain-anthropic>=0.3.0
10 | langchain-google-genai>=0.2.0
11 | langchain-groq>=0.2.0
12 | langchain-aws>=0.2.9
13 | langchain-together>=0.2.0
14 | langchain-fireworks>=0.2.0
15 | jsonschema-pydantic>=0.6
16 | aiosqlite>=0.20.0
17 | langgraph-checkpoint-sqlite>=2.0.1
18 | requests>=2.32.3
19 | fastapi>=0.70.0
20 | uvicorn>=0.15.0
--------------------------------------------------------------------------------
/pyproject.toml:
--------------------------------------------------------------------------------
1 | [project]
2 | name = "mcp_client"
3 | version = "0.1.0"
4 | description = "Command line and REST client for MCP servers"
5 | readme = "README.md"
6 | requires-python = ">=3.12"
7 | dependencies = [
8 |     "langchain-anthropic>=0.3.0",
9 |     "langchain>=0.3.10",
10 |     "langchain-core>=0.3.22",
11 |     "langgraph>=0.2.59",
12 |     "mcp>=1.0.0",
13 |     "python-dotenv>=1.0.1",
14 |     "langchain-openai>=0.2.10",
15 |     "langchain-ollama<=0.2.2rc1",
16 |     "jsonschema-pydantic>=0.6",
17 |     "aiosqlite>=0.20.0",
18 |     "langchain-anthropic>=0.3.0",
19 |     "langchain-google-genai>=0.2.0",
20 |     "langchain-groq>=0.2.0",
21 |     "langchain-aws>=0.2.9",
22 |     "langchain-together>=0.2.0",
23 |     "langchain-fireworks>=0.2.0",
24 |     "requests>=2.32.3",
25 |     "fastapi>=0.70.0",
26 |     "uvicorn>=0.15.0",
27 | ]
28 | license = { text = "MIT" }
29 | 
--------------------------------------------------------------------------------
/mcp-server-config.json:
--------------------------------------------------------------------------------
1 | {
2 |   "systemPrompt": "Welcome to the SQLite Query and Internet Search Application!\nFeatures:\nSQLite Database Queries:\nConnect to your local SQLite database and run SQL queries.\nPerform operations such as retrieving, updating, deleting, and inserting data.\nView query results in a formatted table directly within the application.\nInternet Search Capabilities:\nExecute web searches using integrated search engines.\nRetrieve and display search results conveniently.\nFilter and sort results based on relevance, date, or source.",
3 |   "llm": {
4 |     "provider": "openai",
5 |     "model": "gpt-4o-mini",
6 |     "api_key": "",
7 |     "temperature": 0.2
8 |   },
9 |   "mcpServers": {
10 |     "brave-search": {
11 |       "command": "npx",
12 |       "args": ["-y", "@modelcontextprotocol/server-brave-search"],
"@modelcontextprotocol/server-brave-search"], 13 | "env": { 14 | "BRAVE_API_KEY": "" 15 | } 16 | }, 17 | "sqlite": { 18 | "command": "uvx", 19 | "args": ["mcp-server-sqlite", "--db-path", "test.db"] 20 | } 21 | } 22 | } 23 | -------------------------------------------------------------------------------- /LICENSE: -------------------------------------------------------------------------------- 1 | MIT License 2 | 3 | Copyright (c) 2025 Rakesh Goyal 4 | 5 | Permission is hereby granted, free of charge, to any person obtaining a copy 6 | of this software and associated documentation files (the "Software"), to deal 7 | in the Software without restriction, including without limitation the rights 8 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell 9 | copies of the Software, and to permit persons to whom the Software is 10 | furnished to do so, subject to the following conditions: 11 | 12 | The above copyright notice and this permission notice shall be included in all 13 | copies or substantial portions of the Software. 14 | 15 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 16 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 17 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE 18 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 19 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, 20 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE 21 | SOFTWARE. 22 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | # MCP REST API and CLI Client 2 | 3 | A **simple REST API** and **CLI client** to interact with [Model Context Protocol (MCP)](https://modelcontextprotocol.io/) servers. 4 | 5 | ## Key Features 6 | 7 | ### 1. MCP-Compatible Servers 8 | - Supports any [MCP-compatible servers](https://github.com/punkpeye/awesome-mcp-servers) servers. 9 | - Pre-configured default servers: 10 | - **SQLite** (test.db has been provided with sample products data) 11 | - **Brave Search** 12 | - Additional MCP servers can be added in the [mcp-server-config.json](mcp-server-config.json) file 13 | 14 | ### 2. Integrated with LangChain 15 | - Leverages LangChain to execute LLM prompts. 16 | - Enables multiple MCP servers to collaborate and respond to a specific query simultaneously. 17 | 18 | ### 3. LLM Provider Support 19 | - Compatible with any LLM provider that supports APIs with function capabilities. 20 | - Examples: 21 | - **OpenAI** 22 | - **Claude** 23 | - **Gemini** 24 | - **AWS Nova** 25 | - **Groq** 26 | - **Ollama** 27 | - Essentially all LLM providers are supported as long as they provide a function-based API. Please refer [langchain documentation](https://python.langchain.com/docs/integrations/chat/) for more details. 28 | 29 | 30 | ## Setup 31 | 32 | 1. Clone the repository: 33 | ```bash 34 | git clone https://github.com/rakesh-eltropy/mcp-client.git 35 | ``` 36 | 37 | 2. **Navigate to the Project Directory** 38 | After cloning the repository, move to the project directory: 39 | ```bash 40 | cd mcp-client 41 | ``` 42 | 43 | 3. Set the OPENAI_API_KEY environment variable: 44 | ```bash 45 | export OPENAI_API_KEY=your-openai-api-key 46 | ``` 47 | You can also set the `OPENAI_API_KEY` in the [mcp-server-config.json](mcp-server-config.json) file. 
48 | 
49 | You can also set the `provider` and `model` in the [mcp-server-config.json](mcp-server-config.json) file.
50 | For example, `provider` can be `ollama` and `model` can be `llama3.2:3b`.
51 | 
52 | 
53 | 4. Set the BRAVE_API_KEY environment variable:
54 | ```bash
55 | export BRAVE_API_KEY=your-brave-api-key
56 | ```
57 | You can also set the `BRAVE_API_KEY` in the [mcp-server-config.json](mcp-server-config.json) file.
58 | You can get a free `BRAVE_API_KEY` from the [Brave Search API](https://brave.com/search/api/).
59 | 
60 | 5. Running the CLI:
61 | ```bash
62 | uv run cli.py
63 | ```
64 | To explore the available commands, use the `help` command. You can chat with the LLM using the `chat` command.
65 | Sample prompts:
66 | ```bash
67 | What is the capital city of India?
68 | ```
69 | ```bash
70 | Search for the most expensive product in the database and find more details about it on Amazon.
71 | ```
72 | 
73 | 6. Running the REST API:
74 | ```bash
75 | uvicorn app:app --reload
76 | ```
77 | You can use the following curl command to chat with the LLM:
78 | ```bash
79 | curl -X POST -H "Content-Type: application/json" -d '{"message": "list all the products from my local database?"}' http://localhost:8000/chat
80 | ```
81 | You can use the following curl command to chat with the LLM with streaming:
82 | ```bash
83 | curl -X POST -H "Content-Type: application/json" -d '{"message": "list all the products from my local database?", "streaming": true}' http://localhost:8000/chat
84 | ```
85 | 
86 | 
87 | ## Contributing
88 | 
89 | Feel free to submit issues and pull requests for improvements or bug fixes.
90 | 
--------------------------------------------------------------------------------
/cli.py:
--------------------------------------------------------------------------------
1 | """
2 | This module contains the CLI client for the MCP servers.
3 | """ 4 | import asyncio 5 | import os 6 | import sys 7 | import traceback 8 | from datetime import datetime 9 | from typing import TypedDict 10 | from langchain_core.messages import HumanMessage, AIMessage, AIMessageChunk 11 | from langgraph.graph.graph import CompiledGraph 12 | 13 | from mcp_client.base import ( 14 | load_server_config, 15 | create_server_parameters, 16 | convert_mcp_to_langchain_tools, 17 | create_agent_executor 18 | ) 19 | 20 | 21 | async def list_tools() -> None: 22 | """List available tools from the server.""" 23 | server_config = load_server_config() 24 | server_params = create_server_parameters(server_config) 25 | langchain_tools = await convert_mcp_to_langchain_tools(server_params) 26 | 27 | for tool in langchain_tools: 28 | print(f"{tool.name}") 29 | 30 | 31 | async def handle_chat_mode(): 32 | """Handle chat mode for the LangChain agent.""" 33 | print("\nInitializing chat mode...") 34 | agent_executor_cli = await create_agent_executor("cli") 35 | print("\nInitialized chat mode...") 36 | 37 | # Maintain a chat history of messages 38 | chat_history = [] 39 | 40 | # Start the chat loop 41 | while True: 42 | try: 43 | user_message = input("\nYou: ").strip() 44 | if user_message.lower() in ["exit", "quit"]: 45 | print("Exiting chat mode.") 46 | break 47 | if user_message.lower() in ["clear", "cls"]: 48 | os.system("cls" if sys.platform == "win32" else "clear") 49 | chat_history = [] 50 | continue 51 | all_messages = [] 52 | # Append the chat history to all messages 53 | all_messages.extend(chat_history) 54 | all_messages = [HumanMessage(content=user_message)] 55 | input_messages = { 56 | "messages": all_messages, 57 | "today_datetime": datetime.now().isoformat(), 58 | } 59 | # Query the assistant and get a fully formed response 60 | assistant_response = await query_response(input_messages, agent_executor_cli) 61 | 62 | # Append the assistant's response to the history 63 | chat_history.append(AIMessage(content=assistant_response)) 64 | except Exception as e: 65 | error_trace = traceback.format_exc() 66 | print(error_trace) 67 | print(f"\nError processing message: {e}") 68 | continue 69 | 70 | 71 | async def query_response(input_messages: TypedDict, agent_executor: CompiledGraph) -> str: 72 | """Query the assistant and get a fully formed response.""" 73 | collected_response = [] 74 | 75 | async for chunk in agent_executor.astream( 76 | input_messages, 77 | stream_mode=["messages", "values"] 78 | ): 79 | # Process the chunk and append the response to the collected response 80 | process_chunk(chunk) 81 | if isinstance(chunk, dict) and "messages" in chunk: 82 | collected_response.append(chunk["messages"][-1].content) 83 | 84 | print("") # Ensure a newline after the conversation ends 85 | return "".join(collected_response) 86 | 87 | 88 | def process_chunk(chunk): 89 | """Process the chunk and print the response.""" 90 | if isinstance(chunk, tuple) and chunk[0] == "messages": 91 | process_message_chunk(chunk[1][0]) 92 | elif isinstance(chunk, dict) and "messages" in chunk: 93 | process_final_value_chunk() 94 | elif isinstance(chunk, tuple) and chunk[0] == "values": 95 | process_tool_calls(chunk[1]['messages'][-1]) 96 | 97 | 98 | def process_message_chunk(message_chunk): 99 | """Process the message chunk and print the content.""" 100 | if isinstance(message_chunk, AIMessageChunk): 101 | content = message_chunk.content # Get the content of the message chunk 102 | if isinstance(content, list): 103 | extracted_text = ''.join(item['text'] for item in content if 'text' in item) 
104 |             print(extracted_text, end="", flush=True)  # Print message content incrementally
105 |         else:
106 |             print(content, end="", flush=True)
107 | 
108 | 
109 | def process_final_value_chunk():
110 |     """Process the final value chunk and print the content."""
111 |     print("\n", flush=True)  # Ensure a newline after complete message
112 | 
113 | 
114 | def process_tool_calls(message):
115 |     """Process the tool calls and print the results."""
116 |     if isinstance(message, AIMessage) and message.tool_calls:
117 |         message.pretty_print()  # Format and print tool call results
118 | 
119 | 
120 | async def interactive_mode():
121 |     """Run the CLI in interactive mode."""
122 |     print("\nWelcome to the Interactive MCP Command-Line Tool")
123 |     print("Type 'help' for available commands or 'chat' to start chat or 'quit' to exit")
124 | 
125 |     while True:
126 |         try:
127 |             command = input(">>> ").strip()  # Get user input
128 |             if not command:
129 |                 continue
130 |             should_continue = await handle_command(command)  # Handle the command
131 |             if not should_continue:
132 |                 return
133 |         except KeyboardInterrupt:
134 |             print("\nUse 'quit' or 'exit' to close the program")
135 |         except EOFError:
136 |             break
137 |         except Exception as e:
138 |             print(f"\nError: {e}")
139 | 
140 | 
141 | async def handle_command(command: str):
142 |     """Handle specific commands dynamically."""
143 |     try:
144 |         if command == "list-tools":
145 |             print("\nFetching Tools List...\n")
146 |             # Fetch and print the available tools
147 |             await list_tools()
148 |         elif command == "chat":
149 |             print("\nEntering chat mode...")
150 |             await handle_chat_mode()
151 |             # Chat mode returns here once the user exits it
152 |         elif command in ["quit", "exit"]:
153 |             print("\nGoodbye!")
154 |             return False
155 |         elif command == "clear":
156 |             if sys.platform == "win32":
157 |                 os.system("cls")
158 |             else:
159 |                 os.system("clear")
160 |         elif command == "help":
161 |             print("\nAvailable commands:")
162 |             print("  list-tools - Display available tools")
163 |             print("  chat       - Enter chat mode")
164 |             print("  clear      - Clear the screen")
165 |             print("  help       - Show this help message")
166 |             print("  quit/exit  - Exit the program")
167 |         else:
168 |             print(f"\nUnknown command: {command}")
169 |             print("Type 'help' for available commands")
170 |     except Exception as e:
171 |         print(f"\nError executing command: {e}")
172 | 
173 |     return True
174 | 
175 | 
176 | def main() -> None:
177 |     """Entry point for the script."""
178 | 
179 | 
180 |     asyncio.run(interactive_mode())  # Run the main asynchronous function
181 | 
182 | if __name__ == "__main__":
183 |     main()  # Execute the main function when script is run directly
184 | 
--------------------------------------------------------------------------------
/app.py:
--------------------------------------------------------------------------------
1 | """
2 | This module contains the REST API for the MCP servers.
3 | """ 4 | import json 5 | import traceback 6 | from datetime import datetime 7 | 8 | from fastapi import FastAPI, HTTPException, Body 9 | from typing import List, Dict, Any 10 | 11 | from langchain_core.messages import HumanMessage, AIMessageChunk 12 | from langgraph.graph.graph import CompiledGraph 13 | from starlette.responses import StreamingResponse 14 | 15 | from mcp_client.base import ( 16 | load_server_config, 17 | create_server_parameters, 18 | convert_mcp_to_langchain_tools, 19 | create_agent_executor, 20 | is_json 21 | ) 22 | 23 | # Constants 24 | HTTP_500_ERROR_MESSAGE = "Error querying response" 25 | 26 | app = FastAPI() 27 | 28 | 29 | @app.get("/") 30 | def root(): 31 | """Root endpoint.""" 32 | return {"message": "Welcome to the MCP REST API"} 33 | 34 | 35 | @app.get("/tools") 36 | async def list_tools() -> List[str]: 37 | """List available tools from the server.""" 38 | try: 39 | server_config = load_server_config() 40 | server_params = create_server_parameters(server_config) 41 | langchain_tools = await convert_mcp_to_langchain_tools(server_params) 42 | return [tool.name for tool in langchain_tools] 43 | except Exception as e: 44 | error_trace = traceback.format_exc() 45 | print(error_trace) 46 | raise HTTPException(status_code=500, detail=f"Error fetching tools: {str(e)}") 47 | 48 | 49 | @app.post("/chat") 50 | async def handle_chat(input_message: Dict[str, Any] = Body(...)): 51 | """Handle chat messages.""" 52 | try: 53 | agent_executor_rest = await create_agent_executor("rest") 54 | user_message = input_message.get("message", "") 55 | streaming = input_message.get("streaming", False) # Check if streaming is enabled 56 | if not user_message: 57 | raise HTTPException(status_code=400, detail="Message content is required") 58 | 59 | input_messages = { 60 | "messages": [HumanMessage(content=user_message)], 61 | "is_last_step": True, 62 | "today_datetime": datetime.now().isoformat(), 63 | 64 | } 65 | if streaming is False: 66 | response = await query_response_without_streaming(input_messages, agent_executor_rest) 67 | return _process_json_response(response) 68 | else: 69 | async def event_stream(): 70 | async for message_chunk in query_response_with_streaming(input_messages, agent_executor_rest): 71 | yield message_chunk # Stream the message chunk 72 | 73 | return StreamingResponse(event_stream(), media_type="text/plain", 74 | headers={"Transfer-Encoding": "chunked"}) 75 | except Exception as e: 76 | error_trace = traceback.format_exc() 77 | print(error_trace) 78 | raise HTTPException(status_code=500, detail=f"Error processing chat: {str(e)}") 79 | 80 | def remove_json_wrappers(input_string): 81 | # Check if the string starts with ```json and ends with ``` 82 | if input_string.startswith("```json") and input_string.endswith("```"): 83 | return input_string[7:-3].strip() # Remove the ```json and ``` and strip leading/trailing spaces 84 | return input_string # Return as-is if no ```json wrapper is found 85 | 86 | # Helper function to process JSON responses 87 | def _process_json_response(response_content: str) -> Any: 88 | response_content = remove_json_wrappers(response_content) 89 | return json.loads(response_content) if is_json(response_content) else response_content 90 | 91 | 92 | # Helper function to handle single response 93 | def _handle_single_response(output: str) -> Dict[str, Any]: 94 | return {"responses": _process_json_response(output)} 95 | 96 | 97 | async def query_response_with_streaming(input_messages: Dict[str, Any], agent_executor: CompiledGraph): 98 | 
"""Query the assistant for a response and stream the response.""" 99 | try: 100 | async for chunk in agent_executor.astream( 101 | input_messages, 102 | stream_mode=["messages", "values"] 103 | ): 104 | # Process the chunk and append the response to the collected response 105 | 106 | content = process_message_chunk(chunk) 107 | 108 | if content: 109 | # Stream the content directly 110 | if isinstance(content, list): # Handle multiple messages 111 | for item in content: 112 | message_chunk = _process_message_chunk(item) 113 | # print(message_chunk) 114 | yield message_chunk # Stream the message chunk 115 | else: # Handle single message 116 | message_chunk = _process_message_chunk(content) 117 | yield message_chunk # Stream the message chunk 118 | except Exception as e: 119 | error_trace = traceback.format_exc() 120 | print(error_trace) 121 | print(f"Error processing messages: {e}") 122 | yield "" 123 | 124 | async def query_response_without_streaming(input_messages: Dict[str, Any], agent_executor: CompiledGraph): 125 | """Query the assistant for a response and send a single response.""" 126 | try: 127 | # Collect all chunks into a list 128 | collected_responses = [] 129 | 130 | async for chunk in agent_executor.astream( 131 | input_messages, 132 | stream_mode=["messages", "values"] 133 | ): 134 | # Process the chunk and append the response to the collected response 135 | content = process_message_chunk(chunk) 136 | 137 | if content: 138 | if isinstance(content, list): # Handle multiple messages 139 | for item in content: 140 | message_chunk = _process_message_chunk(item) 141 | collected_responses.append(message_chunk.replace("\n", "")) 142 | else: # Handle single message 143 | message_chunk = _process_message_chunk(content) 144 | collected_responses.append(message_chunk.replace("\n", "")) 145 | 146 | # Join all collected responses and return as a single response 147 | return "".join(collected_responses) 148 | 149 | except Exception as e: 150 | error_trace = traceback.format_exc() 151 | print(error_trace) 152 | print(f"Error processing messages: {e}") 153 | return "" 154 | 155 | 156 | def process_message_chunk(message_chunk) -> str: 157 | """Process the message chunk and print the content.""" 158 | if isinstance(message_chunk, tuple) and message_chunk[0] == "messages": 159 | chunk = message_chunk[1][0] 160 | if isinstance(chunk, AIMessageChunk): 161 | return chunk.content # Get the content of the message chunk 162 | return "" 163 | 164 | 165 | def _process_message_chunk(content) -> str: 166 | """Process the message chunk and print the content""" 167 | if 'text' in content: # Check if the content is a message 168 | return content['text'] 169 | elif isinstance(content, str): # Check if the content is a string 170 | return content 171 | return "" 172 | -------------------------------------------------------------------------------- /mcp_client/base.py: -------------------------------------------------------------------------------- 1 | """ 2 | This module contains the base functions and classes for the MCP client. 
3 | """ 4 | 5 | import json 6 | import os 7 | from typing import List, Type, TypedDict, Annotated 8 | 9 | from langchain.tools.base import BaseTool, ToolException 10 | from langchain_core.messages import BaseMessage 11 | from langchain_core.prompts import ChatPromptTemplate 12 | from langgraph.graph.graph import CompiledGraph 13 | from langgraph.prebuilt import create_react_agent 14 | from langchain.chat_models import init_chat_model 15 | from mcp import ClientSession, StdioServerParameters, types 16 | from mcp.client.stdio import stdio_client 17 | from pydantic import BaseModel 18 | from jsonschema_pydantic import jsonschema_to_pydantic 19 | from langgraph.graph import add_messages 20 | from langgraph.managed import IsLastStep 21 | 22 | CONFIG_FILE = 'mcp-server-config.json' 23 | 24 | 25 | class AgentState(TypedDict): 26 | """Defines the state of the agent in terms of messages and other properties.""" 27 | messages: Annotated[list[BaseMessage], add_messages] 28 | is_last_step: IsLastStep 29 | today_datetime: str 30 | remaining_steps: int 31 | 32 | 33 | def create_mcp_tool( 34 | tool_schema: types.Tool, 35 | server_params: StdioServerParameters 36 | ) -> BaseTool: 37 | """Create a LangChain tool from MCP tool schema. 38 | 39 | This function generates a new LangChain tool based on the provided MCP tool schema 40 | and server parameters. The tool's behavior is defined within the McpTool inner class. 41 | 42 | :param tool_schema: The schema of the tool to be created. 43 | :param server_params: The server parameters needed by the tool for operation. 44 | :return: An instance of a newly created mcp tool. 45 | """ 46 | 47 | # Convert the input schema to a Pydantic model for validation 48 | input_model = jsonschema_to_pydantic(tool_schema.inputSchema) 49 | 50 | class McpTool(BaseTool): 51 | """McpTool class represents a tool that can execute operations asynchronously.""" 52 | 53 | # Tool attributes from the schema 54 | name: str = tool_schema.name 55 | description: str = tool_schema.description 56 | args_schema: Type[BaseModel] = input_model 57 | mcp_server_params: StdioServerParameters = server_params 58 | 59 | def _run(self, **kwargs): 60 | """Synchronous execution is not supported.""" 61 | raise NotImplementedError("Only async operations are supported") 62 | 63 | async def _arun(self, **kwargs): 64 | """Run the tool asynchronously with provided arguments.""" 65 | async with stdio_client(self.mcp_server_params) as (read, write): 66 | async with ClientSession(read, write) as session: 67 | await session.initialize() # Initialize the session 68 | result = await session.call_tool(self.name, arguments=kwargs) 69 | if result.isError: 70 | # Raise an exception if there is an error in the tool call 71 | raise ToolException(result.content) 72 | return result.content # Return the result if no error 73 | 74 | return McpTool() 75 | 76 | 77 | async def convert_mcp_to_langchain_tools(server_params: List[StdioServerParameters]) -> List[BaseTool]: 78 | """Convert MCP tools to LangChain tools.""" 79 | langchain_tools = [] 80 | # Retrieve tools from each server and add to the list 81 | for server_param in server_params: 82 | tools = await get_mcp_tools(server_param) 83 | langchain_tools.extend(tools) 84 | 85 | return langchain_tools 86 | 87 | 88 | async def get_mcp_tools(server_param: StdioServerParameters) -> List[BaseTool]: 89 | """Asynchronously retrieves and converts tools from a server using specified parameters""" 90 | mcp_tools = [] 91 | 92 | async with stdio_client(server_param) as (read, write): 93 | 
94 |             await session.initialize()  # Initialize the session
95 |             tools: types.ListToolsResult = await session.list_tools()  # Retrieve tools from the server
96 |             # Convert each tool to LangChain format and add to list
97 |             for tool in tools.tools:
98 |                 mcp_tools.append(create_mcp_tool(tool, server_param))
99 | 
100 |     return mcp_tools
101 | 
102 | 
103 | def is_json(string):
104 |     """Check if a string is a valid JSON."""
105 |     try:
106 |         json.loads(string)
107 |         return True
108 |     except ValueError:
109 |         return False
110 | 
111 | 
112 | def load_server_config() -> dict:
113 |     """Load server configuration from available config files."""
114 |     # Load server configuration from the config file
115 |     if os.path.exists(CONFIG_FILE):
116 |         with open(CONFIG_FILE, 'r') as f:
117 |             return json.load(f)  # Load server configuration
118 |     raise FileNotFoundError(f"Could not find config file {CONFIG_FILE}")
119 | 
120 | 
121 | def create_server_parameters(server_config: dict) -> List[StdioServerParameters]:
122 |     """Create server parameters from the server configuration."""
123 |     server_parameters = []
124 |     # Create server parameters for each server configuration
125 |     for config in server_config["mcpServers"].values():
126 |         server_parameter = StdioServerParameters(
127 |             command=config["command"],
128 |             args=config.get("args", []),
129 |             env={**config.get("env", {}), "PATH": os.getenv("PATH")}
130 |         )
131 |         # Add environment variables from the system if not provided
132 |         for key, value in server_parameter.env.items():
133 |             if len(value) == 0 and key in os.environ:
134 |                 server_parameter.env[key] = os.getenv(key)
135 |         server_parameters.append(server_parameter)
136 |     return server_parameters
137 | 
138 | 
139 | def initialize_model(llm_config: dict):
140 |     """Initialize the language model using the provided configuration."""
141 |     api_key = llm_config.get("api_key")
142 |     # Initialize the language model with the provided configuration
143 |     init_args = {
144 |         "model": llm_config.get("model", "gpt-4o-mini"),
145 |         "model_provider": llm_config.get("provider", "openai"),
146 |         "temperature": llm_config.get("temperature", 0),
147 |         "streaming": True,
148 |     }
149 |     # Add API key if provided
150 |     if api_key:
151 |         init_args["api_key"] = api_key
152 |     return init_chat_model(**init_args)
153 | 
154 | 
155 | def create_chat_prompt(client: str, server_config: dict) -> ChatPromptTemplate:
156 |     """Create chat prompt template from server configuration."""
157 |     system_prompt = server_config.get("systemPrompt", "")
158 |     if client == "rest":
159 |         system_prompt = system_prompt + "\nGive the output in the json format only. Provide the output without any code block wrappers (e.g., ```json or similar) or any extra formatting. Include the plain text output only."
160 |     return ChatPromptTemplate.from_messages([
161 |         ("system", system_prompt),
162 |         ("user", "{messages}"),
163 |         ("placeholder", "{agent_scratchpad}"),
164 |     ])
165 | 
166 | 
167 | async def create_agent_executor(client: str) -> CompiledGraph:
168 |     """Create an agent executor for the specified client."""
169 |     server_config = load_server_config()  # Load server configuration
170 |     server_params = create_server_parameters(server_config)  # Create server parameters
171 |     langchain_tools = await convert_mcp_to_langchain_tools(server_params)  # Convert MCP tools to LangChain tools
172 | 
173 |     model = initialize_model(server_config.get("llm", {}))  # Initialize the language model
174 |     prompt = create_chat_prompt(client, server_config)  # Create chat prompt template
175 | 
176 |     agent_executor = create_react_agent(
177 |         model,
178 |         langchain_tools,
179 |         state_schema=AgentState,
180 |         state_modifier=prompt,
181 |     )
182 | 
183 |     return agent_executor
184 | 
--------------------------------------------------------------------------------
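
A minimal usage sketch (not a file in this repository) showing how the helpers in `mcp_client/base.py` can be driven from your own script, assuming `mcp-server-config.json` and the required API keys are in place:

```python
# Hypothetical example script; it reuses the repository's own public helpers.
import asyncio
from datetime import datetime

from langchain_core.messages import HumanMessage

from mcp_client.base import create_agent_executor


async def main() -> None:
    # Build the LangGraph ReAct agent with tools from all configured MCP servers.
    agent = await create_agent_executor("cli")
    result = await agent.ainvoke({
        "messages": [HumanMessage(content="List all products in the database")],
        "today_datetime": datetime.now().isoformat(),
    })
    # The last message in the returned state is the assistant's final reply.
    print(result["messages"][-1].content)


if __name__ == "__main__":
    asyncio.run(main())
```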