├── README.md ├── jokes └── joke_server.py ├── mcp-client ├── client.py ├── client2.py └── config.json ├── pyproject.toml └── weather-server-python ├── README.md └── src └── weather ├── __init__.py └── server.py /README.md: -------------------------------------------------------------------------------- 1 | 2 | ## Sample showing how to integrate MCP (Model Context Protocol) servers with Pydantic.AI 3 | 4 | 5 | Parts of this example use content from : https://github.com/modelcontextprotocol/quickstart-resources.git - Esp. the weather 'server' code 6 | 7 | Code uses two different LLMs just for demonstration. The Proxy Agent uses gpt-4o and the tool uses sonnet. 8 | So, export OPENAI_API_KEY as well as ANTHROPIC_API_KEY - OR - modify the code to suit your models 9 | 10 | The pyproject.toml assumes you are using the 'uv' package manager 11 | 12 | ### Steps to run 13 | 1. Clone this repo 14 | 2. uv sync 15 | 3. cd mcp-client 16 | 4. uv run client.py (this requires openai and anthropic keys and uses anthropic libs directly) 17 | 5. uv run client2.py (for pure pydantic and works with any fn calling LLM) 18 | 19 | (Alternatively try client2.py - this uses only PydanticAI - no direct dep on Anthropic libs) 20 | 21 | Now, try interacting with some questions like: 22 | 23 | > What is the time in NY when it is 7:30pm in Bangalore? 24 | 25 | > What is the Weather currently in Chicago? 
26 | 27 | (and quit when done) 28 | 29 | -------------------------------------------------------------------------------- /jokes/joke_server.py: -------------------------------------------------------------------------------- 1 | import requests 2 | import random 3 | from mcp.server import Server 4 | import mcp.types as types 5 | 6 | server = Server("jokes") 7 | 8 | def get_dad_joke(): 9 | url = "https://icanhazdadjoke.com/" 10 | headers = {"Accept": "application/json"} 11 | response = requests.get(url, headers=headers) 12 | if response.status_code == 200: 13 | return response.json().get("joke", "No joke found.") 14 | return "Failed to fetch a dad joke." 15 | 16 | def get_general_joke(): 17 | url = "https://official-joke-api.appspot.com/jokes/random" 18 | response = requests.get(url) 19 | if response.status_code == 200: 20 | joke_data = response.json() 21 | return f"{joke_data.get('setup', 'No setup')} - {joke_data.get('punchline', 'No punchline')}" 22 | return "Failed to fetch a general joke." 
23 | 24 | @server.list_tools() 25 | async def list_joke_tools() -> list[types.Tool]: 26 | """List available joke tools.""" 27 | return [ 28 | types.Tool( 29 | name="dad-jokes", 30 | description="Get a random dad joke", 31 | inputSchema={"type": "object", "properties": {}}, 32 | ), 33 | types.Tool( 34 | name="jokes", 35 | description="Get a random general joke", 36 | inputSchema={"type": "object", "properties": {}}, 37 | ), 38 | ] 39 | 40 | @server.call_tool() 41 | async def call_joke_tool(name: str, arguments: dict | None) -> list[types.TextContent]: 42 | """Call the appropriate joke function based on the tool name.""" 43 | if name == "dad-jokes": 44 | joke = get_dad_joke() 45 | return [types.TextContent(type="text", text=joke)] 46 | elif name == "jokes": 47 | joke = get_general_joke() 48 | return [types.TextContent(type="text", text=joke)] 49 | else: 50 | raise ValueError(f"Unknown tool: {name}") 51 | 52 | if __name__ == "__main__": 53 | import asyncio 54 | from mcp.server.stdio import stdio_server 55 | from mcp.server import NotificationOptions, InitializationOptions 56 | 57 | async def main(): 58 | async with stdio_server() as (read_stream, write_stream): 59 | await server.run( 60 | read_stream, 61 | write_stream, 62 | InitializationOptions( 63 | server_name="jokes", 64 | server_version="0.1.0", 65 | capabilities=server.get_capabilities( 66 | notification_options=NotificationOptions(), 67 | experimental_capabilities={}, 68 | ), 69 | ), 70 | ) 71 | 72 | asyncio.run(main()) 73 | -------------------------------------------------------------------------------- /mcp-client/client.py: -------------------------------------------------------------------------------- 1 | import asyncio 2 | import json 3 | from typing import Optional, Dict, List, Union, Any 4 | from contextlib import AsyncExitStack 5 | from colorama import init, Fore, Style 6 | 7 | from mcp import ClientSession, StdioServerParameters 8 | init(autoreset=True) # Initialize colorama with autoreset=True 9 | 
from pydantic import BaseModel 10 | from pydantic_ai import Agent, RunContext 11 | from pydantic_ai.tools import Tool, ToolDefinition 12 | from mcp.client.stdio import stdio_client 13 | 14 | from anthropic import Anthropic 15 | from dotenv import load_dotenv 16 | 17 | load_dotenv() # load environment variables from .env 18 | 19 | class MCPClient: 20 | def __init__(self): 21 | # Initialize sessions and agents dictionaries 22 | self.sessions: Dict[str, ClientSession] = {} # Dictionary to store {server_name: session} 23 | self.agents: Dict[str, Agent] = {} # Dictionary to store {server_name: agent} 24 | self.exit_stack = AsyncExitStack() 25 | self.anthropic = Anthropic() 26 | self.available_tools = [] # List to store all available tools across servers 27 | self.dynamic_tools: List[Tool] = [] # List to store dynamic pydantic tools 28 | 29 | async def connect_to_server(self): 30 | """Connect to an MCP server using config.json settings""" 31 | print("\nLoading config.json...") 32 | with open('config.json') as f: 33 | config = json.load(f) 34 | 35 | print("\nAvailable servers in config:", list(config['mcpServers'].keys())) 36 | print("\nFull config content:", json.dumps(config, indent=2)) 37 | 38 | # Connect to all servers in config 39 | for server_name, server_config in config['mcpServers'].items(): 40 | print(f"\nAttempting to load {server_name} server config...") 41 | print("Server config found:", json.dumps(server_config, indent=2)) 42 | 43 | server_params = StdioServerParameters( 44 | command=server_config['command'], 45 | args=server_config['args'], 46 | env=None 47 | ) 48 | print("\nCreated server parameters:", server_params) 49 | 50 | stdio_transport = await self.exit_stack.enter_async_context(stdio_client(server_params)) 51 | stdio, write = stdio_transport 52 | session = await self.exit_stack.enter_async_context(ClientSession(stdio, write)) 53 | 54 | await session.initialize() 55 | 56 | # Store session with server name as key 57 | self.sessions[server_name] = 
session 58 | 59 | # Create and store an Agent for this server 60 | server_agent: Agent = Agent( 61 | 'openai:gpt-4', 62 | system_prompt=( 63 | f"You are an AI assistant that helps interact with the {server_name} server. " 64 | "You will use the available tools to process requests and provide responses." 65 | ) 66 | ) 67 | self.agents[server_name] = server_agent 68 | 69 | # List available tools for this server 70 | response = await session.list_tools() 71 | server_tools = [{ 72 | "name": f"{server_name}__{tool.name}", 73 | "description": tool.description, 74 | "input_schema": tool.inputSchema 75 | } for tool in response.tools] 76 | 77 | # Add server's tools to overall available tools 78 | self.available_tools.extend(server_tools) 79 | 80 | # Create corresponding dynamic pydantic tools 81 | for tool in response.tools: 82 | async def prepare_tool( 83 | ctx: RunContext[str], 84 | tool_def: ToolDefinition, 85 | tool_name: str = tool.name, 86 | server: str = server_name 87 | ) -> Union[ToolDefinition, None]: 88 | # Customize tool definition based on server context 89 | tool_def.name = f"{server}__{tool_name}" 90 | tool_def.description = f"Tool from {server} server: {tool.description}" 91 | return tool_def 92 | 93 | # Create a function that matches the tool's schema and uses server_agent 94 | async def tool_func(ctx: RunContext[Any], str_arg) -> str: 95 | agent_response = await server_agent.run_sync(str_arg) 96 | print(f"\nServer agent response: {agent_response}") 97 | return f"Tool {tool.name} called with {str_arg}. 
Agent response: {agent_response}" 98 | 99 | dynamic_tool = Tool( 100 | tool_func, 101 | prepare=prepare_tool, 102 | name=f"{server_name}__{tool.name}", 103 | description=tool.description 104 | ) 105 | self.dynamic_tools.append(dynamic_tool) 106 | print(f"\nAdded dynamic tool: {dynamic_tool.name}") 107 | print(f"Description: {dynamic_tool.description}") 108 | print(f"Function: {dynamic_tool.function}") 109 | print(f"Prepare function: {dynamic_tool.prepare}") 110 | 111 | print(f"\nConnected to server {server_name} with tools:", 112 | [tool["name"] for tool in server_tools]) 113 | 114 | async def process_query(self, query: str) -> str: 115 | """Process a query using Claude and available tools""" 116 | messages = [ 117 | { 118 | "role": "user", 119 | "content": query 120 | } 121 | ] 122 | 123 | # Initial Claude API call with all available tools 124 | response = self.anthropic.messages.create( 125 | model="claude-3-5-sonnet-20241022", 126 | max_tokens=1000, 127 | messages=messages, 128 | tools=self.available_tools 129 | ) 130 | 131 | # Process response and handle tool calls 132 | tool_results = [] 133 | final_text = [] 134 | 135 | for content in response.content: 136 | if content.type == 'text': 137 | final_text.append(content.text) 138 | elif content.type == 'tool_use': 139 | # Parse server name and tool name from the full tool name 140 | full_tool_name = content.name 141 | server_name, tool_name = full_tool_name.split('__', 1) 142 | tool_args = content.input 143 | 144 | # Get the appropriate session and execute tool call 145 | if server_name not in self.sessions: 146 | raise ValueError(f"Unknown server: {server_name}") 147 | 148 | result = await self.sessions[server_name].call_tool(tool_name, tool_args) 149 | tool_results.append({"call": tool_name, "result": result}) 150 | final_text.append(f"[Calling tool {tool_name} with args {tool_args}]") 151 | 152 | # Continue conversation with tool results 153 | if hasattr(content, 'text') and content.text: 154 | 
messages.append({ 155 | "role": "assistant", 156 | "content": content.text 157 | }) 158 | messages.append({ 159 | "role": "user", 160 | "content": result.content 161 | }) 162 | 163 | # Get next response from Claude 164 | response = self.anthropic.messages.create( 165 | model="claude-3-5-sonnet-20241022", 166 | max_tokens=1000, 167 | messages=messages, 168 | ) 169 | 170 | final_text.append(f"{Style.BRIGHT}{Fore.CYAN}{response.content[0].text}") 171 | 172 | return "\n".join(final_text) 173 | 174 | async def chat_loop(self): 175 | """Run an interactive chat loop""" 176 | print(f"{Fore.WHITE}\nMCP Client Started!") 177 | print(f"{Fore.WHITE}Type your queries or 'quit' to exit.") 178 | 179 | while True: 180 | try: 181 | query = input(f"\n{Fore.RED}Query: {Fore.LIGHTGREEN_EX}").strip() 182 | 183 | if query.lower() in ['quit', 'exit', 'bye', 'goodbye']: 184 | print("\nGoodbye!") 185 | break 186 | 187 | response = await self.process_query(query) 188 | print(f"\n{Fore.YELLOW}{response}") 189 | 190 | except Exception as e: 191 | print(f"\n{Fore.RED}Error: {str(e)}") 192 | 193 | async def cleanup(self): 194 | """Clean up resources""" 195 | await self.exit_stack.aclose() 196 | 197 | async def main(): 198 | client = MCPClient() 199 | try: 200 | await client.connect_to_server() 201 | await client.chat_loop() 202 | finally: 203 | await client.cleanup() 204 | 205 | if __name__ == "__main__": 206 | import sys 207 | asyncio.run(main()) 208 | -------------------------------------------------------------------------------- /mcp-client/client2.py: -------------------------------------------------------------------------------- 1 | import asyncio 2 | import json 3 | from contextlib import AsyncExitStack 4 | from typing import Any, Dict, List, Optional, Union 5 | 6 | import nest_asyncio 7 | from colorama import Fore, Style, init 8 | from mcp import ClientSession, StdioServerParameters 9 | 10 | init(autoreset=True) # Initialize colorama with autoreset=True 11 | 12 | from dotenv import 
load_dotenv 13 | from mcp.client.stdio import stdio_client 14 | from pydantic import BaseModel 15 | from pydantic_ai import Agent, RunContext 16 | from pydantic_ai.tools import Tool, ToolDefinition 17 | 18 | load_dotenv() 19 | 20 | 21 | # The following function is required to enable capture of the local variables within the 22 | # server config parsing loop 23 | def create_tool_fn(server_agent: Agent, tool_name: str, sessions: Dict[str, ClientSession], server_name: str ): 24 | 25 | async def tool_func(ctx: RunContext[Any], **kwargs) -> str: 26 | print("!"*100) 27 | print("#"*100) 28 | print(kwargs) 29 | print(f"Tool name: {tool_name}") 30 | print("!"*100) 31 | 32 | # agent_response = server_agent.run_sync(str_arg) 33 | agent_response = await sessions[server_name].call_tool(tool_name, kwargs) 34 | print(f"\nServer agent response: {agent_response}") 35 | return f"Tool {tool_name} called with {kwargs}. Agent response: {agent_response}" 36 | 37 | return tool_func 38 | 39 | class MCPClient: 40 | def __init__(self): 41 | # Initialize sessions and agents dictionaries 42 | self.sessions: Dict[str, ClientSession] = {} # Dictionary to store {server_name: session} 43 | self.exit_stack = AsyncExitStack() 44 | self.available_tools = [] # List to store all available tools across servers 45 | self.dynamic_tools: List[Tool] = [] # List to store dynamic pydantic tools 46 | 47 | self.server_names = [] 48 | self.super_agent = None 49 | 50 | 51 | async def connect_to_server(self): 52 | """Connect to an MCP server using config.json settings""" 53 | print("\nLoading config.json...") 54 | with open('config.json') as f: 55 | config = json.load(f) 56 | 57 | print("\nAvailable servers in config:", list(config['mcpServers'].keys())) 58 | print("\nFull config content:", json.dumps(config, indent=2)) 59 | 60 | # Connect to all servers in config 61 | print("Adding Server names") 62 | self.server_names.extend(list(config["mcpServers"].keys())) 63 | print("Looping over servers") 64 | for 
server_name, server_config in config['mcpServers'].items(): 65 | print(f"\nAttempting to load {server_name} server config...") 66 | print("Server config found:", json.dumps(server_config, indent=2)) 67 | 68 | server_params = StdioServerParameters( 69 | command=server_config['command'], 70 | args=server_config['args'], 71 | env=None 72 | ) 73 | print("\nCreated server parameters:", server_params) 74 | 75 | stdio_transport = await self.exit_stack.enter_async_context(stdio_client(server_params)) 76 | stdio, write = stdio_transport 77 | session = await self.exit_stack.enter_async_context(ClientSession(stdio, write)) 78 | 79 | await session.initialize() 80 | 81 | # Store session with server name as key 82 | self.sessions[server_name] = session 83 | 84 | # Create and store an Agent for this server 85 | # This agent wrapper ensure the schema translation happens when the tool is called... 86 | server_agent: Agent = Agent( 87 | 'openai:gpt-4', 88 | system_prompt=( 89 | f"You are an AI assistant that helps interact with the {server_name} server. " 90 | "You will use the available tools to process requests and provide responses." 91 | ) 92 | ) 93 | 94 | # List available tools for this server 95 | response = await session.list_tools() 96 | 97 | server_tools = [{ 98 | "name": f"{server_name}__{tool.name}", 99 | "description": tool.description, 100 | "input_schema": tool.inputSchema 101 | } for tool in response.tools] 102 | 103 | print(f"\nAvailable tools for {server_tools}") 104 | 105 | # Add server's tools to overall available tools 106 | self.available_tools.extend(server_tools) 107 | 108 | server_agent: Agent = Agent( 109 | 'openai:gpt-4', 110 | system_prompt=( 111 | f"You are an AI assistant that helps interact with the {server_name} server. " 112 | "You will use the available tools to process requests and provide responses." 
113 | ) 114 | ) 115 | 116 | # Create corresponding dynamic pydantic tools 117 | for tool in response.tools: 118 | 119 | # Create a function that matches the tool's schema and uses server_agent 120 | tool_func = create_tool_fn(server_agent=server_agent, tool_name=tool.name, sessions=self.sessions, server_name=server_name) 121 | 122 | dynamic_tool = Tool( 123 | tool_func, 124 | name=f"{server_name}__{tool.name}", 125 | description=tool.description, 126 | max_retries=3, 127 | ) 128 | dynamic_tool._parameters_json_schema = tool.inputSchema 129 | self.dynamic_tools.append(dynamic_tool) 130 | print(f"\nAdded dynamic tool: {dynamic_tool.name}") 131 | print(f"Description: {dynamic_tool.description}") 132 | print(f"Function: {dynamic_tool.function}") 133 | print(f"Prepare function: {dynamic_tool.prepare}") 134 | 135 | self.super_agent: Agent = Agent( 136 | 'openai:gpt-4o', 137 | tools=self.dynamic_tools, 138 | retries=3, 139 | ) 140 | @self.super_agent.system_prompt 141 | def super_agent_system_prompt(): 142 | print("Super agent system prompt called") 143 | print("Server names", self.server_names) 144 | return ( 145 | f"You are an AI assistant that helps interact with the {', '.join(self.server_names)} servers. " 146 | "You will use the available tools to process requests and provide responses." 
147 | ) 148 | # print(self.super_agent._function_tools) 149 | print(f"\nConnected to server {server_name} with tools:", 150 | [tool["name"] for tool in server_tools]) 151 | 152 | async def process_query(self, query: str) -> str: 153 | """Process a query using Claude and available tools""" 154 | 155 | print("-"*100) 156 | # print(self.super_agent) 157 | # print("-"*100) 158 | # exit(1) 159 | result = self.super_agent.run_sync(query) 160 | for message in result._all_messages: 161 | print(f"{Fore.GREEN}{message}") 162 | print("="*100) 163 | 164 | return result.data 165 | 166 | async def chat_loop(self): 167 | """Run an interactive chat loop""" 168 | print(f"{Fore.WHITE}\nMCP Client Started!") 169 | print(f"{Fore.WHITE}Type your queries or 'quit' to exit.") 170 | 171 | while True: 172 | try: 173 | query = input(f"\n{Fore.RED}Query: {Fore.LIGHTGREEN_EX}").strip() 174 | 175 | if query.lower() in ['quit', 'exit', 'bye', 'goodbye']: 176 | print("\nGoodbye!") 177 | break 178 | 179 | response = await self.process_query(query) 180 | print(f"\n{Fore.YELLOW}{response}") 181 | 182 | except Exception as e: 183 | import traceback 184 | traceback.print_exc() 185 | print(f"\n{Fore.RED}Error: {str(e)}") 186 | 187 | async def cleanup(self): 188 | """Clean up resources""" 189 | await self.exit_stack.aclose() 190 | 191 | async def main(): 192 | client = MCPClient() 193 | try: 194 | await client.connect_to_server() 195 | await client.chat_loop() 196 | finally: 197 | await client.cleanup() 198 | 199 | if __name__ == "__main__": 200 | import sys 201 | nest_asyncio.apply() 202 | asyncio.run(main()) 203 | -------------------------------------------------------------------------------- /mcp-client/config.json: -------------------------------------------------------------------------------- 1 | { 2 | "mcpServers": { 3 | "timezone": { 4 | "description": "MCP server for timezone can convert between timezones", 5 | "command": "python", 6 | "args": ["-m", "mcp_server_time", 
"--local-timezone=America/New_York"] 7 | }, 8 | "joker": { 9 | "description": "Fetches jokes - general and the Dad joke variation", 10 | "command": "python", 11 | "args": ["../jokes/joke_server.py"] 12 | }, 13 | "weather": { 14 | "description": "Fetches weather data from a weather server", 15 | "command": "python", 16 | "args": ["../weather-server-python/src/weather/server.py"] 17 | } 18 | } 19 | } 20 | -------------------------------------------------------------------------------- /pyproject.toml: -------------------------------------------------------------------------------- 1 | [project] 2 | name = "mcp-proxy-pydantic-agent" 3 | version = "0.1.0" 4 | description = "Sample showing how to integrate MCP (Model Context Protocol) servers with Pydantic.AI" 5 | readme = "README.md" 6 | requires-python = ">=3.11" 7 | dependencies = [ 8 | "anthropic>=0.42.0", 9 | "colorama>=0.4.6", 10 | "httpx>=0.28.1", 11 | "langgraph>=0.3.2", 12 | "mcp>=1.1.1", 13 | "mcp-server-time>=0.6.2", 14 | "nest-asyncio>=1.6.0", 15 | "pydantic-ai>=0.0.12", 16 | "python-dotenv>=1.0.1", 17 | ] 18 | -------------------------------------------------------------------------------- /weather-server-python/README.md: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/p2c2e/mcp_proxy_pydantic_agent/42f0f4d30d16d0a7234db9ce8af56eb2b610be5b/weather-server-python/README.md -------------------------------------------------------------------------------- /weather-server-python/src/weather/__init__.py: -------------------------------------------------------------------------------- 1 | from . 
import server 2 | import asyncio 3 | 4 | def main(): 5 | """Main entry point for the package.""" 6 | asyncio.run(server.main()) 7 | 8 | # Optionally expose other important items at package level 9 | __all__ = ['main', 'server'] 10 | -------------------------------------------------------------------------------- /weather-server-python/src/weather/server.py: -------------------------------------------------------------------------------- 1 | from typing import Any 2 | 3 | import httpx 4 | from mcp.server.models import InitializationOptions 5 | import mcp.types as types 6 | from mcp.server import NotificationOptions, Server 7 | import mcp.server.stdio 8 | import asyncio 9 | NWS_API_BASE = "https://api.weather.gov" 10 | USER_AGENT = "weather-app/1.0" 11 | 12 | server = Server("weather") 13 | @server.list_tools() 14 | async def handle_list_tools() -> list[types.Tool]: 15 | """ 16 | List available tools. 17 | Each tool specifies its arguments using JSON Schema validation. 18 | """ 19 | return [ 20 | types.Tool( 21 | name="get-alerts", 22 | description="Get weather alerts for a state", 23 | inputSchema={ 24 | "type": "object", 25 | "properties": { 26 | "state": { 27 | "type": "string", 28 | "description": "Two-letter state code (e.g. 
CA, NY)", 29 | }, 30 | }, 31 | "required": ["state"], 32 | }, 33 | ), 34 | types.Tool( 35 | name="get-forecast", 36 | description="Get weather forecast for a location", 37 | inputSchema={ 38 | "type": "object", 39 | "properties": { 40 | "latitude": { 41 | "type": "number", 42 | "description": "Latitude of the location", 43 | }, 44 | "longitude": { 45 | "type": "number", 46 | "description": "Longitude of the location", 47 | }, 48 | }, 49 | "required": ["latitude", "longitude"], 50 | }, 51 | ), 52 | ] 53 | 54 | async def make_nws_request(client: httpx.AsyncClient, url: str) -> dict[str, Any] | None: 55 | """Make a request to the NWS API with proper error handling.""" 56 | headers = { 57 | "User-Agent": USER_AGENT, 58 | "Accept": "application/geo+json" 59 | } 60 | 61 | try: 62 | response = await client.get(url, headers=headers, timeout=30.0) 63 | response.raise_for_status() 64 | return response.json() 65 | except Exception: 66 | return None 67 | 68 | def format_alert(feature: dict) -> str: 69 | """Format an alert feature into a concise string.""" 70 | props = feature["properties"] 71 | return ( 72 | f"Event: {props.get('event', 'Unknown')}\n" 73 | f"Area: {props.get('areaDesc', 'Unknown')}\n" 74 | f"Severity: {props.get('severity', 'Unknown')}\n" 75 | f"Status: {props.get('status', 'Unknown')}\n" 76 | f"Headline: {props.get('headline', 'No headline')}\n" 77 | "---" 78 | ) 79 | @server.call_tool() 80 | async def handle_call_tool( 81 | name: str, arguments: dict | None 82 | ) -> list[types.TextContent | types.ImageContent | types.EmbeddedResource]: 83 | """ 84 | Handle tool execution requests. 85 | Tools can fetch weather data and notify clients of changes. 
86 | """ 87 | if not arguments: 88 | raise ValueError("Missing arguments") 89 | 90 | if name == "get-alerts": 91 | state = arguments.get("state") 92 | if not state: 93 | raise ValueError("Missing state parameter") 94 | 95 | # Convert state to uppercase to ensure consistent format 96 | state = state.upper() 97 | if len(state) != 2: 98 | raise ValueError("State must be a two-letter code (e.g. CA, NY)") 99 | 100 | async with httpx.AsyncClient() as client: 101 | alerts_url = f"{NWS_API_BASE}/alerts?area={state}" 102 | alerts_data = await make_nws_request(client, alerts_url) 103 | 104 | if not alerts_data: 105 | return [types.TextContent(type="text", text="Failed to retrieve alerts data")] 106 | 107 | features = alerts_data.get("features", []) 108 | if not features: 109 | return [types.TextContent(type="text", text=f"No active alerts for {state}")] 110 | 111 | # Format each alert into a concise string 112 | formatted_alerts = [format_alert(feature) for feature in features] 113 | alerts_text = f"Active alerts for {state}:\n\n" + "\n".join(formatted_alerts) 114 | 115 | return [ 116 | types.TextContent( 117 | type="text", 118 | text=alerts_text 119 | ) 120 | ] 121 | elif name == "get-forecast": 122 | try: 123 | latitude = float(arguments.get("latitude")) 124 | longitude = float(arguments.get("longitude")) 125 | except (TypeError, ValueError): 126 | return [types.TextContent( 127 | type="text", 128 | text="Invalid coordinates. Please provide valid numbers for latitude and longitude." 129 | )] 130 | 131 | # Basic coordinate validation 132 | if not (-90 <= latitude <= 90) or not (-180 <= longitude <= 180): 133 | return [types.TextContent( 134 | type="text", 135 | text="Invalid coordinates. Latitude must be between -90 and 90, longitude between -180 and 180." 
136 | )] 137 | 138 | async with httpx.AsyncClient() as client: 139 | # First get the grid point 140 | lat_str = f"{latitude}" 141 | lon_str = f"{longitude}" 142 | points_url = f"{NWS_API_BASE}/points/{lat_str},{lon_str}" 143 | points_data = await make_nws_request(client, points_url) 144 | 145 | if not points_data: 146 | return [types.TextContent(type="text", text=f"Failed to retrieve grid point data for coordinates: {latitude}, {longitude}. This location may not be supported by the NWS API (only US locations are supported).")] 147 | 148 | # Extract forecast URL from the response 149 | properties = points_data.get("properties", {}) 150 | forecast_url = properties.get("forecast") 151 | 152 | if not forecast_url: 153 | return [types.TextContent(type="text", text="Failed to get forecast URL from grid point data")] 154 | 155 | # Get the forecast 156 | forecast_data = await make_nws_request(client, forecast_url) 157 | 158 | if not forecast_data: 159 | return [types.TextContent(type="text", text="Failed to retrieve forecast data")] 160 | 161 | # Format the forecast periods 162 | periods = forecast_data.get("properties", {}).get("periods", []) 163 | if not periods: 164 | return [types.TextContent(type="text", text="No forecast periods available")] 165 | 166 | # Format each period into a concise string 167 | formatted_forecast = [] 168 | for period in periods: 169 | forecast_text = ( 170 | f"{period.get('name', 'Unknown')}:\n" 171 | f"Temperature: {period.get('temperature', 'Unknown')}°{period.get('temperatureUnit', 'F')}\n" 172 | f"Wind: {period.get('windSpeed', 'Unknown')} {period.get('windDirection', '')}\n" 173 | f"{period.get('shortForecast', 'No forecast available')}\n" 174 | "---" 175 | ) 176 | formatted_forecast.append(forecast_text) 177 | 178 | forecast_text = f"Forecast for {latitude}, {longitude}:\n\n" + "\n".join(formatted_forecast) 179 | 180 | return [types.TextContent( 181 | type="text", 182 | text=forecast_text 183 | )] 184 | else: 185 | raise 
ValueError(f"Unknown tool: {name}") 186 | async def main(): 187 | # Run the server using stdin/stdout streams 188 | async with mcp.server.stdio.stdio_server() as (read_stream, write_stream): 189 | await server.run( 190 | read_stream, 191 | write_stream, 192 | InitializationOptions( 193 | server_name="weather", 194 | server_version="0.1.0", 195 | capabilities=server.get_capabilities( 196 | notification_options=NotificationOptions(), 197 | experimental_capabilities={}, 198 | ), 199 | ), 200 | ) 201 | 202 | if __name__ == "__main__": 203 | asyncio.run(main()) --------------------------------------------------------------------------------