└── src └── mcp ├── __init__.py ├── client ├── __init__.py ├── __main__.py ├── session.py ├── sse.py └── stdio.py ├── py.typed ├── server ├── __init__.py ├── __main__.py ├── models.py ├── session.py ├── sse.py ├── stdio.py └── websocket.py ├── shared ├── __init__.py ├── context.py ├── exceptions.py ├── memory.py ├── progress.py ├── session.py └── version.py └── types.py /src/mcp/__init__.py: -------------------------------------------------------------------------------- from .client.session import ClientSession from .client.stdio import StdioServerParameters, stdio_client from .server.session import ServerSession from .server.stdio import stdio_server from .shared.exceptions import McpError from .types import ( CallToolRequest, ClientCapabilities, ClientNotification, ClientRequest, ClientResult, CompleteRequest, CreateMessageRequest, CreateMessageResult, ErrorData, GetPromptRequest, GetPromptResult, Implementation, IncludeContext, InitializedNotification, InitializeRequest, InitializeResult, JSONRPCError, JSONRPCRequest, JSONRPCResponse, ListPromptsRequest, ListPromptsResult, ListResourcesRequest, ListResourcesResult, ListToolsResult, LoggingLevel, LoggingMessageNotification, Notification, PingRequest, ProgressNotification, PromptsCapability, ReadResourceRequest, ReadResourceResult, Resource, ResourcesCapability, ResourceUpdatedNotification, RootsCapability, SamplingMessage, ServerCapabilities, ServerNotification, ServerRequest, ServerResult, SetLevelRequest, StopReason, SubscribeRequest, Tool, ToolsCapability, UnsubscribeRequest, ) from .types import ( Role as SamplingRole, ) __all__ = [ "CallToolRequest", "ClientCapabilities", "ClientNotification", "ClientRequest", "ClientResult", "ClientSession", "CreateMessageRequest", "CreateMessageResult", "ErrorData", "GetPromptRequest", "GetPromptResult", "Implementation", "IncludeContext", "InitializeRequest", "InitializeResult", "InitializedNotification", "JSONRPCError", "JSONRPCRequest", "ListPromptsRequest", 
    "ListPromptsResult",
    "ListResourcesRequest",
    "ListResourcesResult",
    "ListToolsResult",
    "LoggingLevel",
    "LoggingMessageNotification",
    "McpError",
    "Notification",
    "PingRequest",
    "ProgressNotification",
    "PromptsCapability",
    "ReadResourceRequest",
    "ReadResourceResult",
    "ResourcesCapability",
    "ResourceUpdatedNotification",
    "Resource",
    "RootsCapability",
    "SamplingMessage",
    "SamplingRole",
    "ServerCapabilities",
    "ServerNotification",
    "ServerRequest",
    "ServerResult",
    "ServerSession",
    "SetLevelRequest",
    "StdioServerParameters",
    "StopReason",
    "SubscribeRequest",
    "Tool",
    "ToolsCapability",
    "UnsubscribeRequest",
    "stdio_client",
    "stdio_server",
    # NOTE(review): the two entries below break the otherwise mostly-alphabetical
    # ordering of __all__; resorting would be cosmetic only, so the order is
    # left unchanged here.
    "CompleteRequest",
    "JSONRPCResponse",
]
--------------------------------------------------------------------------------
/src/mcp/client/__init__.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/modelcontextprotocol/python-sdk/main/src/mcp/client/__init__.py
--------------------------------------------------------------------------------
/src/mcp/client/__main__.py:
--------------------------------------------------------------------------------
import logging
import sys
from functools import partial
from urllib.parse import urlparse

import anyio
import click

from mcp.client.session import ClientSession
from mcp.client.sse import sse_client
from mcp.client.stdio import StdioServerParameters, stdio_client

# Suppress warnings unless the user explicitly enabled them (-W / PYTHONWARNINGS).
if not sys.warnoptions:
    import warnings

    warnings.simplefilter("ignore")

logging.basicConfig(level=logging.INFO)
logger = logging.getLogger("client")


async def receive_loop(session: ClientSession):
    """Log every message the server sends; exceptions arrive in-band and are skipped."""
    logger.info("Starting receive loop")
    async for message in session.incoming_messages:
        if isinstance(message, Exception):
            # Transport/parse failures are delivered as Exception objects on the
            # same stream as regular messages.
            logger.error("Error: %s", message)
            continue
        logger.info("Received message from server: %s", message)


async def run_session(read_stream, write_stream):
    # Run the receive loop concurrently with session initialization; the task
    # group keeps the loop alive for the lifetime of the session.
    async with (
        ClientSession(read_stream, write_stream) as session,
        anyio.create_task_group() as tg,
    ):
        tg.start_soon(receive_loop, session)

        logger.info("Initializing session")
        await session.initialize()
        logger.info("Initialized")


async def main(command_or_url: str, args: list[str], env: list[tuple[str, str]]):
    """Connect via SSE (http/https URL) or stdio (command) and run a client session."""
    env_dict = dict(env)

    if urlparse(command_or_url).scheme in ("http", "https"):
        # Use SSE client for HTTP(S) URLs
        async with sse_client(command_or_url) as streams:
            await run_session(*streams)
    else:
        # Use stdio client for commands
        server_parameters = StdioServerParameters(
            command=command_or_url, args=args, env=env_dict
        )
        async with stdio_client(server_parameters) as streams:
            await run_session(*streams)


@click.command()
@click.argument("command_or_url")
@click.argument("args", nargs=-1)
@click.option(
    "--env",
    "-e",
    multiple=True,
    nargs=2,
    metavar="KEY VALUE",
    help="Environment variables to set. Can be used multiple times.",
)
def cli(*args, **kwargs):
    # NOTE(review): hard-codes the trio backend, so trio must be installed —
    # confirm it is declared as a runtime dependency.
    anyio.run(partial(main, *args, **kwargs), backend="trio")


if __name__ == "__main__":
    cli()
--------------------------------------------------------------------------------
/src/mcp/client/session.py:
--------------------------------------------------------------------------------
from datetime import timedelta

from anyio.streams.memory import MemoryObjectReceiveStream, MemoryObjectSendStream
from pydantic import AnyUrl

import mcp.types as types
from mcp.shared.session import BaseSession
from mcp.shared.version import SUPPORTED_PROTOCOL_VERSIONS


class ClientSession(
    BaseSession[
        types.ClientRequest,
        types.ClientNotification,
        types.ClientResult,
        types.ServerRequest,
        types.ServerNotification,
    ]
):
    """Client side of an MCP session: sends requests/notifications to a server."""

    def __init__(
        self,
        read_stream: MemoryObjectReceiveStream[types.JSONRPCMessage | Exception],
        write_stream: MemoryObjectSendStream[types.JSONRPCMessage],
        read_timeout_seconds: timedelta | None = None,
    ) -> None:
        # Tell the base session which server-originated types to expect.
        super().__init__(
            read_stream,
            write_stream,
            types.ServerRequest,
            types.ServerNotification,
            read_timeout_seconds=read_timeout_seconds,
        )

    async def initialize(self) -> types.InitializeResult:
        # Perform the MCP initialize handshake, announcing client capabilities.
        result = await
self.send_request( types.ClientRequest( types.InitializeRequest( method="initialize", params=types.InitializeRequestParams( protocolVersion=types.LATEST_PROTOCOL_VERSION, capabilities=types.ClientCapabilities( sampling=None, experimental=None, roots=types.RootsCapability( # TODO: Should this be based on whether we # _will_ send notifications, or only whether # they're supported? listChanged=True ), ), clientInfo=types.Implementation(name="mcp", version="0.1.0"), ), ) ), types.InitializeResult, ) if result.protocolVersion not in SUPPORTED_PROTOCOL_VERSIONS: raise RuntimeError( "Unsupported protocol version from the server: " f"{result.protocolVersion}" ) await self.send_notification( types.ClientNotification( types.InitializedNotification(method="notifications/initialized") ) ) return result async def send_ping(self) -> types.EmptyResult: """Send a ping request.""" return await self.send_request( types.ClientRequest( types.PingRequest( method="ping", ) ), types.EmptyResult, ) async def send_progress_notification( self, progress_token: str | int, progress: float, total: float | None = None ) -> None: """Send a progress notification.""" await self.send_notification( types.ClientNotification( types.ProgressNotification( method="notifications/progress", params=types.ProgressNotificationParams( progressToken=progress_token, progress=progress, total=total, ), ), ) ) async def set_logging_level(self, level: types.LoggingLevel) -> types.EmptyResult: """Send a logging/setLevel request.""" return await self.send_request( types.ClientRequest( types.SetLevelRequest( method="logging/setLevel", params=types.SetLevelRequestParams(level=level), ) ), types.EmptyResult, ) async def list_resources(self) -> types.ListResourcesResult: """Send a resources/list request.""" return await self.send_request( types.ClientRequest( types.ListResourcesRequest( method="resources/list", ) ), types.ListResourcesResult, ) async def read_resource(self, uri: AnyUrl) -> types.ReadResourceResult: """Send 
a resources/read request.""" return await self.send_request( types.ClientRequest( types.ReadResourceRequest( method="resources/read", params=types.ReadResourceRequestParams(uri=uri), ) ), types.ReadResourceResult, ) async def subscribe_resource(self, uri: AnyUrl) -> types.EmptyResult: """Send a resources/subscribe request.""" return await self.send_request( types.ClientRequest( types.SubscribeRequest( method="resources/subscribe", params=types.SubscribeRequestParams(uri=uri), ) ), types.EmptyResult, ) async def unsubscribe_resource(self, uri: AnyUrl) -> types.EmptyResult: """Send a resources/unsubscribe request.""" return await self.send_request( types.ClientRequest( types.UnsubscribeRequest( method="resources/unsubscribe", params=types.UnsubscribeRequestParams(uri=uri), ) ), types.EmptyResult, ) async def call_tool( self, name: str, arguments: dict | None = None ) -> types.CallToolResult: """Send a tools/call request.""" return await self.send_request( types.ClientRequest( types.CallToolRequest( method="tools/call", params=types.CallToolRequestParams(name=name, arguments=arguments), ) ), types.CallToolResult, ) async def list_prompts(self) -> types.ListPromptsResult: """Send a prompts/list request.""" return await self.send_request( types.ClientRequest( types.ListPromptsRequest( method="prompts/list", ) ), types.ListPromptsResult, ) async def get_prompt( self, name: str, arguments: dict[str, str] | None = None ) -> types.GetPromptResult: """Send a prompts/get request.""" return await self.send_request( types.ClientRequest( types.GetPromptRequest( method="prompts/get", params=types.GetPromptRequestParams(name=name, arguments=arguments), ) ), types.GetPromptResult, ) async def complete( self, ref: types.ResourceReference | types.PromptReference, argument: dict ) -> types.CompleteResult: """Send a completion/complete request.""" return await self.send_request( types.ClientRequest( types.CompleteRequest( method="completion/complete", 
params=types.CompleteRequestParams( ref=ref, argument=types.CompletionArgument(**argument), ), ) ), types.CompleteResult, ) async def list_tools(self) -> types.ListToolsResult: """Send a tools/list request.""" return await self.send_request( types.ClientRequest( types.ListToolsRequest( method="tools/list", ) ), types.ListToolsResult, ) async def send_roots_list_changed(self) -> None: """Send a roots/list_changed notification.""" await self.send_notification( types.ClientNotification( types.RootsListChangedNotification( method="notifications/roots/list_changed", ) ) ) -------------------------------------------------------------------------------- /src/mcp/client/sse.py: -------------------------------------------------------------------------------- import logging from contextlib import asynccontextmanager from typing import Any from urllib.parse import urljoin, urlparse import anyio import httpx from anyio.abc import TaskStatus from anyio.streams.memory import MemoryObjectReceiveStream, MemoryObjectSendStream from httpx_sse import aconnect_sse import mcp.types as types logger = logging.getLogger(__name__) def remove_request_params(url: str) -> str: return urljoin(url, urlparse(url).path) @asynccontextmanager async def sse_client( url: str, headers: dict[str, Any] | None = None, timeout: float = 5, sse_read_timeout: float = 60 * 5, ): """ Client transport for SSE. `sse_read_timeout` determines how long (in seconds) the client will wait for a new event before disconnecting. All other HTTP operations are controlled by `timeout`. 
""" read_stream: MemoryObjectReceiveStream[types.JSONRPCMessage | Exception] read_stream_writer: MemoryObjectSendStream[types.JSONRPCMessage | Exception] write_stream: MemoryObjectSendStream[types.JSONRPCMessage] write_stream_reader: MemoryObjectReceiveStream[types.JSONRPCMessage] read_stream_writer, read_stream = anyio.create_memory_object_stream(0) write_stream, write_stream_reader = anyio.create_memory_object_stream(0) async with anyio.create_task_group() as tg: try: logger.info(f"Connecting to SSE endpoint: {remove_request_params(url)}") async with httpx.AsyncClient(headers=headers) as client: async with aconnect_sse( client, "GET", url, timeout=httpx.Timeout(timeout, read=sse_read_timeout), ) as event_source: event_source.response.raise_for_status() logger.debug("SSE connection established") async def sse_reader( task_status: TaskStatus[str] = anyio.TASK_STATUS_IGNORED, ): try: async for sse in event_source.aiter_sse(): logger.debug(f"Received SSE event: {sse.event}") match sse.event: case "endpoint": endpoint_url = urljoin(url, sse.data) logger.info( f"Received endpoint URL: {endpoint_url}" ) url_parsed = urlparse(url) endpoint_parsed = urlparse(endpoint_url) if ( url_parsed.netloc != endpoint_parsed.netloc or url_parsed.scheme != endpoint_parsed.scheme ): error_msg = ( "Endpoint origin does not match " f"connection origin: {endpoint_url}" ) logger.error(error_msg) raise ValueError(error_msg) task_status.started(endpoint_url) case "message": try: message = types.JSONRPCMessage.model_validate_json( # noqa: E501 sse.data ) logger.debug( f"Received server message: {message}" ) except Exception as exc: logger.error( f"Error parsing server message: {exc}" ) await read_stream_writer.send(exc) continue await read_stream_writer.send(message) except Exception as exc: logger.error(f"Error in sse_reader: {exc}") await read_stream_writer.send(exc) finally: await read_stream_writer.aclose() async def post_writer(endpoint_url: str): try: async with write_stream_reader: 
async for message in write_stream_reader: logger.debug(f"Sending client message: {message}") response = await client.post( endpoint_url, json=message.model_dump( by_alias=True, mode="json", exclude_none=True, ), ) response.raise_for_status() logger.debug( "Client message sent successfully: " f"{response.status_code}" ) except Exception as exc: logger.error(f"Error in post_writer: {exc}") finally: await write_stream.aclose() endpoint_url = await tg.start(sse_reader) logger.info( f"Starting post writer with endpoint URL: {endpoint_url}" ) tg.start_soon(post_writer, endpoint_url) try: yield read_stream, write_stream finally: tg.cancel_scope.cancel() finally: await read_stream_writer.aclose() await write_stream.aclose() -------------------------------------------------------------------------------- /src/mcp/client/stdio.py: -------------------------------------------------------------------------------- import os import sys from contextlib import asynccontextmanager import anyio import anyio.lowlevel from anyio.streams.memory import MemoryObjectReceiveStream, MemoryObjectSendStream from anyio.streams.text import TextReceiveStream from pydantic import BaseModel, Field import mcp.types as types # Environment variables to inherit by default DEFAULT_INHERITED_ENV_VARS = ( [ "APPDATA", "HOMEDRIVE", "HOMEPATH", "LOCALAPPDATA", "PATH", "PROCESSOR_ARCHITECTURE", "SYSTEMDRIVE", "SYSTEMROOT", "TEMP", "USERNAME", "USERPROFILE", ] if sys.platform == "win32" else ["HOME", "LOGNAME", "PATH", "SHELL", "TERM", "USER"] ) def get_default_environment() -> dict[str, str]: """ Returns a default environment object including only environment variables deemed safe to inherit. 
    """
    env: dict[str, str] = {}

    for key in DEFAULT_INHERITED_ENV_VARS:
        value = os.environ.get(key)
        if value is None:
            continue

        if value.startswith("()"):
            # Skip functions, which are a security risk
            continue

        env[key] = value

    return env


class StdioServerParameters(BaseModel):
    command: str
    """The executable to run to start the server."""

    args: list[str] = Field(default_factory=list)
    """Command line arguments to pass to the executable."""

    env: dict[str, str] | None = None
    """
    The environment to use when spawning the process.

    If not specified, the result of get_default_environment() will be used.
    """


@asynccontextmanager
async def stdio_client(server: StdioServerParameters):
    """
    Client transport for stdio: this will connect to a server by spawning a
    process and communicating with it over stdin/stdout.
    """
    read_stream: MemoryObjectReceiveStream[types.JSONRPCMessage | Exception]
    read_stream_writer: MemoryObjectSendStream[types.JSONRPCMessage | Exception]

    write_stream: MemoryObjectSendStream[types.JSONRPCMessage]
    write_stream_reader: MemoryObjectReceiveStream[types.JSONRPCMessage]

    # Zero-capacity streams: each send rendezvouses with a matching receive.
    read_stream_writer, read_stream = anyio.create_memory_object_stream(0)
    write_stream, write_stream_reader = anyio.create_memory_object_stream(0)

    process = await anyio.open_process(
        [server.command, *server.args],
        # Fall back to the safe, filtered default environment when none is given.
        env=server.env if server.env is not None else get_default_environment(),
        # Pass the child's stderr straight through to ours for visibility.
        stderr=sys.stderr,
    )

    async def stdout_reader():
        # Parse newline-delimited JSON-RPC messages from the child's stdout and
        # forward them (or parse errors, in-band) to the read stream.
        assert process.stdout, "Opened process is missing stdout"

        try:
            async with read_stream_writer:
                buffer = ""
                async for chunk in TextReceiveStream(process.stdout):
                    # A chunk may end mid-line; keep the trailing partial line
                    # buffered until the next chunk completes it.
                    lines = (buffer + chunk).split("\n")
                    buffer = lines.pop()

                    for line in lines:
                        try:
                            message = types.JSONRPCMessage.model_validate_json(line)
                        except Exception as exc:
                            # Deliver parse failures to the consumer instead of
                            # silently dropping the line.
                            await read_stream_writer.send(exc)
                            continue

                        await read_stream_writer.send(message)
        except anyio.ClosedResourceError:
            # Stream closed from the other side; yield to the scheduler and exit.
            await anyio.lowlevel.checkpoint()

    async def stdin_writer():
        # Serialize outgoing messages as newline-delimited JSON on the child's stdin.
        assert process.stdin, "Opened process is missing stdin"

        try:
            async with
write_stream_reader: async for message in write_stream_reader: json = message.model_dump_json(by_alias=True, exclude_none=True) await process.stdin.send((json + "\n").encode()) except anyio.ClosedResourceError: await anyio.lowlevel.checkpoint() async with ( anyio.create_task_group() as tg, process, ): tg.start_soon(stdout_reader) tg.start_soon(stdin_writer) yield read_stream, write_stream -------------------------------------------------------------------------------- /src/mcp/py.typed: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/modelcontextprotocol/python-sdk/main/src/mcp/py.typed -------------------------------------------------------------------------------- /src/mcp/server/__init__.py: -------------------------------------------------------------------------------- """ MCP Server Module This module provides a framework for creating an MCP (Model Context Protocol) server. It allows you to easily define and handle various types of requests and notifications in an asynchronous manner. Usage: 1. Create a Server instance: server = Server("your_server_name") 2. Define request handlers using decorators: @server.list_prompts() async def handle_list_prompts() -> list[types.Prompt]: # Implementation @server.get_prompt() async def handle_get_prompt( name: str, arguments: dict[str, str] | None ) -> types.GetPromptResult: # Implementation @server.list_tools() async def handle_list_tools() -> list[types.Tool]: # Implementation @server.call_tool() async def handle_call_tool( name: str, arguments: dict | None ) -> list[types.TextContent | types.ImageContent | types.EmbeddedResource]: # Implementation @server.list_resource_templates() async def handle_list_resource_templates() -> list[types.ResourceTemplate]: # Implementation 3. 
Define notification handlers if needed: @server.progress_notification() async def handle_progress( progress_token: str | int, progress: float, total: float | None ) -> None: # Implementation 4. Run the server: async def main(): async with mcp.server.stdio.stdio_server() as (read_stream, write_stream): await server.run( read_stream, write_stream, InitializationOptions( server_name="your_server_name", server_version="your_version", capabilities=server.get_capabilities( notification_options=NotificationOptions(), experimental_capabilities={}, ), ), ) asyncio.run(main()) The Server class provides methods to register handlers for various MCP requests and notifications. It automatically manages the request context and handles incoming messages from the client. """ import contextvars import logging import warnings from collections.abc import Awaitable, Callable from typing import Any, Sequence from anyio.streams.memory import MemoryObjectReceiveStream, MemoryObjectSendStream from pydantic import AnyUrl import mcp.types as types from mcp.server.models import InitializationOptions from mcp.server.session import ServerSession from mcp.server.stdio import stdio_server as stdio_server from mcp.shared.context import RequestContext from mcp.shared.exceptions import McpError from mcp.shared.session import RequestResponder logger = logging.getLogger(__name__) request_ctx: contextvars.ContextVar[RequestContext[ServerSession]] = ( contextvars.ContextVar("request_ctx") ) class NotificationOptions: def __init__( self, prompts_changed: bool = False, resources_changed: bool = False, tools_changed: bool = False, ): self.prompts_changed = prompts_changed self.resources_changed = resources_changed self.tools_changed = tools_changed class Server: def __init__(self, name: str): self.name = name self.request_handlers: dict[ type, Callable[..., Awaitable[types.ServerResult]] ] = { types.PingRequest: _ping_handler, } self.notification_handlers: dict[type, Callable[..., Awaitable[None]]] = {} 
        self.notification_options = NotificationOptions()
        logger.debug(f"Initializing server '{name}'")

    def create_initialization_options(
        self,
        notification_options: NotificationOptions | None = None,
        experimental_capabilities: dict[str, dict[str, Any]] | None = None,
    ) -> InitializationOptions:
        """Create initialization options from this server instance."""

        def pkg_version(package: str) -> str:
            # Best-effort version lookup; falls back to "unknown" when package
            # metadata is unavailable (e.g. running from a source checkout).
            try:
                from importlib.metadata import version

                v = version(package)
                if v is not None:
                    return v
            except Exception:
                pass

            return "unknown"

        return InitializationOptions(
            server_name=self.name,
            server_version=pkg_version("mcp"),
            capabilities=self.get_capabilities(
                notification_options or NotificationOptions(),
                experimental_capabilities or {},
            ),
        )

    def get_capabilities(
        self,
        notification_options: NotificationOptions,
        experimental_capabilities: dict[str, dict[str, Any]],
    ) -> types.ServerCapabilities:
        """Convert existing handlers to a ServerCapabilities object."""
        # Each capability is advertised only if a corresponding handler was
        # registered on this server instance.
        prompts_capability = None
        resources_capability = None
        tools_capability = None
        logging_capability = None

        # Set prompt capabilities if handler exists
        if types.ListPromptsRequest in self.request_handlers:
            prompts_capability = types.PromptsCapability(
                listChanged=notification_options.prompts_changed
            )

        # Set resource capabilities if handler exists
        if types.ListResourcesRequest in self.request_handlers:
            resources_capability = types.ResourcesCapability(
                subscribe=False, listChanged=notification_options.resources_changed
            )

        # Set tool capabilities if handler exists
        if types.ListToolsRequest in self.request_handlers:
            tools_capability = types.ToolsCapability(
                listChanged=notification_options.tools_changed
            )

        # Set logging capabilities if handler exists
        if types.SetLevelRequest in self.request_handlers:
            logging_capability = types.LoggingCapability()

        return types.ServerCapabilities(
            prompts=prompts_capability,
            resources=resources_capability,
            tools=tools_capability,
            logging=logging_capability,
            experimental=experimental_capabilities,
        )

    @property
def request_context(self) -> RequestContext[ServerSession]: """If called outside of a request context, this will raise a LookupError.""" return request_ctx.get() def list_prompts(self): def decorator(func: Callable[[], Awaitable[list[types.Prompt]]]): logger.debug("Registering handler for PromptListRequest") async def handler(_: Any): prompts = await func() return types.ServerResult(types.ListPromptsResult(prompts=prompts)) self.request_handlers[types.ListPromptsRequest] = handler return func return decorator def get_prompt(self): def decorator( func: Callable[ [str, dict[str, str] | None], Awaitable[types.GetPromptResult] ], ): logger.debug("Registering handler for GetPromptRequest") async def handler(req: types.GetPromptRequest): prompt_get = await func(req.params.name, req.params.arguments) return types.ServerResult(prompt_get) self.request_handlers[types.GetPromptRequest] = handler return func return decorator def list_resources(self): def decorator(func: Callable[[], Awaitable[list[types.Resource]]]): logger.debug("Registering handler for ListResourcesRequest") async def handler(_: Any): resources = await func() return types.ServerResult( types.ListResourcesResult(resources=resources) ) self.request_handlers[types.ListResourcesRequest] = handler return func return decorator def list_resource_templates(self): def decorator(func: Callable[[], Awaitable[list[types.ResourceTemplate]]]): logger.debug("Registering handler for ListResourceTemplatesRequest") async def handler(_: Any): templates = await func() return types.ServerResult( types.ListResourceTemplatesResult(resourceTemplates=templates) ) self.request_handlers[types.ListResourceTemplatesRequest] = handler return func return decorator def read_resource(self): def decorator(func: Callable[[AnyUrl], Awaitable[str | bytes]]): logger.debug("Registering handler for ReadResourceRequest") async def handler(req: types.ReadResourceRequest): result = await func(req.params.uri) match result: case str(s): content = 
types.TextResourceContents( uri=req.params.uri, text=s, mimeType="text/plain", ) case bytes(b): import base64 content = types.BlobResourceContents( uri=req.params.uri, blob=base64.urlsafe_b64encode(b).decode(), mimeType="application/octet-stream", ) return types.ServerResult( types.ReadResourceResult( contents=[content], ) ) self.request_handlers[types.ReadResourceRequest] = handler return func return decorator def set_logging_level(self): def decorator(func: Callable[[types.LoggingLevel], Awaitable[None]]): logger.debug("Registering handler for SetLevelRequest") async def handler(req: types.SetLevelRequest): await func(req.params.level) return types.ServerResult(types.EmptyResult()) self.request_handlers[types.SetLevelRequest] = handler return func return decorator def subscribe_resource(self): def decorator(func: Callable[[AnyUrl], Awaitable[None]]): logger.debug("Registering handler for SubscribeRequest") async def handler(req: types.SubscribeRequest): await func(req.params.uri) return types.ServerResult(types.EmptyResult()) self.request_handlers[types.SubscribeRequest] = handler return func return decorator def unsubscribe_resource(self): def decorator(func: Callable[[AnyUrl], Awaitable[None]]): logger.debug("Registering handler for UnsubscribeRequest") async def handler(req: types.UnsubscribeRequest): await func(req.params.uri) return types.ServerResult(types.EmptyResult()) self.request_handlers[types.UnsubscribeRequest] = handler return func return decorator def list_tools(self): def decorator(func: Callable[[], Awaitable[list[types.Tool]]]): logger.debug("Registering handler for ListToolsRequest") async def handler(_: Any): tools = await func() return types.ServerResult(types.ListToolsResult(tools=tools)) self.request_handlers[types.ListToolsRequest] = handler return func return decorator def call_tool(self): def decorator( func: Callable[ ..., Awaitable[ Sequence[ types.TextContent | types.ImageContent | types.EmbeddedResource ] ], ], ): 
logger.debug("Registering handler for CallToolRequest") async def handler(req: types.CallToolRequest): try: results = await func(req.params.name, (req.params.arguments or {})) return types.ServerResult( types.CallToolResult(content=list(results), isError=False) ) except Exception as e: return types.ServerResult( types.CallToolResult( content=[types.TextContent(type="text", text=str(e))], isError=True, ) ) self.request_handlers[types.CallToolRequest] = handler return func return decorator def progress_notification(self): def decorator( func: Callable[[str | int, float, float | None], Awaitable[None]], ): logger.debug("Registering handler for ProgressNotification") async def handler(req: types.ProgressNotification): await func( req.params.progressToken, req.params.progress, req.params.total ) self.notification_handlers[types.ProgressNotification] = handler return func return decorator def completion(self): """Provides completions for prompts and resource templates""" def decorator( func: Callable[ [ types.PromptReference | types.ResourceReference, types.CompletionArgument, ], Awaitable[types.Completion | None], ], ): logger.debug("Registering handler for CompleteRequest") async def handler(req: types.CompleteRequest): completion = await func(req.params.ref, req.params.argument) return types.ServerResult( types.CompleteResult( completion=completion if completion is not None else types.Completion(values=[], total=None, hasMore=None), ) ) self.request_handlers[types.CompleteRequest] = handler return func return decorator async def run( self, read_stream: MemoryObjectReceiveStream[types.JSONRPCMessage | Exception], write_stream: MemoryObjectSendStream[types.JSONRPCMessage], initialization_options: InitializationOptions, # When False, exceptions are returned as messages to the client. # When True, exceptions are raised, which will cause the server to shut down # but also make tracing exceptions much easier during testing and when using # in-process servers. 
raise_exceptions: bool = False, ): with warnings.catch_warnings(record=True) as w: async with ServerSession( read_stream, write_stream, initialization_options ) as session: async for message in session.incoming_messages: logger.debug(f"Received message: {message}") match message: case RequestResponder(request=types.ClientRequest(root=req)): logger.info( f"Processing request of type {type(req).__name__}" ) if type(req) in self.request_handlers: handler = self.request_handlers[type(req)] logger.debug( f"Dispatching request of type {type(req).__name__}" ) token = None try: # Set our global state that can be retrieved via # app.get_request_context() token = request_ctx.set( RequestContext( message.request_id, message.request_meta, session, ) ) response = await handler(req) except McpError as err: response = err.error except Exception as err: if raise_exceptions: raise err response = types.ErrorData( code=0, message=str(err), data=None ) finally: # Reset the global state after we are done if token is not None: request_ctx.reset(token) await message.respond(response) else: await message.respond( types.ErrorData( code=types.METHOD_NOT_FOUND, message="Method not found", ) ) logger.debug("Response sent") case types.ClientNotification(root=notify): if type(notify) in self.notification_handlers: assert type(notify) in self.notification_handlers handler = self.notification_handlers[type(notify)] logger.debug( f"Dispatching notification of type " f"{type(notify).__name__}" ) try: await handler(notify) except Exception as err: logger.error( f"Uncaught exception in notification handler: " f"{err}" ) for warning in w: logger.info( f"Warning: {warning.category.__name__}: {warning.message}" ) async def _ping_handler(request: types.PingRequest) -> types.ServerResult: return types.ServerResult(types.EmptyResult()) -------------------------------------------------------------------------------- /src/mcp/server/__main__.py: 
-------------------------------------------------------------------------------- import importlib.metadata import logging import sys import anyio from mcp.server.models import InitializationOptions from mcp.server.session import ServerSession from mcp.server.stdio import stdio_server from mcp.types import ServerCapabilities if not sys.warnoptions: import warnings warnings.simplefilter("ignore") logging.basicConfig(level=logging.INFO) logger = logging.getLogger("server") async def receive_loop(session: ServerSession): logger.info("Starting receive loop") async for message in session.incoming_messages: if isinstance(message, Exception): logger.error("Error: %s", message) continue logger.info("Received message from client: %s", message) async def main(): version = importlib.metadata.version("mcp") async with stdio_server() as (read_stream, write_stream): async with ( ServerSession( read_stream, write_stream, InitializationOptions( server_name="mcp", server_version=version, capabilities=ServerCapabilities(), ), ) as session, write_stream, ): await receive_loop(session) if __name__ == "__main__": anyio.run(main, backend="trio") -------------------------------------------------------------------------------- /src/mcp/server/models.py: -------------------------------------------------------------------------------- """ This module provides simpler types to use with the server for managing prompts and tools. """ from pydantic import BaseModel from mcp.types import ( ServerCapabilities, ) class InitializationOptions(BaseModel): server_name: str server_version: str capabilities: ServerCapabilities -------------------------------------------------------------------------------- /src/mcp/server/session.py: -------------------------------------------------------------------------------- """ ServerSession Module This module provides the ServerSession class, which manages communication between the server and client in the MCP (Model Context Protocol) framework. 
It is most commonly used in MCP servers to interact with the client. Common usage pattern: ``` server = Server(name) @server.call_tool() async def handle_tool_call(ctx: RequestContext, arguments: dict[str, Any]) -> Any: # Check client capabilities before proceeding if ctx.session.check_client_capability( types.ClientCapabilities(experimental={"advanced_tools": dict()}) ): # Perform advanced tool operations result = await perform_advanced_tool_operation(arguments) else: # Fall back to basic tool operations result = await perform_basic_tool_operation(arguments) return result @server.list_prompts() async def handle_list_prompts(ctx: RequestContext) -> list[types.Prompt]: # Access session for any necessary checks or operations if ctx.session.client_params: # Customize prompts based on client initialization parameters return generate_custom_prompts(ctx.session.client_params) else: return default_prompts ``` The ServerSession class is typically used internally by the Server class and should not be instantiated directly by users of the MCP framework. 
# Lifecycle of the MCP initialize handshake: a fresh session is
# NotInitialized, becomes Initializing once the client's `initialize`
# request arrives (see ServerSession._received_request), and Initialized
# after the `initialized` notification is received.
InitializationState = Enum(
    "InitializationState",
    [("NotInitialized", 1), ("Initializing", 2), ("Initialized", 3)],
)
    async def _received_request(
        self, responder: RequestResponder[types.ClientRequest, types.ServerResult]
    ):
        """Intercept handshake requests before they reach the message stream.

        An InitializeRequest is answered inline with this server's declared
        capabilities and version info; every other request type is only
        checked for handshake ordering here (the base class forwards
        unanswered requests to the incoming message stream).

        Raises:
            RuntimeError: if a non-initialize request arrives before the
                handshake has completed.
        """
        match responder.request.root:
            case types.InitializeRequest(params=params):
                # Handshake opened: remember the client's params and advance
                # state. We stay in Initializing until the client sends the
                # `initialized` notification (see _received_notification).
                self._initialization_state = InitializationState.Initializing
                self._client_params = params
                await responder.respond(
                    types.ServerResult(
                        types.InitializeResult(
                            protocolVersion=types.LATEST_PROTOCOL_VERSION,
                            capabilities=self._init_options.capabilities,
                            serverInfo=types.Implementation(
                                name=self._init_options.server_name,
                                version=self._init_options.server_version,
                            ),
                        )
                    )
                )
            case _:
                # Enforce protocol ordering: nothing but initialize may be
                # requested before the handshake is complete.
                if self._initialization_state != InitializationState.Initialized:
                    raise RuntimeError(
                        "Received request before initialization was complete"
                    )
max_tokens: int, system_prompt: str | None = None, include_context: types.IncludeContext | None = None, temperature: float | None = None, stop_sequences: list[str] | None = None, metadata: dict[str, Any] | None = None, model_preferences: types.ModelPreferences | None = None, ) -> types.CreateMessageResult: """Send a sampling/create_message request.""" return await self.send_request( types.ServerRequest( types.CreateMessageRequest( method="sampling/createMessage", params=types.CreateMessageRequestParams( messages=messages, systemPrompt=system_prompt, includeContext=include_context, temperature=temperature, maxTokens=max_tokens, stopSequences=stop_sequences, metadata=metadata, modelPreferences=model_preferences, ), ) ), types.CreateMessageResult, ) async def list_roots(self) -> types.ListRootsResult: """Send a roots/list request.""" return await self.send_request( types.ServerRequest( types.ListRootsRequest( method="roots/list", ) ), types.ListRootsResult, ) async def send_ping(self) -> types.EmptyResult: """Send a ping request.""" return await self.send_request( types.ServerRequest( types.PingRequest( method="ping", ) ), types.EmptyResult, ) async def send_progress_notification( self, progress_token: str | int, progress: float, total: float | None = None ) -> None: """Send a progress notification.""" await self.send_notification( types.ServerNotification( types.ProgressNotification( method="notifications/progress", params=types.ProgressNotificationParams( progressToken=progress_token, progress=progress, total=total, ), ) ) ) async def send_resource_list_changed(self) -> None: """Send a resource list changed notification.""" await self.send_notification( types.ServerNotification( types.ResourceListChangedNotification( method="notifications/resources/list_changed", ) ) ) async def send_tool_list_changed(self) -> None: """Send a tool list changed notification.""" await self.send_notification( types.ServerNotification( types.ToolListChangedNotification( 
class SseServerTransport:
    """
    SSE server transport for MCP. This class provides _two_ ASGI applications,
    suitable to be used with a framework like Starlette and a server like
    Hypercorn:

    1. connect_sse() is an ASGI application which receives incoming GET
       requests, and sets up a new SSE stream to send server messages to the
       client.
    2. handle_post_message() is an ASGI application which receives incoming
       POST requests, which should contain client messages that link to a
       previously-established SSE session.
    """

    # Relative or absolute URL clients must POST their messages to.
    _endpoint: str
    # One writer per live SSE session, keyed by the session's UUID; POSTed
    # client messages are fed into the matching session's read stream.
    # NOTE(review): entries are added in connect_sse but never removed in this
    # class — confirm session cleanup happens elsewhere, else this grows.
    _read_stream_writers: dict[
        UUID, MemoryObjectSendStream[types.JSONRPCMessage | Exception]
    ]

    def __init__(self, endpoint: str) -> None:
        """
        Creates a new SSE server transport, which will direct the client to
        POST messages to the relative or absolute URL given.
        """
        super().__init__()
        self._endpoint = endpoint
        self._read_stream_writers = {}
        logger.debug(f"SseServerTransport initialized with endpoint: {endpoint}")

    @asynccontextmanager
    async def connect_sse(self, scope: Scope, receive: Receive, send: Send):
        """ASGI app for the GET side: opens the SSE stream for one session.

        Yields (read_stream, write_stream) for the server session to use;
        the first SSE event tells the client which URL to POST messages to.
        """
        if scope["type"] != "http":
            logger.error("connect_sse received non-HTTP request")
            raise ValueError("connect_sse can only handle HTTP requests")

        logger.debug("Setting up SSE connection")
        # Pipes between the HTTP layer and the MCP session:
        # read_* carries client -> server messages, write_* server -> client.
        read_stream: MemoryObjectReceiveStream[types.JSONRPCMessage | Exception]
        read_stream_writer: MemoryObjectSendStream[types.JSONRPCMessage | Exception]

        write_stream: MemoryObjectSendStream[types.JSONRPCMessage]
        write_stream_reader: MemoryObjectReceiveStream[types.JSONRPCMessage]

        read_stream_writer, read_stream = anyio.create_memory_object_stream(0)
        write_stream, write_stream_reader = anyio.create_memory_object_stream(0)

        # Fresh session ID; the hex form is embedded in the URI the client
        # must POST to so handle_post_message can route back to this session.
        session_id = uuid4()
        session_uri = f"{quote(self._endpoint)}?session_id={session_id.hex}"
        self._read_stream_writers[session_id] = read_stream_writer
        logger.debug(f"Created new session with ID: {session_id}")

        sse_stream_writer, sse_stream_reader = anyio.create_memory_object_stream(
            0, dict[str, Any]
        )

        async def sse_writer():
            # Drains the session's outgoing messages into SSE events. The
            # very first event ("endpoint") tells the client where to POST.
            logger.debug("Starting SSE writer")
            async with sse_stream_writer, write_stream_reader:
                await sse_stream_writer.send({"event": "endpoint", "data": session_uri})
                logger.debug(f"Sent endpoint event: {session_uri}")

                async for message in write_stream_reader:
                    logger.debug(f"Sending message via SSE: {message}")
                    await sse_stream_writer.send(
                        {
                            "event": "message",
                            "data": message.model_dump_json(
                                by_alias=True, exclude_none=True
                            ),
                        }
                    )

        async with anyio.create_task_group() as tg:
            response = EventSourceResponse(
                content=sse_stream_reader, data_sender_callable=sse_writer
            )
            logger.debug("Starting SSE response task")
            tg.start_soon(response, scope, receive, send)

            logger.debug("Yielding read and write streams")
            yield (read_stream, write_stream)

    async def handle_post_message(
        self, scope: Scope, receive: Receive, send: Send
    ) -> None:
        """ASGI app for the POST side: routes a client message to its session.

        Responds 400 for a missing/invalid session_id or unparseable body,
        404 for an unknown session, 202 when the message is accepted.
        """
        logger.debug("Handling POST message")
        request = Request(scope, receive)

        session_id_param = request.query_params.get("session_id")
        if session_id_param is None:
            logger.warning("Received request without session_id")
            response = Response("session_id is required", status_code=400)
            return await response(scope, receive, send)

        try:
            session_id = UUID(hex=session_id_param)
            logger.debug(f"Parsed session ID: {session_id}")
        except ValueError:
            logger.warning(f"Received invalid session ID: {session_id_param}")
            response = Response("Invalid session ID", status_code=400)
            return await response(scope, receive, send)

        writer = self._read_stream_writers.get(session_id)
        if not writer:
            logger.warning(f"Could not find session for ID: {session_id}")
            response = Response("Could not find session", status_code=404)
            return await response(scope, receive, send)

        json = await request.json()
        logger.debug(f"Received JSON: {json}")

        try:
            message = types.JSONRPCMessage.model_validate(json)
            logger.debug(f"Validated client message: {message}")
        except ValidationError as err:
            logger.error(f"Failed to parse message: {err}")
            response = Response("Could not parse message", status_code=400)
            await response(scope, receive, send)
            # Deliberately forward the parse error into the session so the
            # server can surface it, not just the HTTP caller.
            await writer.send(err)
            return

        logger.debug(f"Sending message to writer: {message}")
        # Respond 202 first, then hand the message to the session.
        response = Response("Accepted", status_code=202)
        await response(scope, receive, send)
        await writer.send(message)
@asynccontextmanager
async def stdio_server(
    stdin: anyio.AsyncFile[str] | None = None,
    stdout: anyio.AsyncFile[str] | None = None,
):
    """
    Server transport for stdio: this communicates with an MCP client by reading
    from the current process' stdin and writing to stdout.

    Yields (read_stream, write_stream): incoming JSONRPCMessages (or parse
    Exceptions) arrive on read_stream; messages sent to write_stream are
    serialized as one JSON object per line on stdout.
    """
    # Purposely not using context managers for these, as we don't want to close
    # standard process handles.
    if not stdin:
        stdin = anyio.wrap_file(sys.stdin)
    if not stdout:
        stdout = anyio.wrap_file(sys.stdout)

    read_stream: MemoryObjectReceiveStream[types.JSONRPCMessage | Exception]
    read_stream_writer: MemoryObjectSendStream[types.JSONRPCMessage | Exception]

    write_stream: MemoryObjectSendStream[types.JSONRPCMessage]
    write_stream_reader: MemoryObjectReceiveStream[types.JSONRPCMessage]

    # Zero-capacity streams: producers block until the session consumes.
    read_stream_writer, read_stream = anyio.create_memory_object_stream(0)
    write_stream, write_stream_reader = anyio.create_memory_object_stream(0)

    async def stdin_reader():
        # One JSON-RPC message per stdin line; malformed lines are forwarded
        # as Exception objects so the session can report rather than crash.
        try:
            async with read_stream_writer:
                async for line in stdin:
                    try:
                        message = types.JSONRPCMessage.model_validate_json(line)
                    except Exception as exc:
                        await read_stream_writer.send(exc)
                        continue

                    await read_stream_writer.send(message)
        except anyio.ClosedResourceError:
            # Peer closed the stream; checkpoint to stay cancellation-correct.
            await anyio.lowlevel.checkpoint()

    async def stdout_writer():
        # Serialize each outgoing message as a single line and flush so the
        # client sees it immediately.
        try:
            async with write_stream_reader:
                async for message in write_stream_reader:
                    json = message.model_dump_json(by_alias=True, exclude_none=True)
                    await stdout.write(json + "\n")
                    await stdout.flush()
        except anyio.ClosedResourceError:
            await anyio.lowlevel.checkpoint()

    async with anyio.create_task_group() as tg:
        tg.start_soon(stdin_reader)
        tg.start_soon(stdout_writer)
        yield read_stream, write_stream
@asynccontextmanager
async def websocket_server(scope: Scope, receive: Receive, send: Send):
    """
    WebSocket server transport for MCP. This is an ASGI application, suitable
    to be used with a framework like Starlette and a server like Hypercorn.

    Accepts the socket with the "mcp" subprotocol, then yields a pair of
    (read_stream, write_stream) memory streams bridging the socket to the
    MCP session.
    """
    websocket = WebSocket(scope, receive, send)
    await websocket.accept(subprotocol="mcp")

    # Client -> server pipe (carries validation Exceptions too) and the
    # server -> client pipe, both zero-capacity so backpressure propagates.
    read_stream: MemoryObjectReceiveStream[types.JSONRPCMessage | Exception]
    read_stream_writer: MemoryObjectSendStream[types.JSONRPCMessage | Exception]

    write_stream: MemoryObjectSendStream[types.JSONRPCMessage]
    write_stream_reader: MemoryObjectReceiveStream[types.JSONRPCMessage]

    read_stream_writer, read_stream = anyio.create_memory_object_stream(0)
    write_stream, write_stream_reader = anyio.create_memory_object_stream(0)

    async def pump_incoming():
        # Validate every JSON frame; forward failures as Exception objects so
        # the session can surface them instead of silently dropping input.
        try:
            async with read_stream_writer:
                async for raw in websocket.iter_json():
                    try:
                        parsed = types.JSONRPCMessage.model_validate(raw)
                    except Exception as exc:
                        await read_stream_writer.send(exc)
                        continue

                    await read_stream_writer.send(parsed)
        except anyio.ClosedResourceError:
            await websocket.close()

    async def pump_outgoing():
        # Serialize outgoing messages to JSON objects on the socket.
        try:
            async with write_stream_reader:
                async for outgoing in write_stream_reader:
                    payload = outgoing.model_dump(
                        by_alias=True, mode="json", exclude_none=True
                    )
                    await websocket.send_json(payload)
        except anyio.ClosedResourceError:
            await websocket.close()

    async with anyio.create_task_group() as tg:
        tg.start_soon(pump_incoming)
        tg.start_soon(pump_outgoing)
        yield (read_stream, write_stream)
class McpError(Exception):
    """
    Exception type raised when an error arrives over an MCP connection.
    """

    # The JSON-RPC error payload carried by this exception; callers construct
    # McpError(error_data) and later read `err.error` (see the server's
    # request dispatch and BaseSession.send_request).
    error: "ErrorData"

    def __init__(self, error: "ErrorData") -> None:
        """Store the payload and use its message as the exception text.

        Fix: `error` was previously only a class-level annotation and was
        never assigned, so `McpError(data)` followed by `err.error` raised
        AttributeError at runtime.
        """
        super().__init__(error.message)
        self.error = error
@asynccontextmanager
async def create_connected_server_and_client_session(
    server: Server,
    read_timeout_seconds: timedelta | None = None,
    raise_exceptions: bool = False,
) -> AsyncGenerator[ClientSession, None]:
    """Creates a ClientSession that is connected to a running MCP server.

    The server runs in a background task for the lifetime of the context;
    the yielded session has already completed the initialize handshake.
    The server task is cancelled when the context exits.
    """
    async with create_client_server_memory_streams() as (
        client_streams,
        server_streams,
    ):
        client_read, client_write = client_streams
        server_read, server_write = server_streams

        # Create a cancel scope for the server task
        async with anyio.create_task_group() as tg:
            # Wrapped in a lambda so the coroutine is created inside the task.
            tg.start_soon(
                lambda: server.run(
                    server_read,
                    server_write,
                    server.create_initialization_options(),
                    raise_exceptions=raise_exceptions,
                )
            )

            try:
                async with ClientSession(
                    read_stream=client_read,
                    write_stream=client_write,
                    read_timeout_seconds=read_timeout_seconds,
                ) as client_session:
                    # Complete the handshake before handing the session out.
                    await client_session.initialize()
                    yield client_session
            finally:
                # Tear the server down even if the caller's body raised.
                tg.cancel_scope.cancel()
@contextmanager
def progress(ctx: RequestContext, total: float | None = None):
    """Yield a ProgressContext bound to the request's progress token.

    Raises:
        ValueError: if the request carried no progress token, i.e. the
            caller did not ask for out-of-band progress notifications.
    """
    if ctx.meta is None or ctx.meta.progressToken is None:
        raise ValueError("No progress token provided")

    yield ProgressContext(ctx.session, ctx.meta.progressToken, total)
    def __init__(
        self,
        read_stream: MemoryObjectReceiveStream[JSONRPCMessage | Exception],
        write_stream: MemoryObjectSendStream[JSONRPCMessage],
        receive_request_type: type[ReceiveRequestT],
        receive_notification_type: type[ReceiveNotificationT],
        # If none, reading will never time out
        read_timeout_seconds: timedelta | None = None,
    ) -> None:
        """Set up session state over the given transport streams.

        Args:
            read_stream: incoming JSONRPCMessages (or transport Exceptions).
            write_stream: outgoing JSONRPCMessages.
            receive_request_type: model used to re-validate incoming requests
                (ClientRequest or ServerRequest depending on the side).
            receive_notification_type: model for incoming notifications.
            read_timeout_seconds: per-request response timeout; None waits
                forever.
        """
        self._read_stream = read_stream
        self._write_stream = write_stream
        # Maps an outstanding request id to the stream its response is
        # delivered on (see send_request / _receive_loop).
        self._response_streams = {}
        # Monotonically increasing id for outgoing requests.
        self._request_id = 0
        self._receive_request_type = receive_request_type
        self._receive_notification_type = receive_notification_type
        self._read_timeout_seconds = read_timeout_seconds

        # Unbuffered stream carrying unanswered requests, notifications and
        # errors to whoever consumes `incoming_messages`.
        self._incoming_message_stream_writer, self._incoming_message_stream_reader = (
            anyio.create_memory_object_stream[
                RequestResponder[ReceiveRequestT, SendResultT]
                | ReceiveNotificationT
                | Exception
            ]()
        )
async def __aexit__(self, exc_type, exc_val, exc_tb): # Using BaseSession as a context manager should not block on exit (this # would be very surprising behavior), so make sure to cancel the tasks # in the task group. self._task_group.cancel_scope.cancel() return await self._task_group.__aexit__(exc_type, exc_val, exc_tb) async def send_request( self, request: SendRequestT, result_type: type[ReceiveResultT], ) -> ReceiveResultT: """ Sends a request and wait for a response. Raises an McpError if the response contains an error. Do not use this method to emit notifications! Use send_notification() instead. """ request_id = self._request_id self._request_id = request_id + 1 response_stream, response_stream_reader = anyio.create_memory_object_stream[ JSONRPCResponse | JSONRPCError ](1) self._response_streams[request_id] = response_stream jsonrpc_request = JSONRPCRequest( jsonrpc="2.0", id=request_id, **request.model_dump(by_alias=True, mode="json", exclude_none=True), ) # TODO: Support progress callbacks await self._write_stream.send(JSONRPCMessage(jsonrpc_request)) try: with anyio.fail_after( None if self._read_timeout_seconds is None else self._read_timeout_seconds.total_seconds() ): response_or_error = await response_stream_reader.receive() except TimeoutError: raise McpError( ErrorData( code=httpx.codes.REQUEST_TIMEOUT, message=( f"Timed out while waiting for response to " f"{request.__class__.__name__}. Waited " f"{self._read_timeout_seconds} seconds." ), ) ) if isinstance(response_or_error, JSONRPCError): raise McpError(response_or_error.error) else: return result_type.model_validate(response_or_error.result) async def send_notification(self, notification: SendNotificationT) -> None: """ Emits a notification, which is a one-way message that does not expect a response. 
    async def _receive_loop(self) -> None:
        """Route every incoming message until the read stream closes.

        Requests are re-validated into the typed receive model, offered to
        `_received_request` first, and forwarded to `incoming_messages` only
        if not already answered. Notifications go to `_received_notification`
        and then to the stream. Responses/errors are matched to the waiting
        `send_request` call by id.
        """
        async with (
            self._read_stream,
            self._write_stream,
            self._incoming_message_stream_writer,
        ):
            async for message in self._read_stream:
                if isinstance(message, Exception):
                    # Transport-level failures are surfaced to the consumer.
                    await self._incoming_message_stream_writer.send(message)
                elif isinstance(message.root, JSONRPCRequest):
                    # Round-trip through a plain dict to re-validate the raw
                    # JSON-RPC request as the side-specific request model.
                    validated_request = self._receive_request_type.model_validate(
                        message.root.model_dump(
                            by_alias=True, mode="json", exclude_none=True
                        )
                    )

                    responder = RequestResponder(
                        request_id=message.root.id,
                        request_meta=validated_request.root.params._meta
                        if validated_request.root.params
                        else None,
                        request=validated_request,
                        session=self,
                    )

                    # Give subclasses first crack (e.g. the initialize
                    # handshake); only unanswered requests reach consumers.
                    await self._received_request(responder)
                    if not responder._responded:
                        await self._incoming_message_stream_writer.send(responder)
                elif isinstance(message.root, JSONRPCNotification):
                    notification = self._receive_notification_type.model_validate(
                        message.root.model_dump(
                            by_alias=True, mode="json", exclude_none=True
                        )
                    )

                    await self._received_notification(notification)
                    await self._incoming_message_stream_writer.send(notification)
                else:  # Response or error
                    # Deliver to whichever send_request call registered this id.
                    stream = self._response_streams.pop(message.root.id, None)
                    if stream:
                        await stream.send(message.root)
                    else:
                        await self._incoming_message_stream_writer.send(
                            RuntimeError(
                                "Received response with an unknown "
                                f"request ID: {message}"
                            )
                        )
class RequestParams(BaseModel):
    """Base model for the `params` object of MCP requests.

    NOTE(review): `_meta` is underscore-prefixed, which pydantic v2 treats as
    a private attribute rather than a serialized field — confirm this matches
    how `validated_request.root.params._meta` is read in
    BaseSession._receive_loop.
    """

    class Meta(BaseModel):
        progressToken: ProgressToken | None = None
        """
        If specified, the caller is requesting out-of-band progress
        notifications for this request (as represented by
        notifications/progress). The value of this parameter is an opaque
        token that will be attached to any subsequent notifications. The
        receiver is not obligated to provide these notifications.
        """

        # Extra, unmodeled metadata keys are allowed and preserved.
        model_config = ConfigDict(extra="allow")

    _meta: Meta | None = None
""" class Notification(BaseModel, Generic[NotificationParamsT, MethodT]): """Base class for JSON-RPC notifications.""" method: MethodT model_config = ConfigDict(extra="allow") class Result(BaseModel): """Base class for JSON-RPC results.""" model_config = ConfigDict(extra="allow") _meta: dict[str, Any] | None = None """ This result property is reserved by the protocol to allow clients and servers to attach additional metadata to their responses. """ class PaginatedResult(Result): nextCursor: Cursor | None = None """ An opaque token representing the pagination position after the last returned result. If present, there may be more results available. """ RequestId = str | int class JSONRPCRequest(Request): """A request that expects a response.""" jsonrpc: Literal["2.0"] id: RequestId params: dict[str, Any] | None = None class JSONRPCNotification(Notification): """A notification which does not expect a response.""" jsonrpc: Literal["2.0"] params: dict[str, Any] | None = None class JSONRPCResponse(BaseModel): """A successful (non-error) response to a request.""" jsonrpc: Literal["2.0"] id: RequestId result: dict[str, Any] model_config = ConfigDict(extra="allow") # Standard JSON-RPC error codes PARSE_ERROR = -32700 INVALID_REQUEST = -32600 METHOD_NOT_FOUND = -32601 INVALID_PARAMS = -32602 INTERNAL_ERROR = -32603 class ErrorData(BaseModel): """Error information for JSON-RPC error responses.""" code: int """The error type that occurred.""" message: str """ A short description of the error. The message SHOULD be limited to a concise single sentence. """ data: Any | None = None """ Additional information about the error. The value of this member is defined by the sender (e.g. detailed error information, nested errors etc.). 
""" model_config = ConfigDict(extra="allow") class JSONRPCError(BaseModel): """A response to a request that indicates an error occurred.""" jsonrpc: Literal["2.0"] id: str | int error: ErrorData model_config = ConfigDict(extra="allow") class JSONRPCMessage( RootModel[JSONRPCRequest | JSONRPCNotification | JSONRPCResponse | JSONRPCError] ): pass class EmptyResult(Result): """A response that indicates success but carries no data.""" class Implementation(BaseModel): """Describes the name and version of an MCP implementation.""" name: str version: str model_config = ConfigDict(extra="allow") class RootsCapability(BaseModel): """Capability for root operations.""" listChanged: bool | None = None """Whether the client supports notifications for changes to the roots list.""" model_config = ConfigDict(extra="allow") class SamplingCapability(BaseModel): """Capability for logging operations.""" model_config = ConfigDict(extra="allow") class ClientCapabilities(BaseModel): """Capabilities a client may support.""" experimental: dict[str, dict[str, Any]] | None = None """Experimental, non-standard capabilities that the client supports.""" sampling: SamplingCapability | None = None """Present if the client supports sampling from an LLM.""" roots: RootsCapability | None = None """Present if the client supports listing roots.""" model_config = ConfigDict(extra="allow") class PromptsCapability(BaseModel): """Capability for prompts operations.""" listChanged: bool | None = None """Whether this server supports notifications for changes to the prompt list.""" model_config = ConfigDict(extra="allow") class ResourcesCapability(BaseModel): """Capability for resources operations.""" subscribe: bool | None = None """Whether this server supports subscribing to resource updates.""" listChanged: bool | None = None """Whether this server supports notifications for changes to the resource list.""" model_config = ConfigDict(extra="allow") class ToolsCapability(BaseModel): """Capability for tools 
operations.""" listChanged: bool | None = None """Whether this server supports notifications for changes to the tool list.""" model_config = ConfigDict(extra="allow") class LoggingCapability(BaseModel): """Capability for logging operations.""" model_config = ConfigDict(extra="allow") class ServerCapabilities(BaseModel): """Capabilities that a server may support.""" experimental: dict[str, dict[str, Any]] | None = None """Experimental, non-standard capabilities that the server supports.""" logging: LoggingCapability | None = None """Present if the server supports sending log messages to the client.""" prompts: PromptsCapability | None = None """Present if the server offers any prompt templates.""" resources: ResourcesCapability | None = None """Present if the server offers any resources to read.""" tools: ToolsCapability | None = None """Present if the server offers any tools to call.""" model_config = ConfigDict(extra="allow") class InitializeRequestParams(RequestParams): """Parameters for the initialize request.""" protocolVersion: str | int """The latest version of the Model Context Protocol that the client supports.""" capabilities: ClientCapabilities clientInfo: Implementation model_config = ConfigDict(extra="allow") class InitializeRequest(Request): """ This request is sent from the client to the server when it first connects, asking it to begin initialization. """ method: Literal["initialize"] params: InitializeRequestParams class InitializeResult(Result): """After receiving an initialize request from the client, the server sends this.""" protocolVersion: str | int """The version of the Model Context Protocol that the server wants to use.""" capabilities: ServerCapabilities serverInfo: Implementation class InitializedNotification(Notification): """ This notification is sent from the client to the server after initialization has finished. 
""" method: Literal["notifications/initialized"] params: NotificationParams | None = None class PingRequest(Request): """ A ping, issued by either the server or the client, to check that the other party is still alive. """ method: Literal["ping"] params: RequestParams | None = None class ProgressNotificationParams(NotificationParams): """Parameters for progress notifications.""" progressToken: ProgressToken """ The progress token which was given in the initial request, used to associate this notification with the request that is proceeding. """ progress: float """ The progress thus far. This should increase every time progress is made, even if the total is unknown. """ total: float | None = None """Total number of items to process (or total progress required), if known.""" model_config = ConfigDict(extra="allow") class ProgressNotification(Notification): """ An out-of-band notification used to inform the receiver of a progress update for a long-running request. """ method: Literal["notifications/progress"] params: ProgressNotificationParams class ListResourcesRequest(PaginatedRequest): """Sent from the client to request a list of resources the server has.""" method: Literal["resources/list"] params: RequestParams | None = None class Resource(BaseModel): """A known resource that the server is capable of reading.""" uri: AnyUrl """The URI of this resource.""" name: str """A human-readable name for this resource.""" description: str | None = None """A description of what this resource represents.""" mimeType: str | None = None """The MIME type of this resource, if known.""" model_config = ConfigDict(extra="allow") class ResourceTemplate(BaseModel): """A template description for resources available on the server.""" uriTemplate: str """ A URI template (according to RFC 6570) that can be used to construct resource URIs. 
""" name: str """A human-readable name for the type of resource this template refers to.""" description: str | None = None """A human-readable description of what this template is for.""" mimeType: str | None = None """ The MIME type for all resources that match this template. This should only be included if all resources matching this template have the same type. """ model_config = ConfigDict(extra="allow") class ListResourcesResult(PaginatedResult): """The server's response to a resources/list request from the client.""" resources: list[Resource] class ListResourceTemplatesRequest(PaginatedRequest): """Sent from the client to request a list of resource templates the server has.""" method: Literal["resources/templates/list"] params: RequestParams | None = None class ListResourceTemplatesResult(PaginatedResult): """The server's response to a resources/templates/list request from the client.""" resourceTemplates: list[ResourceTemplate] class ReadResourceRequestParams(RequestParams): """Parameters for reading a resource.""" uri: AnyUrl """ The URI of the resource to read. The URI can use any protocol; it is up to the server how to interpret it. """ model_config = ConfigDict(extra="allow") class ReadResourceRequest(Request): """Sent from the client to the server, to read a specific resource URI.""" method: Literal["resources/read"] params: ReadResourceRequestParams class ResourceContents(BaseModel): """The contents of a specific resource or sub-resource.""" uri: AnyUrl """The URI of this resource.""" mimeType: str | None = None """The MIME type of this resource, if known.""" model_config = ConfigDict(extra="allow") class TextResourceContents(ResourceContents): """Text contents of a resource.""" text: str """ The text of the item. This must only be set if the item can actually be represented as text (not binary data). 
""" class BlobResourceContents(ResourceContents): """Binary contents of a resource.""" blob: str """A base64-encoded string representing the binary data of the item.""" class ReadResourceResult(Result): """The server's response to a resources/read request from the client.""" contents: list[TextResourceContents | BlobResourceContents] class ResourceListChangedNotification(Notification): """ An optional notification from the server to the client, informing it that the list of resources it can read from has changed. """ method: Literal["notifications/resources/list_changed"] params: NotificationParams | None = None class SubscribeRequestParams(RequestParams): """Parameters for subscribing to a resource.""" uri: AnyUrl """ The URI of the resource to subscribe to. The URI can use any protocol; it is up to the server how to interpret it. """ model_config = ConfigDict(extra="allow") class SubscribeRequest(Request): """ Sent from the client to request resources/updated notifications from the server whenever a particular resource changes. """ method: Literal["resources/subscribe"] params: SubscribeRequestParams class UnsubscribeRequestParams(RequestParams): """Parameters for unsubscribing from a resource.""" uri: AnyUrl """The URI of the resource to unsubscribe from.""" model_config = ConfigDict(extra="allow") class UnsubscribeRequest(Request): """ Sent from the client to request cancellation of resources/updated notifications from the server. """ method: Literal["resources/unsubscribe"] params: UnsubscribeRequestParams class ResourceUpdatedNotificationParams(NotificationParams): """Parameters for resource update notifications.""" uri: AnyUrl """ The URI of the resource that has been updated. This might be a sub-resource of the one that the client actually subscribed to. 
""" model_config = ConfigDict(extra="allow") class ResourceUpdatedNotification(Notification): """ A notification from the server to the client, informing it that a resource has changed and may need to be read again. """ method: Literal["notifications/resources/updated"] params: ResourceUpdatedNotificationParams class ListPromptsRequest(PaginatedRequest): """Sent from the client to request a list of prompts and prompt templates.""" method: Literal["prompts/list"] params: RequestParams | None = None class PromptArgument(BaseModel): """An argument for a prompt template.""" name: str """The name of the argument.""" description: str | None = None """A human-readable description of the argument.""" required: bool | None = None """Whether this argument must be provided.""" model_config = ConfigDict(extra="allow") class Prompt(BaseModel): """A prompt or prompt template that the server offers.""" name: str """The name of the prompt or prompt template.""" description: str | None = None """An optional description of what this prompt provides.""" arguments: list[PromptArgument] | None = None """A list of arguments to use for templating the prompt.""" model_config = ConfigDict(extra="allow") class ListPromptsResult(PaginatedResult): """The server's response to a prompts/list request from the client.""" prompts: list[Prompt] class GetPromptRequestParams(RequestParams): """Parameters for getting a prompt.""" name: str """The name of the prompt or prompt template.""" arguments: dict[str, str] | None = None """Arguments to use for templating the prompt.""" model_config = ConfigDict(extra="allow") class GetPromptRequest(Request): """Used by the client to get a prompt provided by the server.""" method: Literal["prompts/get"] params: GetPromptRequestParams class TextContent(BaseModel): """Text content for a message.""" type: Literal["text"] text: str """The text content of the message.""" model_config = ConfigDict(extra="allow") class ImageContent(BaseModel): """Image content for a 
message.""" type: Literal["image"] data: str """The base64-encoded image data.""" mimeType: str """ The MIME type of the image. Different providers may support different image types. """ model_config = ConfigDict(extra="allow") Role = Literal["user", "assistant"] class SamplingMessage(BaseModel): """Describes a message issued to or received from an LLM API.""" role: Role content: TextContent | ImageContent model_config = ConfigDict(extra="allow") class EmbeddedResource(BaseModel): """ The contents of a resource, embedded into a prompt or tool call result. It is up to the client how best to render embedded resources for the benefit of the LLM and/or the user. """ type: Literal["resource"] resource: TextResourceContents | BlobResourceContents model_config = ConfigDict(extra="allow") class PromptMessage(BaseModel): """Describes a message returned as part of a prompt.""" role: Role content: TextContent | ImageContent | EmbeddedResource model_config = ConfigDict(extra="allow") class GetPromptResult(Result): """The server's response to a prompts/get request from the client.""" description: str | None = None """An optional description for the prompt.""" messages: list[PromptMessage] class PromptListChangedNotification(Notification): """ An optional notification from the server to the client, informing it that the list of prompts it offers has changed. 
""" method: Literal["notifications/prompts/list_changed"] params: NotificationParams | None = None class ListToolsRequest(PaginatedRequest): """Sent from the client to request a list of tools the server has.""" method: Literal["tools/list"] params: RequestParams | None = None class Tool(BaseModel): """Definition for a tool the client can call.""" name: str """The name of the tool.""" description: str | None = None """A human-readable description of the tool.""" inputSchema: dict[str, Any] """A JSON Schema object defining the expected parameters for the tool.""" model_config = ConfigDict(extra="allow") class ListToolsResult(PaginatedResult): """The server's response to a tools/list request from the client.""" tools: list[Tool] class CallToolRequestParams(RequestParams): """Parameters for calling a tool.""" name: str arguments: dict[str, Any] | None = None model_config = ConfigDict(extra="allow") class CallToolRequest(Request): """Used by the client to invoke a tool provided by the server.""" method: Literal["tools/call"] params: CallToolRequestParams class CallToolResult(Result): """The server's response to a tool call.""" content: list[TextContent | ImageContent | EmbeddedResource] isError: bool = False class ToolListChangedNotification(Notification): """ An optional notification from the server to the client, informing it that the list of tools it offers has changed. 
""" method: Literal["notifications/tools/list_changed"] params: NotificationParams | None = None LoggingLevel = Literal[ "debug", "info", "notice", "warning", "error", "critical", "alert", "emergency" ] class SetLevelRequestParams(RequestParams): """Parameters for setting the logging level.""" level: LoggingLevel """The level of logging that the client wants to receive from the server.""" model_config = ConfigDict(extra="allow") class SetLevelRequest(Request): """A request from the client to the server, to enable or adjust logging.""" method: Literal["logging/setLevel"] params: SetLevelRequestParams class LoggingMessageNotificationParams(NotificationParams): """Parameters for logging message notifications.""" level: LoggingLevel """The severity of this log message.""" logger: str | None = None """An optional name of the logger issuing this message.""" data: Any """ The data to be logged, such as a string message or an object. Any JSON serializable type is allowed here. """ model_config = ConfigDict(extra="allow") class LoggingMessageNotification(Notification): """Notification of a log message passed from server to client.""" method: Literal["notifications/message"] params: LoggingMessageNotificationParams IncludeContext = Literal["none", "thisServer", "allServers"] class ModelHint(BaseModel): """Hints to use for model selection.""" name: str | None = None """A hint for a model name.""" model_config = ConfigDict(extra="allow") class ModelPreferences(BaseModel): """ The server's preferences for model selection, requested of the client during sampling. Because LLMs can vary along multiple dimensions, choosing the "best" model is rarely straightforward. Different models excel in different areas—some are faster but less capable, others are more capable but more expensive, and so on. This interface allows servers to express their priorities across multiple dimensions to help clients make an appropriate selection for their use case. These preferences are always advisory. 
The client MAY ignore them. It is also up to the client to decide how to interpret these preferences and how to balance them against other considerations. """ hints: list[ModelHint] | None = None """ Optional hints to use for model selection. If multiple hints are specified, the client MUST evaluate them in order (such that the first match is taken). The client SHOULD prioritize these hints over the numeric priorities, but MAY still use the priorities to select from ambiguous matches. """ costPriority: float | None = None """ How much to prioritize cost when selecting a model. A value of 0 means cost is not important, while a value of 1 means cost is the most important factor. """ speedPriority: float | None = None """ How much to prioritize sampling speed (latency) when selecting a model. A value of 0 means speed is not important, while a value of 1 means speed is the most important factor. """ intelligencePriority: float | None = None """ How much to prioritize intelligence and capabilities when selecting a model. A value of 0 means intelligence is not important, while a value of 1 means intelligence is the most important factor. """ model_config = ConfigDict(extra="allow") class CreateMessageRequestParams(RequestParams): """Parameters for creating a message.""" messages: list[SamplingMessage] modelPreferences: ModelPreferences | None = None """ The server's preferences for which model to select. The client MAY ignore these preferences. """ systemPrompt: str | None = None """An optional system prompt the server wants to use for sampling.""" includeContext: IncludeContext | None = None """ A request to include context from one or more MCP servers (including the caller), to be attached to the prompt. 
""" temperature: float | None = None maxTokens: int """The maximum number of tokens to sample, as requested by the server.""" stopSequences: list[str] | None = None metadata: dict[str, Any] | None = None """Optional metadata to pass through to the LLM provider.""" model_config = ConfigDict(extra="allow") class CreateMessageRequest(Request): """A request from the server to sample an LLM via the client.""" method: Literal["sampling/createMessage"] params: CreateMessageRequestParams StopReason = Literal["endTurn", "stopSequence", "maxTokens"] | str class CreateMessageResult(Result): """The client's response to a sampling/create_message request from the server.""" role: Role content: TextContent | ImageContent model: str """The name of the model that generated the message.""" stopReason: StopReason | None = None """The reason why sampling stopped, if known.""" class ResourceReference(BaseModel): """A reference to a resource or resource template definition.""" type: Literal["ref/resource"] uri: str """The URI or URI template of the resource.""" model_config = ConfigDict(extra="allow") class PromptReference(BaseModel): """Identifies a prompt.""" type: Literal["ref/prompt"] name: str """The name of the prompt or prompt template""" model_config = ConfigDict(extra="allow") class CompletionArgument(BaseModel): """The argument's information for completion requests.""" name: str """The name of the argument""" value: str """The value of the argument to use for completion matching.""" model_config = ConfigDict(extra="allow") class CompleteRequestParams(RequestParams): """Parameters for completion requests.""" ref: ResourceReference | PromptReference argument: CompletionArgument model_config = ConfigDict(extra="allow") class CompleteRequest(Request): """A request from the client to the server, to ask for completion options.""" method: Literal["completion/complete"] params: CompleteRequestParams class Completion(BaseModel): """Completion information.""" values: list[str] """An 
array of completion values. Must not exceed 100 items.""" total: int | None = None """ The total number of completion options available. This can exceed the number of values actually sent in the response. """ hasMore: bool | None = None """ Indicates whether there are additional completion options beyond those provided in the current response, even if the exact total is unknown. """ model_config = ConfigDict(extra="allow") class CompleteResult(Result): """The server's response to a completion/complete request""" completion: Completion class ListRootsRequest(Request): """ Sent from the server to request a list of root URIs from the client. Roots allow servers to ask for specific directories or files to operate on. A common example for roots is providing a set of repositories or directories a server should operate on. This request is typically used when the server needs to understand the file system structure or access specific locations that the client has permission to read from. """ method: Literal["roots/list"] params: RequestParams | None = None class Root(BaseModel): """Represents a root directory or file that the server can operate on.""" uri: FileUrl """ The URI identifying the root. This *must* start with file:// for now. This restriction may be relaxed in future versions of the protocol to allow other URI schemes. """ name: str | None = None """ An optional name for the root. This can be used to provide a human-readable identifier for the root, which may be useful for display purposes or for referencing the root in other parts of the application. """ model_config = ConfigDict(extra="allow") class ListRootsResult(Result): """ The client's response to a roots/list request from the server. This result contains an array of Root objects, each representing a root directory or file that the server can operate on. 
""" roots: list[Root] class RootsListChangedNotification(Notification): """ A notification from the client to the server, informing it that the list of roots has changed. This notification should be sent whenever the client adds, removes, or modifies any root. The server should then request an updated list of roots using the ListRootsRequest. """ method: Literal["notifications/roots/list_changed"] params: NotificationParams | None = None class ClientRequest( RootModel[ PingRequest | InitializeRequest | CompleteRequest | SetLevelRequest | GetPromptRequest | ListPromptsRequest | ListResourcesRequest | ListResourceTemplatesRequest | ReadResourceRequest | SubscribeRequest | UnsubscribeRequest | CallToolRequest | ListToolsRequest ] ): pass class ClientNotification( RootModel[ ProgressNotification | InitializedNotification | RootsListChangedNotification ] ): pass class ClientResult(RootModel[EmptyResult | CreateMessageResult | ListRootsResult]): pass class ServerRequest(RootModel[PingRequest | CreateMessageRequest | ListRootsRequest]): pass class ServerNotification( RootModel[ ProgressNotification | LoggingMessageNotification | ResourceUpdatedNotification | ResourceListChangedNotification | ToolListChangedNotification | PromptListChangedNotification ] ): pass class ServerResult( RootModel[ EmptyResult | InitializeResult | CompleteResult | GetPromptResult | ListPromptsResult | ListResourcesResult | ListResourceTemplatesResult | ReadResourceResult | CallToolResult | ListToolsResult ] ): pass