├── .gitignore
├── DEVELOPMENT.md
├── LICENSE
├── Makefile
├── README.md
├── image.png
├── mcp-streamlit-app
│   ├── README.md
│   ├── app.py
│   ├── mcp-streamlit-app-screenshot.png
│   ├── requirements.txt
│   ├── run.sh
│   └── style.css
├── mcp_playground
│   ├── __init__.py
│   ├── client.py
│   ├── examples
│   │   ├── __init__.py
│   │   ├── llm_example.py
│   │   └── usage_example.py
│   ├── format_converters.py
│   └── llm_bridge
│       ├── __init__.py
│       ├── anthropic_bridge.py
│       ├── base.py
│       ├── models.py
│       ├── ollama_bridge.py
│       ├── openai_bridge.py
│       ├── openrouter_bridge.py
│       └── openrouter_client.py
├── pytest.ini
├── requirements.txt
├── setup.py
├── test_mcp_connection.py
└── tests
    ├── __init__.py
    └── test_client.py
/.gitignore:
--------------------------------------------------------------------------------
1 | # Byte-compiled / optimized / DLL files
2 | __pycache__/
3 | *.py[cod]
4 | *$py.class
5 |
6 | # C extensions
7 | *.so
8 |
9 | # Distribution / packaging
10 | .Python
11 | build/
12 | develop-eggs/
13 | dist/
14 | downloads/
15 | eggs/
16 | .eggs/
17 | lib/
18 | lib64/
19 | parts/
20 | sdist/
21 | var/
22 | wheels/
23 | *.egg-info/
24 | .installed.cfg
25 | *.egg
26 | MANIFEST
27 |
28 | # PyInstaller
29 | # Usually these files are written by a python script from a template
30 | # before PyInstaller builds the exe, so as to inject date/other infos into it.
31 | *.manifest
32 | *.spec
33 |
34 | # Installer logs
35 | pip-log.txt
36 | pip-delete-this-directory.txt
37 |
38 | # Unit test / coverage reports
39 | htmlcov/
40 | .tox/
41 | .nox/
42 | .coverage
43 | .coverage.*
44 | .cache
45 | nosetests.xml
46 | coverage.xml
47 | *.cover
48 | .hypothesis/
49 | .pytest_cache/
50 |
51 | # Translations
52 | *.mo
53 | *.pot
54 |
55 | # Django stuff:
56 | *.log
57 | local_settings.py
58 | db.sqlite3
59 |
60 | # Flask stuff:
61 | instance/
62 | .webassets-cache
63 |
64 | # Scrapy stuff:
65 | .scrapy
66 |
67 | # Sphinx documentation
68 | docs/_build/
69 |
70 | # PyBuilder
71 | target/
72 |
73 | # Jupyter Notebook
74 | .ipynb_checkpoints
75 |
76 | # IPython
77 | profile_default/
78 | ipython_config.py
79 |
80 | # pyenv
81 | .python-version
82 |
83 | # celery beat schedule file
84 | celerybeat-schedule
85 |
86 | # SageMath parsed files
87 | *.sage.py
88 |
89 | # Environments
90 | .env
91 | .venv
92 | env/
93 | venv/
94 | ENV/
95 | env.bak/
96 | venv.bak/
97 |
98 | # Spyder project settings
99 | .spyderproject
100 | .spyproject
101 |
102 | # Rope project settings
103 | .ropeproject
104 |
105 | # mkdocs documentation
106 | /site
107 |
108 | # mypy
109 | .mypy_cache/
110 | .dmypy.json
111 | dmypy.json
112 |
113 | # Pyre type checker
114 | .pyre/
115 |
116 | # IDE specific files
117 | .idea/
118 | .vscode/
119 | *.swp
120 | *.swo
121 |
122 | # Project-specific
123 | *.egg-info/
124 | .streamlit/
125 |
126 | # Roo IDE specific
127 | .roocode
128 | .roomodes
129 |
130 | # API Keys and Secrets
131 | *.key
132 | secrets.toml
133 | .secrets
134 | config.local.*
135 |
136 | # OS-specific
137 | .DS_Store
138 | .DS_Store?
139 | ._*
140 | .Spotlight-V100
141 | .Trashes
142 | ehthumbs.db
143 | Thumbs.db
144 | Desktop.ini
145 |
146 | # Development files
147 | *.tmp
148 | *.bak
149 | *.orig
150 | *.rej
151 | *~
152 | .#*
153 |
154 | # Application cache and sessions
155 | .cache/
156 | sessions/
157 | *.session
158 | *.history
159 |
160 | # Logs
161 | *.log.*
162 | logs/
163 |
--------------------------------------------------------------------------------
/DEVELOPMENT.md:
--------------------------------------------------------------------------------
1 | # Development Guide
2 |
3 | This document provides information for developers who want to contribute to the MCP Playground project.
4 |
5 | ## Setup Development Environment
6 |
7 | ```bash
8 | # Clone the repository
9 | git clone https://github.com/zanetworker/mcp-playground.git
10 | cd mcp-playground
11 |
12 | # Install development dependencies
13 | make dev
14 | ```
15 |
16 | ## Available Make Commands
17 |
18 | The project includes a Makefile with common commands:
19 |
20 | ```bash
21 | make clean # Remove build artifacts
22 | make test # Run tests
23 | make install # Install the package
24 | make dev # Install in development mode
25 | make lint # Run linting
26 | make format # Format code
27 | make build # Build package
28 | make help # Show help message
29 | ```
30 |
31 | ## Running Tests
32 |
33 | The project includes unit tests to ensure functionality works as expected:
34 |
35 | ```bash
36 | # Run tests
37 | make test
38 | ```
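
If you prefer to invoke pytest directly (for example, to run a single test module), the following should work, assuming the default configuration in `pytest.ini`:

```bash
# Run the full suite verbosely
pytest -v

# Run a single test module
pytest tests/test_client.py -v
```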
39 |
40 | ## Contributing
41 |
42 | Contributions are welcome! Here are some ways you can contribute to this project:
43 |
44 | 1. Report bugs and request features by creating issues
45 | 2. Submit pull requests to fix bugs or add new features
46 | 3. Improve documentation
47 | 4. Write tests to increase code coverage
48 |
49 | Please follow these steps when contributing:
50 |
51 | 1. Fork the repository
52 | 2. Create a new branch for your feature or bugfix
53 | 3. Add tests for your changes
54 | 4. Make your changes
55 | 5. Run the tests to ensure they pass
56 | 6. Submit a pull request
57 |
--------------------------------------------------------------------------------
/LICENSE:
--------------------------------------------------------------------------------
1 | MIT License
2 |
3 | Copyright (c) 2025 MCP SSE Client Python
4 |
5 | Permission is hereby granted, free of charge, to any person obtaining a copy
6 | of this software and associated documentation files (the "Software"), to deal
7 | in the Software without restriction, including without limitation the rights
8 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
9 | copies of the Software, and to permit persons to whom the Software is
10 | furnished to do so, subject to the following conditions:
11 |
12 | The above copyright notice and this permission notice shall be included in all
13 | copies or substantial portions of the Software.
14 |
15 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
18 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
20 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
21 | SOFTWARE.
22 |
--------------------------------------------------------------------------------
/Makefile:
--------------------------------------------------------------------------------
1 | .PHONY: all clean test install dev lint format build help
2 |
3 | # Default target
4 | all: clean test build
5 |
6 | # Clean build artifacts
7 | clean:
8 | rm -rf build/
9 | rm -rf dist/
10 | rm -rf *.egg-info
11 | find . -type d -name __pycache__ -exec rm -rf {} +
12 | find . -type f -name "*.pyc" -delete
13 |
14 | # Run tests
15 | test:
16 | pytest
17 |
18 | # Install the package
19 | install:
20 | pip install .
21 |
22 | # Install in development mode
23 | dev:
24 | pip install -e .
25 | pip install -r requirements.txt
26 |
27 | # Run linting
28 | lint:
29 | pylint mcp_playground tests
30 |
31 | # Format code
32 | format:
33 | black mcp_playground tests
34 |
35 | # Build package
36 | build: clean
37 | python setup.py sdist bdist_wheel
38 |
39 | # Help
40 | help:
41 | @echo "Available targets:"
42 | @echo " all - Clean, test, and build the package"
43 | @echo " clean - Remove build artifacts"
44 | @echo " test - Run tests"
45 | @echo " install - Install the package"
46 | @echo " dev - Install in development mode"
47 | @echo " lint - Run linting"
48 | @echo " format - Format code"
49 | @echo " build - Build package"
50 | @echo " help - Show this help message"
51 |
--------------------------------------------------------------------------------
/README.md:
--------------------------------------------------------------------------------
1 | # MCP Playground
2 |
3 | A comprehensive Python toolkit for interacting with remote Model Context Protocol (MCP) endpoints. It currently supports Server-Sent Events (SSE), with support for the Streamable HTTP transport planned.
4 |
5 | ## 🎯 Project Focus
6 |
7 | **MCP Playground** is specifically designed for **remote MCP client capabilities**, providing robust tools for connecting to and interacting with MCP servers over network protocols:
8 |
9 | - **✅ Server-Sent Events (SSE)** - Full implementation with real-time streaming
10 | - **🔄 Streamable HTTP** - Planned for future releases
11 | - **🤖 LLM Integration** - AI-driven tool selection and execution
12 | - **🧪 Interactive Testing** - Comprehensive testing environments
13 |
14 | ## 🚀 Quick Start
15 |
16 | Get up and running in minutes:
17 |
18 | ```bash
19 | # Clone the repository
20 | git clone https://github.com/zanetworker/mcp-playground.git
21 | cd mcp-playground
22 |
23 | # Install the package
24 | pip install -e .
25 |
26 | # Try the interactive Streamlit app
27 | cd mcp-streamlit-app
28 | pip install -r requirements.txt
29 | streamlit run app.py
30 | ```
31 |
32 | 
33 |
34 | > **🚨 IMPORTANT:** When connecting to MCP servers, always use URLs ending with `/sse`
35 | > Example: `http://localhost:8000/sse` (not `http://localhost:8000`)
36 |
37 | ### Environment Variables
38 |
39 | For convenience, you can set API keys and OpenRouter configuration via environment variables:
40 |
41 | ```bash
42 | # Required for LLM providers
43 | export OPENAI_API_KEY="your-openai-key"
44 | export ANTHROPIC_API_KEY="your-anthropic-key"
45 | export OPENROUTER_API_KEY="your-openrouter-key"
46 |
47 | # Optional OpenRouter configuration for better rankings
48 | export OPENROUTER_SITE_URL="https://your-site.com"
49 | export OPENROUTER_SITE_NAME="Your App Name"
50 | ```
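
These variables are read on startup by the Streamlit app; in your own scripts you can pass them to the bridges explicitly. A minimal sketch (constructor signatures are listed in the API reference below):

```python
import os
from mcp_playground import MCPClient, OpenAIBridge

# Hypothetical local endpoint; any MCP server URL ending in /sse works.
client = MCPClient("http://localhost:8000/sse")
bridge = OpenAIBridge(
    client,
    api_key=os.environ.get("OPENAI_API_KEY"),
    model="gpt-4o",
)
```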
51 |
52 | *The Streamlit interface prominently highlights the `/sse` URL requirement with helpful tooltips and validation.*
53 |
54 | ## 🛠️ Supported Protocols
55 |
56 | ### Current Support
57 | - **Server-Sent Events (SSE)** - Real-time streaming communication with MCP servers
58 | - **HTTP/HTTPS** - Standard request-response patterns
59 |
60 | ### Planned Support
61 | - **Streamable HTTP** - Enhanced HTTP streaming capabilities
62 | - **WebSocket** - Bidirectional real-time communication
63 | - **gRPC Streaming** - High-performance streaming protocol
64 |
65 | ## 🤖 LLM Provider Support
66 |
67 | MCP Playground integrates with multiple LLM providers for intelligent tool selection:
68 |
69 | - **OpenAI**: GPT-4o, GPT-4, GPT-3.5-Turbo
70 | - **Anthropic**: Claude 3 Opus, Claude 3 Sonnet, Claude 3 Haiku
71 | - **Ollama**: Llama 3, Mistral, and other locally hosted models
72 | - **OpenRouter**: Access to 100+ models through a unified API
73 |
74 | ## 📋 Core Features
75 |
76 | ### 1. Remote MCP Client
77 |
78 | Easily connect to any remote MCP endpoint and interact with available tools:
79 |
80 | ```python
81 | import asyncio
82 | from mcp_playground import MCPClient
83 |
84 | async def main():
85 |     # Connect to a remote MCP endpoint with optional timeout and retry settings
86 |     # IMPORTANT: URL must end with /sse for Server-Sent Events
87 |     client = MCPClient(
88 |         "http://localhost:8000/sse",  # Note the /sse suffix!
89 |         timeout=30.0,      # Connection timeout in seconds
90 |         max_retries=3      # Maximum retry attempts
91 |     )
92 |
93 |     # List available tools
94 |     tools = await client.list_tools()
95 |     print(f"Found {len(tools)} tools")
96 |
97 |     # Invoke a calculator tool
98 |     result = await client.invoke_tool(
99 |         "calculator",
100 |         {"x": 10, "y": 5, "operation": "add"}
101 |     )
102 |     print(f"Result: {result.content}")  # Output: Result: 15
103 |     print(f"Success: {result.error_code == 0}")
104 |
105 | asyncio.run(main())
106 | ```
107 |
108 | ### 2. LLM-Powered Tool Selection
109 |
110 | Let AI choose the right tool based on natural language queries:
111 |
112 | ```python
113 | import os
114 | from mcp_playground import MCPClient, OpenAIBridge
115 |
116 | # Connect to MCP endpoint and create an LLM bridge
117 | client = MCPClient("http://localhost:8000/sse")
118 | bridge = OpenAIBridge(
119 |     client,
120 |     api_key=os.environ.get("OPENAI_API_KEY"),
121 |     model="gpt-4o"
122 | )
123 |
124 | # Process a natural language query (run this inside an async function or event loop)
125 | result = await bridge.process_query(
126 |     "Convert this PDF to text: https://example.com/document.pdf"
127 | )
128 |
129 | # The LLM automatically selects the appropriate tool and parameters
130 | if result["tool_call"]:
131 |     print(f"Tool: {result['tool_call']['name']}")
132 |     print(f"Result: {result['tool_result'].content}")
133 | ```
134 |
135 | ### 3. Command-Line Interface
136 |
137 | The package includes a powerful CLI tool for interactive testing and analysis:
138 |
139 | ```bash
140 | # Run the CLI tool (note the /sse suffix in the endpoint URL)
141 | python -m mcp_playground.examples.llm_example --provider openai --endpoint http://localhost:8000/sse
142 | ```
143 |
144 | **Configuration Options:**
145 | ```
146 | usage: llm_example.py [-h] [--provider {openai,anthropic,ollama}]
147 | [--openai-model {gpt-4o,gpt-4-turbo,gpt-4,gpt-3.5-turbo}]
148 | [--anthropic-model {claude-3-opus-20240229,claude-3-sonnet-20240229,claude-3-haiku-20240307}]
149 | [--ollama-model OLLAMA_MODEL] [--ollama-host OLLAMA_HOST]
150 | [--endpoint ENDPOINT] [--openai-key OPENAI_KEY]
151 | [--anthropic-key ANTHROPIC_KEY]
152 | ```
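
For example, to use a locally hosted Ollama model instead of OpenAI:

```bash
python -m mcp_playground.examples.llm_example --provider ollama --ollama-model llama3 --endpoint http://localhost:8000/sse
```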
153 |
154 | ### 4. Interactive Testing Environment
155 |
156 | The included Streamlit app provides a comprehensive testing interface:
157 |
158 | **Key Features:**
159 | - **Multiple Chat Modes:**
160 | - **Auto Mode**: LLM automatically decides when to use tools
161 | - **Chat Mode**: Direct conversation without MCP tools
162 | - **Tools Mode**: Always attempt to use MCP tools
163 | - **Multi-LLM Support**: OpenAI, Anthropic, Ollama, and OpenRouter integration
164 | - **Dynamic Configuration**: Connect to any MCP endpoint with real-time status
165 | - **Tool Discovery**: Automatic detection and display of available tools
166 | - **Beautiful Response Formatting**: Special formatting for structured data
167 | - **Error Handling**: Robust connection management with clear error messages
168 |
169 | To run the Streamlit app:
170 | ```bash
171 | cd mcp-streamlit-app
172 | pip install -r requirements.txt
173 | streamlit run app.py
174 | ```
175 |
176 | ## 📦 Installation
177 |
178 | ### From Source
179 |
180 | ```bash
181 | git clone https://github.com/zanetworker/mcp-playground.git
182 | cd mcp-playground
183 | pip install -e .
184 | ```
185 |
186 | ### Using pip (once published)
187 |
188 | ```bash
189 | pip install mcp-playground
190 | ```
191 |
192 | ## 🔧 API Reference
193 |
194 | ### MCPClient
195 |
196 | ```python
197 | client = MCPClient(endpoint, timeout=30.0, max_retries=3)
198 | ```
199 |
200 | **Parameters:**
201 | - `endpoint`: The MCP endpoint URL (must be http or https and end with `/sse`)
202 | - `timeout`: Connection timeout in seconds (default: 30.0)
203 | - `max_retries`: Maximum number of retry attempts (default: 3)
204 |
205 | **⚠️ URL Requirements:**
206 | - The endpoint URL **must** end with `/sse` for Server-Sent Events communication
207 | - Examples of correct URLs:
208 | - `http://localhost:8000/sse`
209 | - `https://my-mcp-server.com/sse`
210 | - `http://192.168.1.100:3000/sse`
211 |
212 | #### Methods
213 |
214 | ##### `async list_tools() -> List[ToolDef]`
215 |
216 | Lists available tools from the MCP endpoint.
217 |
218 | ##### `async invoke_tool(tool_name: str, kwargs: Dict[str, Any]) -> ToolInvocationResult`
219 |
220 | Invokes a specific tool with parameters.
221 |
222 | ##### `async check_connection() -> bool`
223 |
224 | Check if the MCP endpoint is reachable.
225 |
226 | ##### `get_endpoint_info() -> Dict[str, Any]`
227 |
228 | Get information about the configured endpoint.
229 |
230 | ### Error Handling
231 |
232 | The client includes robust error handling with specific exception types:
233 |
234 | ```python
235 | from mcp_playground import MCPClient, MCPConnectionError, MCPTimeoutError
236 |
237 | try:
238 |     client = MCPClient("http://localhost:8000/sse")
239 |     tools = await client.list_tools()
240 | except MCPConnectionError as e:
241 |     print(f"Connection failed: {e}")
242 | except MCPTimeoutError as e:
243 |     print(f"Operation timed out: {e}")
244 | ```
245 |
246 | ### LLM Bridges
247 |
248 | #### OpenAIBridge
249 | ```python
250 | bridge = OpenAIBridge(mcp_client, api_key, model="gpt-4o")
251 | ```
252 |
253 | #### AnthropicBridge
254 | ```python
255 | bridge = AnthropicBridge(mcp_client, api_key, model="claude-3-opus-20240229")
256 | ```
257 |
258 | #### OllamaBridge
259 | ```python
260 | bridge = OllamaBridge(mcp_client, model="llama3", host=None)
261 | ```
262 |
263 | #### OpenRouterBridge
264 | ```python
265 | bridge = OpenRouterBridge(mcp_client, api_key, model="anthropic/claude-3-opus")
266 | ```
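
The Streamlit app also forwards the optional OpenRouter site metadata to this bridge. A sketch, assuming the keyword arguments used in `mcp-streamlit-app/app.py`:

```python
import os
from mcp_playground import MCPClient
from mcp_playground.llm_bridge.openrouter_bridge import OpenRouterBridge

mcp_client = MCPClient("http://localhost:8000/sse")
bridge = OpenRouterBridge(
    mcp_client,
    api_key=os.environ.get("OPENROUTER_API_KEY"),
    model="anthropic/claude-3-opus",
    site_url=os.environ.get("OPENROUTER_SITE_URL"),    # optional, used for OpenRouter rankings
    site_name=os.environ.get("OPENROUTER_SITE_NAME"),  # optional
)
```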
267 |
268 | ## 🔄 Advanced Features
269 |
270 | ### Retry Logic and Resilience
271 |
272 | The client includes automatic retry logic with exponential backoff:
273 |
274 | ```python
275 | # Configure custom retry behavior
276 | client = MCPClient(
277 | "http://localhost:8000/sse",
278 | timeout=60.0, # Longer timeout for slow servers
279 | max_retries=5 # More retry attempts
280 | )
281 |
282 | # The client automatically retries failed operations
283 | # with exponential backoff: 1s, 2s, 4s, 8s, 16s
284 | ```
285 |
286 | ### Connection Health Monitoring
287 |
288 | ```python
289 | # Check if endpoint is reachable before operations
290 | if await client.check_connection():
291 |     tools = await client.list_tools()
292 | else:
293 |     print("Server is not reachable")
294 |
295 | # Get detailed endpoint information
296 | info = client.get_endpoint_info()
297 | print(f"Connected to: {info['hostname']}:{info['port']}")
298 | ```
299 |
300 | ## 📋 Requirements
301 |
302 | - Python 3.8+
303 | - `mcp>=0.1.0` (Model Context Protocol library)
304 | - `pydantic>=2.0.0` (Data validation)
305 | - `openai>=1.70.0` (for OpenAI integration)
306 | - `anthropic>=0.15.0` (for Anthropic integration)
307 | - `ollama>=0.1.7` (for Ollama integration)
308 | - `streamlit` (for the interactive test app)
309 |
310 | ## 🐛 Troubleshooting
311 |
312 | ### Common Issues
313 |
314 | **"unhandled errors in a TaskGroup" Error:**
315 | This typically occurs with asyncio compatibility issues. The Streamlit app handles this automatically, but for custom implementations, ensure proper async context management.
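
One workable pattern (and the one used in `mcp-streamlit-app/app.py`) is to detect whether an event loop is already running and, if so, hand the coroutine to `asyncio.run()` on a worker thread. A minimal sketch:

```python
import asyncio
import concurrent.futures

def run_async(coro, timeout=60):
    """Run a coroutine whether or not an event loop is already running."""
    try:
        asyncio.get_running_loop()  # raises RuntimeError if no loop is running
    except RuntimeError:
        return asyncio.run(coro)
    # A loop is already running: execute the coroutine in a fresh loop
    # on a worker thread and wait for the result.
    with concurrent.futures.ThreadPoolExecutor() as executor:
        return executor.submit(asyncio.run, coro).result(timeout=timeout)

# Usage (client as in the examples above):
# tools = run_async(client.list_tools())
```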
316 |
317 | **Connection Timeouts:**
318 | - Increase the timeout parameter: `MCPClient(endpoint, timeout=60.0)`
319 | - Check if the MCP server is running and accessible
320 | - Verify the endpoint URL is correct and ends with `/sse`
321 |
322 | **Import Errors:**
323 | - Ensure all dependencies are installed: `pip install -e .`
324 | - Check Python version compatibility (3.8+)
325 |
326 | **LLM Integration Issues:**
327 | - Verify API keys are set correctly
328 | - Check model names match supported versions
329 | - For Ollama, ensure the service is running locally (see the quick check below)
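
For Ollama specifically, a quick way to confirm the local service is up and to see which models are installed:

```bash
ollama list
```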
330 |
331 | ## 🚀 Roadmap
332 |
333 | ### Upcoming Features
334 |
335 | - **Streamable HTTP Support** - Enhanced HTTP streaming capabilities
336 | - **WebSocket Integration** - Real-time bidirectional communication
337 | - **Connection Pooling** - Improved performance for multiple connections
338 | - **Advanced Caching** - Smart caching for tool definitions and results
339 | - **Monitoring Dashboard** - Real-time monitoring of MCP connections
340 | - **Plugin System** - Extensible architecture for custom protocols
341 |
342 | ## 🤝 Development
343 |
344 | For information on development setup, contributing guidelines, and available make commands, see [DEVELOPMENT.md](DEVELOPMENT.md).
345 |
346 | ## 📄 License
347 |
348 | This project is licensed under the MIT License - see the LICENSE file for details.
349 |
350 | ## 🙏 Acknowledgments
351 |
352 | - Model Context Protocol (MCP) specification and community
353 | - OpenAI, Anthropic, and Ollama for LLM API access
354 | - Streamlit for the interactive testing framework
355 |
--------------------------------------------------------------------------------
/image.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/zanetworker/mcp-sse-client-python/c1dceaac5b87de9b2a2796950739d01910454efe/image.png
--------------------------------------------------------------------------------
/mcp-streamlit-app/README.md:
--------------------------------------------------------------------------------
1 | # MCP Streamlit Tool Tester App
2 |
3 | A Streamlit application for interactively testing MCP (Model Context Protocol) servers and tools using LLMs (Large Language Models).
4 |
5 | ## Features
6 |
7 | - **Modern UI**: Clean, responsive interface built with Streamlit, including dark mode styling.
8 | - **LLM Integration**: Support for OpenAI, Anthropic, Ollama, and OpenRouter models.
9 | - **Tool Visualization**: Clear display of available tools and their parameters in the sidebar.
10 | - **Educational Display**: Shows LLM reasoning, tool selection, and tool results separately.
11 | - **Smart Output Display**:
12 | - Tabbed view for JSON and extracted Text content from tool results.
13 | - Automatic format detection (JSON, Markdown, HTML, Text) in the Text view.
14 | - Content displayed appropriately based on detected format.
15 | - Limited display height to prevent excessive scrolling.
16 | - Easy copy and download options for extracted content.
17 | - **Conversation Management**:
18 | - History tab in the sidebar to view, load, and delete past conversations.
19 | - Ability to start new conversations.
20 | - Server endpoint history for quick switching (though direct input is primary).
21 |
22 | ## Prerequisites
23 |
24 | - Python 3.7+
25 | - Required packages listed in `requirements.txt` (Streamlit, OpenAI, Anthropic, Markdown, etc.)
26 | - The main `mcp-playground` package installed (e.g., via `pip install -e .` from the root directory).
27 |
28 | ## Installation
29 |
30 | 1. Ensure you are in the root directory of the `mcp-playground` project.
31 | 2. Navigate to the app directory:
32 | ```bash
33 | cd mcp-streamlit-app
34 | ```
35 | 3. Install dependencies:
36 | ```bash
37 | pip install -r requirements.txt
38 | ```
39 |
40 | ## Running the App
41 |
42 | You can run the app using the provided script:
43 |
44 | ```bash
45 | ./run.sh
46 | ```
47 |
48 | Or directly with Streamlit:
49 |
50 | ```bash
51 | streamlit run app.py
52 | ```
53 |
54 | ## Usage
55 |
56 | 1. Start the application using `./run.sh` or `streamlit run app.py`.
57 | 2. In the sidebar's "Server Config" tab:
58 | - Enter the full URL for your MCP server endpoint.
59 | - Select the LLM provider (OpenAI or Anthropic).
60 | - Enter the corresponding API key.
61 | - Click "Connect".
62 | 3. Once connected, available tools will appear below the configuration tabs.
63 | 4. Use the main chat interface to interact with the LLM and trigger tools.
64 | 5. View results, switch between JSON/Text views, and use copy/download options.
65 | 6. Use the "History" tab to manage past conversations.
66 | 7. Use the "New Conversation" button to save the current chat and start a new one.
67 |
68 | ## Configuration
69 |
70 | - Server endpoints and API keys are configured via the sidebar UI.
71 | - API keys are stored in Streamlit's session state for the duration of the session.
72 | - Conversation history is also stored in session state and will be lost when the app restarts or the browser tab is closed.
73 |
74 | ## License
75 |
76 | This application is part of the `mcp-playground` project, licensed under the MIT License.
77 |
--------------------------------------------------------------------------------
/mcp-streamlit-app/app.py:
--------------------------------------------------------------------------------
1 | import streamlit as st
2 | import sys
3 | import os
4 | import json
5 | import asyncio
6 | import datetime
7 | import traceback
8 | import ollama # Import ollama for model listing
9 | from typing import List, Dict, Any, Optional, Union
10 |
11 | # Add parent directory to path to import mcp_playground
12 | sys.path.append(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
13 |
14 | # Import the MCP Playground client and model definitions
15 | from mcp_playground.client import MCPClient, MCPConnectionError, MCPTimeoutError
16 | from mcp_playground.llm_bridge.openai_bridge import OpenAIBridge
17 | from mcp_playground.llm_bridge.anthropic_bridge import AnthropicBridge
18 | from mcp_playground.llm_bridge.ollama_bridge import OllamaBridge
19 | from mcp_playground.llm_bridge.openrouter_bridge import OpenRouterBridge
20 | from mcp_playground.llm_bridge.openrouter_client import OpenRouterClient, format_model_display
21 | from mcp_playground.llm_bridge.models import (
22 | OPENAI_MODELS, DEFAULT_OPENAI_MODEL,
23 | ANTHROPIC_MODELS, DEFAULT_ANTHROPIC_MODEL,
24 | DEFAULT_OLLAMA_MODEL
25 | )
26 |
27 | # Set page config
28 | st.set_page_config(
29 | page_title="MCP Playground",
30 | page_icon="🎮",
31 | layout="wide",
32 | initial_sidebar_state="expanded",
33 | )
34 |
35 | # Load custom CSS
36 | def load_css():
37 | with open("style.css") as f:
38 | st.markdown(f"", unsafe_allow_html=True)
39 |
40 | # Apply custom styling
41 | load_css()
42 |
43 | # Initialize session state
44 | if "connected" not in st.session_state:
45 | st.session_state.connected = False
46 | if "client" not in st.session_state:
47 | st.session_state.client = None
48 | if "llm_bridge" not in st.session_state:
49 | st.session_state.llm_bridge = None
50 | if "tools" not in st.session_state:
51 | st.session_state.tools = []
52 | if "messages" not in st.session_state:
53 | st.session_state.messages = []
54 | if "connection_error" not in st.session_state:
55 | st.session_state.connection_error = None
56 | if "api_keys" not in st.session_state:
57 | st.session_state.api_keys = {
58 | "openai": os.environ.get("OPENAI_API_KEY", ""),
59 | "anthropic": os.environ.get("ANTHROPIC_API_KEY", ""),
60 | "openrouter": os.environ.get("OPENROUTER_API_KEY", "")
61 | }
62 | if "mcp_endpoint" not in st.session_state:
63 | st.session_state.mcp_endpoint = "http://localhost:8001/sse"
64 | if "llm_provider" not in st.session_state:
65 | st.session_state.llm_provider = "openai"
66 | if "openai_model" not in st.session_state:
67 | st.session_state.openai_model = DEFAULT_OPENAI_MODEL
68 | if "anthropic_model" not in st.session_state:
69 | st.session_state.anthropic_model = DEFAULT_ANTHROPIC_MODEL
70 | if "ollama_model" not in st.session_state:
71 | st.session_state.ollama_model = DEFAULT_OLLAMA_MODEL
72 | if "ollama_host" not in st.session_state:
73 | st.session_state.ollama_host = ""
74 | if "ollama_models" not in st.session_state:
75 | st.session_state.ollama_models = []
76 | if "chat_mode" not in st.session_state:
77 | st.session_state.chat_mode = "auto" # auto, chat, tools
78 | if "show_tools_only" not in st.session_state:
79 | st.session_state.show_tools_only = True # Default to showing only tool-capable models
80 |
81 | # Auto-refresh session state
82 | if "models_loaded_on_startup" not in st.session_state:
83 | st.session_state.models_loaded_on_startup = False
84 | if "last_provider" not in st.session_state:
85 | st.session_state.last_provider = None
86 | if "auto_refresh_enabled" not in st.session_state:
87 | st.session_state.auto_refresh_enabled = True
88 |
89 | # OpenRouter session state
90 | if "openrouter_site_url" not in st.session_state:
91 | st.session_state.openrouter_site_url = os.environ.get("OPENROUTER_SITE_URL", "")
92 | if "openrouter_site_name" not in st.session_state:
93 | st.session_state.openrouter_site_name = os.environ.get("OPENROUTER_SITE_NAME", "")
94 |
95 | # OpenRouter model caches for each provider
96 | if "openai_openrouter_models" not in st.session_state:
97 | st.session_state.openai_openrouter_models = []
98 | if "anthropic_openrouter_models" not in st.session_state:
99 | st.session_state.anthropic_openrouter_models = []
100 | if "google_openrouter_models" not in st.session_state:
101 | st.session_state.google_openrouter_models = []
102 |
103 | # Selected OpenRouter models
104 | if "openai_openrouter_model" not in st.session_state:
105 | st.session_state.openai_openrouter_model = None
106 | if "anthropic_openrouter_model" not in st.session_state:
107 | st.session_state.anthropic_openrouter_model = None
108 | if "google_openrouter_model" not in st.session_state:
109 | st.session_state.google_openrouter_model = None
110 |
111 | # --- Ollama Helper Functions ---
112 | async def fetch_ollama_models(host=None):
113 | """Asynchronously fetch available Ollama models from the server.
114 |
115 | Args:
116 | host: Optional Ollama host URL. If None, uses default.
117 |
118 | Returns:
119 | List of model names, or empty list if error occurs.
120 | """
121 | try:
122 | client = ollama.AsyncClient(host=host)
123 | models_info = await client.list()
124 |
125 | # Extract model names from the response
126 | model_names = []
127 |
128 | # Direct extraction from ListResponse object (most common case)
129 | if hasattr(models_info, 'models'):
130 | # Extract directly from the models attribute
131 | for model in models_info.models:
132 | if hasattr(model, 'model') and model.model:
133 | model_names.append(model.model)
134 |
135 | # If we couldn't extract models directly and model_names is still empty
136 | if not model_names:
137 | # Try parsing the string representation as a fallback
138 | models_str = str(models_info)
139 |
140 | # Use regex to extract model names from the string representation
141 | import re
142 | pattern = r"model='([^']+)'"
143 | model_names = re.findall(pattern, models_str)
144 |
145 | # If that didn't work, try other formats
146 | if not model_names and isinstance(models_info, dict) and 'models' in models_info:
147 | # Dictionary format
148 | model_names = [m.get('name', m.get('model', '')) for m in models_info.get('models', [])]
149 | elif not model_names and isinstance(models_info, list):
150 | # List format
151 | model_names = [m.get('name', m.get('model', '')) for m in models_info]
152 |
153 | # Filter out empty names
154 | model_names = [name for name in model_names if name]
155 |
156 | return model_names
157 | except Exception as e:
158 | print(f"Error fetching Ollama models: {e}")
159 | return []
160 |
161 | # --- Tool Capability Detection ---
162 | def is_tool_capable_model(model_id: str, model_data: dict = None) -> bool:
163 | """Determine if a model supports tool/function calling.
164 |
165 | Args:
166 | model_id: The model identifier (e.g., 'openai/gpt-4')
167 | model_data: Optional model metadata from OpenRouter API
168 |
169 | Returns:
170 | bool: True if the model supports tools, False otherwise
171 | """
172 | # Check OpenRouter metadata first if available
173 | if model_data:
174 | # Check for explicit tool support flags
175 | supports_tools = model_data.get("supports_tools", False)
176 | supports_function_calling = model_data.get("supports_function_calling", False)
177 | if supports_tools or supports_function_calling:
178 | return True
179 |
180 | # Fallback to pattern matching for known tool-capable models
181 | model_lower = model_id.lower()
182 |
183 | # OpenAI models with tool support
184 | if any(pattern in model_lower for pattern in [
185 | "gpt-4o", "gpt-4-turbo", "gpt-4", "gpt-3.5-turbo"
186 | ]):
187 | # Exclude base models and instruct variants that typically don't support tools
188 | if any(exclude in model_lower for exclude in ["base", "instruct"]):
189 | return False
190 | return True
191 |
192 | # Anthropic Claude models with tool support
193 | if any(pattern in model_lower for pattern in [
194 | "claude-3", "claude-3.5"
195 | ]):
196 | return True
197 |
198 | # Google Gemini models with tool support
199 | if any(pattern in model_lower for pattern in [
200 | "gemini-1.5", "gemini-pro"
201 | ]):
202 | # Exclude vision-only models
203 | if "vision" in model_lower and "pro" not in model_lower:
204 | return False
205 | return True
206 |
207 | # Meta models with tool support
208 | if any(pattern in model_lower for pattern in [
209 | "llama-3.1", "llama-3.2"
210 | ]):
211 | # Only larger models typically support tools
212 | if any(size in model_lower for size in ["70b", "405b"]):
213 | return True
214 |
215 | # Mistral models with tool support
216 | if any(pattern in model_lower for pattern in [
217 | "mistral-large", "mistral-medium", "mixtral"
218 | ]):
219 | return True
220 |
221 | # Default to False for unknown models
222 | return False
223 |
224 | # --- OpenRouter Helper Functions ---
225 | async def fetch_openrouter_models_by_provider(api_key, provider, limit=5, tools_only=False):
226 | """Fetch top N most popular models for a specific provider from OpenRouter.
227 |
228 | Args:
229 | api_key: OpenRouter API key
230 | provider: Provider name (e.g., 'openai', 'anthropic', 'google')
231 | limit: Maximum number of models to return
232 | tools_only: If True, only return models that support tool calling
233 |
234 | Returns:
235 | List of formatted model dictionaries
236 | """
237 | try:
238 | client = OpenRouterClient(
239 | api_key=api_key,
240 | site_url=st.session_state.openrouter_site_url,
241 | site_name=st.session_state.openrouter_site_name
242 | )
243 |
244 | # Fetch more models initially if filtering for tools
245 | fetch_limit = limit * 3 if tools_only else limit
246 | models = await client.fetch_top_models_by_provider(provider, fetch_limit)
247 |
248 | # Filter for tool-capable models if requested
249 | if tools_only:
250 | tool_capable_models = []
251 | for model in models:
252 | if is_tool_capable_model(model["id"], model):
253 | tool_capable_models.append(model)
254 | if len(tool_capable_models) >= limit:
255 | break
256 | models = tool_capable_models
257 |
258 | # Format models for display
259 | formatted_models = []
260 | for model in models:
261 | formatted = format_model_display(model, include_tool_indicator=True)
262 | formatted_models.append(formatted)
263 |
264 | return formatted_models
265 | except Exception as e:
266 | print(f"Error fetching {provider} models from OpenRouter: {e}")
267 | return []
268 |
269 | def sync_fetch_openrouter_models(api_key, provider, limit=5, tools_only=False):
270 | """Synchronous wrapper for OpenRouter model fetching."""
271 | try:
272 | # Handle event loop properly
273 | try:
274 | loop = asyncio.get_running_loop()
275 | # If we're in an existing loop, use thread executor
276 | import concurrent.futures
277 | with concurrent.futures.ThreadPoolExecutor() as executor:
278 | future = executor.submit(
279 | asyncio.run,
280 | fetch_openrouter_models_by_provider(api_key, provider, limit, tools_only)
281 | )
282 | return future.result(timeout=30)
283 | except RuntimeError:
284 | # No event loop running, safe to use asyncio.run()
285 | return asyncio.run(fetch_openrouter_models_by_provider(api_key, provider, limit, tools_only))
286 | except Exception as e:
287 | print(f"Error in sync fetch: {e}")
288 | return []
289 |
290 | # --- Auto-Refresh Functions ---
291 | async def auto_refresh_models_async(provider: str, force: bool = False) -> bool:
292 | """Auto-refresh models for the specified provider.
293 |
294 | Args:
295 | provider: The LLM provider ('openai', 'anthropic', 'google', 'ollama')
296 | force: Force refresh even if models are already loaded
297 |
298 | Returns:
299 | bool: True if models were successfully refreshed, False otherwise
300 | """
301 | try:
302 | # Check if we should refresh
303 | if not force and not should_refresh_models(provider):
304 | return True
305 |
306 | if provider in ["openai", "anthropic", "google"]:
307 | # OpenRouter providers
308 | api_key = st.session_state.api_keys.get("openrouter")
309 | if not api_key:
310 | print(f"No OpenRouter API key available for {provider}")
311 | return False
312 |
313 | models = await fetch_openrouter_models_by_provider(
314 | api_key, provider, 5, st.session_state.show_tools_only
315 | )
316 |
317 | if models:
318 | st.session_state[f"{provider}_openrouter_models"] = models
319 | # Auto-select first model if none selected
320 | selected_model_key = f"{provider}_openrouter_model"
321 | if not st.session_state.get(selected_model_key):
322 | st.session_state[selected_model_key] = models[0]["id"]
323 | return True
324 | else:
325 | print(f"No models found for {provider}")
326 | return False
327 |
328 | elif provider == "ollama":
329 | # Ollama provider
330 | models = await fetch_ollama_models(st.session_state.ollama_host)
331 | if models:
332 | st.session_state.ollama_models = [str(model) for model in models]
333 | # Auto-select first model if current model not in list
334 | if (st.session_state.ollama_model not in st.session_state.ollama_models and
335 | st.session_state.ollama_models):
336 | st.session_state.ollama_model = st.session_state.ollama_models[0]
337 | return True
338 | else:
339 | print("No Ollama models found")
340 | return False
341 |
342 | return False
343 |
344 | except Exception as e:
345 | print(f"Error auto-refreshing {provider} models: {e}")
346 | return False
347 |
348 | def auto_refresh_models(provider: str, force: bool = False) -> bool:
349 | """Synchronous wrapper for auto-refresh models."""
350 | try:
351 | # Handle event loop properly
352 | try:
353 | loop = asyncio.get_running_loop()
354 | # If we're in an existing loop, use thread executor
355 | import concurrent.futures
356 | with concurrent.futures.ThreadPoolExecutor() as executor:
357 | future = executor.submit(asyncio.run, auto_refresh_models_async(provider, force))
358 | return future.result(timeout=30)
359 | except RuntimeError:
360 | # No event loop running, safe to use asyncio.run()
361 | return asyncio.run(auto_refresh_models_async(provider, force))
362 | except Exception as e:
363 | print(f"Error in sync auto-refresh: {e}")
364 | return False
365 |
366 | def should_refresh_models(provider: str) -> bool:
367 | """Determine if models should be refreshed for the given provider."""
368 | if provider in ["openai", "anthropic", "google"]:
369 | models_key = f"{provider}_openrouter_models"
370 | return not st.session_state.get(models_key)
371 | elif provider == "ollama":
372 | return not st.session_state.get("ollama_models")
373 | return True
374 |
375 | def detect_provider_change() -> bool:
376 | """Detect if the provider has changed since last run."""
377 | current_provider = st.session_state.llm_provider
378 | last_provider = st.session_state.get("last_provider")
379 |
380 | if last_provider != current_provider:
381 | st.session_state.last_provider = current_provider
382 | return True
383 | return False
384 |
385 | def handle_startup_auto_refresh():
386 | """Handle auto-refresh on app startup."""
387 | if not st.session_state.models_loaded_on_startup and st.session_state.auto_refresh_enabled:
388 | current_provider = st.session_state.llm_provider
389 |
390 | with st.spinner(f"Loading {current_provider} models..."):
391 | success = auto_refresh_models(current_provider, force=False)
392 | if success:
393 | st.session_state.models_loaded_on_startup = True
394 | st.session_state.last_provider = current_provider
395 |
396 | def handle_provider_change_auto_refresh():
397 | """Handle auto-refresh when provider changes."""
398 | if detect_provider_change() and st.session_state.auto_refresh_enabled:
399 | current_provider = st.session_state.llm_provider
400 |
401 | with st.spinner(f"Loading {current_provider} models..."):
402 | success = auto_refresh_models(current_provider, force=True)
403 | if success:
404 | # Clear previous provider's selection to avoid confusion
405 | providers = ["openai", "anthropic", "google"]
406 | for provider in providers:
407 | if provider != current_provider:
408 | selected_key = f"{provider}_openrouter_model"
409 | if selected_key in st.session_state:
410 | st.session_state[selected_key] = None
411 |
412 | # --- Direct LLM Chat Functions ---
413 | async def chat_with_llm_directly(user_input):
414 | """Chat directly with LLM without tools."""
415 | if st.session_state.llm_provider == "ollama":
416 | try:
417 | host = st.session_state.ollama_host if st.session_state.ollama_host else None
418 | client = ollama.AsyncClient(host=host)
419 |
420 | response = await client.chat(
421 | model=st.session_state.ollama_model,
422 | messages=[{"role": "user", "content": user_input}]
423 | )
424 |
425 | return response.get('message', {}).get('content', 'No response received')
426 |
427 | except Exception as e:
428 | return f"Error chatting with Ollama: {e}"
429 |
430 | elif st.session_state.llm_provider in ["openai", "anthropic", "google"]:
431 | try:
432 | # Use OpenRouter for these providers
433 | import openai
434 |
435 | # Get selected model for the provider
436 | selected_model_key = f"{st.session_state.llm_provider}_openrouter_model"
437 | selected_model = st.session_state.get(selected_model_key)
438 |
439 | if not selected_model:
440 | return f"No {st.session_state.llm_provider} model selected. Please select a model first."
441 |
442 | if not st.session_state.api_keys["openrouter"]:
443 | return "No OpenRouter API key configured. Please add your API key."
444 |
445 | # Create OpenRouter client
446 | client = openai.AsyncOpenAI(
447 | base_url="https://openrouter.ai/api/v1",
448 | api_key=st.session_state.api_keys["openrouter"]
449 | )
450 |
451 | # Prepare extra headers
452 | extra_headers = {}
453 | if st.session_state.openrouter_site_url:
454 | extra_headers["HTTP-Referer"] = st.session_state.openrouter_site_url
455 | if st.session_state.openrouter_site_name:
456 | extra_headers["X-Title"] = st.session_state.openrouter_site_name
457 |
458 | response = await client.chat.completions.create(
459 | extra_headers=extra_headers,
460 | model=selected_model,
461 | messages=st.session_state.messages + [{"role": "user", "content": user_input}]
462 | )
463 |
464 | return response.choices[0].message.content
465 |
466 | except Exception as e:
467 | return f"Error chatting with {st.session_state.llm_provider} via OpenRouter: {e}"
468 |
469 | return "No LLM provider configured for direct chat."
470 |
471 | # --- Connection Functions ---
472 | async def connect_to_server_async():
473 | """Connect to MCP server and LLM provider with enhanced error handling."""
474 | try:
475 | st.session_state.connection_error = None
476 |
477 | # Create MCP client with correct parameters (no retry_delay)
478 | client = MCPClient(
479 | st.session_state.mcp_endpoint,
480 | timeout=30.0,
481 | max_retries=3
482 | )
483 |
484 | # Test connection by listing tools
485 | tools = await client.list_tools()
486 |
487 | # Create LLM bridge based on provider
488 | llm_bridge = None
489 | if st.session_state.llm_provider in ["openai", "anthropic", "google"] and st.session_state.api_keys["openrouter"]:
490 | # Get selected model for the provider
491 | selected_model_key = f"{st.session_state.llm_provider}_openrouter_model"
492 | selected_model = st.session_state.get(selected_model_key)
493 |
494 | if selected_model:
495 | llm_bridge = OpenRouterBridge(
496 | client,
497 | api_key=st.session_state.api_keys["openrouter"],
498 | model=selected_model,
499 | site_url=st.session_state.openrouter_site_url,
500 | site_name=st.session_state.openrouter_site_name
501 | )
502 | elif st.session_state.llm_provider == "ollama":
503 | host = st.session_state.ollama_host if st.session_state.ollama_host else None
504 | llm_bridge = OllamaBridge(client, model=st.session_state.ollama_model, host=host)
505 |
506 | # Update session state
507 | st.session_state.client = client
508 | st.session_state.llm_bridge = llm_bridge
509 | st.session_state.tools = tools
510 | st.session_state.connected = True
511 |
512 | return True, f"Connected to {st.session_state.mcp_endpoint}", len(tools)
513 |
514 | except MCPConnectionError as e:
515 | st.session_state.connected = False
516 | st.session_state.connection_error = f"Connection failed: {e}"
517 | return False, f"Connection failed: {e}", 0
518 | except MCPTimeoutError as e:
519 | st.session_state.connected = False
520 | st.session_state.connection_error = f"Connection timed out: {e}"
521 | return False, f"Connection timed out: {e}", 0
522 | except Exception as e:
523 | st.session_state.connected = False
524 | st.session_state.connection_error = f"Unexpected error: {e}"
525 | return False, f"Unexpected error: {e}\n{traceback.format_exc()}", 0
526 |
527 | def connect_to_server():
528 | """Synchronous wrapper for async connection function."""
529 | try:
530 | # Handle event loop properly
531 | try:
532 | loop = asyncio.get_running_loop()
533 | # If we're in an existing loop, use thread executor
534 | import concurrent.futures
535 | with concurrent.futures.ThreadPoolExecutor() as executor:
536 | future = executor.submit(asyncio.run, connect_to_server_async())
537 | success, message, tool_count = future.result(timeout=60)
538 | except RuntimeError:
539 | # No event loop running, safe to use asyncio.run()
540 | success, message, tool_count = asyncio.run(connect_to_server_async())
541 |
542 | if success:
543 | # Single consolidated success message with all connection details
544 | connection_details = [f"Connected to {st.session_state.mcp_endpoint}"]
545 |
546 | if st.session_state.llm_bridge:
547 | if st.session_state.llm_provider == "ollama":
548 | connection_details.append(f"LLM: {st.session_state.llm_provider} ({st.session_state.ollama_model})")
549 | else:
550 | connection_details.append(f"LLM: {st.session_state.llm_provider}")
551 |
552 | if tool_count > 0:
553 | connection_details.append(f"Tools: {tool_count} available")
554 | else:
555 | connection_details.append("Tools: None found")
556 |
557 | # Show single success message with all details
558 | st.success("✅ " + " | ".join(connection_details))
559 | # Force UI refresh to update button and status immediately
560 | st.rerun()
561 | else:
562 | st.error(f"❌ {message}")
563 | except Exception as e:
564 | st.error(f"❌ Connection error: {e}")
565 | st.session_state.connected = False
566 | st.session_state.connection_error = str(e)
567 |
568 | def disconnect_from_server():
569 | """Disconnect from server and clean up."""
570 | st.session_state.connected = False
571 | st.session_state.client = None
572 | st.session_state.llm_bridge = None
573 | st.session_state.tools = []
574 | st.session_state.connection_error = None
575 | st.success("✅ Disconnected from server")
576 | # Force UI refresh to update button and status immediately
577 | st.rerun()
578 |
579 | # --- Response Parsing Helper ---
580 | def extract_content_from_llm_response(llm_response, response_stage="final"):
581 | """Extract clean text content from different LLM provider response formats.
582 |
583 | Args:
584 | llm_response: Response object from OpenAI, Anthropic, Ollama, or dict
585 | response_stage: "initial" or "final" to handle different processing stages
586 |
587 | Returns:
588 | str: Clean text content extracted from the response (never None)
589 | """
590 | try:
591 | print(f"DEBUG extract_content_from_llm_response: Input type: {type(llm_response)}")
592 | print(f"DEBUG extract_content_from_llm_response: Input value: {llm_response}")
593 |
594 | # OpenAI ChatCompletion object
595 | if hasattr(llm_response, 'choices') and llm_response.choices:
596 | content = llm_response.choices[0].message.content
597 | print(f"DEBUG: OpenAI content extracted: {content}")
598 | return content if content is not None else "No content received from OpenAI"
599 |
600 | # Anthropic Message object
601 | elif hasattr(llm_response, 'content') and llm_response.content:
602 | for content in llm_response.content:
603 | if hasattr(content, 'type') and content.type == "text":
604 | text = content.text
605 | print(f"DEBUG: Anthropic text content extracted: {text}")
606 | return text if text is not None else "No text content from Anthropic"
607 | # Fallback: return first content item as string
608 | first_content = str(llm_response.content[0])
609 | print(f"DEBUG: Anthropic fallback content: {first_content}")
610 | return first_content if first_content else "Empty content from Anthropic"
611 |
612 | # Dict response (could be from any provider)
613 | elif isinstance(llm_response, dict):
614 | print(f"DEBUG: Dict response keys: {list(llm_response.keys())}")
615 |
616 | # Check for direct content key
617 | if 'content' in llm_response:
618 | content = llm_response['content']
619 | print(f"DEBUG: Direct content key found: {content}")
620 | return content if content else "Empty content in dict"
621 |
622 | # Check for message.content (Ollama format)
623 | if 'message' in llm_response:
624 | message = llm_response['message']
625 | if isinstance(message, dict) and 'content' in message:
626 | content = message['content']
627 | print(f"DEBUG: Ollama message content: {content}")
628 | return content if content is not None else "No content in Ollama message"
629 |
630 | # Check for response key (some providers use this)
631 | if 'response' in llm_response:
632 | response = llm_response['response']
633 | print(f"DEBUG: Response key found: {response}")
634 | return response if response else "Empty response in dict"
635 |
636 | # Check for text key
637 | if 'text' in llm_response:
638 | text = llm_response['text']
639 | print(f"DEBUG: Text key found: {text}")
640 | return text if text else "Empty text in dict"
641 |
642 | # Fallback: convert entire dict to string
643 | fallback = str(llm_response)
644 | print(f"DEBUG: Dict fallback: {fallback}")
645 | return fallback if fallback else "Empty dict response"
646 |
647 | # String response (already clean)
648 | elif isinstance(llm_response, str):
649 | print(f"DEBUG: String response: {llm_response}")
650 | return llm_response if llm_response else "Empty string response"
651 |
652 | # None response
653 | elif llm_response is None:
654 | print("DEBUG: None response")
655 | return "No response received"
656 |
657 | # Fallback: convert to string
658 | fallback = str(llm_response)
659 | print(f"DEBUG: Final fallback: {fallback}")
660 | return fallback if fallback else "Empty response object"
661 |
662 | except Exception as e:
663 | error_msg = f"Error extracting content: {e}"
664 | print(f"DEBUG: Exception in extract_content_from_llm_response: {error_msg}")
665 | return error_msg
666 |
667 | def format_tool_result(tool_result_content):
668 | """Format tool result content for better readability.
669 |
670 | Args:
671 | tool_result_content: Raw tool result content (string)
672 |
673 | Returns:
674 | str: Formatted content for display
675 | """
676 | try:
677 | # Try to parse as JSON and format nicely
678 | if isinstance(tool_result_content, str) and tool_result_content.strip().startswith(('[', '{')):
679 | try:
680 | parsed_json = json.loads(tool_result_content)
681 |
682 | # Handle nested structure like {"type": "text", "text": "[...]"}
683 | if isinstance(parsed_json, dict) and 'text' in parsed_json:
684 | # Extract the inner text content
685 | inner_text = parsed_json['text']
686 | if isinstance(inner_text, str) and inner_text.strip().startswith('['):
687 | # Parse the inner JSON array
688 | try:
689 | inner_parsed = json.loads(inner_text)
690 | parsed_json = inner_parsed
691 | except json.JSONDecodeError:
692 | # If inner parsing fails, use the text as-is
693 | return inner_text
694 |
695 | # Special formatting for JIRA issues (common case)
696 | if isinstance(parsed_json, list) and len(parsed_json) > 0:
697 | if all(isinstance(item, dict) and 'key' in item for item in parsed_json):
698 | # Format as JIRA issues
699 | formatted_issues = []
700 | for issue in parsed_json:
701 | issue_text = f"**{issue.get('key', 'Unknown')}**: {issue.get('summary', 'No summary')}"
702 | if 'status' in issue:
703 | issue_text += f"\n - Status: {issue['status']}"
704 | if 'priority' in issue:
705 | issue_text += f"\n - Priority: {issue['priority']}"
706 | if 'assignee' in issue:
707 | issue_text += f"\n - Assignee: {issue['assignee']}"
708 | if 'created' in issue:
709 | issue_text += f"\n - Created: {issue['created']}"
710 | formatted_issues.append(issue_text)
711 |
712 | return f"Found {len(parsed_json)} issues:\n\n" + "\n\n".join(formatted_issues)
713 |
714 | # General JSON formatting
715 | return json.dumps(parsed_json, indent=2, ensure_ascii=False)
716 |
717 | except json.JSONDecodeError:
718 | # Not valid JSON, return as-is
719 | pass
720 |
721 | # Return original content if not JSON or formatting fails
722 | return tool_result_content
723 |
724 | except Exception as e:
725 | # If anything goes wrong, return original content
726 | return tool_result_content
727 |
728 | # --- Chat Functions ---
729 | async def process_user_message_async(user_input):
730 | """Process user message based on chat mode."""
731 |
732 | # Chat mode: direct LLM conversation without tools
733 | if st.session_state.chat_mode == "chat":
734 | return await chat_with_llm_directly(user_input)
735 |
736 | # Tools mode: always use tools
737 | elif st.session_state.chat_mode == "tools":
738 | if not st.session_state.llm_bridge:
739 | return "❌ No LLM bridge configured. Please configure an API key and connect to MCP server."
740 |
741 | try:
742 | result = await st.session_state.llm_bridge.process_query(user_input, st.session_state.messages)
743 |
744 | # Handle enhanced response structure for tools mode
745 | if isinstance(result, dict):
746 | # Extract responses from enhanced structure
747 | final_llm_response = result.get("final_llm_response", {})
748 | tool_call = result.get("tool_call")
749 | tool_result = result.get("tool_result")
750 |
751 | # Extract the final LLM content
752 | final_content = extract_content_from_llm_response(final_llm_response, "final")
753 |
754 | # Store enhanced response data for UI display
755 | enhanced_response_data = {
756 | "final_llm_content": final_content,
757 | "initial_llm_response": result.get("initial_llm_response", {}),
758 | "final_llm_response": final_llm_response,
759 | "raw_initial_response": result.get("raw_initial_response", {}),
760 | "raw_final_response": result.get("raw_final_response", {}),
761 | "tool_call": tool_call,
762 | "tool_result": tool_result,
763 | "processing_steps": result.get("processing_steps", []),
764 | "metadata": result.get("metadata", {}),
765 | "has_tools": hasattr(st.session_state.llm_bridge, 'tools') and st.session_state.llm_bridge.tools
766 | }
767 |
768 | # Store in session state for the UI to access
769 | st.session_state.last_response_data = enhanced_response_data
770 |
771 | response_parts = []
772 |
773 | # Only add content if it's not a generic "no content" message and we have a tool result
774 | if tool_call and tool_result:
775 | # If we have a tool result, prioritize that over generic "no content" messages
776 | if final_content and not final_content.startswith("No content received from"):
777 | response_parts.append(final_content)
778 |
779 | tool_name = tool_call.get('name', 'Unknown')
780 | if tool_result.error_code == 0:
781 | response_parts.append(f"🔧 **Tool Used:** {tool_name}")
782 | formatted_result = format_tool_result(tool_result.content)
783 | response_parts.append(f"**Result:**\n{formatted_result}")
784 | else:
785 | response_parts.append(f"❌ **Tool Error:** {tool_name} failed")
786 | response_parts.append(f"**Error:** {tool_result.content}")
787 | else:
788 | # No tool call, just return the content
789 | response_parts.append(final_content)
790 |
791 | return "\n".join(response_parts)
792 | else:
793 | # Handle legacy response format
794 | return extract_content_from_llm_response(result)
795 |
796 | except Exception as e:
797 | return f"Sorry, I encountered an error: {e}"
798 |
799 | # Auto mode: let LLM decide whether to use tools
800 | else: # auto mode
801 | if not st.session_state.llm_bridge:
802 | # Fall back to direct chat if no bridge available
803 | return await chat_with_llm_directly(user_input)
804 |
805 | try:
806 | # Debug: Check if we have tools available (but don't show misleading warnings)
807 | if hasattr(st.session_state.llm_bridge, 'tools') and st.session_state.llm_bridge.tools:
808 | tools_count = len(st.session_state.llm_bridge.tools)
809 | # Only show this info in debug mode, not always
810 | # st.info(f"🔧 Auto mode: {tools_count} MCP tools available for LLM to use")
811 |
812 | result = await st.session_state.llm_bridge.process_query(user_input, st.session_state.messages)
813 |
814 | # Handle enhanced response structure
815 | if isinstance(result, dict):
816 | # Extract responses from enhanced structure
817 | initial_llm_response = result.get("initial_llm_response", {})
818 | final_llm_response = result.get("final_llm_response", {})
819 | raw_initial_response = result.get("raw_initial_response", {})
820 | raw_final_response = result.get("raw_final_response", {})
821 | tool_call = result.get("tool_call")
822 | tool_result = result.get("tool_result")
823 | processing_steps = result.get("processing_steps", [])
824 | metadata = result.get("metadata", {})
825 |
826 | # Extract the final LLM content (this is what the user should see)
827 | final_content = extract_content_from_llm_response(final_llm_response, "final")
828 |
829 | # Debug logging for enhanced structure
830 | print(f"DEBUG: Enhanced Response Structure:")
831 | print(f" - Initial Response Type: {type(initial_llm_response)}")
832 | print(f" - Final Response Type: {type(final_llm_response)}")
833 | print(f" - Final Content: {final_content}")
834 | print(f" - Tool Call: {tool_call}")
835 | print(f" - Tool Result: {tool_result}")
836 | print(f" - Processing Steps: {len(processing_steps)}")
837 | print(f" - Metadata: {metadata}")
838 |
839 | # Store comprehensive response data for UI display
840 | enhanced_response_data = {
841 | "final_llm_content": final_content,
842 | "initial_llm_response": initial_llm_response,
843 | "final_llm_response": final_llm_response,
844 | "raw_initial_response": raw_initial_response,
845 | "raw_final_response": raw_final_response,
846 | "tool_call": tool_call,
847 | "tool_result": tool_result,
848 | "processing_steps": processing_steps,
849 | "metadata": metadata,
850 | "has_tools": hasattr(st.session_state.llm_bridge, 'tools') and st.session_state.llm_bridge.tools
851 | }
852 |
853 | # Store in session state for the UI to access
854 | st.session_state.last_response_data = enhanced_response_data
855 |
856 | # Return the final LLM content (this is the key fix!)
857 | if final_content and not final_content.startswith("No content received from") and not final_content.startswith("Error extracting content"):
858 | return final_content
859 | else:
860 | # Fallback handling for edge cases
861 | if tool_call and tool_result and tool_result.error_code == 0:
862 | return f"I successfully executed the {tool_call.get('name', 'requested')} tool and processed the results. Please check the details below."
863 | else:
864 | return "I processed your request using the available tools."
865 | else:
866 | # Handle legacy response format
867 | return extract_content_from_llm_response(result)
868 |
869 | except Exception as e:
870 | return f"Sorry, I encountered an error: {e}"
871 |
872 | def process_user_message(user_input):
873 | """Synchronous wrapper for async message processing."""
874 | try:
875 | # Check if there's already an event loop running
876 | try:
877 | loop = asyncio.get_running_loop()
878 | # If we're in an existing loop, we need to use a different approach
879 | import concurrent.futures
880 | with concurrent.futures.ThreadPoolExecutor() as executor:
881 | future = executor.submit(asyncio.run, process_user_message_async(user_input))
882 | result = future.result(timeout=60) # 60 second timeout
883 | return result
884 | except RuntimeError:
885 | # No event loop running, safe to use asyncio.run()
886 | result = asyncio.run(process_user_message_async(user_input))
887 | return result
888 | except Exception as e:
889 | return f"Error processing message: {e}"
890 |
891 | # Modern Header
892 | st.markdown("""
893 |
894 |
895 | 🎮 MCP Playground
896 |
897 |
898 | Quickly test and experiment with MCP servers. Connect your favorite tool-capable models, and off you go!
899 |
900 |
901 |
902 | """, unsafe_allow_html=True)
903 |
904 | # Sidebar for configuration
905 | with st.sidebar:
906 | st.markdown("""
907 |
908 | ⚙️ Configuration
909 |
910 |
911 | """, unsafe_allow_html=True)
912 |
913 | st.markdown("""
914 |
915 |
916 | 📡 Server Connection
917 |
918 |
919 | """, unsafe_allow_html=True)
920 |
921 | # MCP Endpoint
922 | mcp_endpoint = st.text_input(
923 | "MCP Endpoint URL",
924 | value=st.session_state.mcp_endpoint,
925 | help="⚠️ IMPORTANT: URL must end with /sse for Server-Sent Events. Example: http://localhost:8000/sse",
926 | placeholder="http://localhost:8000/sse"
927 | )
928 |
929 | # Add visual warning if URL doesn't end with /sse
930 | if mcp_endpoint and not mcp_endpoint.endswith('/sse'):
931 | st.warning("⚠️ URL should end with `/sse` for proper SSE communication. Example: `http://localhost:8000/sse`")
932 | # Auto-disconnect if endpoint changes while connected
933 | if st.session_state.mcp_endpoint != mcp_endpoint and st.session_state.connected:
934 | disconnect_from_server()
935 | st.info("🔄 Disconnected due to endpoint change. Click Connect to reconnect.")
936 | st.session_state.mcp_endpoint = mcp_endpoint
937 |
938 | st.markdown("""
939 |
940 |
941 | 🤖 LLM Configuration
942 |
943 |
944 | """, unsafe_allow_html=True)
945 |
946 | # LLM Provider
947 | llm_provider = st.selectbox(
948 | "LLM Provider",
949 | ["openai", "anthropic", "google", "ollama"],
950 | index=["openai", "anthropic", "google", "ollama"].index(st.session_state.llm_provider) if st.session_state.llm_provider in ["openai", "anthropic", "google", "ollama"] else 0
951 | )
952 | # Auto-disconnect if provider changes while connected
953 | if st.session_state.llm_provider != llm_provider and st.session_state.connected:
954 | disconnect_from_server()
955 | st.info("🔄 Disconnected due to provider change. Click Connect to reconnect.")
956 | st.session_state.llm_provider = llm_provider
957 |
958 | # Handle auto-refresh on startup and provider changes
959 | handle_startup_auto_refresh()
960 | handle_provider_change_auto_refresh()
961 |
962 | # Provider specific settings
963 | if llm_provider in ["openai", "anthropic", "google"]:
964 | st.markdown(f"""
965 |
966 |
967 | 🎯 {llm_provider.title()} Models (via OpenRouter)
968 |
969 |
970 | """, unsafe_allow_html=True)
971 |
972 | # OpenRouter API Key
973 | api_key = st.text_input(
974 | "OpenRouter API Key",
975 | type="password",
976 | value=st.session_state.api_keys["openrouter"],
977 | help="Get your API key from openrouter.ai, or set OPENROUTER_API_KEY in Env before starting this UI"
978 | )
979 | st.session_state.api_keys["openrouter"] = api_key
980 |
981 | # Optional site information
982 | with st.expander("Optional: Site Information for Rankings"):
983 | site_url = st.text_input(
984 | "Site URL",
985 | value=st.session_state.openrouter_site_url,
986 | help="Your site URL for rankings on openrouter.ai"
987 | )
988 | st.session_state.openrouter_site_url = site_url
989 |
990 | site_name = st.text_input(
991 | "Site Name",
992 | value=st.session_state.openrouter_site_name,
993 | help="Your site name for rankings on openrouter.ai"
994 | )
995 | st.session_state.openrouter_site_name = site_name
996 |
997 | # Model filtering toggle
998 | show_tools_only = st.checkbox(
999 | "🔧 Show only tool-capable models",
1000 | value=st.session_state.show_tools_only,
1001 | help="Filter to show only models that support function/tool calling for MCP integration"
1002 | )
1003 | st.session_state.show_tools_only = show_tools_only
1004 |
1005 | # Model selection dropdown
1006 | models_key = f"{llm_provider}_openrouter_models"
1007 | selected_model_key = f"{llm_provider}_openrouter_model"
1008 |
1009 | if st.session_state.get(models_key):
1010 | model_options = []
1011 | for model in st.session_state[models_key]:
1012 | model_options.append((model["display"], model["id"]))
1013 |
1014 | if model_options:
1015 | selected_model = st.selectbox(
1016 | f"Select {llm_provider.title()} Model",
1017 | options=[opt[1] for opt in model_options],
1018 | format_func=lambda x: next(opt[0] for opt in model_options if opt[1] == x),
1019 | help=f"Top 5 most popular {llm_provider} models on OpenRouter",
1020 | key=f"{llm_provider}_model_select"
1021 | )
1022 | st.session_state[selected_model_key] = selected_model
1023 |
1024 | # Show model details
1025 | selected_model_data = next(
1026 | (m for m in st.session_state[models_key] if m["id"] == selected_model),
1027 | None
1028 | )
1029 | if selected_model_data:
1030 | with st.expander("Model Details"):
1031 | st.write(f"**Description:** {selected_model_data.get('description', 'N/A')}")
1032 | context_length = selected_model_data.get('context_length', 'Unknown')
1033 | if isinstance(context_length, (int, float)) and context_length > 0:
1034 | st.write(f"**Context Length:** {int(context_length):,} tokens")
1035 | else:
1036 | st.write(f"**Context Length:** {context_length}")
1037 |
1038 | pricing = selected_model_data.get('pricing', {})
1039 | if pricing:
1040 | try:
1041 | prompt_cost = float(pricing.get('prompt', 0)) * 1000000
1042 | completion_cost = float(pricing.get('completion', 0)) * 1000000
1043 | st.write(f"**Pricing:** ${prompt_cost:.3f} prompt / ${completion_cost:.3f} completion per 1M tokens")
1044 | except (ValueError, TypeError):
1045 | st.write("**Pricing:** Information unavailable")
1046 | else:
1047 | st.info(f"Models will be automatically loaded when switching to {llm_provider.title()}")
1048 | elif llm_provider == "ollama":
1049 | # Ollama Host input
1050 | ollama_host = st.text_input(
1051 | "Ollama Host (Optional)",
1052 | value=st.session_state.ollama_host,
1053 | help="Enter the Ollama server URL (e.g., 'http://localhost:11434'). Leave blank to use default."
1054 | )
1055 | st.session_state.ollama_host = ollama_host
1056 |
1057 | # Model selection
1058 | # If we have models, show a dropdown, otherwise show a text input
1059 | if st.session_state.ollama_models:
1060 | # Add a default option if the current model is not in the list
1061 | model_options = st.session_state.ollama_models.copy()
1062 | if st.session_state.ollama_model not in model_options:
1063 | model_options = [st.session_state.ollama_model] + model_options
1064 |
1065 | ollama_model = st.selectbox(
1066 | "Ollama Model",
1067 | model_options,
1068 | index=model_options.index(st.session_state.ollama_model) if st.session_state.ollama_model in model_options else 0,
1069 | help="Select an Ollama model from the list. Models are automatically loaded when switching to Ollama."
1070 | )
1071 | st.session_state.ollama_model = ollama_model
1072 | else:
1073 | # No models found, show a text input
1074 | ollama_model = st.text_input(
1075 | "Ollama Model Name",
1076 | value=st.session_state.ollama_model,
1077 | help="Enter the name of the locally available Ollama model (e.g., 'llama3', 'mistral'). Models are automatically detected when switching to Ollama."
1078 | )
1079 | st.session_state.ollama_model = ollama_model
1080 |
1081 | st.markdown("""
1082 |
1083 |
1084 | 💬 Chat Mode
1085 |
1086 |
1087 | """, unsafe_allow_html=True)
1088 |
1089 | # Chat mode selection
1090 | chat_mode = st.selectbox(
1091 | "Mode",
1092 | ["auto", "chat", "tools"],
1093 | index=["auto", "chat", "tools"].index(st.session_state.chat_mode),
1094 | help="Auto: LLM decides when to use tools. Chat: Direct conversation without tools. Tools: Always try to use tools."
1095 | )
1096 | st.session_state.chat_mode = chat_mode
1097 |
1098 | # Show mode description
1099 | if chat_mode == "auto":
1100 | st.caption("🤖 LLM automatically decides when tools are needed")
1101 | elif chat_mode == "chat":
1102 | st.caption("💬 Direct conversation without MCP tools")
1103 | elif chat_mode == "tools":
1104 | st.caption("🔧 Always attempt to use MCP tools")
1105 |
1106 | st.markdown("""
1107 |
1108 |
1109 | 🔌 Connection Control
1110 |
1111 |
1112 | """, unsafe_allow_html=True)
1113 |
1114 | # Simplified connection button with only two clear states
1115 | if st.session_state.connected:
1116 | button_text = "🟢 Disconnect"
1117 | button_help = "Click to disconnect from the server"
1118 | button_class = "disconnect-button"
1119 | else:
1120 | button_text = "🔵 Connect"
1121 | button_help = "Click to connect to the server"
1122 | button_class = "connect-button"
1123 |
1124 | # Use regular Streamlit button
1125 | if st.button(button_text, help=button_help, use_container_width=True, key="main_connect_button"):
1126 | if st.session_state.connected:
1127 | # Disconnect when connected
1128 | disconnect_from_server()
1129 | else:
1130 | # Connect when disconnected
1131 | with st.spinner("Connecting..."):
1132 | connect_to_server()
1133 |
1134 | # Apply button styling based on connection state
1135 | button_color = "#10b981" if st.session_state.connected else "#3b82f6"
1136 | hover_color = "#059669" if st.session_state.connected else "#2563eb"
1137 |
1138 | st.markdown(f"""
1139 |
1163 | """, unsafe_allow_html=True)
1164 |
1165 | # Simple, clean status display
1166 | if st.session_state.connected:
1167 | status_color = "#10b981" # Green
1168 | status_icon = "🟢"
1169 | status_text = "Connected"
1170 |
1171 | # Get model info for display
1172 | if st.session_state.llm_provider in ['openai', 'anthropic', 'google']:
1173 | model_raw = st.session_state.get(f'{st.session_state.llm_provider}_openrouter_model')
1174 | model_display = model_raw.split('/')[-1] if model_raw and '/' in model_raw else (model_raw or 'Not selected')
1175 | else:
1176 | model_display = st.session_state.ollama_model
1177 |
1178 | # Single clean status display for connected state
1179 | status_html = f"""
1180 |
1181 |
1182 | {status_icon}
1183 | {status_text}
1184 |
1185 |
1186 | 📡 {st.session_state.mcp_endpoint}
1187 | 🤖 {st.session_state.llm_provider.title()} ({model_display})
1188 | 🛠️ {len(st.session_state.tools)} tools available
1189 |
1190 |
1191 | """
1192 | else:
1193 | status_color = "#ef4444" # Red
1194 | status_icon = "🔴"
1195 | status_text = "Not Connected"
1196 |
1197 | # Simple status display for disconnected state
1198 | status_html = f"""
1199 |
1200 |
1201 | {status_icon}
1202 | {status_text}
1203 |
1204 |
1205 | """
1206 |
1207 | st.markdown(status_html, unsafe_allow_html=True)
1208 |
1209 | # Show connection error if any
1210 | if st.session_state.connection_error:
1211 | st.error(f"Last error: {st.session_state.connection_error}")
1212 |
1213 | # Tools
1214 | if st.session_state.connected and st.session_state.tools:
1215 | st.markdown("""
1216 |
1217 |
1218 | 🛠️ Available Tools
1219 |
1220 |
1221 | """, unsafe_allow_html=True)
1222 | for tool in st.session_state.tools:
1223 | with st.expander(tool.name):
1224 | st.write(tool.description)
1225 | if hasattr(tool, 'parameters') and tool.parameters:
1226 | st.write("**Parameters:**")
1227 | for param in tool.parameters:
1228 | required = "Required" if param.required else "Optional"
1229 | st.write(f"- **{param.name}** ({required}): {param.description}")
1230 |
1231 | # Main content area
1232 | st.markdown("---")
1233 |
1234 | if not st.session_state.connected and st.session_state.chat_mode != "chat":
1235 | st.markdown("""
1236 |
1237 | ✨ Welcome to MCP Playground
1238 | Your interactive playground for exploring MCP servers, testing tools, and experimenting with AI-driven integrations.
1239 |
1240 |
1241 | 👈 Configure Connection
1242 |
1243 |
1244 | 💬 Try Chat Mode
1245 |
1246 |
1247 |
1248 | """, unsafe_allow_html=True)
1249 |
1250 | # Show debug info
1251 | st.subheader("Debug Information")
1252 | st.write(f"Current endpoint: {st.session_state.mcp_endpoint}")
1253 | st.write(f"Current provider: {st.session_state.llm_provider}")
1254 | st.write(f"Current mode: {st.session_state.chat_mode}")
1255 | if st.session_state.llm_provider == "openai":
1256 | st.write(f"OpenAI model: {st.session_state.openai_model}")
1257 | elif st.session_state.llm_provider == "anthropic":
1258 | st.write(f"Anthropic model: {st.session_state.anthropic_model}")
1259 | elif st.session_state.llm_provider == "ollama":
1260 | st.write(f"Ollama model: {st.session_state.ollama_model}")
1261 | st.write(f"Ollama host: {st.session_state.ollama_host or 'default'}")
1262 | st.write(f"Available models: {len(st.session_state.ollama_models)} found")
1263 | if st.session_state.connection_error:
1264 | st.error(f"Connection error: {st.session_state.connection_error}")
1265 | else:
1266 | # Chat interface
1267 | if not st.session_state.messages:
1268 | mode_descriptions = {
1269 | "auto": "🤖 The LLM will automatically decide when to use MCP tools based on your questions.",
1270 | "chat": "💬 Direct conversation with the LLM without using MCP tools.",
1271 | "tools": "🔧 The LLM will always attempt to use MCP tools to answer your questions."
1272 | }
1273 |
1274 | mode_icons = {
1275 | "auto": "🤖",
1276 | "chat": "💬",
1277 | "tools": "🔧"
1278 | }
1279 |
1280 | st.markdown(f"""
1281 |
1282 |
1283 | {mode_icons[st.session_state.chat_mode]}
1284 | Ready to Chat!
1285 |
1286 |
1287 |
1288 | Current Mode: {st.session_state.chat_mode.title()}
1289 |
1290 |
1291 | {mode_descriptions[st.session_state.chat_mode]}
1292 |
1293 |
1294 |
1295 | """, unsafe_allow_html=True)
1296 |
1297 | # Display chat messages
1298 | for message in st.session_state.messages:
1299 | role = message["role"]
1300 | content = message["content"]
1301 |
1302 | if role == "user":
1303 | with st.chat_message("user"):
1304 | st.write(content)
1305 | elif role == "assistant":
1306 | with st.chat_message("assistant"):
1307 | st.write(content)
1308 |
1309 | # Clear conversation button
1310 | if st.session_state.messages:
1311 | col1, col2, col3 = st.columns([1, 1, 1])
1312 | with col2:
1313 | if st.button("🗑️ Clear Conversation", use_container_width=True):
1314 | st.session_state.messages = []
1315 | st.rerun()
1316 |
1317 | # Chat input
1318 | if prompt := st.chat_input("Type your message here..."):
1319 | # Add user message to chat
1320 | st.session_state.messages.append({"role": "user", "content": prompt})
1321 |
1322 | # Display user message immediately
1323 | with st.chat_message("user"):
1324 | st.write(prompt)
1325 |
1326 | # Process and display assistant response
1327 | with st.chat_message("assistant"):
1328 | with st.spinner("Processing..."):
1329 | result = process_user_message(prompt)
1330 |
1331 | # Display the LLM response
1332 | st.write(result)
1333 |
1334 | # Check if we have enhanced response data from the last processing
1335 | if hasattr(st.session_state, 'last_response_data') and st.session_state.last_response_data:
1336 | response_data = st.session_state.last_response_data
1337 | tool_call = response_data.get("tool_call")
1338 | tool_result = response_data.get("tool_result")
1339 | has_tools = response_data.get("has_tools", False)
1340 | processing_steps = response_data.get("processing_steps", [])
1341 | metadata = response_data.get("metadata", {})
1342 |
1343 | # Display tool usage information
1344 | if tool_call and tool_result:
1345 | tool_name = tool_call.get('name', 'Unknown')
1346 | if tool_result.error_code == 0:
1347 | # Show success message
1348 | st.success(f"✅ Auto mode: LLM successfully used MCP tool '{tool_name}' and processed the results")
1349 | else:
1350 | st.error(f"❌ Auto mode: MCP tool '{tool_name}' failed")
1351 | st.error(f"**Error:** {tool_result.content}")
1352 | else:
1353 | if has_tools:
1354 | st.info("ℹ️ Auto mode: LLM chose not to use any MCP tools for this query")
1355 |
1356 | # Enhanced Raw LLM Response Data Expander
1357 | with st.expander("🔍 View Raw LLM Response Data", expanded=False):
1358 | col1, col2 = st.columns(2)
1359 |
1360 | with col1:
1361 | st.subheader("Initial LLM Response")
1362 | if response_data.get("raw_initial_response"):
1363 | # Convert response object to dict for JSON display
1364 | try:
1365 | if hasattr(response_data["raw_initial_response"], '__dict__'):
1366 | initial_dict = vars(response_data["raw_initial_response"])
1367 | else:
1368 | initial_dict = response_data["raw_initial_response"]
1369 | st.json(initial_dict)
1370 | except Exception as e:
1371 | st.code(str(response_data["raw_initial_response"]))
1372 | else:
1373 | st.info("No initial response data")
1374 |
1375 | with col2:
1376 | st.subheader("Final LLM Response")
1377 | if response_data.get("raw_final_response"):
1378 | # Convert response object to dict for JSON display
1379 | try:
1380 | if hasattr(response_data["raw_final_response"], '__dict__'):
1381 | final_dict = vars(response_data["raw_final_response"])
1382 | else:
1383 | final_dict = response_data["raw_final_response"]
1384 | st.json(final_dict)
1385 | except Exception as e:
1386 | st.code(str(response_data["raw_final_response"]))
1387 | else:
1388 | st.info("No final response data")
1389 |
1390 | # Response Metadata
1391 | if metadata:
1392 | st.subheader("Response Metadata")
1393 | st.json(metadata)
1394 |
1395 | # Tool Execution Details Expander
1396 | if tool_call and tool_result:
1397 | with st.expander("🔧 View Tool Execution Details", expanded=False):
1398 | col1, col2 = st.columns(2)
1399 |
1400 | with col1:
1401 | st.subheader("Tool Call")
1402 | st.json(tool_call)
1403 |
1404 | with col2:
1405 | st.subheader("Tool Result")
1406 | st.write(f"**Error Code:** {tool_result.error_code}")
1407 | if tool_result.error_code == 0:
1408 | st.success("✅ Tool executed successfully")
1409 | else:
1410 | st.error("❌ Tool execution failed")
1411 |
1412 | st.subheader("Tool Output")
1413 | formatted_result = format_tool_result(tool_result.content)
1414 | st.code(formatted_result, language="json")
1415 |
1416 | # Debug Information Expander
1417 | with st.expander("🐛 Debug Information", expanded=False):
1418 | if processing_steps:
1419 | st.subheader("Processing Steps Timeline")
1420 | for i, step in enumerate(processing_steps):
1421 | with st.container():
1422 | st.write(f"**Step {i+1}: {step.get('step', 'Unknown').replace('_', ' ').title()}**")
1423 | col1, col2 = st.columns(2)
1424 | with col1:
1425 | if step.get('timestamp'):
1426 | st.write(f"⏰ {step['timestamp']}")
1427 | with col2:
1428 | if step.get('duration'):
1429 | st.write(f"⚡ {step['duration']:.3f}s")
1430 | if step.get('data'):
1431 | st.write(f"📝 {step['data']}")
1432 | st.divider()
1433 |
1434 | st.subheader("Session Debug Info")
1435 | debug_info = {
1436 | "Provider": metadata.get('provider', 'Unknown'),
1437 | "Model": metadata.get('model', 'Unknown'),
1438 | "Base URL": metadata.get('base_url', 'Unknown'),
1439 | "Has Tools": metadata.get('has_tools', 'Unknown'),
1440 | "Total Execution Time": f"{metadata.get('execution_time', 0):.3f}s" if metadata.get('execution_time') else 'Unknown'
1441 | }
1442 | st.json(debug_info)
1443 |
1444 | # Show final LLM content for debugging
1445 | if response_data.get("final_llm_content"):
1446 | st.subheader("Final LLM Content (Displayed to User)")
1447 | st.code(response_data["final_llm_content"])
1448 |
1449 | # Clear the response data
1450 | st.session_state.last_response_data = None
1451 |
1452 | # Add assistant response to chat history
1453 | st.session_state.messages.append({"role": "assistant", "content": result})
1454 |
--------------------------------------------------------------------------------
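
Note: app.py above consumes an "enhanced" result dictionary returned by bridge.process_query(). The sketch below is inferred from the result.get(...) calls in the code and is not an authoritative definition; the key names come from app.py, while the example values are purely illustrative.

    # Sketch of the enhanced response structure that app.py expects (illustrative values).
    from mcp_playground import ToolInvocationResult

    enhanced_result = {
        "initial_llm_response": {},   # parsed first LLM response (may request a tool call)
        "final_llm_response": {},     # parsed response after the tool result was fed back
        "raw_initial_response": {},   # provider-specific raw response payloads
        "raw_final_response": {},
        "tool_call": {"name": "convert_document", "parameters": {"source": "..."}},  # or None
        "tool_result": ToolInvocationResult(content="...", error_code=0),            # or None
        "processing_steps": [
            {"step": "initial_llm_call", "timestamp": "2025-01-01T00:00:00Z", "duration": 0.42, "data": "..."},
        ],
        "metadata": {"provider": "openai", "model": "...", "base_url": "...", "has_tools": True, "execution_time": 1.23},
    }
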
/mcp-streamlit-app/mcp-streamlit-app-screenshot.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/zanetworker/mcp-sse-client-python/c1dceaac5b87de9b2a2796950739d01910454efe/mcp-streamlit-app/mcp-streamlit-app-screenshot.png
--------------------------------------------------------------------------------
/mcp-streamlit-app/requirements.txt:
--------------------------------------------------------------------------------
1 | streamlit>=1.30.0
2 | openai>=1.0.0
3 | anthropic>=0.5.0
4 | Markdown>=3.0.0
5 |
--------------------------------------------------------------------------------
/mcp-streamlit-app/run.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 |
3 | # Run the Streamlit app
4 | cd "$(dirname "$0")"
5 |
6 | # Check if streamlit is installed
7 | if ! command -v streamlit &> /dev/null; then
8 | echo "Streamlit not found. Installing dependencies..."
9 | pip install -r requirements.txt
10 | fi
11 |
12 | # Run the app
13 | echo "Starting MCP Streamlit App..."
14 | streamlit run app.py
15 |
--------------------------------------------------------------------------------
/mcp-streamlit-app/style.css:
--------------------------------------------------------------------------------
1 | /* Modern Design System for MCP Tool Tester */
2 |
3 | /* Import Google Fonts */
4 | @import url('https://fonts.googleapis.com/css2?family=Inter:wght@300;400;500;600;700&family=JetBrains+Mono:wght@400;500&display=swap');
5 |
6 | /* CSS Variables for Design System */
7 | :root {
8 | /* Colors - Modern Dark Theme with Accent */
9 | --primary-bg: #0f0f23;
10 | --secondary-bg: #1a1a2e;
11 | --tertiary-bg: #16213e;
12 | --accent-primary: #6366f1;
13 | --accent-secondary: #8b5cf6;
14 | --accent-gradient: linear-gradient(135deg, #6366f1 0%, #8b5cf6 100%);
15 | --success: #10b981;
16 | --warning: #f59e0b;
17 | --error: #ef4444;
18 | --text-primary: #f8fafc;
19 | --text-secondary: #cbd5e1;
20 | --text-muted: #64748b;
21 | --border: #334155;
22 | --border-light: #475569;
23 |
24 | /* Spacing */
25 | --space-xs: 0.25rem;
26 | --space-sm: 0.5rem;
27 | --space-md: 1rem;
28 | --space-lg: 1.5rem;
29 | --space-xl: 2rem;
30 | --space-2xl: 3rem;
31 |
32 | /* Border Radius */
33 | --radius-sm: 0.375rem;
34 | --radius-md: 0.5rem;
35 | --radius-lg: 0.75rem;
36 | --radius-xl: 1rem;
37 |
38 | /* Shadows */
39 | --shadow-sm: 0 1px 2px 0 rgba(0, 0, 0, 0.05);
40 | --shadow-md: 0 4px 6px -1px rgba(0, 0, 0, 0.1), 0 2px 4px -1px rgba(0, 0, 0, 0.06);
41 | --shadow-lg: 0 10px 15px -3px rgba(0, 0, 0, 0.1), 0 4px 6px -2px rgba(0, 0, 0, 0.05);
42 | --shadow-xl: 0 20px 25px -5px rgba(0, 0, 0, 0.1), 0 10px 10px -5px rgba(0, 0, 0, 0.04);
43 | --shadow-glow: 0 0 20px rgba(99, 102, 241, 0.3);
44 |
45 | /* Transitions */
46 | --transition-fast: 0.15s ease-out;
47 | --transition-normal: 0.3s ease-out;
48 | --transition-slow: 0.5s ease-out;
49 | }
50 |
51 | /* Global Styles */
52 | .stApp {
53 | background: var(--primary-bg);
54 | color: var(--text-primary);
55 | font-family: 'Inter', -apple-system, BlinkMacSystemFont, sans-serif;
56 | }
57 |
58 | /* Hide Streamlit branding */
59 | #MainMenu {visibility: hidden;}
60 | footer {visibility: hidden;}
61 | header {visibility: hidden;}
62 |
63 | /* Custom scrollbar */
64 | ::-webkit-scrollbar {
65 | width: 8px;
66 | }
67 |
68 | ::-webkit-scrollbar-track {
69 | background: var(--secondary-bg);
70 | }
71 |
72 | ::-webkit-scrollbar-thumb {
73 | background: var(--border);
74 | border-radius: var(--radius-md);
75 | }
76 |
77 | ::-webkit-scrollbar-thumb:hover {
78 | background: var(--border-light);
79 | }
80 |
81 | /* Main Title Styling */
82 | .stApp > div > div > div > div > h1 {
83 | background: var(--accent-gradient);
84 | -webkit-background-clip: text;
85 | -webkit-text-fill-color: transparent;
86 | background-clip: text;
87 | font-weight: 700;
88 | font-size: 3rem;
89 | text-align: center;
90 | margin-bottom: var(--space-xl);
91 | letter-spacing: -0.025em;
92 | }
93 |
94 | /* Sidebar Styling */
95 | .css-1d391kg {
96 | background: var(--secondary-bg);
97 | border-right: 1px solid var(--border);
98 | }
99 |
100 | .css-1d391kg .stSelectbox > div > div {
101 | background: var(--tertiary-bg);
102 | border: 1px solid var(--border);
103 | border-radius: var(--radius-md);
104 | color: var(--text-primary);
105 | transition: all var(--transition-fast);
106 | }
107 |
108 | .css-1d391kg .stSelectbox > div > div:hover {
109 | border-color: var(--accent-primary);
110 | box-shadow: var(--shadow-glow);
111 | }
112 |
113 | .css-1d391kg .stTextInput > div > div > input {
114 | background: var(--tertiary-bg);
115 | border: 1px solid var(--border);
116 | border-radius: var(--radius-md);
117 | color: var(--text-primary);
118 | transition: all var(--transition-fast);
119 | }
120 |
121 | .css-1d391kg .stTextInput > div > div > input:focus {
122 | border-color: var(--accent-primary);
123 | box-shadow: var(--shadow-glow);
124 | outline: none;
125 | }
126 |
127 | /* Simplified Button Styling - Let app.py handle connection button colors */
128 | /* General sidebar button styling for other buttons */
129 | div[data-testid="stSidebar"] .stButton > button:not([data-testid*="main_connect_button"]),
130 | .css-1d391kg .stButton > button:not([data-testid*="main_connect_button"]) {
131 | background-color: var(--accent-primary) !important;
132 | background: var(--accent-primary) !important;
133 | border: none !important;
134 | border-radius: var(--radius-md) !important;
135 | color: white !important;
136 | font-weight: 500 !important;
137 | padding: var(--space-sm) var(--space-md) !important;
138 | transition: all var(--transition-fast) !important;
139 | box-shadow: var(--shadow-sm) !important;
140 | width: 100% !important;
141 | }
142 |
143 | div[data-testid="stSidebar"] .stButton > button:not([data-testid*="main_connect_button"]):hover,
144 | .css-1d391kg .stButton > button:not([data-testid*="main_connect_button"]):hover {
145 | background-color: #4f46e5 !important;
146 | background: #4f46e5 !important;
147 | transform: translateY(-1px) !important;
148 | box-shadow: var(--shadow-md) !important;
149 | }
150 |
151 | /* Sidebar specific improvements */
152 | .css-1d391kg {
153 | background: var(--secondary-bg);
154 | border-right: 1px solid var(--border);
155 | padding: var(--space-md);
156 | }
157 |
158 | /* Improved sidebar text inputs and selects */
159 | .css-1d391kg .stTextInput > div > div > input,
160 | .css-1d391kg .stSelectbox > div > div {
161 | font-size: 0.9rem;
162 | padding: var(--space-sm);
163 | }
164 |
165 | /* Status container responsive improvements */
166 | .css-1d391kg .stMarkdown {
167 | margin-bottom: var(--space-sm);
168 | }
169 |
170 | /* Success/Error Messages */
171 | .stSuccess {
172 | background: rgba(16, 185, 129, 0.1);
173 | border: 1px solid var(--success);
174 | border-radius: var(--radius-md);
175 | color: var(--success);
176 | }
177 |
178 | .stError {
179 | background: rgba(239, 68, 68, 0.1);
180 | border: 1px solid var(--error);
181 | border-radius: var(--radius-md);
182 | color: var(--error);
183 | }
184 |
185 | .stWarning {
186 | background: rgba(245, 158, 11, 0.1);
187 | border: 1px solid var(--warning);
188 | border-radius: var(--radius-md);
189 | color: var(--warning);
190 | }
191 |
192 | /* Chat Interface */
193 | .stChatMessage {
194 | background: var(--secondary-bg);
195 | border: 1px solid var(--border);
196 | border-radius: var(--radius-lg);
197 | margin: var(--space-md) 0;
198 | padding: var(--space-lg);
199 | box-shadow: var(--shadow-sm);
200 | }
201 |
202 | .stChatMessage[data-testid="user-message"] {
203 | background: var(--tertiary-bg);
204 | border-color: var(--accent-primary);
205 | }
206 |
207 | .stChatMessage[data-testid="assistant-message"] {
208 | background: var(--secondary-bg);
209 | }
210 |
211 | /* Chat Input */
212 | .stChatInput > div > div > div > div > div > textarea {
213 | background: var(--tertiary-bg);
214 | border: 1px solid var(--border);
215 | border-radius: var(--radius-lg);
216 | color: var(--text-primary);
217 | font-family: 'Inter', sans-serif;
218 | transition: all var(--transition-fast);
219 | }
220 |
221 | .stChatInput > div > div > div > div > div > textarea:focus {
222 | border-color: var(--accent-primary);
223 | box-shadow: var(--shadow-glow);
224 | outline: none;
225 | }
226 |
227 | /* Expander Styling */
228 | .streamlit-expanderHeader {
229 | background: var(--tertiary-bg);
230 | border: 1px solid var(--border);
231 | border-radius: var(--radius-md);
232 | color: var(--text-primary);
233 | font-weight: 500;
234 | transition: all var(--transition-fast);
235 | }
236 |
237 | .streamlit-expanderHeader:hover {
238 | background: var(--secondary-bg);
239 | border-color: var(--accent-primary);
240 | }
241 |
242 | .streamlit-expanderContent {
243 | background: var(--secondary-bg);
244 | border: 1px solid var(--border);
245 | border-top: none;
246 | border-radius: 0 0 var(--radius-md) var(--radius-md);
247 | }
248 |
249 | /* Status Indicators */
250 | .status-connected {
251 | color: var(--success);
252 | font-weight: 600;
253 | }
254 |
255 | .status-disconnected {
256 | color: var(--error);
257 | font-weight: 600;
258 | }
259 |
260 | /* Welcome Card */
261 | .welcome-card {
262 | background: var(--secondary-bg);
263 | border: 1px solid var(--border);
264 | border-radius: var(--radius-xl);
265 | padding: var(--space-2xl);
266 | text-align: center;
267 | box-shadow: var(--shadow-lg);
268 | margin: var(--space-xl) 0;
269 | }
270 |
271 | .welcome-card h2 {
272 | background: var(--accent-gradient);
273 | -webkit-background-clip: text;
274 | -webkit-text-fill-color: transparent;
275 | background-clip: text;
276 | font-weight: 700;
277 | margin-bottom: var(--space-lg);
278 | }
279 |
280 | .welcome-card p {
281 | color: var(--text-secondary);
282 | font-size: 1.1rem;
283 | line-height: 1.6;
284 | }
285 |
286 | /* Tool Cards */
287 | .tool-card {
288 | background: var(--secondary-bg);
289 | border: 1px solid var(--border);
290 | border-radius: var(--radius-lg);
291 | padding: var(--space-lg);
292 | margin: var(--space-md) 0;
293 | transition: all var(--transition-fast);
294 | }
295 |
296 | .tool-card:hover {
297 | border-color: var(--accent-primary);
298 | box-shadow: var(--shadow-md);
299 | transform: translateY(-2px);
300 | }
301 |
302 | /* Code blocks */
303 | code {
304 | background: var(--tertiary-bg);
305 | border: 1px solid var(--border);
306 | border-radius: var(--radius-sm);
307 | color: var(--accent-secondary);
308 | font-family: 'JetBrains Mono', monospace;
309 | padding: var(--space-xs) var(--space-sm);
310 | }
311 |
312 | pre {
313 | background: var(--tertiary-bg);
314 | border: 1px solid var(--border);
315 | border-radius: var(--radius-md);
316 | padding: var(--space-lg);
317 | overflow-x: auto;
318 | }
319 |
320 | /* Metrics and KPIs */
321 | .metric-card {
322 | background: var(--secondary-bg);
323 | border: 1px solid var(--border);
324 | border-radius: var(--radius-lg);
325 | padding: var(--space-lg);
326 | text-align: center;
327 | transition: all var(--transition-fast);
328 | }
329 |
330 | .metric-card:hover {
331 | border-color: var(--accent-primary);
332 | box-shadow: var(--shadow-md);
333 | }
334 |
335 | .metric-value {
336 | font-size: 2rem;
337 | font-weight: 700;
338 | color: var(--accent-primary);
339 | }
340 |
341 | .metric-label {
342 | color: var(--text-secondary);
343 | font-size: 0.875rem;
344 | text-transform: uppercase;
345 | letter-spacing: 0.05em;
346 | }
347 |
348 | /* Loading Spinner */
349 | .stSpinner {
350 | color: var(--accent-primary);
351 | }
352 |
353 | /* Responsive Design */
354 | @media (max-width: 768px) {
355 | .stApp > div > div > div > div > h1 {
356 | font-size: 2rem;
357 | }
358 |
359 | .welcome-card {
360 | padding: var(--space-lg);
361 | }
362 |
363 | .metric-card {
364 | margin-bottom: var(--space-md);
365 | }
366 | }
367 |
368 | /* Animation Classes */
369 | @keyframes fadeIn {
370 | from { opacity: 0; transform: translateY(20px); }
371 | to { opacity: 1; transform: translateY(0); }
372 | }
373 |
374 | @keyframes slideIn {
375 | from { transform: translateX(-20px); opacity: 0; }
376 | to { transform: translateX(0); opacity: 1; }
377 | }
378 |
379 | .fade-in {
380 | animation: fadeIn 0.5s ease-out;
381 | }
382 |
383 | .slide-in {
384 | animation: slideIn 0.3s ease-out;
385 | }
386 |
387 | /* Glassmorphism Effect */
388 | .glass {
389 | background: rgba(26, 26, 46, 0.7);
390 | backdrop-filter: blur(10px);
391 | border: 1px solid rgba(255, 255, 255, 0.1);
392 | }
393 |
394 | /* Gradient Text */
395 | .gradient-text {
396 | background: var(--accent-gradient);
397 | -webkit-background-clip: text;
398 | -webkit-text-fill-color: transparent;
399 | background-clip: text;
400 | }
401 |
402 | /* Custom Toggle Switch */
403 | .toggle-switch {
404 | position: relative;
405 | display: inline-block;
406 | width: 60px;
407 | height: 34px;
408 | }
409 |
410 | .toggle-switch input {
411 | opacity: 0;
412 | width: 0;
413 | height: 0;
414 | }
415 |
416 | .slider {
417 | position: absolute;
418 | cursor: pointer;
419 | top: 0;
420 | left: 0;
421 | right: 0;
422 | bottom: 0;
423 | background-color: var(--border);
424 | transition: var(--transition-fast);
425 | border-radius: 34px;
426 | }
427 |
428 | .slider:before {
429 | position: absolute;
430 | content: "";
431 | height: 26px;
432 | width: 26px;
433 | left: 4px;
434 | bottom: 4px;
435 | background-color: white;
436 | transition: var(--transition-fast);
437 | border-radius: 50%;
438 | }
439 |
440 | input:checked + .slider {
441 | background: var(--accent-gradient);
442 | }
443 |
444 | input:checked + .slider:before {
445 | transform: translateX(26px);
446 | }
--------------------------------------------------------------------------------
/mcp_playground/__init__.py:
--------------------------------------------------------------------------------
1 | """
2 | MCP Playground - A comprehensive Python toolkit for interacting with remote Model Context Protocol (MCP) endpoints.
3 |
4 | This package provides clients for connecting to remote MCP endpoints using various protocols:
5 | - Server-Sent Events (SSE) - Currently supported
6 | - Streamable HTTP - Planned for future releases
7 |
8 | Features include tool discovery, invocation, and LLM integration for AI-driven tool selection.
9 | """
10 |
11 | from mcp_playground.client import MCPClient, ToolDef, ToolParameter, ToolInvocationResult
12 |
13 | # Import LLM bridge classes for easier access
14 | try:
15 | from mcp_playground.llm_bridge import LLMBridge, OpenAIBridge, AnthropicBridge
16 | __all__ = [
17 | "MCPClient", "ToolDef", "ToolParameter", "ToolInvocationResult",
18 | "LLMBridge", "OpenAIBridge", "AnthropicBridge"
19 | ]
20 | except ImportError:
21 | # LLM dependencies might not be installed
22 | __all__ = ["MCPClient", "ToolDef", "ToolParameter", "ToolInvocationResult"]
23 |
24 | __version__ = "0.2.0"
25 |
--------------------------------------------------------------------------------
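
Note: a minimal end-to-end sketch of the package-level API exported above. The endpoint URL, query text, and environment-variable handling are illustrative; see mcp_playground/examples/llm_example.py for the full, supported example.

    import asyncio
    import os

    from mcp_playground import MCPClient
    from mcp_playground.llm_bridge import OpenAIBridge
    from mcp_playground.llm_bridge.models import DEFAULT_OPENAI_MODEL

    async def main():
        # The MCP endpoint must end with /sse (Server-Sent Events).
        client = MCPClient("http://localhost:8000/sse")
        bridge = OpenAIBridge(client, os.environ["OPENAI_API_KEY"], model=DEFAULT_OPENAI_MODEL)

        tools = await bridge.fetch_tools()   # discover the MCP server's tools
        print(f"{len(tools)} tools available")

        result = await bridge.process_query("Convert https://arxiv.org/pdf/2404.09982 to markdown")
        print(result["tool_call"], result["tool_result"])

    asyncio.run(main())
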
/mcp_playground/client.py:
--------------------------------------------------------------------------------
1 | """
2 | MCP SSE Client - A Python client for interacting with Model Context Protocol (MCP) endpoints.
3 |
4 | This module provides a client for connecting to MCP endpoints using Server-Sent Events (SSE),
5 | listing available tools, and invoking tools with parameters.
6 |
7 | IMPORTANT: MCP server URLs must end with '/sse' for Server-Sent Events communication.
8 | Example: http://localhost:8000/sse
9 | """
10 |
11 | import asyncio
12 | import logging
13 | from typing import Any, Dict, List, Optional
14 | from urllib.parse import urlparse
15 | from dataclasses import dataclass
16 | from mcp import ClientSession
17 | from mcp.client.sse import sse_client
18 | from pydantic import BaseModel
19 |
20 | # Set up logging
21 | logger = logging.getLogger(__name__)
22 |
23 |
24 | @dataclass
25 | class ToolParameter:
26 | """Represents a parameter for a tool.
27 |
28 | Attributes:
29 | name: Parameter name
30 | parameter_type: Parameter type (e.g., "string", "number")
31 | description: Parameter description
32 | required: Whether the parameter is required
33 | default: Default value for the parameter
34 | """
35 | name: str
36 | parameter_type: str
37 | description: str
38 | required: bool = False
39 | default: Any = None
40 |
41 |
42 | @dataclass
43 | class ToolDef:
44 | """Represents a tool definition.
45 |
46 | Attributes:
47 | name: Tool name
48 | description: Tool description
49 | parameters: List of ToolParameter objects
50 | metadata: Optional dictionary of additional metadata
51 | identifier: Tool identifier (defaults to name)
52 | """
53 | name: str
54 | description: str
55 | parameters: List[ToolParameter]
56 | metadata: Optional[Dict[str, Any]] = None
57 | identifier: str = ""
58 |
59 |
60 | @dataclass
61 | class ToolInvocationResult:
62 | """Represents the result of a tool invocation.
63 |
64 | Attributes:
65 | content: Result content as a string
66 | error_code: Error code (0 for success, 1 for error)
67 | """
68 | content: str
69 | error_code: int
70 |
71 |
72 | class MCPConnectionError(Exception):
73 | """Exception raised when MCP connection fails"""
74 | pass
75 |
76 |
77 | class MCPTimeoutError(Exception):
78 | """Exception raised when MCP operation times out"""
79 | pass
80 |
81 |
82 | class MCPClient:
83 | """Client for interacting with Model Context Protocol (MCP) endpoints
84 |
85 | IMPORTANT: MCP server URLs must end with '/sse' for Server-Sent Events communication.
86 | Example: http://localhost:8000/sse
87 | """
88 |
89 | def __init__(self, endpoint: str, timeout: float = 30.0, max_retries: int = 3):
90 | """Initialize MCP client with endpoint URL
91 |
92 | Args:
93 | endpoint: The MCP endpoint URL (must be http or https and should end with '/sse')
94 | timeout: Connection timeout in seconds
95 | max_retries: Maximum number of retry attempts
96 |
97 | Raises:
98 | ValueError: If endpoint is not a valid HTTP(S) URL
99 |
100 | Note:
101 | The endpoint URL should end with '/sse' for proper Server-Sent Events communication.
102 | Example: http://localhost:8000/sse
103 | """
104 | if urlparse(endpoint).scheme not in ("http", "https"):
105 | raise ValueError(f"Endpoint {endpoint} is not a valid HTTP(S) URL")
106 |
107 | # Warn if URL doesn't end with /sse (but don't fail - allow flexibility)
108 | if not endpoint.endswith('/sse'):
109 | logger.warning(f"Endpoint URL '{endpoint}' does not end with '/sse'. "
110 | f"MCP servers typically require '/sse' suffix for Server-Sent Events. "
111 | f"Consider using: {endpoint.rstrip('/')}/sse")
112 |
113 | self.endpoint = endpoint
114 | self.timeout = timeout
115 | self.max_retries = max_retries
116 |
117 | async def _execute_with_retry(self, operation_name: str, operation_func):
118 | """Execute an operation with retry logic and proper error handling
119 |
120 | Args:
121 | operation_name: Name of the operation for logging
122 | operation_func: Async function to execute
123 |
124 | Returns:
125 | Result of the operation
126 |
127 | Raises:
128 | MCPConnectionError: If connection fails after all retries
129 | MCPTimeoutError: If operation times out
130 | """
131 | last_exception = None
132 |
133 | for attempt in range(self.max_retries):
134 | try:
135 | logger.debug(f"Attempting {operation_name} (attempt {attempt + 1}/{self.max_retries})")
136 |
137 | # Execute with timeout
138 | result = await asyncio.wait_for(operation_func(), timeout=self.timeout)
139 | logger.debug(f"{operation_name} completed successfully")
140 | return result
141 |
142 | except asyncio.TimeoutError as e:
143 | last_exception = MCPTimeoutError(f"{operation_name} timed out after {self.timeout} seconds")
144 | logger.warning(f"{operation_name} timed out on attempt {attempt + 1}")
145 |
146 | except Exception as e:
147 | last_exception = e
148 | logger.warning(f"{operation_name} failed on attempt {attempt + 1}: {str(e)}")
149 |
150 | # Don't retry on certain types of errors
151 | if isinstance(e, (ValueError, TypeError)):
152 | break
153 |
154 | # Wait before retry (exponential backoff)
155 | if attempt < self.max_retries - 1:
156 | wait_time = 2 ** attempt
157 | logger.debug(f"Waiting {wait_time} seconds before retry")
158 | await asyncio.sleep(wait_time)
159 |
160 | # All retries failed
161 | if isinstance(last_exception, MCPTimeoutError):
162 | raise last_exception
163 | else:
164 | raise MCPConnectionError(f"{operation_name} failed after {self.max_retries} attempts: {str(last_exception)}")
165 |
166 | async def _safe_sse_operation(self, operation_func):
167 | """Safely execute an SSE operation with proper task cleanup
168 |
169 | Args:
170 | operation_func: Function that takes (streams, session) as arguments
171 |
172 | Returns:
173 | Result of the operation
174 | """
175 | streams = None
176 | session = None
177 |
178 | try:
179 | # Create SSE client with proper error handling
180 | streams = sse_client(self.endpoint)
181 | async with streams as stream_context:
182 | # Create session with proper cleanup
183 | session = ClientSession(*stream_context)
184 | async with session as session_context:
185 | await session_context.initialize()
186 | return await operation_func(session_context)
187 |
188 | except Exception as e:
189 | logger.error(f"SSE operation failed: {str(e)}")
190 | # Ensure proper cleanup of any remaining tasks
191 | if session:
192 | try:
193 | # Cancel any pending tasks in the session
194 | tasks = [task for task in asyncio.all_tasks() if not task.done()]
195 | if tasks:
196 | logger.debug(f"Cancelling {len(tasks)} pending tasks")
197 | for task in tasks:
198 | task.cancel()
199 | # Wait for tasks to be cancelled
200 | await asyncio.gather(*tasks, return_exceptions=True)
201 | except Exception as cleanup_error:
202 | logger.warning(f"Error during task cleanup: {cleanup_error}")
203 | raise
204 |
205 | async def list_tools(self) -> List[ToolDef]:
206 | """List available tools from the MCP endpoint
207 |
208 | Returns:
209 | List of ToolDef objects describing available tools
210 |
211 | Raises:
212 | MCPConnectionError: If connection fails
213 | MCPTimeoutError: If operation times out
214 | """
215 | async def _list_tools_operation():
216 | async def _operation(session):
217 | tools_result = await session.list_tools()
218 | tools = []
219 |
220 | for tool in tools_result.tools:
221 | parameters = []
222 | required_params = tool.inputSchema.get("required", [])
223 | for param_name, param_schema in tool.inputSchema.get("properties", {}).items():
224 | parameters.append(
225 | ToolParameter(
226 | name=param_name,
227 | parameter_type=param_schema.get("type", "string"),
228 | description=param_schema.get("description", ""),
229 | required=param_name in required_params,
230 | default=param_schema.get("default"),
231 | )
232 | )
233 | tools.append(
234 | ToolDef(
235 | name=tool.name,
236 | description=tool.description,
237 | parameters=parameters,
238 | metadata={"endpoint": self.endpoint},
239 | identifier=tool.name # Using name as identifier
240 | )
241 | )
242 | return tools
243 |
244 | return await self._safe_sse_operation(_operation)
245 |
246 | return await self._execute_with_retry("list_tools", _list_tools_operation)
247 |
248 | async def invoke_tool(self, tool_name: str, kwargs: Dict[str, Any]) -> ToolInvocationResult:
249 | """Invoke a specific tool with parameters
250 |
251 | Args:
252 | tool_name: Name of the tool to invoke
253 | kwargs: Dictionary of parameters to pass to the tool
254 |
255 | Returns:
256 | ToolInvocationResult containing the tool's response
257 |
258 | Raises:
259 | MCPConnectionError: If connection fails
260 | MCPTimeoutError: If operation times out
261 | """
262 | async def _invoke_tool_operation():
263 | async def _operation(session):
264 | result = await session.call_tool(tool_name, kwargs)
265 | return ToolInvocationResult(
266 |                 content="\n".join([item.model_dump_json() for item in result.content]),
267 | error_code=1 if result.isError else 0,
268 | )
269 |
270 | return await self._safe_sse_operation(_operation)
271 |
272 | return await self._execute_with_retry(f"invoke_tool({tool_name})", _invoke_tool_operation)
273 |
274 | async def check_connection(self) -> bool:
275 | """Check if the MCP endpoint is reachable
276 |
277 | Returns:
278 | True if connection is successful, False otherwise
279 | """
280 | try:
281 | await self.list_tools()
282 | return True
283 | except Exception as e:
284 | logger.debug(f"Connection check failed: {str(e)}")
285 | return False
286 |
287 | def get_endpoint_info(self) -> Dict[str, Any]:
288 | """Get information about the configured endpoint
289 |
290 | Returns:
291 | Dictionary with endpoint information
292 | """
293 | parsed = urlparse(self.endpoint)
294 | return {
295 | "endpoint": self.endpoint,
296 | "scheme": parsed.scheme,
297 | "hostname": parsed.hostname,
298 | "port": parsed.port,
299 | "path": parsed.path,
300 | "timeout": self.timeout,
301 | "max_retries": self.max_retries
302 | }
303 |
--------------------------------------------------------------------------------
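
Note: a short sketch of the retry/timeout error handling exposed by MCPClient above. The tool name and parameters are illustrative (taken from the commented-out example in usage_example.py).

    import asyncio

    from mcp_playground.client import MCPClient, MCPConnectionError, MCPTimeoutError

    async def main():
        client = MCPClient("http://localhost:8000/sse", timeout=10.0, max_retries=2)

        # check_connection() swallows errors and returns a bool.
        if not await client.check_connection():
            print("Endpoint unreachable:", client.get_endpoint_info())
            return

        try:
            result = await client.invoke_tool(
                "convert_document",
                {"source": "https://arxiv.org/pdf/2404.09982", "enable_ocr": False},
            )
            print("error_code:", result.error_code)
            print(result.content)
        except MCPTimeoutError as e:
            print("Timed out:", e)
        except MCPConnectionError as e:
            print("Connection failed after retries:", e)

    asyncio.run(main())
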
/mcp_playground/examples/__init__.py:
--------------------------------------------------------------------------------
1 | """
2 | Examples for using the MCP SSE Client.
3 | """
4 |
--------------------------------------------------------------------------------
/mcp_playground/examples/llm_example.py:
--------------------------------------------------------------------------------
1 | """
2 | Example showing how to use the MCP SSE Client with an LLM.
3 |
4 | This script demonstrates how to integrate the MCP client with OpenAI or Anthropic
5 | to enable LLM-driven tool selection and invocation, with a focus on understanding
6 | how the LLM makes its tool selection decisions.
7 | """
8 |
9 | import asyncio
10 | import sys
11 | import os
12 | import json
13 | import textwrap
14 | import argparse
26 | from mcp_playground import MCPClient
27 | # Import all bridges
28 | from mcp_playground.llm_bridge import OpenAIBridge, AnthropicBridge, OllamaBridge
29 | from mcp_playground.format_converters import to_openai_format, to_anthropic_format
30 | # Import model definitions
31 | from mcp_playground.llm_bridge.models import (
32 | OPENAI_MODELS, DEFAULT_OPENAI_MODEL,
33 | ANTHROPIC_MODELS, DEFAULT_ANTHROPIC_MODEL,
34 | DEFAULT_OLLAMA_MODEL # Import Ollama default
35 | )
36 |
37 |
38 | def print_section(title, content, indent=0):
39 | """Print a formatted section with a title and content."""
40 | indent_str = " " * indent
41 | print(f"\n{indent_str}=== {title} ===")
42 | if isinstance(content, str):
43 | for line in content.split('\n'):
44 | wrapped_lines = textwrap.wrap(line, width=100 - indent)
45 | for wrapped in wrapped_lines:
46 | print(f"{indent_str}{wrapped}")
47 | else:
48 | print(f"{indent_str}{content}")
49 |
50 |
51 | def print_tool_summary(tools, formatted_tools=None):
52 | """Print a summary of the available tools."""
53 | print_section("Available Tools Summary", f"{len(tools)} tools available:")
54 |
55 | for i, tool in enumerate(tools):
56 | print(f" {i+1}. {tool.name}: {tool.description}")
57 |
58 | # Print key parameters
59 | required_params = [p for p in tool.parameters if p.required]
60 | if required_params:
61 | print(" Required parameters:")
62 | for param in required_params:
63 | print(f" - {param.name} ({param.parameter_type}): {param.description}")
64 |
65 | optional_params = [p for p in tool.parameters if not p.required]
66 | if optional_params:
67 | print(" Optional parameters:")
68 | for param in optional_params:
69 | print(f" - {param.name} ({param.parameter_type}): {param.description}")
70 |
71 | if formatted_tools:
72 | print("\n Note: These tools have been formatted for the LLM with proper JSON Schema types.")
73 |
74 |
75 | def extract_reasoning(llm_response, provider):
76 | """Extract reasoning from LLM response based on provider."""
77 | if provider == "openai":
78 | if hasattr(llm_response.choices[0].message, 'content') and llm_response.choices[0].message.content:
79 | return llm_response.choices[0].message.content
80 | return "[No explicit reasoning provided]"
81 | else: # anthropic
82 | text_parts = []
83 | for content in llm_response.content:
84 | if content.type == "text":
85 | text_parts.append(content.text)
86 | return "\n".join(text_parts) if text_parts else "[No explicit reasoning provided]"
87 |
88 |
89 | async def main():
90 | """Run the MCP-LLM integration example with focus on understanding tool selection."""
91 |
92 | # --- Argument Parsing ---
93 | parser = argparse.ArgumentParser(description="MCP-LLM Integration Example with Tool Selection Analysis")
94 |
95 | # Use imported model lists and defaults
96 | parser.add_argument(
97 | "--provider",
98 | choices=["openai", "anthropic", "ollama"], # Add ollama
99 | help="Select the LLM provider (openai, anthropic, or ollama). If not provided, you will be prompted."
100 | )
101 | parser.add_argument(
102 | "--openai-model",
103 | choices=OPENAI_MODELS,
104 | default=DEFAULT_OPENAI_MODEL,
105 | help=f"Select the OpenAI model to use (default: {DEFAULT_OPENAI_MODEL}). Choices: {', '.join(OPENAI_MODELS)}"
106 | )
107 | parser.add_argument(
108 | "--anthropic-model",
109 | choices=ANTHROPIC_MODELS,
110 | default=DEFAULT_ANTHROPIC_MODEL,
111 | help=f"Select the Anthropic model to use (default: {DEFAULT_ANTHROPIC_MODEL}). Choices: {', '.join(ANTHROPIC_MODELS)}"
112 | )
113 | parser.add_argument(
114 | "--ollama-model",
115 | default=DEFAULT_OLLAMA_MODEL,
116 | help=f"Specify the Ollama model name (default: {DEFAULT_OLLAMA_MODEL}). Ensure it's available locally."
117 | )
118 | parser.add_argument(
119 | "--ollama-host",
120 | help="Specify the Ollama host URL (e.g., 'http://localhost:11434'). Uses library default if not set."
121 | )
122 | parser.add_argument(
123 | "--endpoint",
124 | default=os.environ.get("MCP_ENDPOINT", "http://localhost:8000/sse"),
125 | help="MCP SSE endpoint URL (default: http://localhost:8000/sse or MCP_ENDPOINT env var)"
126 | )
127 | parser.add_argument("--openai-key", help="OpenAI API key (overrides OPENAI_API_KEY env var)")
128 | parser.add_argument("--anthropic-key", help="Anthropic API key (overrides ANTHROPIC_API_KEY env var)")
129 | # No API key arg for Ollama
130 |
131 | args = parser.parse_args()
132 |
133 | print("Starting MCP-LLM Integration Example...")
134 |
135 | # --- Configuration Setup ---
136 | endpoint = args.endpoint
137 |
138 | # Determine provider
139 | provider = args.provider
140 | if not provider:
141 |         provider = input("Select LLM provider (openai/anthropic/ollama): ").strip().lower()
142 |
143 | # Initialize MCP client
144 | client = MCPClient(endpoint)
145 | print(f"Connecting to MCP server at: {endpoint}")
146 |
147 | # Setup bridge based on provider
148 | if provider == "openai":
149 | api_key = args.openai_key or os.environ.get("OPENAI_API_KEY")
150 | if not api_key:
151 | api_key = input("Enter OpenAI API key: ").strip()
152 |
153 | model = args.openai_model
154 | bridge = OpenAIBridge(client, api_key, model=model)
155 | print(f"Using OpenAI LLM bridge with model: {model}")
156 |
157 | elif provider == "anthropic":
158 | api_key = args.anthropic_key or os.environ.get("ANTHROPIC_API_KEY")
159 | if not api_key:
160 | api_key = input("Enter Anthropic API key: ").strip()
161 |
162 | model = args.anthropic_model
163 | bridge = AnthropicBridge(client, api_key, model=model)
164 | print(f"Using Anthropic LLM bridge with model: {model}")
165 |
166 | elif provider == "ollama":
167 | # No API key needed for Ollama
168 | model = args.ollama_model
169 | host = args.ollama_host # Will be None if not provided, which is handled by the bridge
170 | bridge = OllamaBridge(client, model=model, host=host)
171 | print(f"Using Ollama LLM bridge with model: {model} (Host: {host or 'Default'})")
172 | # Optional: Add connection check
173 | if not await bridge.check_connection():
174 | print(f"Warning: Could not verify connection to Ollama. Ensure it's running and model '{model}' is available.", file=sys.stderr)
175 |
176 | else:
177 | print(f"Unsupported provider: {provider}", file=sys.stderr)
178 | return
179 |
180 | # --- Tool Fetching and Interaction ---
181 | print("Fetching tools from server...")
182 | tools = await bridge.fetch_tools()
183 |
184 | # Show tool summary
185 | print_tool_summary(tools)
186 |
187 | # Interactive mode
188 | print("\nEntering interactive mode. Type 'quit' to exit.")
189 | while True:
190 | query = input("\nEnter your query: ")
191 | if query.lower() in ("quit", "exit"):
192 | break
193 |
194 | print_section("User Query", query)
195 | print("Processing query...")
196 |
197 | # Get the formatted tools that will be sent to the LLM
198 | if provider == "openai" or provider == "ollama": # Ollama uses OpenAI format
199 | formatted_tools = to_openai_format(tools)
200 | elif provider == "anthropic":
201 | formatted_tools = to_anthropic_format(tools)
202 | else:
203 | formatted_tools = [] # Should not happen due to earlier check
204 |
205 | # Process the query
206 | result = await bridge.process_query(query)
207 |
208 | # Extract and show LLM's reasoning
209 | # Need to handle different response structures
210 | llm_response = result["initial_llm_response"]
211 | reasoning = "[Could not extract reasoning]" # Default
212 | if provider == "openai":
213 | if hasattr(llm_response.choices[0].message, 'content') and llm_response.choices[0].message.content:
214 | reasoning = llm_response.choices[0].message.content
215 | elif provider == "anthropic":
216 | text_parts = [c.text for c in llm_response.content if hasattr(c, 'type') and c.type == "text"]
217 | if text_parts: reasoning = "\n".join(text_parts)
218 | elif provider == "ollama":
219 | if isinstance(llm_response, dict) and 'message' in llm_response and llm_response['message'].get('content'):
220 | # Check if tool calls exist; if so, reasoning might be empty or just whitespace
221 | if not llm_response['message'].get('tool_calls'):
222 | reasoning = llm_response['message']['content']
223 | # Optionally, could try to extract pre-tool-call text if available, but Ollama structure varies
224 |
225 | print_section("LLM Reasoning", reasoning)
226 |
227 | # Show tool selection decision
228 | if result["tool_call"]:
229 | selected_tool = result["tool_call"]["name"]
230 | params = result["tool_call"]["parameters"]
231 |
232 | # Find the matching tool definition
233 | tool_def = next((t for t in tools if t.name == selected_tool), None)
234 |
235 | print_section("Tool Selection Decision", f"Selected: {selected_tool}")
236 | if tool_def:
237 | print(f" Description: {tool_def.description}")
238 |
239 | # Show parameter matching
240 | print("\n Parameters provided:")
241 | for param_name, param_value in params.items():
242 | param_def = next((p for p in tool_def.parameters if p.name == param_name), None)
243 | if param_def:
244 | required = "required" if param_def.required else "optional"
245 | print(f" - {param_name} ({param_def.parameter_type}, {required}): {param_value}")
246 | print(f" Description: {param_def.description}")
247 |
248 | # Show how the query maps to the tool selection
249 | print("\n Query to Tool Mapping:")
250 | print(f" Query: \"{query}\"")
251 | print(f" Tool: {selected_tool}")
252 | print(f" Key parameters: {', '.join(params.keys())}")
253 | else:
254 | print_section("Tool Selection Decision", "No tool was selected by the LLM")
255 |
256 | # Show tool result if any
257 | if result["tool_result"]:
258 | print_section("Tool Execution Result",
259 | f"Success: {result['tool_result'].error_code == 0}\n" +
260 | f"Content: {result['tool_result'].content}")
261 |
262 |
263 | if __name__ == "__main__":
264 | try:
265 | asyncio.run(main())
266 | except KeyboardInterrupt:
267 | print("\nExiting...")
268 | except Exception as e:
269 | print(f"Error: {e}", file=sys.stderr)
270 | import traceback
271 | traceback.print_exc()
272 |
--------------------------------------------------------------------------------
/mcp_playground/examples/usage_example.py:
--------------------------------------------------------------------------------
1 | """
2 | MCP SSE Client Usage Example
3 |
4 | This script demonstrates how to use the MCPClient to interact with an MCP endpoint,
5 | list available tools, and invoke a tool with parameters.
6 | """
7 |
8 | import asyncio
9 | import sys
10 | from mcp_playground import MCPClient
11 |
12 | async def main():
13 | print("Starting MCPClient example...")
14 | try:
15 | # Initialize the client
16 | # IMPORTANT: URL must end with /sse for Server-Sent Events
17 | print("Initializing client...")
18 | client = MCPClient("http://localhost:8001/sse") # Note the /sse suffix!
19 |
20 | # List available tools
21 | print("Listing available tools...")
22 | tools = await client.list_tools()
23 | print("Available tools:")
24 | for tool in tools:
25 | print(f"- {tool.name}: {tool.description}")
26 | print(" Parameters:")
27 | for param in tool.parameters:
28 | print(f" - {param.name} ({param.parameter_type}): {param.description}")
29 |
30 | # # Invoke a tool
31 | # print("\nInvoking tool 'convert_document'...")
32 | # result = await client.invoke_tool(
33 | # "convert_document",
34 | # {
35 | # "source": "https://arxiv.org/pdf/2404.09982",
36 | # "enable_ocr": False
37 | # }
38 | # )
39 | # print(f"\nTool result: {result.content}")
40 | # print(f"Error code: {result.error_code}")
41 | except Exception as e:
42 | print(f"Error: {e}", file=sys.stderr)
43 | import traceback
44 | traceback.print_exc()
45 |
46 | if __name__ == "__main__":
47 | asyncio.run(main())
48 | print("Script completed.")
49 |
--------------------------------------------------------------------------------
/mcp_playground/format_converters.py:
--------------------------------------------------------------------------------
1 | """
2 | Format converters for transforming MCP tool definitions to various LLM formats.
3 | """
4 | from typing import List, Dict, Any
5 | from .client import ToolDef, ToolParameter
6 |
7 | # Type mapping from Python/MCP types to JSON Schema types
8 | TYPE_MAPPING = {
9 | "int": "integer",
10 | "bool": "boolean",
11 | "str": "string",
12 | "float": "number",
13 | "list": "array",
14 | "dict": "object",
15 | "boolean": "boolean",
16 | "string": "string",
17 | "integer": "integer",
18 | "number": "number",
19 | "array": "array",
20 | "object": "object"
21 | }
22 |
23 |
24 | def _infer_array_item_type(param: ToolParameter) -> str:
25 | """Infer the item type for an array parameter based on its name and description.
26 |
27 | Args:
28 | param: The ToolParameter object
29 |
30 | Returns:
31 | The inferred JSON Schema type for array items
32 | """
33 | # Default to string items
34 | item_type = "string"
35 |
36 | # Check if parameter name contains hints about item type
37 | param_name_lower = param.name.lower()
38 | if any(hint in param_name_lower for hint in ["language", "code", "tag", "name", "id"]):
39 | item_type = "string"
40 | elif any(hint in param_name_lower for hint in ["number", "count", "amount", "index"]):
41 | item_type = "integer"
42 |
43 | # Also check the description for hints
44 | if param.description:
45 | desc_lower = param.description.lower()
46 | if "string" in desc_lower or "text" in desc_lower or "language" in desc_lower:
47 | item_type = "string"
48 | elif "number" in desc_lower or "integer" in desc_lower or "int" in desc_lower:
49 | item_type = "integer"
50 |
51 | return item_type
52 |
53 |
54 | def to_openai_format(tools: List[ToolDef]) -> List[Dict[str, Any]]:
55 | """Convert ToolDef objects to OpenAI function format.
56 |
57 | Args:
58 | tools: List of ToolDef objects to convert
59 |
60 | Returns:
61 | List of dictionaries in OpenAI function format
62 | """
63 |
64 | openai_tools = []
65 | for tool in tools:
66 | openai_tool = {
67 | "type": "function",
68 | "function": {
69 | "name": tool.name,
70 | "description": tool.description,
71 | "parameters": {
72 | "type": "object",
73 | "properties": {},
74 | "required": []
75 | }
76 | }
77 | }
78 |
79 | # Add properties
80 | for param in tool.parameters:
81 | # Map the type or use the original if no mapping exists
82 | schema_type = TYPE_MAPPING.get(param.parameter_type, param.parameter_type)
83 |
84 | param_schema = {
85 | "type": schema_type, # Use mapped type
86 | "description": param.description
87 | }
88 |
89 | # For arrays, we need to specify the items type
90 | if schema_type == "array":
91 | item_type = _infer_array_item_type(param)
92 | param_schema["items"] = {"type": item_type}
93 |
94 | openai_tool["function"]["parameters"]["properties"][param.name] = param_schema
95 |
96 | # Add default value if provided
97 | if param.default is not None:
98 | openai_tool["function"]["parameters"]["properties"][param.name]["default"] = param.default
99 |
100 | # Add to required list if required
101 | if param.required:
102 | openai_tool["function"]["parameters"]["required"].append(param.name)
103 |
104 | openai_tools.append(openai_tool)
105 | return openai_tools
106 |
107 |
108 | def to_anthropic_format(tools: List[ToolDef]) -> List[Dict[str, Any]]:
109 | """Convert ToolDef objects to Anthropic tool format.
110 |
111 | Args:
112 | tools: List of ToolDef objects to convert
113 |
114 | Returns:
115 | List of dictionaries in Anthropic tool format
116 | """
117 |
118 | anthropic_tools = []
119 | for tool in tools:
120 | anthropic_tool = {
121 | "name": tool.name,
122 | "description": tool.description,
123 | "input_schema": {
124 | "type": "object",
125 | "properties": {},
126 | "required": []
127 | }
128 | }
129 |
130 | # Add properties
131 | for param in tool.parameters:
132 | # Map the type or use the original if no mapping exists
133 | schema_type = TYPE_MAPPING.get(param.parameter_type, param.parameter_type)
134 |
135 | param_schema = {
136 | "type": schema_type, # Use mapped type
137 | "description": param.description
138 | }
139 |
140 | # For arrays, we need to specify the items type
141 | if schema_type == "array":
142 | item_type = _infer_array_item_type(param)
143 | param_schema["items"] = {"type": item_type}
144 |
145 | anthropic_tool["input_schema"]["properties"][param.name] = param_schema
146 |
147 | # Add default value if provided
148 | if param.default is not None:
149 | anthropic_tool["input_schema"]["properties"][param.name]["default"] = param.default
150 |
151 | # Add to required list if required
152 | if param.required:
153 | anthropic_tool["input_schema"]["required"].append(param.name)
154 |
155 | anthropic_tools.append(anthropic_tool)
156 | return anthropic_tools
157 |
--------------------------------------------------------------------------------
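A minimal sketch of how these converters could be exercised, assuming ToolDef and ToolParameter are simple pydantic-style models exposing the fields used above (the "echo" tool and its parameter are purely illustrative):

    from mcp_playground.client import ToolDef, ToolParameter
    from mcp_playground.format_converters import to_openai_format, to_anthropic_format

    # Hypothetical tool definition, only for illustration
    echo_tool = ToolDef(
        name="echo",
        description="Echo a message back to the caller",
        parameters=[
            ToolParameter(name="message", parameter_type="str",
                          description="Text to echo", required=True),
        ],
    )

    openai_tools = to_openai_format([echo_tool])
    anthropic_tools = to_anthropic_format([echo_tool])
    # "str" is mapped to the JSON Schema type "string" via TYPE_MAPPING:
    # openai_tools[0]["function"]["parameters"]["properties"]["message"]["type"] == "string"
    # anthropic_tools[0]["input_schema"]["required"] == ["message"]
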
/mcp_playground/llm_bridge/__init__.py:
--------------------------------------------------------------------------------
1 | """
2 | LLM Bridge for integrating MCP client with various LLM providers.
3 | """
4 |
5 | from .base import LLMBridge
6 | from .openai_bridge import OpenAIBridge
7 | from .anthropic_bridge import AnthropicBridge
8 | from .ollama_bridge import OllamaBridge
9 | from .openrouter_bridge import OpenRouterBridge
10 | from .openrouter_client import OpenRouterClient, format_model_display
11 |
12 | __all__ = [
13 | "LLMBridge",
14 | "OpenAIBridge",
15 | "AnthropicBridge",
16 | "OllamaBridge",
17 | "OpenRouterBridge",
18 | "OpenRouterClient",
19 | "format_model_display"
20 | ]
21 |
--------------------------------------------------------------------------------
/mcp_playground/llm_bridge/anthropic_bridge.py:
--------------------------------------------------------------------------------
1 | """
2 | Anthropic-specific implementation of the LLM Bridge.
3 | """
4 | from typing import Dict, List, Any, Optional
5 | import anthropic
6 | from ..client import ToolDef
7 | from ..format_converters import to_anthropic_format
8 | from .base import LLMBridge
9 | from .models import DEFAULT_ANTHROPIC_MODEL # Import default model
10 |
11 |
12 | class AnthropicBridge(LLMBridge):
13 | """Anthropic-specific implementation of the LLM Bridge."""
14 |
15 | def __init__(self, mcp_client, api_key, model=DEFAULT_ANTHROPIC_MODEL): # Use imported default
16 | """Initialize Anthropic bridge with API key and model.
17 |
18 | Args:
19 | mcp_client: An initialized MCPClient instance
20 | api_key: Anthropic API key
21 | model: Anthropic model to use (default: from models.py)
22 | """
23 | super().__init__(mcp_client)
24 | self.llm_client = anthropic.Anthropic(api_key=api_key)
25 | self.model = model
26 |
27 | # Store provider info for metadata
28 | self.provider_info = {
29 | "provider": "anthropic",
30 | "model": model,
31 | "base_url": "https://api.anthropic.com"
32 | }
33 |
34 | async def format_tools(self, tools: List[ToolDef]) -> List[Dict[str, Any]]:
35 | """Format tools for Anthropic.
36 |
37 | Args:
38 | tools: List of ToolDef objects
39 |
40 | Returns:
41 | List of tools in Anthropic format
42 | """
43 | return to_anthropic_format(tools)
44 |
45 | async def submit_query(self, query: str, formatted_tools: List[Dict[str, Any]], conversation_history: Optional[List[Dict[str, str]]] = None) -> Dict[str, Any]:
46 | """Submit a query to Anthropic with the formatted tools.
47 |
48 | Args:
49 | query: User query string
50 | formatted_tools: Tools in Anthropic format
51 | conversation_history: Previous conversation messages (optional)
52 |
53 | Returns:
54 | Anthropic API response
55 | """
56 | # Build messages with conversation history
57 | messages = conversation_history.copy() if conversation_history else []
58 | messages.append({"role": "user", "content": query})
59 |
60 | response = self.llm_client.messages.create(
61 | model=self.model,
62 | max_tokens=4096,
63 | system="You are a helpful tool-using assistant.",
64 | messages=messages,
65 | tools=formatted_tools
66 | )
67 |
68 | return response
69 |
70 | async def submit_query_without_tools(self, messages: List[Dict[str, Any]]) -> Any:
71 | """Submit a query to Anthropic without tools for final processing.
72 |
73 | Args:
74 | messages: Complete conversation including tool results
75 |
76 | Returns:
77 | Anthropic API response
78 | """
79 | # Make the API call without tools
80 | response = self.llm_client.messages.create(
81 | model=self.model,
82 | max_tokens=4096,
83 | messages=messages
84 | # Note: No tools parameter - this is for final processing
85 | )
86 |
87 | return response
88 |
89 | async def parse_tool_call(self, llm_response: Any) -> Optional[Dict[str, Any]]:
90 | """Parse the Anthropic response to extract tool calls.
91 |
92 | Args:
93 | llm_response: Response from Anthropic
94 |
95 | Returns:
96 | Dictionary with tool name and parameters, or None if no tool call
97 | """
98 | for content in llm_response.content:
99 | if content.type == "tool_use":
100 | return {
101 | "name": content.name, # Access name directly from the ToolUseBlock
102 | "parameters": content.input # Access input directly from the ToolUseBlock
103 | }
104 |
105 | return None
106 |
--------------------------------------------------------------------------------
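A quick sketch of wiring the Anthropic bridge to a client (the endpoint URL is a placeholder and the API key is read from the environment):

    import asyncio
    import os

    from mcp_playground import MCPClient
    from mcp_playground.llm_bridge import AnthropicBridge

    async def main():
        client = MCPClient("http://localhost:8001/sse")  # placeholder /sse endpoint
        bridge = AnthropicBridge(client, api_key=os.environ["ANTHROPIC_API_KEY"])
        result = await bridge.process_query("What tools do you have available?")
        print(result["tool_call"])  # None if the model chose not to call a tool

    asyncio.run(main())
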
/mcp_playground/llm_bridge/base.py:
--------------------------------------------------------------------------------
1 | """
2 | Base class for LLM Bridge implementations.
3 | """
4 | import abc
5 | from typing import Dict, List, Any, Optional
6 | from ..client import MCPClient, ToolDef, ToolInvocationResult
7 |
8 |
9 | class LLMBridge(abc.ABC):
10 | """Abstract base class for LLM bridge implementations."""
11 |
12 | def __init__(self, mcp_client: MCPClient):
13 | """Initialize the LLM bridge with an MCPClient instance.
14 |
15 | Args:
16 | mcp_client: An initialized MCPClient instance
17 | """
18 | self.mcp_client = mcp_client
19 | self.tools = None
20 |
21 | async def fetch_tools(self) -> List[ToolDef]:
22 | """Fetch available tools from the MCP endpoint.
23 |
24 | Returns:
25 | List of ToolDef objects
26 | """
27 | self.tools = await self.mcp_client.list_tools()
28 | return self.tools
29 |
30 | @abc.abstractmethod
31 | async def format_tools(self, tools: List[ToolDef]) -> Any:
32 | """Format tools for the specific LLM provider.
33 |
34 | Args:
35 | tools: List of ToolDef objects
36 |
37 | Returns:
38 | Formatted tools in the LLM-specific format
39 | """
40 | pass
41 |
42 | @abc.abstractmethod
43 | async def submit_query(self, query: str, formatted_tools: Any, conversation_history: Optional[List[Dict[str, str]]] = None) -> Dict[str, Any]:
44 | """Submit a query to the LLM with the formatted tools.
45 |
46 | Args:
47 | query: User query string
48 | formatted_tools: Tools in the LLM-specific format
49 | conversation_history: Previous conversation messages (optional)
50 |
51 | Returns:
52 | LLM response
53 | """
54 | pass
55 |
56 | @abc.abstractmethod
57 | async def parse_tool_call(self, llm_response: Any) -> Optional[Dict[str, Any]]:
58 | """Parse the LLM response to extract tool calls.
59 |
60 | Args:
61 | llm_response: Response from the LLM
62 |
63 | Returns:
64 | Dictionary with tool name and parameters, or None if no tool call
65 | """
66 | pass
67 |
68 | @abc.abstractmethod
69 | async def submit_query_without_tools(self, messages: List[Dict[str, Any]]) -> Any:
70 | """Submit a query to the LLM without tools for final processing.
71 |
72 | Args:
73 | messages: Complete conversation including tool results
74 |
75 | Returns:
76 | LLM response
77 | """
78 | pass
79 |
80 | async def execute_tool(self, tool_name: str, kwargs: Dict[str, Any]) -> ToolInvocationResult:
81 | """Execute a tool with the given parameters.
82 |
83 | Args:
84 | tool_name: Name of the tool to invoke
85 | kwargs: Dictionary of parameters to pass to the tool
86 |
87 | Returns:
88 | ToolInvocationResult containing the tool's response
89 | """
90 | return await self.mcp_client.invoke_tool(tool_name, kwargs)
91 |
92 | async def process_query(self, query: str, conversation_history: Optional[List[Dict[str, str]]] = None) -> Dict[str, Any]:
93 | """Process a user query through the LLM and execute any tool calls.
94 |
95 | This method handles the full flow:
96 | 1. Fetch tools if not already fetched
97 | 2. Format tools for the LLM
98 | 3. Submit query to LLM
99 | 4. Parse tool calls from LLM response
100 | 5. Execute tool if needed
101 | 6. Send tool result back to LLM for processing
102 |
103 | Args:
104 | query: User query string
105 | conversation_history: Previous conversation messages (optional)
106 |
107 | Returns:
108 | Enhanced dictionary containing comprehensive response data
109 | """
110 | import time
111 | from datetime import datetime
112 |
113 | start_time = time.time()
114 | processing_steps = []
115 |
116 | # 1. Fetch tools if not already fetched
117 | if self.tools is None:
118 | await self.fetch_tools()
119 |
120 | # 2. Format tools for the LLM
121 | formatted_tools = await self.format_tools(self.tools)
122 |
123 | # 3. Submit query to LLM
124 | step_start = time.time()
125 | initial_llm_response = await self.submit_query(query, formatted_tools, conversation_history)
126 | processing_steps.append({
127 | "step": "initial_query",
128 | "timestamp": datetime.now().isoformat(),
129 | "duration": time.time() - step_start,
130 | "data": "Initial LLM query submitted"
131 | })
132 |
133 | # 4. Parse tool calls from LLM response
134 | tool_call = await self.parse_tool_call(initial_llm_response)
135 |
136 | # Enhanced result structure
137 | result = {
138 | "initial_llm_response": initial_llm_response,
139 | "final_llm_response": initial_llm_response, # Will be updated if tools are used
140 | "raw_initial_response": initial_llm_response,
141 | "raw_final_response": initial_llm_response, # Will be updated if tools are used
142 | "tool_call": tool_call,
143 | "tool_result": None,
144 | "processing_steps": processing_steps,
145 | "metadata": {
146 | "provider": getattr(self, 'provider_info', {}).get('provider', 'unknown'),
147 | "model": getattr(self, 'provider_info', {}).get('model', getattr(self, 'model', 'unknown')),
148 | "base_url": getattr(self, 'provider_info', {}).get('base_url', 'unknown'),
149 | "has_tools": bool(self.tools),
150 | "execution_time": None # Will be set at the end
151 | }
152 | }
153 |
154 | # 5. Execute tool if needed
155 | if tool_call:
156 | tool_name = tool_call.get("name")
157 | kwargs = tool_call.get("parameters", {})
158 |
159 | step_start = time.time()
160 | tool_result = await self.execute_tool(tool_name, kwargs)
161 | processing_steps.append({
162 | "step": "tool_execution",
163 | "timestamp": datetime.now().isoformat(),
164 | "duration": time.time() - step_start,
165 | "data": f"Executed tool: {tool_name}"
166 | })
167 |
168 | result["tool_result"] = tool_result
169 |
170 | # 6. Send tool result back to LLM for processing
171 | if tool_result.error_code == 0: # Only if tool succeeded
172 | step_start = time.time()
173 | final_response = await self.process_tool_result(
174 | query, tool_call, tool_result, conversation_history
175 | )
176 | processing_steps.append({
177 | "step": "final_processing",
178 | "timestamp": datetime.now().isoformat(),
179 | "duration": time.time() - step_start,
180 | "data": "Final LLM processing with tool results"
181 | })
182 |
183 | result["final_llm_response"] = final_response
184 | result["raw_final_response"] = final_response
185 |
186 | # Update metadata
187 | result["metadata"]["execution_time"] = time.time() - start_time
188 | result["processing_steps"] = processing_steps
189 |
190 | return result
191 |
192 | async def process_tool_result(self, original_query: str, tool_call: Dict[str, Any],
193 | tool_result: Any, conversation_history: Optional[List[Dict[str, str]]] = None) -> Any:
194 | """Send tool result back to LLM for processing and response generation.
195 |
196 | Args:
197 | original_query: The user's original question
198 | tool_call: The tool call that was executed
199 | tool_result: The result from the tool execution
200 | conversation_history: Previous conversation context
201 |
202 | Returns:
203 | LLM response after processing the tool result
204 | """
205 | import json
206 |
207 |         # Build conversation with tool result using OpenAI-style tool messages (bridges whose APIs use a different schema, e.g. Anthropic, should override this method)
208 | messages = conversation_history.copy() if conversation_history else []
209 | messages.append({"role": "user", "content": original_query})
210 |
211 | # Add assistant's tool call
212 | messages.append({
213 | "role": "assistant",
214 | "content": None,
215 | "tool_calls": [{
216 | "id": tool_call.get("id", "call_1"),
217 | "type": "function",
218 | "function": {
219 | "name": tool_call.get("name"),
220 | "arguments": json.dumps(tool_call.get("parameters", {}))
221 | }
222 | }]
223 | })
224 |
225 | # Add tool result
226 | messages.append({
227 | "role": "tool",
228 | "tool_call_id": tool_call.get("id", "call_1"),
229 | "content": str(tool_result.content)
230 | })
231 |
232 | # Get LLM's final response (without tools this time)
233 | final_response = await self.submit_query_without_tools(messages)
234 | return final_response
235 |
--------------------------------------------------------------------------------
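To show how the enhanced result structure returned by process_query is meant to be consumed, a small sketch that works with any bridge subclass (the field names mirror the dictionary built above):

    async def report(bridge, query: str) -> None:
        result = await bridge.process_query(query)

        meta = result["metadata"]
        print(f"{meta['provider']}/{meta['model']} finished in {meta['execution_time']:.2f}s")

        if result["tool_call"]:
            print("Tool used:", result["tool_call"]["name"])
            print("Tool succeeded:", result["tool_result"].error_code == 0)

        for step in result["processing_steps"]:
            print(f"  {step['step']}: {step['duration']:.2f}s")
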
/mcp_playground/llm_bridge/models.py:
--------------------------------------------------------------------------------
1 | """
2 | Defines available models and defaults for LLM providers.
3 | """
4 |
5 | # OpenAI Models (Updated as of May 2025)
6 | OPENAI_MODELS = [
7 | 'gpt-4o',
8 | 'gpt-4.5-turbo',
9 | 'gpt-4.5-preview',
10 | 'gpt-4o-mini',
11 | 'gpt-4-turbo',
12 | 'gpt-4-vision-preview',
13 | 'gpt-3.5-turbo',
14 | 'o1-preview',
15 | 'o1-mini'
16 | ]
17 | DEFAULT_OPENAI_MODEL = 'gpt-4o'
18 |
19 | # Anthropic Models (Updated as of May 2025)
20 | ANTHROPIC_MODELS = [
21 | 'claude-3-opus-20240229',
22 | 'claude-3-sonnet-20240229',
23 | 'claude-3-haiku-20240307',
24 | 'claude-3.5-sonnet-20250501',
25 | 'claude-3-5-sonnet-20250501',
26 | 'claude-3-7-sonnet-20250219',
27 | 'claude-3-5-haiku-20241022-v1:0',
28 | 'claude-2.1',
29 | 'claude-2.0'
30 | ]
31 | DEFAULT_ANTHROPIC_MODEL = 'claude-3-5-sonnet-20250501'
32 |
33 | # Ensure defaults are in the lists
34 | if DEFAULT_OPENAI_MODEL not in OPENAI_MODELS:
35 | # Fallback if default is somehow removed or renamed
36 | DEFAULT_OPENAI_MODEL = OPENAI_MODELS[0] if OPENAI_MODELS else 'gpt-4o'
37 |
38 | if DEFAULT_ANTHROPIC_MODEL not in ANTHROPIC_MODELS:
39 | # Fallback
40 | DEFAULT_ANTHROPIC_MODEL = ANTHROPIC_MODELS[0] if ANTHROPIC_MODELS else 'claude-3-7-sonnet-20250219'
41 |
42 | # Ollama Default Model
43 | # Note: Ollama models are installed locally by the user (e.g., 'llama3', 'mistral').
44 | # We don't maintain a list here, but define a common default.
45 | DEFAULT_OLLAMA_MODEL = "llama3"
46 |
--------------------------------------------------------------------------------
/mcp_playground/llm_bridge/ollama_bridge.py:
--------------------------------------------------------------------------------
1 | """
2 | Ollama-specific implementation of the LLM Bridge for local models.
3 | """
4 | from typing import Dict, List, Any, Optional
5 | import json
6 | import ollama
7 | from ..client import ToolDef
8 | from ..format_converters import to_openai_format # Ollama uses OpenAI-like tool format
9 | from .base import LLMBridge
10 | from .models import DEFAULT_OLLAMA_MODEL  # Shared default model name, defined in models.py
11 | 
12 | # Note: Ollama model names are user-defined (e.g., 'llama3', 'mistral').
13 | # We don't maintain a static list here; users specify whichever model they have
14 | # pulled locally, and DEFAULT_OLLAMA_MODEL is only a common fallback.
15 |
16 | class OllamaBridge(LLMBridge):
17 | """Ollama-specific implementation of the LLM Bridge."""
18 |
19 | def __init__(self, mcp_client, model=DEFAULT_OLLAMA_MODEL, host=None):
20 | """Initialize Ollama bridge with model and optional host.
21 |
22 | Args:
23 | mcp_client: An initialized MCPClient instance.
24 | model: Ollama model name to use (e.g., 'llama3', 'mistral').
25 | Ensure the model is available locally in Ollama.
26 | host: Optional URL of the Ollama server (e.g., 'http://localhost:11434').
27 | If None, the default host configured for the ollama library will be used.
28 | """
29 | super().__init__(mcp_client)
30 | # Initialize Ollama client, optionally specifying the host
31 | self.llm_client = ollama.AsyncClient(host=host)
32 | self.model = model
33 | self.host = host # Store host for potential display/debugging
34 |
35 | # Store provider info for metadata
36 | self.provider_info = {
37 | "provider": "ollama",
38 | "model": model,
39 | "base_url": host or "http://localhost:11434"
40 | }
41 |
42 | print(f"Ollama Bridge initialized. Model: {self.model}, Host: {self.host or 'default'}")
43 |
44 | async def format_tools(self, tools: List[ToolDef]) -> List[Dict[str, Any]]:
45 | """Format tools for Ollama (uses OpenAI-like format).
46 |
47 | Args:
48 | tools: List of ToolDef objects
49 |
50 | Returns:
51 | List of tools in Ollama/OpenAI format
52 | """
53 | return to_openai_format(tools)
54 |
55 | async def submit_query(self, query: str, formatted_tools: List[Dict[str, Any]], conversation_history: Optional[List[Dict[str, str]]] = None) -> Dict[str, Any]:
56 | """Submit a query to Ollama with the formatted tools.
57 |
58 | Args:
59 | query: User query string
60 | formatted_tools: Tools in Ollama/OpenAI format
61 | conversation_history: Previous conversation messages (optional)
62 |
63 | Returns:
64 | Ollama API response object (dictionary-like)
65 | """
66 | # Build messages with conversation history
67 | messages = conversation_history.copy() if conversation_history else []
68 | messages.append({"role": "user", "content": query})
69 |
70 | try:
71 | response = await self.llm_client.chat(
72 | model=self.model,
73 | messages=messages,
74 | tools=formatted_tools,
75 | # Ollama automatically decides on tool use if tools are provided
76 | )
77 | # The response object is already a dictionary
78 | return response
79 | except ollama.ResponseError as e:
80 | # Handle potential errors like model not found
81 | print(f"Ollama API Error: {e.error} (Status code: {e.status_code})")
82 | # Re-raise or return an error structure if needed
83 | raise e
84 | except Exception as e:
85 | print(f"An unexpected error occurred with Ollama: {e}")
86 | raise e
87 |
88 | async def submit_query_without_tools(self, messages: List[Dict[str, Any]]) -> Any:
89 | """Submit a query to Ollama without tools for final processing.
90 |
91 | Args:
92 | messages: Complete conversation including tool results
93 |
94 | Returns:
95 | Ollama API response
96 | """
97 | try:
98 | response = await self.llm_client.chat(
99 | model=self.model,
100 | messages=messages
101 | # Note: No tools parameter - this is for final processing
102 | )
103 | return response
104 | except Exception as e:
105 | print(f"An unexpected error occurred with Ollama: {e}")
106 | raise e
107 |
108 | async def parse_tool_call(self, llm_response: Any) -> Optional[Dict[str, Any]]:
109 | """Parse the Ollama response to extract tool calls.
110 |
111 | Args:
112 | llm_response: Response dictionary from Ollama
113 |
114 | Returns:
115 | Dictionary with tool name and parameters, or None if no tool call
116 | """
117 | message = llm_response.get('message', {})
118 | tool_calls = message.get('tool_calls')
119 |
120 | if not tool_calls:
121 | return None
122 |
123 | # Ollama might return multiple tool calls, handle the first one for now
124 | tool_call = tool_calls[0]
125 | function_info = tool_call.get('function', {})
126 |
127 | # Ensure arguments are loaded as JSON if they are a string
128 | arguments = function_info.get('arguments', {})
129 | if isinstance(arguments, str):
130 | try:
131 | arguments = json.loads(arguments)
132 | except json.JSONDecodeError:
133 | print(f"Warning: Could not parse tool arguments as JSON: {arguments}")
134 | arguments = {} # Fallback to empty dict
135 |
136 | return {
137 | "name": function_info.get('name'),
138 | "parameters": arguments
139 | }
140 |
141 | async def check_connection(self):
142 | """Check if the Ollama server is reachable and the model exists."""
143 |         try:
144 |             # A single list() call verifies that the server responds and also
145 |             # returns the locally available models, so we only make one request.
146 |             models_info = await self.llm_client.list()
147 | 
148 |             # Check if the specific model exists locally
149 |
150 | # Debug the response structure
151 | print(f"Ollama models response in check_connection: {models_info}")
152 |
153 | # Handle different response structures
154 | model_names = []
155 | if isinstance(models_info, dict) and 'models' in models_info:
156 | # New API format
157 | model_names = [m.get('name', m.get('model', '')) for m in models_info.get('models', [])]
158 | elif isinstance(models_info, list):
159 | # Older API format or direct list
160 | model_names = [m.get('name', m.get('model', '')) for m in models_info]
161 |
162 | # Filter out empty names
163 | model_names = [name for name in model_names if name]
164 |
165 | if not model_names:
166 | print("Warning: No models found in Ollama response")
167 | return True # Still return True as the server is reachable
168 |
169 | if self.model not in model_names:
170 | print(f"Warning: Model '{self.model}' not found in local Ollama models: {model_names}")
171 | # Depending on strictness, could raise an error here
172 | return True
173 | except Exception as e:
174 | print(f"Error connecting to Ollama host '{self.host or 'default'}': {e}")
175 | return False
176 |
--------------------------------------------------------------------------------
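A minimal sketch for running the Ollama bridge against a local server (the MCP endpoint, host, and model name are placeholders; check_connection is the helper defined above):

    import asyncio

    from mcp_playground import MCPClient
    from mcp_playground.llm_bridge import OllamaBridge

    async def main():
        client = MCPClient("http://localhost:8001/sse")  # placeholder MCP endpoint
        bridge = OllamaBridge(client, model="llama3", host="http://localhost:11434")

        if not await bridge.check_connection():
            print("Ollama is not reachable; is the server running and the model pulled?")
            return

        result = await bridge.process_query("Summarize the available tools.")
        print(result["final_llm_response"])

    asyncio.run(main())
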
/mcp_playground/llm_bridge/openai_bridge.py:
--------------------------------------------------------------------------------
1 | """
2 | OpenAI-specific implementation of the LLM Bridge.
3 | """
4 | from typing import Dict, List, Any, Optional
5 | import json
6 | import openai
7 | from ..client import ToolDef
8 | from ..format_converters import to_openai_format
9 | from .base import LLMBridge
10 | from .models import DEFAULT_OPENAI_MODEL # Import default model
11 |
12 |
13 | class OpenAIBridge(LLMBridge):
14 | """OpenAI-specific implementation of the LLM Bridge."""
15 |
16 | def __init__(self, mcp_client, api_key, model=DEFAULT_OPENAI_MODEL): # Use imported default
17 | """Initialize OpenAI bridge with API key and model.
18 |
19 | Args:
20 | mcp_client: An initialized MCPClient instance
21 | api_key: OpenAI API key
22 | model: OpenAI model to use (default: from models.py)
23 | """
24 | super().__init__(mcp_client)
25 | self.llm_client = openai.OpenAI(api_key=api_key)
26 | self.model = model
27 |
28 | async def format_tools(self, tools: List[ToolDef]) -> List[Dict[str, Any]]:
29 | """Format tools for OpenAI.
30 |
31 | Args:
32 | tools: List of ToolDef objects
33 |
34 | Returns:
35 | List of tools in OpenAI format
36 | """
37 | return to_openai_format(tools)
38 |
39 | async def submit_query(self, query: str, formatted_tools: List[Dict[str, Any]], conversation_history: Optional[List[Dict[str, str]]] = None) -> Dict[str, Any]:
40 | """Submit a query to OpenAI with the formatted tools.
41 |
42 | Args:
43 | query: User query string
44 | formatted_tools: Tools in OpenAI format
45 | conversation_history: Previous conversation messages (optional)
46 |
47 | Returns:
48 | OpenAI API response
49 | """
50 | # Build messages with conversation history
51 | messages = conversation_history.copy() if conversation_history else []
52 | messages.append({"role": "user", "content": query})
53 |
54 | response = self.llm_client.chat.completions.create(
55 | model=self.model,
56 | messages=messages,
57 | tools=formatted_tools,
58 | tool_choice="auto"
59 | )
60 |
61 | return response
62 |
63 | async def submit_query_without_tools(self, messages: List[Dict[str, Any]]) -> Any:
64 | """Submit a query to OpenAI without tools for final processing.
65 |
66 | Args:
67 | messages: Complete conversation including tool results
68 |
69 | Returns:
70 | OpenAI API response
71 | """
72 | # Make the API call without tools
73 | response = self.llm_client.chat.completions.create(
74 | model=self.model,
75 | messages=messages
76 | # Note: No tools parameter - this is for final processing
77 | )
78 |
79 | return response
80 |
81 | async def parse_tool_call(self, llm_response: Any) -> Optional[Dict[str, Any]]:
82 | """Parse the OpenAI response to extract tool calls.
83 |
84 | Args:
85 | llm_response: Response from OpenAI
86 |
87 | Returns:
88 | Dictionary with tool name and parameters, or None if no tool call
89 | """
90 | message = llm_response.choices[0].message
91 |
92 | if not hasattr(message, 'tool_calls') or not message.tool_calls:
93 | return None
94 |
95 | tool_call = message.tool_calls[0]
96 |
97 | return {
98 | "name": tool_call.function.name,
99 | "parameters": json.loads(tool_call.function.arguments)
100 | }
101 |
--------------------------------------------------------------------------------
/mcp_playground/llm_bridge/openrouter_bridge.py:
--------------------------------------------------------------------------------
1 | """
2 | OpenRouter-based implementation of the LLM Bridge for unified provider access.
3 | """
4 | from typing import Dict, List, Any, Optional
5 | import openai
6 | from ..client import ToolDef
7 | from ..format_converters import to_openai_format
8 | from .base import LLMBridge
9 | from .openrouter_client import OpenRouterClient
10 |
11 |
12 | class OpenRouterBridge(LLMBridge):
13 | """OpenRouter-based implementation of the LLM Bridge."""
14 |
15 | def __init__(self, mcp_client, api_key: str, model: str, site_url: Optional[str] = None, site_name: Optional[str] = None):
16 | """Initialize OpenRouter bridge.
17 |
18 | Args:
19 | mcp_client: An initialized MCPClient instance
20 | api_key: OpenRouter API key
21 | model: Model ID (e.g., 'openai/gpt-4o', 'anthropic/claude-3-opus')
22 | site_url: Optional site URL for rankings
23 | site_name: Optional site name for rankings
24 | """
25 | super().__init__(mcp_client)
26 | self.api_key = api_key
27 | self.model = model
28 | self.site_url = site_url
29 | self.site_name = site_name
30 |
31 | # Initialize OpenAI client with OpenRouter base URL
32 | self.llm_client = openai.OpenAI(
33 | base_url="https://openrouter.ai/api/v1",
34 | api_key=api_key
35 | )
36 |
37 | # Initialize OpenRouter client for model fetching
38 | self.openrouter_client = OpenRouterClient(api_key, site_url, site_name)
39 |
40 | # Store provider info for metadata
41 | self.provider_info = {
42 | "provider": "openrouter",
43 | "model": model,
44 | "base_url": "https://openrouter.ai/api/v1"
45 | }
46 |
47 | async def format_tools(self, tools: List[ToolDef]) -> List[Dict[str, Any]]:
48 | """Format tools for OpenRouter (uses OpenAI format).
49 |
50 | Args:
51 | tools: List of ToolDef objects
52 |
53 | Returns:
54 | List of tools in OpenAI format (compatible with OpenRouter)
55 | """
56 | return to_openai_format(tools)
57 |
58 | async def submit_query(self, query: str, formatted_tools: List[Dict[str, Any]], conversation_history: Optional[List[Dict[str, str]]] = None) -> Dict[str, Any]:
59 | """Submit a query to OpenRouter with the formatted tools.
60 |
61 | Args:
62 | query: User query string
63 | formatted_tools: Tools in OpenAI format
64 | conversation_history: Previous conversation messages (optional)
65 |
66 | Returns:
67 | OpenRouter API response (OpenAI-compatible format)
68 | """
69 | # Build messages with conversation history
70 | messages = conversation_history.copy() if conversation_history else []
71 | messages.append({"role": "user", "content": query})
72 |
73 | # Prepare extra headers for OpenRouter
74 | extra_headers = self.openrouter_client.get_extra_headers()
75 |
76 | # Make the API call
77 | if formatted_tools:
78 | response = self.llm_client.chat.completions.create(
79 | extra_headers=extra_headers,
80 | model=self.model,
81 | messages=messages,
82 | tools=formatted_tools,
83 | tool_choice="auto"
84 | )
85 | else:
86 | response = self.llm_client.chat.completions.create(
87 | extra_headers=extra_headers,
88 | model=self.model,
89 | messages=messages
90 | )
91 |
92 | return response
93 |
94 | async def submit_query_without_tools(self, messages: List[Dict[str, Any]]) -> Any:
95 | """Submit a query to OpenRouter without tools for final processing.
96 |
97 | Args:
98 | messages: Complete conversation including tool results
99 |
100 | Returns:
101 | OpenRouter API response (OpenAI-compatible format)
102 | """
103 | # Prepare extra headers for OpenRouter
104 | extra_headers = self.openrouter_client.get_extra_headers()
105 |
106 | # Make the API call without tools
107 | response = self.llm_client.chat.completions.create(
108 | extra_headers=extra_headers,
109 | model=self.model,
110 | messages=messages
111 | # Note: No tools parameter - this is for final processing
112 | )
113 |
114 | return response
115 |
116 | async def parse_tool_call(self, llm_response: Any) -> Optional[Dict[str, Any]]:
117 | """Parse the OpenRouter response to extract tool calls.
118 |
119 | Args:
120 | llm_response: Response from OpenRouter (OpenAI-compatible format)
121 |
122 | Returns:
123 | Dictionary with tool name and parameters, or None if no tool call
124 | """
125 | if hasattr(llm_response, 'choices') and llm_response.choices:
126 | choice = llm_response.choices[0]
127 | if hasattr(choice, 'message') and hasattr(choice.message, 'tool_calls') and choice.message.tool_calls:
128 | tool_call = choice.message.tool_calls[0]
129 | if hasattr(tool_call, 'function'):
130 | try:
131 | import json
132 | return {
133 | "name": tool_call.function.name,
134 | "parameters": json.loads(tool_call.function.arguments)
135 | }
136 | except (json.JSONDecodeError, AttributeError):
137 | pass
138 |
139 | return None
140 |
141 | async def get_available_models(self, provider: str, limit: int = 5) -> List[Dict[str, Any]]:
142 | """Get top available models for a specific provider.
143 |
144 | Args:
145 | provider: Provider name (e.g., 'openai', 'anthropic', 'google')
146 | limit: Maximum number of models to return
147 |
148 | Returns:
149 | List of top models for the provider
150 | """
151 | return await self.openrouter_client.fetch_top_models_by_provider(provider, limit)
--------------------------------------------------------------------------------
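A short sketch of constructing the OpenRouter bridge, listing a provider's top models, and running a query (the model ID, endpoint, and site name are placeholders):

    import asyncio
    import os

    from mcp_playground import MCPClient
    from mcp_playground.llm_bridge import OpenRouterBridge

    async def main():
        client = MCPClient("http://localhost:8001/sse")  # placeholder endpoint
        bridge = OpenRouterBridge(
            client,
            api_key=os.environ["OPENROUTER_API_KEY"],
            model="openai/gpt-4o",          # any OpenRouter model ID
            site_name="mcp-playground",
        )

        top_anthropic = await bridge.get_available_models("anthropic", limit=3)
        print([m["id"] for m in top_anthropic])

        result = await bridge.process_query("Which tools can you call?")
        print(result["tool_call"])

    asyncio.run(main())
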
/mcp_playground/llm_bridge/openrouter_client.py:
--------------------------------------------------------------------------------
1 | """
2 | OpenRouter client for unified access to multiple LLM providers.
3 | """
4 | import requests
5 | import asyncio
6 | from typing import List, Dict, Any, Optional
7 |
8 |
9 | class OpenRouterClient:
10 | """Client for interacting with OpenRouter API."""
11 |
12 | def __init__(self, api_key: str, site_url: Optional[str] = None, site_name: Optional[str] = None):
13 | """Initialize OpenRouter client.
14 |
15 | Args:
16 | api_key: OpenRouter API key
17 | site_url: Optional site URL for rankings
18 | site_name: Optional site name for rankings
19 | """
20 | self.api_key = api_key
21 | self.site_url = site_url
22 | self.site_name = site_name
23 | self.base_url = "https://openrouter.ai/api/v1"
24 |
25 | async def fetch_models(self) -> List[Dict[str, Any]]:
26 | """Fetch all available models from OpenRouter.
27 |
28 | Returns:
29 | List of model dictionaries with metadata
30 | """
31 | try:
32 | # Use requests for now, can be made async later if needed
33 | response = requests.get(
34 | f"{self.base_url}/models",
35 | headers={"Authorization": f"Bearer {self.api_key}"},
36 | timeout=30
37 | )
38 | response.raise_for_status()
39 |
40 | data = response.json()
41 | return data.get("data", [])
42 |
43 | except Exception as e:
44 | print(f"Error fetching OpenRouter models: {e}")
45 | return []
46 |
47 | async def fetch_top_models_by_provider(self, provider: str, limit: int = 5) -> List[Dict[str, Any]]:
48 | """Fetch top N most popular models for a specific provider.
49 |
50 | Args:
51 | provider: Provider name (e.g., 'openai', 'anthropic', 'google')
52 | limit: Maximum number of models to return
53 |
54 | Returns:
55 | List of top models for the provider
56 | """
57 | all_models = await self.fetch_models()
58 |
59 | # Filter by provider prefix (models are already sorted by popularity)
60 | provider_models = [
61 | model for model in all_models
62 | if model["id"].startswith(f"{provider}/")
63 | ]
64 |
65 | # Return top N models
66 | return provider_models[:limit]
67 |
68 | def get_extra_headers(self) -> Dict[str, str]:
69 | """Get extra headers for OpenRouter requests.
70 |
71 | Returns:
72 | Dictionary of extra headers
73 | """
74 | headers = {}
75 | if self.site_url:
76 | headers["HTTP-Referer"] = self.site_url
77 | if self.site_name:
78 | headers["X-Title"] = self.site_name
79 | return headers
80 |
81 |
82 | def format_model_display(model: Dict[str, Any], include_tool_indicator: bool = False) -> Dict[str, Any]:
83 | """Format model information for display in UI.
84 |
85 | Args:
86 | model: Model dictionary from OpenRouter API
87 | include_tool_indicator: Whether to include tool capability indicator
88 |
89 | Returns:
90 | Formatted model information
91 | """
92 | name = model.get("name", model.get("id", "Unknown"))
93 | model_id = model["id"]
94 | pricing = model.get("pricing", {})
95 | context_length = model.get("context_length", "Unknown")
96 | description = model.get("description", "")
97 |
98 | # Check tool capability if indicator requested
99 | tool_indicator = ""
100 | if include_tool_indicator:
101 | # Import here to avoid circular imports
102 | try:
103 | # Check for tool support in model metadata
104 | supports_tools = model.get("supports_tools", False)
105 | supports_function_calling = model.get("supports_function_calling", False)
106 |
107 | # Fallback to pattern matching
108 | if not (supports_tools or supports_function_calling):
109 | model_lower = model_id.lower()
110 | if any(pattern in model_lower for pattern in [
111 | "gpt-4o", "gpt-4-turbo", "gpt-4", "gpt-3.5-turbo",
112 | "claude-3", "claude-3.5", "gemini-1.5", "gemini-pro",
113 | "llama-3.1", "llama-3.2", "mistral-large", "mixtral"
114 | ]):
115 | supports_tools = True
116 |
117 | if supports_tools or supports_function_calling:
118 | tool_indicator = "🔧 "
119 | except Exception:
120 | pass
121 |
122 | # Format pricing (convert to per 1M tokens)
123 | try:
124 | prompt_price = float(pricing.get("prompt", 0)) * 1000000
125 | completion_price = float(pricing.get("completion", 0)) * 1000000
126 | pricing_str = f"${prompt_price:.2f}/${completion_price:.2f} per 1M"
127 | except (ValueError, TypeError):
128 | pricing_str = "Pricing unavailable"
129 |
130 | # Format context length
131 | if isinstance(context_length, (int, float)) and context_length > 0:
132 | context_str = f"{int(context_length):,} ctx"
133 | else:
134 | context_str = "Unknown ctx"
135 |
136 | # Create display string with optional tool indicator
137 | display_name = f"{tool_indicator}{name} | {pricing_str} | {context_str}"
138 |
139 | return {
140 | "display": display_name,
141 | "id": model_id,
142 | "name": name,
143 | "description": description,
144 | "pricing": pricing,
145 | "context_length": context_length,
146 | "supports_tools": supports_tools if include_tool_indicator else None
147 | }
--------------------------------------------------------------------------------
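For illustration, format_model_display applied to a hand-written record that mimics the shape returned by the OpenRouter /models endpoint (all values are made up):

    from mcp_playground.llm_bridge import format_model_display

    sample_model = {
        "id": "openai/gpt-4o",
        "name": "GPT-4o",
        "pricing": {"prompt": "0.0000025", "completion": "0.00001"},
        "context_length": 128000,
        "description": "Illustrative entry only",
    }

    info = format_model_display(sample_model, include_tool_indicator=True)
    print(info["display"])
    # -> "🔧 GPT-4o | $2.50/$10.00 per 1M | 128,000 ctx"
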
/pytest.ini:
--------------------------------------------------------------------------------
1 | [pytest]
2 | testpaths = tests
3 | python_files = test_*.py
4 | python_classes = Test*
5 | python_functions = test_*
6 | addopts = -v --tb=native
7 |
--------------------------------------------------------------------------------
/requirements.txt:
--------------------------------------------------------------------------------
1 | # MCP SSE Client Python Requirements
2 |
3 | # Core dependencies
4 | mcp>=0.1.0 # Model Context Protocol library
5 | pydantic>=2.0.0 # Data validation library
6 |
7 | # LLM integration dependencies
8 | openai>=1.70.0 # OpenAI API client
9 | anthropic>=0.15.0 # Anthropic API client
10 | ollama>=0.1.7 # Ollama client for local models
11 | requests>=2.25.0       # HTTP client used by the OpenRouter model listing
12 | # Testing dependencies
13 | pytest>=7.0.0 # Testing framework
14 | pytest-asyncio>=0.18.0 # Async support for pytest
15 |
16 | # Development dependencies
17 | pylint>=2.15.0 # Linting
18 | black>=22.10.0 # Code formatting
19 | wheel>=0.37.0 # For building wheels
20 |
--------------------------------------------------------------------------------
/setup.py:
--------------------------------------------------------------------------------
1 | """
2 | Setup script for the MCP SSE Client Python package.
3 | """
4 |
5 | from setuptools import setup, find_packages
6 |
7 | with open("README.md", "r", encoding="utf-8") as fh:
8 | long_description = fh.read()
9 |
10 | with open("requirements.txt", "r", encoding="utf-8") as fh:
11 |     requirements = [line.split("#")[0].strip() for line in fh if line.split("#")[0].strip()]
12 |
13 | setup(
14 | name="mcp_playground",
15 | version="0.2.0",
16 | author="zanetworker",
17 | author_email="", # Add author email if available
18 | description="A comprehensive Python toolkit for interacting with remote Model Context Protocol (MCP) endpoints",
19 | long_description=long_description,
20 | long_description_content_type="text/markdown",
21 | url="https://github.com/zanetworker/mcp-playground",
22 | packages=find_packages(),
23 | classifiers=[
24 | "Programming Language :: Python :: 3",
25 | "Programming Language :: Python :: 3.7",
26 | "Programming Language :: Python :: 3.8",
27 | "Programming Language :: Python :: 3.9",
28 | "Programming Language :: Python :: 3.10",
29 | "License :: OSI Approved :: MIT License", # Adjust if using a different license
30 | "Operating System :: OS Independent",
31 | ],
32 | python_requires=">=3.7",
33 | install_requires=requirements,
34 | )
35 |
--------------------------------------------------------------------------------
/test_mcp_connection.py:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env python3
2 | """
3 | Quick test script to verify MCP connection and tool calling functionality
4 | """
5 | import asyncio
6 | import sys
7 | import os
8 |
9 | # Add the project root to the path
10 | sys.path.insert(0, os.path.dirname(os.path.abspath(__file__)))
11 |
12 | from mcp_playground.client import MCPClient
13 | from mcp_playground.llm_bridge.openrouter_bridge import OpenRouterBridge
14 |
15 | async def test_mcp_connection():
16 | """Test MCP connection and tool calling"""
17 | print("🔍 Testing MCP Connection...")
18 |
19 | # Test MCP client connection
20 | client = MCPClient("http://localhost:8001/sse")
21 |
22 | try:
23 | print("📡 Connecting to MCP server...")
24 | await client.connect()
25 | print("✅ MCP server connected successfully")
26 |
27 | # Get available tools
28 | tools = await client.list_tools()
29 | print(f"🔧 Found {len(tools)} tools: {[tool.name for tool in tools]}")
30 |
31 | if tools:
32 | # Test tool calling with OpenRouter bridge
33 | print("\n🤖 Testing LLM Bridge with tools...")
34 |
35 | # You'll need to set your OpenRouter API key
36 | api_key = os.getenv("OPENROUTER_API_KEY")
37 | if not api_key:
38 | print("❌ OPENROUTER_API_KEY environment variable not set")
39 | return
40 |
41 |             bridge = OpenRouterBridge(
42 |                 client,  # the bridge needs the MCP client first; it fetches tools itself
43 |                 api_key=api_key,
44 |                 model="openai/gpt-3.5-turbo"
45 |             )
46 |
47 | # Test query that should trigger tool usage
48 | test_query = "List JIRA issues in the RHOAISTRAT project with component 'llama stack'"
49 | print(f"📝 Testing query: {test_query}")
50 |
51 | result = await bridge.process_query(test_query, [])
52 |
53 | if isinstance(result, dict):
54 | print("📊 Result structure:")
55 |                 print(f"   - LLM Response: {bool(result.get('final_llm_response'))}")
56 | print(f" - Tool Call: {bool(result.get('tool_call'))}")
57 | print(f" - Tool Result: {bool(result.get('tool_result'))}")
58 |
59 | if result.get('tool_call'):
60 | tool_call = result['tool_call']
61 | print(f" - Tool Used: {tool_call.get('name', 'Unknown')}")
62 |
63 | if result.get('tool_result'):
64 | tool_result = result['tool_result']
65 | print(f" - Tool Success: {tool_result.error_code == 0}")
66 |
67 | print("✅ Tool calling test completed")
68 | else:
69 | print("⚠️ No tools available for testing")
70 |
71 | except Exception as e:
72 | print(f"❌ Error: {e}")
73 | finally:
74 | await client.disconnect()
75 | print("🔌 Disconnected from MCP server")
76 |
77 | if __name__ == "__main__":
78 | asyncio.run(test_mcp_connection())
--------------------------------------------------------------------------------
/tests/__init__.py:
--------------------------------------------------------------------------------
1 | """
2 | Tests for the MCP SSE Client.
3 | """
4 |
--------------------------------------------------------------------------------
/tests/test_client.py:
--------------------------------------------------------------------------------
1 | """
2 | Tests for the MCP SSE Client.
3 | """
4 |
5 | import unittest
6 | from unittest.mock import AsyncMock, MagicMock, patch
7 | import asyncio
8 | from mcp_playground import MCPClient, ToolDef, ToolParameter, ToolInvocationResult
9 |
10 |
11 | class TestMCPClient(unittest.TestCase):
12 | """Test cases for the MCPClient class."""
13 |
14 | def setUp(self):
15 | """Set up test fixtures."""
16 | self.endpoint = "http://localhost:8000/sse"
17 | self.client = MCPClient(self.endpoint)
18 |
19 | def test_init_valid_endpoint(self):
20 | """Test initialization with a valid endpoint."""
21 | client = MCPClient(self.endpoint)
22 | self.assertEqual(client.endpoint, self.endpoint)
23 |
24 | def test_init_invalid_endpoint(self):
25 | """Test initialization with an invalid endpoint."""
26 | with self.assertRaises(ValueError):
27 | MCPClient("ftp://example.com/sse")
28 |
29 | @patch("mcp_playground.client.sse_client")
30 | @patch("mcp_playground.client.ClientSession")
31 | def test_list_tools(self, mock_session_class, mock_sse_client):
32 | """Test listing tools from the MCP endpoint."""
33 | # Set up mocks
34 | mock_streams = MagicMock()
35 | mock_sse_client.return_value.__aenter__.return_value = mock_streams
36 |
37 | mock_session = AsyncMock()
38 | mock_session_class.return_value.__aenter__.return_value = mock_session
39 |
40 | # Mock the list_tools response
41 | mock_tool = MagicMock()
42 | mock_tool.name = "test_tool"
43 | mock_tool.description = "A test tool"
44 | mock_tool.inputSchema = {
45 | "properties": {
46 | "param1": {
47 | "type": "string",
48 | "description": "A test parameter"
49 | }
50 | }
51 | }
52 |
53 | mock_tools_result = MagicMock()
54 | mock_tools_result.tools = [mock_tool]
55 | mock_session.list_tools.return_value = mock_tools_result
56 |
57 | # Run the test
58 | loop = asyncio.get_event_loop()
59 | tools = loop.run_until_complete(self.client.list_tools())
60 |
61 | # Assertions
62 | self.assertEqual(len(tools), 1)
63 | self.assertEqual(tools[0].name, "test_tool")
64 | self.assertEqual(tools[0].description, "A test tool")
65 | self.assertEqual(len(tools[0].parameters), 1)
66 | self.assertEqual(tools[0].parameters[0].name, "param1")
67 | self.assertEqual(tools[0].parameters[0].parameter_type, "string")
68 | self.assertEqual(tools[0].parameters[0].description, "A test parameter")
69 |
70 | @patch("mcp_playground.client.sse_client")
71 | @patch("mcp_playground.client.ClientSession")
72 | def test_invoke_tool(self, mock_session_class, mock_sse_client):
73 | """Test invoking a tool with parameters."""
74 | # Set up mocks
75 | mock_streams = MagicMock()
76 | mock_sse_client.return_value.__aenter__.return_value = mock_streams
77 |
78 | mock_session = AsyncMock()
79 | mock_session_class.return_value.__aenter__.return_value = mock_session
80 |
81 | # Mock the call_tool response
82 | mock_content_item = MagicMock()
83 | mock_content_item.model_dump_json.return_value = '{"result": "success"}'
84 |
85 | mock_result = MagicMock()
86 | mock_result.content = [mock_content_item]
87 | mock_result.isError = False
88 |
89 | mock_session.call_tool.return_value = mock_result
90 |
91 | # Run the test
92 | loop = asyncio.get_event_loop()
93 | result = loop.run_until_complete(
94 | self.client.invoke_tool("test_tool", {"param1": "value1"})
95 | )
96 |
97 | # Assertions
98 | self.assertEqual(result.content, '{"result": "success"}')
99 | self.assertEqual(result.error_code, 0)
100 |
101 | # Verify the call_tool was called with the correct arguments
102 | mock_session.call_tool.assert_called_once_with(
103 | "test_tool", {"param1": "value1"}
104 | )
105 |
106 |
107 | if __name__ == "__main__":
108 | unittest.main()
109 |
--------------------------------------------------------------------------------