├── .gitignore
├── LICENSE
├── README.md
├── artwork
└── logo.png
├── config
├── gemini_web_search.yaml
└── url_scraper.yaml
├── example.env
├── mcp_server
├── __init__.py
├── __main__.py
├── core
│ ├── __init__.py
│ ├── config.py
│ ├── server.py
│ └── tool_manager.py
├── handlers
│ ├── __init__.py
│ └── watchdog.py
└── utils
│ ├── __init__.py
│ ├── config_manager.py
│ ├── logging_config.py
│ ├── rate_limiter.py
│ └── tool_decorator.py
├── requirements.txt
├── tests
├── test_mcp_client.py
└── test_smolagents.py
└── tools
├── __init__.py
├── gemini_web_search.py
└── url_scraper.py
/.gitignore:
--------------------------------------------------------------------------------
1 |
2 | mcp_server/.DS_Store
3 | .DS_Store
4 |
--------------------------------------------------------------------------------
/LICENSE:
--------------------------------------------------------------------------------
1 | MIT License
2 |
3 | Copyright (c) 2025 BatteryShark
4 |
5 | Permission is hereby granted, free of charge, to any person obtaining a copy
6 | of this software and associated documentation files (the "Software"), to deal
7 | in the Software without restriction, including without limitation the rights
8 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
9 | copies of the Software, and to permit persons to whom the Software is
10 | furnished to do so, subject to the following conditions:
11 |
12 | The above copyright notice and this permission notice shall be included in all
13 | copies or substantial portions of the Software.
14 |
15 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
18 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
20 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
21 | SOFTWARE.
22 |
--------------------------------------------------------------------------------
/README.md:
--------------------------------------------------------------------------------
1 | # Agent Construct
2 |
3 |
4 |
5 |
6 |
7 | > "We can load anything, from clothing to equipment, weapons, training simulations, anything we need." - The Matrix (1999)
8 |
9 | Agent Construct is a Model Context Protocol (MCP) server implementation that standardizes how AI applications access tools and context. Just as the Construct in The Matrix provided operators with instant access to any equipment they needed, Agent Construct provides a standardized interface for AI models to access tools and data through the MCP specification.
10 |
11 | Built on the [Model Context Protocol](https://modelcontextprotocol.io/introduction) specification, it acts as a central hub that manages tool discovery, execution, and context management for AI applications. It provides a robust and scalable way to expose capabilities to AI models through a standardized protocol. It also provides a simplified configuration and tool structure to make adding new capabilities a breeze! An example tool for searching the web with Gemini is included.
12 |
13 | ## Core Features
14 |
15 | ### MCP Protocol Implementation
16 | - **Full MCP Compliance**: Complete implementation of the Model Context Protocol specification
17 | - **Tool Discovery**: Dynamic tool registration and discovery mechanism
18 | - **Standardized Communication**: Implements MCP's communication patterns for tool interaction
19 |
20 | ### Server Architecture
21 | - **FastAPI Backend**: High-performance asynchronous server implementation
22 | - **Event Streaming**: Real-time updates via Server-Sent Events (SSE)
23 | - **Modular Design**: Clean separation between core protocol handling and tool implementations
24 | - **Handler System**: Extensible request handler architecture for different MCP operations
25 | - **Tool-Based Rate Limiting**: Let the server handle your configurable per-tool rate limiting.
26 |
27 | ### Development Features
28 | - **Tool Decorator System**: Simple way to expose new tools via MCP
29 | - **Logging & Monitoring**: Comprehensive logging system for debugging and monitoring
30 | - **Configuration Management**: Environment-based configuration with secure defaults
31 | - **Testing Framework**: Extensive test suite for protocol compliance
32 | - **Agent Framework Friendly**: Included implementation examples for custom clients or frameworks like smolagents.
33 |
34 | ## Getting Started
35 |
36 | ### Prerequisites
37 |
38 | - Python 3.9 or higher (the code uses built-in generic annotations such as `list[...]` in function signatures)
39 | - pip package manager
40 |
41 | ### Installation
42 |
43 | 1. Clone the repository:
44 | ```bash
45 | git clone https://github.com/yourusername/agent-construct.git
46 | cd agent-construct
47 | ```
48 |
49 | 2. Install dependencies:
50 | ```bash
51 | pip install -r requirements.txt
52 | ```
53 |
54 | 3. Set up environment variables:
55 | Create a `.env` file in the root directory with the following variables:
56 | ```
57 | # Server Configuration
58 | MCP_SERVER_HOST=127.0.0.1
59 | MCP_SERVER_PORT=32823
60 | MCP_SERVER_NAME="MCP Tool Server"
61 | MCP_RELOAD_DELAY=1.0
62 |
63 | # Server directories
64 | MCP_TOOLS_DIR=tools
65 | MCP_CONFIG_DIR=config
66 |
67 | # Debugging (enables hot reloading)
68 | DEBUG_MODE=true
69 | ```
70 | (See `example.env` for a ready-made template.)
68 |
69 | 4. Run the server:
70 | ```bash
71 | python -m mcp_server
72 | ```
73 |
74 | ## Core Architecture
75 |
76 | ```
77 | mcp_server/
78 | ├── core/                  # Core server implementation
79 | │   ├── config.py          # Environment-based configuration
80 | │   ├── server.py          # Main server + SSE transport
81 | │   └── tool_manager.py    # Tool loading, dispatch, rate limiting
82 | ├── handlers/              # Event handlers
83 | │   └── watchdog.py        # Hot-reload file watchers
84 | ├── utils/                 # Utility functions
85 | │   ├── config_manager.py  # Per-tool YAML configuration
86 | │   ├── logging_config.py  # Logging configuration
87 | │   ├── rate_limiter.py    # Per-tool rate limiting
88 | │   └── tool_decorator.py  # Tool registration decorator
89 | └── __main__.py            # Server entry point
90 | ```
92 |
93 | ## MCP Protocol Features
94 |
95 | ### Tool Discovery
96 | - Dynamic tool registration system
97 | - Tool capability advertisement
98 | - Version management
99 | - Tool metadata and documentation
100 |
101 | ### Context Management
102 | - Efficient context storage and retrieval
103 | - Context scoping and isolation
104 | - Real-time context updates
105 | - Context persistence options
106 |
107 | ### Communication Patterns
108 | - Synchronous request/response
109 | - Server-sent events for updates
110 | - Streaming responses
111 | - Error handling and recovery
112 |
113 | ## Future Enhancements
114 |
115 | ### Protocol Extensions
116 | - [ ] Advanced context management features
117 | - [ ] Custom protocol extensions
118 | - [ ] Plugin system for protocol handlers
119 |
120 | ### Security
121 | - [ ] Authentication and authorization
122 | - [ ] Tool access control
123 | - [x] Rate limiting (per-tool, implemented); quota management still planned
124 | - [ ] Audit logging
125 | - [ ] End-to-end encryption
126 |
127 | ### Performance
128 | - [ ] Tool execution optimization
129 | - [ ] Context caching
130 | - [ ] Load balancing
131 | - [ ] Request queuing
132 | - [ ] Resource management
133 |
134 | ### Development
135 | - [ ] Interactive protocol explorer
136 | - [ ] Tool development SDK
137 | - [ ] Protocol compliance testing tools
138 | - [ ] Performance monitoring dashboard
139 |
140 | ## Contributing
141 |
142 | Contributions are welcome! Please feel free to submit a Pull Request. For major changes, please open an issue first to discuss what you would like to change.
143 |
144 | ## License
145 |
146 | This project is licensed under the MIT License - see the LICENSE file for details.
147 |
148 | ## Acknowledgements
149 |
150 | - [Model Context Protocol](https://modelcontextprotocol.io/) for the protocol specification
151 | - FastAPI for the excellent web framework
152 | - The open-source community for various tools and libraries used in this project
--------------------------------------------------------------------------------
/artwork/logo.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/batteryshark/agent_construct/db8609dec16188c764074032c311f977fd28a075/artwork/logo.png
--------------------------------------------------------------------------------
/config/gemini_web_search.yaml:
--------------------------------------------------------------------------------
1 | # Gemini Web Search Tool Configuration
2 |
3 | # Rate limiting settings
4 | rate_limit: 15 # requests per time window
5 | time_window: 60 # time window in seconds
6 |
7 | # Search settings
8 | max_retries: 3
9 | timeout: 5 # seconds
10 | follow_redirects: true
11 |
12 | # Response settings
13 | max_references: 10
14 | include_confidence_scores: true
15 |
16 | # Gemini API settings
17 | gemini_api_key: YOUR_API_KEY
18 | gemini_model: gemini-2.0-flash
--------------------------------------------------------------------------------
/config/url_scraper.yaml:
--------------------------------------------------------------------------------
1 | name: url_scraper
2 | description: Scrapes a webpage and returns its content as markdown
3 | version: 1.0.0
4 | timeout: 10
5 | user_agent: "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/91.0.4472.124 Safari/537.36"
6 | max_retries: 3
7 | # Rate limiting settings
8 | rate_limit: 60 # requests per time window
9 | time_window: 60 # time window in seconds
--------------------------------------------------------------------------------
/example.env:
--------------------------------------------------------------------------------
1 | # MCP Server environment variables
2 |
3 | MCP_SERVER_NAME="MCP Tool Server"
4 | MCP_SERVER_PORT=32823
5 | MCP_SERVER_HOST=127.0.0.1
6 | MCP_RELOAD_DELAY=1.0
7 |
8 | # MCP Server directories
9 | MCP_TOOLS_DIR=tools
10 | MCP_CONFIG_DIR=config
11 |
12 | # MCP Server debugging
13 | DEBUG_MODE=true
14 | PYTHONDONTWRITEBYTECODE=1
15 |
--------------------------------------------------------------------------------
/mcp_server/__init__.py:
--------------------------------------------------------------------------------
1 | """
2 | MCP Tool Server - A server for managing and running MCP tools.
3 | """
4 | import sys
5 | sys.dont_write_bytecode = True
6 |
7 | __version__ = "0.1.0"
--------------------------------------------------------------------------------
/mcp_server/__main__.py:
--------------------------------------------------------------------------------
1 | """
2 | MCP Tool Server entry point.
3 | """
4 | from mcp_server.core.server import MCPToolServer
5 | from mcp_server.utils.logging_config import configure_logging
6 |
def main():
    """Run the MCP Tool Server: set up logging, then launch the server."""
    # Logging must be configured before any server component emits messages.
    configure_logging()
    # Build the server and block until it shuts down.
    MCPToolServer().run()

if __name__ == "__main__":
    main()
--------------------------------------------------------------------------------
/mcp_server/core/__init__.py:
--------------------------------------------------------------------------------
1 | """
2 | Core server components.
3 | """
4 | from .config import config
5 | from .server import MCPToolServer
6 | from .tool_manager import ToolManager
7 |
8 | __all__ = ['config', 'MCPToolServer', 'ToolManager']
--------------------------------------------------------------------------------
/mcp_server/core/config.py:
--------------------------------------------------------------------------------
1 | """
2 | Configuration management for the MCP server.
3 | """
4 | import os
5 | from pathlib import Path
6 | from typing import Optional
7 | from dotenv import load_dotenv
8 |
9 | # Load environment variables
10 | load_dotenv()
11 |
class ServerConfig:
    """Server configuration management.

    Reads all settings from environment variables (with sensible defaults),
    resolves the tools/config directories, creates them if missing, and logs
    the effective configuration.
    """

    def __init__(self):
        # Server settings -- each overridable via environment variable.
        env = os.getenv
        self.debug_mode = env('DEBUG_MODE', 'false').lower() == 'true'
        self.port = int(env("MCP_SERVER_PORT", "32823"))
        self.host = env("MCP_SERVER_HOST", "127.0.0.1")  # Default to localhost
        self.name = env("MCP_SERVER_NAME", "MCP Tool Server")
        self.reload_delay = float(env("MCP_RELOAD_DELAY", "1.0"))

        # Directory paths for tool modules and their configuration files.
        self.tools_dir = self._resolve_path(env("MCP_TOOLS_DIR"), "tools")
        self.config_dir = self._resolve_path(env("MCP_CONFIG_DIR"), "config")

        # Ensure directories exist, then record the effective configuration.
        self._ensure_directories()
        self._log_config()

    def _resolve_path(self, env_path: Optional[str], default_name: str) -> Path:
        """Resolve a directory path: explicit env value, else <cwd>/<default_name>."""
        return Path(env_path).resolve() if env_path else Path.cwd() / default_name

    def _ensure_directories(self):
        """Create the tools and config directories if they do not already exist."""
        for directory in (self.tools_dir, self.config_dir):
            directory.mkdir(parents=True, exist_ok=True)

    def _log_config(self):
        """Log the current configuration at startup."""
        import logging
        logger = logging.getLogger(__name__)
        logger.info("Server Configuration:")
        logger.info(f"  Host: {self.host}")
        logger.info(f"  Port: {self.port}")
        logger.info(f"  Debug Mode: {self.debug_mode}")
        logger.info(f"  Tools Directory: {self.tools_dir}")
        logger.info(f"  Config Directory: {self.config_dir}")

# Global configuration instance
config = ServerConfig()
--------------------------------------------------------------------------------
/mcp_server/core/server.py:
--------------------------------------------------------------------------------
1 | """
2 | MCP Server with dynamic tool loading and hot reloading during development.
3 | """
4 | import logging
5 | import threading
6 | import time
7 | import os
8 | from typing import Optional
9 | import uvicorn
10 | from starlette.applications import Starlette
11 | from starlette.routing import Mount, Route
12 | from mcp.server.lowlevel import Server
13 | from mcp.server.sse import SseServerTransport
14 | from watchdog.observers import Observer
15 | from mcp_server.core.config import config
16 | from mcp_server.core.tool_manager import ToolManager
17 | from mcp_server.handlers.watchdog import ToolDirectoryHandler, ConfigDirectoryHandler
18 |
19 | # Configure logging
20 | logger = logging.getLogger(__name__)
21 |
class MCPToolServer:
    """MCP Tool Server with dynamic tool loading and hot reloading.

    Wires together the low-level MCP ``Server``, an SSE transport, a
    ``ToolManager`` for dispatch, and (in debug mode only) filesystem
    watchdogs that hot-reload tool modules when files change.
    """

    def __init__(self):
        """Initialize the MCP Tool Server."""
        self.app = Server(config.name)
        self.tool_manager = ToolManager(self)
        # Watchdog observers and their keep-alive threads; populated only
        # when debug mode enables hot reloading.
        self.observers: list[Observer] = []
        self.watchdog_threads: list[threading.Thread] = []
        self.is_running = True # Set at init; cleared on shutdown so watchdog threads exit their loops

        # Set up SSE transport (POSTed client messages arrive under /messages/)
        self.sse = SseServerTransport("/messages/")

        # Register tool listing handler (delegates to the tool manager's registry)
        @self.app.list_tools()
        async def list_tools():
            return self.tool_manager.get_tool_list()

        # Set up watchdog in debug mode
        if config.debug_mode:
            self.setup_watchdog()
            logger.info("Running in DEBUG mode with hot reloading enabled")

        # Load tools after watchdog is set up, so load-time file activity
        # happens with the watchers already in place.
        self.tool_manager.load_tools_from_directory()

    async def handle_sse(self, request):
        """Handle an SSE connection: bridge the HTTP request into an MCP session.

        NOTE(review): uses Starlette's private ``request._send`` attribute,
        which the SSE transport requires but which is not public API --
        may break on Starlette upgrades.
        """
        async with self.sse.connect_sse(
            request.scope,
            request.receive,
            request._send
        ) as streams:
            # streams[0]/streams[1] are the read/write ends of the MCP session.
            await self.app.run(
                streams[0],
                streams[1],
                self.app.create_initialization_options()
            )

    def setup_routes(self):
        """Build and return the Starlette ASGI application with SSE routes."""
        return Starlette(
            debug=config.debug_mode,
            routes=[
                Route("/sse", endpoint=self.handle_sse),
                Mount("/messages/", app=self.sse.handle_post_message),
            ],
        )

    def run_watchdog(self, observer: Observer):
        """Keep-alive loop for a watchdog observer, run in a separate thread.

        The Observer itself runs its own internal thread (started in
        setup_watchdog); this loop only polls ``is_running`` and stops/joins
        the observer when the server shuts down.
        """
        thread_id = threading.get_ident()
        logger.info(f"Watchdog thread {thread_id} started")
        try:
            while self.is_running:
                time.sleep(1)  # Keep thread alive but don't busy-wait
        except Exception as e:
            logger.error(f"Watchdog thread {thread_id} error: {str(e)}", exc_info=True)
        finally:
            # Always stop the observer, even if the loop exits via exception.
            logger.info(f"Stopping watchdog thread {thread_id}...")
            observer.stop()
            observer.join()
            logger.info(f"Watchdog thread {thread_id} stopped")

    def setup_watchdog(self):
        """Set up watchdogs for the tools and config directories.

        Creates one Observer per directory, starts each, and spawns a daemon
        keep-alive thread (see run_watchdog) per observer.
        """
        if not config.debug_mode:
            logger.warning("Debug mode is disabled, watchdog will not start")
            return

        logger.info("Setting up file system watchdogs...")
        logger.info(f"Current working directory: {os.getcwd()}")

        # Set up tools directory watchdog (reload/unload tools on change)
        tools_observer = Observer()
        tools_handler = ToolDirectoryHandler(self)
        tools_path = str(config.tools_dir.resolve())
        logger.info(f"Setting up tools watchdog for directory: {tools_path}")
        logger.info(f"Tools directory exists: {os.path.exists(tools_path)}")
        tools_observer.schedule(tools_handler, tools_path, recursive=False)
        tools_observer.start()
        self.observers.append(tools_observer)
        logger.info("Tools directory watchdog started")

        # Set up config directory watchdog (refresh tool config on change)
        config_observer = Observer()
        config_handler = ConfigDirectoryHandler(self)
        config_path = str(config.config_dir.resolve())
        logger.info(f"Setting up config watchdog for directory: {config_path}")
        logger.info(f"Config directory exists: {os.path.exists(config_path)}")
        config_observer.schedule(config_handler, config_path, recursive=False)
        config_observer.start()
        self.observers.append(config_observer)
        logger.info("Config directory watchdog started")

        # Start one daemon keep-alive thread per observer.
        logger.info("Starting watchdog threads...")
        for observer in self.observers:
            thread = threading.Thread(target=self.run_watchdog, args=(observer,))
            thread.daemon = True
            thread.start()
            thread_id = thread.ident
            self.watchdog_threads.append(thread)
            logger.info(f"Started watchdog thread {thread_id}")

        logger.info(f"All watchdogs started and running. Active threads: {len(self.watchdog_threads)}")

    def cleanup_watchdog(self):
        """Clean up watchdog observers and threads.

        Clears ``is_running`` so keep-alive threads exit, joins them with a
        timeout, then stops/joins the observers directly as a fallback
        (observer.stop() is idempotent, so double-stopping is safe).
        """
        logger.info("Beginning watchdog cleanup...")
        self.is_running = False

        # Stop and join watchdog threads
        for i, thread in enumerate(self.watchdog_threads):
            logger.info(f"Stopping watchdog thread {thread.ident} ({i+1}/{len(self.watchdog_threads)})")
            thread.join(timeout=2)
            if thread.is_alive():
                logger.warning(f"Watchdog thread {thread.ident} did not stop cleanly")

        # Stop and join observers
        for i, observer in enumerate(self.observers):
            logger.info(f"Stopping observer {i+1}/{len(self.observers)}")
            observer.stop()
            observer.join()

        logger.info("Watchdogs stopped and cleaned up")

    def reload_tool(self, tool_name: str):
        """Reload a specific tool (delegates to the tool manager)."""
        self.tool_manager.reload_tool(tool_name)

    def reload_tools(self):
        """Reload all tools (delegates to the tool manager)."""
        self.tool_manager.reload_tools()

    def unload_tool(self, tool_name: str):
        """Unload a specific tool (delegates to the tool manager)."""
        self.tool_manager.unload_tool(tool_name)

    def run(self):
        """Start the MCP server; blocks until shutdown, then cleans up watchdogs."""
        try:
            logger.info(f"Starting {config.name}")
            logger.info(f"Loaded tools: {list(self.tool_manager.tools.keys())}")

            # Run the server using uvicorn (blocking call)
            server_config = uvicorn.Config(
                self.setup_routes(),
                host=config.host,
                port=config.port,
                log_level="info"
            )
            server = uvicorn.Server(server_config)
            server.run()

        except KeyboardInterrupt:
            logger.info("Shutting down server...")
        finally:
            # Runs even if watchdogs were never started; cleanup is a no-op then.
            self.is_running = False
            self.cleanup_watchdog()
--------------------------------------------------------------------------------
/mcp_server/core/tool_manager.py:
--------------------------------------------------------------------------------
1 | """
2 | Tool management for the MCP server.
3 | """
4 | import os
5 | import importlib
6 | import logging
7 | import functools
8 | from typing import Dict, List, Any, Callable, Awaitable
9 | from pathlib import Path
10 | from mcp import types as mcp_types
11 | from mcp_server.core.config import config
12 | from mcp_server.utils.rate_limiter import RateLimiter
13 | from mcp_server.utils.config_manager import config_manager
14 | import sys
15 | import json
16 |
17 | # Configure logging
18 | logger = logging.getLogger(__name__)
19 |
class ToolManager:
    """Manage MCP tools and their lifecycle.

    Responsibilities:
      - Load tool modules from the configured tools directory.
      - Track tool metadata, per-tool handlers, and optional rate limiters.
      - Route every MCP ``call_tool`` request through one global dispatcher.
    """

    def __init__(self, server):
        """Initialize tracking state and register the global call_tool handler.

        Args:
            server: Owning server object; its ``app.call_tool()`` decorator is
                used to register the single dispatch handler.
        """
        self.server = server
        self.tools: Dict[str, dict] = {}                 # tool name -> {name, description, schema}
        self.handlers: Dict[str, Callable] = {}          # Store handlers by tool name
        self.rate_limiters: Dict[str, RateLimiter] = {}  # tool name -> RateLimiter (only if configured)

        # Register the global tool handler
        @server.app.call_tool()
        async def handle_tool(name: str, arguments: dict) -> list[mcp_types.TextContent]:
            """Global tool handler that routes to the appropriate tool."""
            logger.debug(f"Global handler received call for tool: {name}")

            if name not in self.tools:
                error_msg = f"Unknown tool: {name}"
                logger.error(error_msg)
                return self._error_response(error_msg)

            handler = self.handlers.get(name)
            if not handler:
                error_msg = f"Handler not found for tool: {name}"
                logger.error(error_msg)
                return self._error_response(error_msg)

            # Apply rate limiting if configured.
            # NOTE(review): wait_for_slot() looks synchronous; if it sleeps,
            # it blocks the event loop while waiting -- confirm RateLimiter's
            # semantics before relying on this under concurrent load.
            rate_limiter = self.rate_limiters.get(name)
            if rate_limiter:
                rate_limiter.wait_for_slot()

            try:
                # Call the handler with the tool name and arguments
                result = await handler(name, arguments)
                if not result or not isinstance(result, list):
                    error_msg = f"Handler for {name} returned invalid response"
                    logger.error(error_msg)
                    return self._error_response(error_msg)
                return result
            except Exception as e:
                error_msg = f"Error executing tool {name}: {str(e)}"
                logger.error(error_msg)
                return self._error_response(error_msg)

    @staticmethod
    def _error_response(error_msg: str) -> list[mcp_types.TextContent]:
        """Build the standard single-item error payload returned to MCP clients."""
        return [mcp_types.TextContent(
            type="text",
            text=json.dumps({
                "status": "error",
                "error": error_msg
            })
        )]

    def wrap_tool_handler(self, handler: Callable, tool_name: str) -> Callable:
        """Wrap a tool handler with rate limiting if configured.

        NOTE(review): nothing in this module calls this wrapper; if it were
        applied on top of the global handler above, rate limiting would run
        twice per call. Verify intended usage before wiring it in.
        """
        @functools.wraps(handler)
        async def wrapped_handler(name: str, arguments: dict) -> Any:
            # Apply rate limiting if configured
            rate_limiter = self.rate_limiters.get(tool_name)
            if rate_limiter:
                rate_limiter.wait_for_slot()
            return await handler(name, arguments)
        return wrapped_handler

    def load_tool(self, filename: str) -> bool:
        """Load a single tool module from *filename* (relative to the tools dir).

        Returns:
            True on success; False on any failure (missing file, no decorated
            function, missing env vars, or import/execution errors).
        """
        try:
            # BUGFIX: "import importlib" alone does not guarantee the
            # importlib.util submodule is bound; import it explicitly.
            import importlib.util

            # BUGFIX: previously logged the literal "(unknown)" instead of
            # the filename (f-string without a placeholder).
            logger.info(f"Attempting to load tool from file: {filename}")

            # Import the tool module
            module_name = Path(filename).stem
            tool_path = config.tools_dir / filename

            if not tool_path.exists():
                logger.error(f"Tool file not found: {tool_path}")
                return False

            logger.debug(f"Loading module from path: {tool_path}")
            spec = importlib.util.spec_from_file_location(
                module_name,
                str(tool_path)
            )
            module = importlib.util.module_from_spec(spec)
            spec.loader.exec_module(module)

            # Find the decorated function (anything exposing register_tool)
            tool_func = None
            for attr_name in dir(module):
                attr = getattr(module, attr_name)
                if hasattr(attr, 'register_tool'):
                    tool_func = attr
                    break

            if not tool_func:
                logger.warning(f"Tool {module_name} has no decorated function with register_tool")
                return False

            # Check required environment variables before committing the tool
            if hasattr(tool_func, 'REQUIRED_ENV_VARS'):
                missing_vars = [var for var in tool_func.REQUIRED_ENV_VARS if not os.getenv(var)]
                if missing_vars:
                    logger.error(f"Missing required environment variables for {module_name}: {missing_vars}")
                    return False

            logger.info(f"Loading configuration for tool: {module_name}")
            # Load tool configuration from config directory
            tool_config = config_manager.get_tool_config(module_name, refresh=True)

            # Store tool metadata using the tool's name as the key
            tool_name = tool_func.TOOL_NAME
            self.tools[tool_name] = {
                "name": tool_name,
                "description": tool_func.TOOL_DESCRIPTION,
                "schema": tool_func.TOOL_SCHEMA
            }
            logger.debug(f"Stored metadata for tool: {tool_name}")

            # Get the handler without registering it with the server
            handler = tool_func.register_tool(self.server.app, tool_config)
            self.handlers[tool_name] = handler
            logger.debug(f"Successfully stored handler for: {tool_name}")

            # Set up rate limiter if configured
            if hasattr(tool_func, '_tool_metadata') and tool_func._tool_metadata.rate_limit:
                self.rate_limiters[tool_name] = RateLimiter(
                    tool_func._tool_metadata.rate_limit,
                    tool_func._tool_metadata.rate_limit_window
                )

            logger.info(f"Successfully loaded tool: {module_name} as {tool_name}")
            return True

        except Exception as e:
            # BUGFIX: previously logged the literal "(unknown)" instead of
            # the filename.
            logger.error(f"Failed to load tool {filename}: {str(e)}")
            import traceback
            logger.error(f"Traceback: {traceback.format_exc()}")
            return False

    def load_tools_from_directory(self) -> List[str]:
        """Load all tool modules from the tools directory.

        Returns:
            List of module stems that loaded successfully.
        """
        loaded_tools = []

        logger.info(f"Scanning directory for tools: {config.tools_dir}")

        # Load each .py file in the directory, skipping dunder files
        for path in config.tools_dir.glob("*.py"):
            if not path.name.startswith('__'):
                logger.debug(f"Found tool file: {path.name}")
                if self.load_tool(path.name):
                    loaded_tools.append(path.stem)
                    logger.info(f"Successfully loaded tool: {path.stem}")
                else:
                    logger.warning(f"Failed to load tool: {path.name}")

        logger.info(f"Finished loading tools. Loaded {len(loaded_tools)} tools: {loaded_tools}")
        return loaded_tools

    def reload_tool(self, tool_name: str):
        """Reload a specific tool: unload it if present, then load it fresh."""
        logger.info(f"Attempting to reload tool: {tool_name}")
        try:
            # First unload the tool if it exists
            if tool_name in self.tools:
                logger.info(f"Unloading existing tool: {tool_name}")
                self.unload_tool(tool_name)

            # Try to load the tool file
            tool_file = f"{tool_name}.py"
            logger.info(f"Loading tool from file: {tool_file}")

            if not os.path.exists(os.path.join(config.tools_dir, tool_file)):
                logger.error(f"Tool file not found: {tool_file}")
                return

            # Force reload the module if it was previously imported
            module_name = f"tools.{tool_name}"
            if module_name in sys.modules:
                logger.info(f"Force reloading module: {module_name}")
                importlib.reload(sys.modules[module_name])

            # Load the tool
            if self.load_tool(tool_file):
                logger.info(f"Successfully reloaded tool: {tool_name}")
            else:
                logger.error(f"Failed to reload tool: {tool_name}")
        except Exception as e:
            logger.error(f"Error reloading tool {tool_name}: {str(e)}", exc_info=True)

    def reload_tools(self):
        """Reload all tools: clear every tracking dict, then rescan the directory."""
        logger.info("Reloading all tools...")
        try:
            # Get list of current tools (for logging only)
            current_tools = list(self.tools.keys())
            logger.info(f"Current tools: {current_tools}")

            # Clear tracking dicts
            self.tools.clear()
            self.handlers.clear()
            self.rate_limiters.clear()
            logger.info("Cleared tool tracking dictionaries")

            # Load all tools
            loaded_tools = self.load_tools_from_directory()
            if loaded_tools:
                logger.info(f"Successfully reloaded tools: {loaded_tools}")
            else:
                logger.warning("No tools were loaded during reload")

        except Exception as e:
            logger.error(f"Error reloading tools: {str(e)}", exc_info=True)

    def unload_tool(self, tool_name: str):
        """Unload a specific tool, removing it from all tracking dictionaries."""
        logger.info(f"Unloading tool: {tool_name}")
        try:
            if tool_name in self.tools:
                # Remove from tracking dictionaries
                self.tools.pop(tool_name, None)
                self.handlers.pop(tool_name, None)
                self.rate_limiters.pop(tool_name, None)
                logger.info(f"Successfully unloaded tool: {tool_name}")
            else:
                logger.warning(f"Tool not found for unloading: {tool_name}")
        except Exception as e:
            logger.error(f"Error unloading tool {tool_name}: {str(e)}", exc_info=True)

    def get_tool_list(self) -> list[mcp_types.Tool]:
        """Return the current tools as MCP Tool objects for list_tools responses."""
        return [
            mcp_types.Tool(
                name=tool_info["name"],
                description=tool_info["description"],
                inputSchema=tool_info["schema"]
            )
            for tool_info in self.tools.values()
        ]
--------------------------------------------------------------------------------
/mcp_server/handlers/__init__.py:
--------------------------------------------------------------------------------
1 | """
2 | Event handlers and middleware.
3 | """
4 | from .watchdog import ToolDirectoryHandler, ConfigDirectoryHandler
5 |
6 | __all__ = ['ToolDirectoryHandler', 'ConfigDirectoryHandler']
--------------------------------------------------------------------------------
/mcp_server/handlers/watchdog.py:
--------------------------------------------------------------------------------
1 | """
2 | File system watchdog for monitoring tool and configuration changes.
3 | """
4 | import time
5 | import logging
6 | from pathlib import Path
7 | from watchdog.events import FileSystemEventHandler, FileModifiedEvent, FileCreatedEvent, FileDeletedEvent
8 | from mcp_server.core.config import config
9 |
10 | # Configure logging
11 | logger = logging.getLogger(__name__)
12 |
class ToolDirectoryHandler(FileSystemEventHandler):
    """Handle file system events in the tools directory.

    Debounces bursts of events (editors often emit create+modify pairs for a
    single save) and asks the server to reload/unload the affected tool.
    """

    def __init__(self, server):
        self.server = server
        self.last_reload = 0  # timestamp of the last handled event; anchors the debounce window
        logger.info("ToolDirectoryHandler initialized")

    def _should_handle_event(self, event) -> bool:
        """Check if we should handle this event (skip directories, debounce bursts)."""
        if event.is_directory:
            logger.debug(f"Ignoring directory event: {event.src_path}")
            return False

        current_time = time.time()
        if current_time - self.last_reload < config.reload_delay:
            logger.debug(f"Ignoring event due to reload delay: {event.src_path}")
            return False

        logger.debug(f"Should handle event: {event.src_path}")
        return True

    def _is_tool_file(self, path: Path) -> bool:
        """True if *path* is a loadable tool module: a non-dunder .py file in the tools dir."""
        return (
            path.parent == config.tools_dir
            and path.suffix == '.py'
            and not path.name.startswith('__')
        )

    def _get_tool_name(self, event) -> str:
        """Extract tool name from event path."""
        path = Path(event.src_path)
        return path.stem

    def on_modified(self, event):
        logger.info(f"[TOOL] Modification detected: {event.src_path}")
        if not self._should_handle_event(event):
            return

        path = Path(event.src_path)

        # Handle tool files
        if self._is_tool_file(path):
            self.last_reload = time.time()
            logger.info(f"[TOOL] Reloading tool due to file change: {path.name}")
            try:
                self.server.reload_tool(path.stem)
                logger.info(f"[TOOL] Successfully reloaded tool: {path.stem}")
            except Exception as e:
                logger.error(f"[TOOL] Failed to reload tool {path.stem}: {str(e)}", exc_info=True)

    def on_created(self, event):
        logger.info(f"[TOOL] Creation detected: {event.src_path}")
        if not self._should_handle_event(event):
            return

        path = Path(event.src_path)

        # Handle new tool files
        if self._is_tool_file(path):
            # BUGFIX: start the debounce window here too (previously only
            # on_modified updated last_reload), so the modify event editors
            # typically emit right after a create no longer causes a second
            # reload of the same tool.
            self.last_reload = time.time()
            logger.info(f"[TOOL] Loading new tool: {path.name}")
            try:
                self.server.reload_tool(path.stem)
                logger.info(f"[TOOL] Successfully loaded new tool: {path.stem}")
            except Exception as e:
                logger.error(f"[TOOL] Failed to load new tool {path.stem}: {str(e)}", exc_info=True)

    def on_deleted(self, event):
        logger.info(f"[TOOL] Deletion detected: {event.src_path}")
        if not self._should_handle_event(event):
            return

        path = Path(event.src_path)

        # Handle deleted tool files
        if self._is_tool_file(path):
            # BUGFIX: debounce follow-up events from the same filesystem burst.
            self.last_reload = time.time()
            logger.info(f"[TOOL] Unloading deleted tool: {path.name}")
            try:
                self.server.unload_tool(path.stem)
                logger.info(f"[TOOL] Successfully unloaded tool: {path.stem}")
            except Exception as e:
                logger.error(f"[TOOL] Failed to unload tool {path.stem}: {str(e)}", exc_info=True)
87 |
class ConfigDirectoryHandler(FileSystemEventHandler):
    """Handle file system events in the config directory."""

    def __init__(self, server):
        # Server whose tools are reloaded when their YAML config changes.
        self.server = server
        # Timestamp of the last handled change, for debouncing.
        self.last_reload = 0
        logger.info("ConfigDirectoryHandler initialized")

    def _should_handle_event(self, event) -> bool:
        """Check if we should handle this event."""
        if event.is_directory:
            logger.debug(f"Ignoring directory event: {event.src_path}")
            return False
        if time.time() - self.last_reload < config.reload_delay:
            logger.debug(f"Ignoring event due to reload delay: {event.src_path}")
            return False
        logger.debug(f"Should handle event: {event.src_path}")
        return True

    def on_modified(self, event):
        logger.info(f"[CONFIG] Modification detected: {event.src_path}")
        if not self._should_handle_event(event):
            return

        changed = Path(event.src_path)

        # Only react to YAML files living directly in the config directory.
        if changed.suffix != '.yaml' or changed.parent != config.config_dir:
            return

        self.last_reload = time.time()
        logger.info(f"[CONFIG] Reloading tool due to config change: {changed.name}")
        try:
            self.server.reload_tool(changed.stem)
            logger.info(f"[CONFIG] Successfully reloaded tool: {changed.stem}")
        except Exception as e:
            logger.error(f"[CONFIG] Failed to reload tool {changed.stem}: {str(e)}", exc_info=True)
--------------------------------------------------------------------------------
/mcp_server/utils/__init__.py:
--------------------------------------------------------------------------------
1 | """
2 | Utility functions and classes.
3 | """
4 | from .rate_limiter import RateLimiter
5 | from .config_manager import config_manager
6 |
7 | __all__ = ['RateLimiter', 'config_manager']
--------------------------------------------------------------------------------
/mcp_server/utils/config_manager.py:
--------------------------------------------------------------------------------
1 | """
2 | Configuration management for MCP tools.
3 | """
4 | import os
5 | import yaml
6 | import logging
7 | from pathlib import Path
8 | from typing import Dict, Any, Optional
9 | from dotenv import load_dotenv
10 | from mcp_server.core.config import config
11 |
12 | logger = logging.getLogger(__name__)
13 |
class ConfigManager:
    """Manages tool configurations.

    Loads per-tool YAML files from ``config.config_dir`` and applies
    environment-variable overrides of the form ``TOOL_NAME__CONFIG_KEY=value``
    (double-underscore separator; tool and key matched lowercase).
    """

    def __init__(self):
        # Per-tool cache of fully merged configuration dicts.
        self._cache: Dict[str, Any] = {}
        # tool name -> {config key -> value}, gathered from the environment.
        self.env_overrides = {}

        # Load environment variables from a .env file, if present.
        load_dotenv()

        # Cache environment overrides
        self._cache_env_overrides()

    def _cache_env_overrides(self):
        """Cache all environment variables that could override tool configs."""
        for key in os.environ:
            # Look for tool-specific environment variables
            # Format: TOOL_NAME__CONFIG_KEY=value (double underscore separator)
            if '__' in key:
                tool_name, config_key = key.lower().split('__', 1)
                self.env_overrides.setdefault(tool_name, {})[config_key] = os.getenv(key)

    def get_tool_config(self, tool_name: str, refresh: bool = False) -> Dict[str, Any]:
        """Get configuration for a specific tool.

        Returns the YAML file contents merged with any environment overrides
        (environment values win). Results are cached until ``refresh=True`` or
        :meth:`clear_cache` is called.
        """
        if not refresh and tool_name in self._cache:
            return self._cache[tool_name]

        file_config: Dict[str, Any] = {}
        config_path = config.config_dir / f"{tool_name}.yaml"
        if config_path.exists():
            try:
                with open(config_path) as f:
                    file_config = yaml.safe_load(f) or {}
            except Exception as e:
                logger.error(f"Error loading config for {tool_name}: {str(e)}")
                file_config = {}

        # Apply environment overrides. The original collected these in
        # __init__ but never merged them into the returned config, making
        # the documented override feature a no-op.
        overrides = self.env_overrides.get(tool_name.lower(), {})
        tool_config = {**file_config, **overrides}
        self._cache[tool_name] = tool_config
        return tool_config

    def get_value(self, tool_name: str, key: str, default: Any = None) -> Any:
        """
        Get a specific configuration value for a tool.

        Args:
            tool_name: Name of the tool
            key: Configuration key to get
            default: Default value if key doesn't exist

        Returns:
            Configuration value or default
        """
        # Local renamed from `config` to stop shadowing the module-level
        # server-config import.
        tool_config = self.get_tool_config(tool_name)
        return tool_config.get(key, default)

    def clear_cache(self):
        """Clear the configuration cache."""
        self._cache.clear()
76 |
# Global config manager instance shared by the rest of the server
# (instantiated at import time; loads .env and environment overrides once).
config_manager = ConfigManager()
--------------------------------------------------------------------------------
/mcp_server/utils/logging_config.py:
--------------------------------------------------------------------------------
1 | """
2 | Logging configuration for the MCP server.
3 | """
4 | import logging
5 | from mcp_server.core.config import config
6 |
def configure_logging():
    """Configure logging based on server settings."""
    # Resolve the level up front: DEBUG when the server runs in debug mode.
    level = logging.DEBUG if config.debug_mode else logging.INFO

    # Build a console handler with a timestamped format.
    handler = logging.StreamHandler()
    handler.setFormatter(logging.Formatter(
        '%(asctime)s - %(name)s - %(levelname)s - %(message)s',
        datefmt='%Y-%m-%d %H:%M:%S'
    ))
    handler.setLevel(level)

    # Swap out any existing root handlers to avoid duplicate log lines.
    root = logging.getLogger()
    root.setLevel(level)
    root.handlers.clear()
    root.addHandler(handler)

    # Log initial configuration
    logging.info(f"Logging configured with level: {logging.getLevelName(level)}")
    if config.debug_mode:
        logging.debug("Debug logging enabled")
--------------------------------------------------------------------------------
/mcp_server/utils/rate_limiter.py:
--------------------------------------------------------------------------------
1 | """
2 | Rate limiting utility for MCP tools
3 | """
4 | from datetime import datetime
5 | import threading
6 | from collections import deque
7 | import time
8 |
class RateLimiter:
    """Thread-safe sliding-window rate limiter.

    Granted-request timestamps are kept in a deque guarded by a lock. Uses
    ``time.monotonic()`` so the window is immune to wall-clock adjustments
    (the original used ``datetime.now()``, which can jump when the system
    clock is changed, breaking the window).
    """

    def __init__(self, max_requests: int, time_window_seconds: int):
        """
        Initialize a rate limiter.

        Args:
            max_requests: Maximum number of requests allowed in the time window
            time_window_seconds: Time window in seconds
        """
        self.max_requests = max_requests
        self.time_window_seconds = time_window_seconds
        # Monotonic timestamps of requests granted within the current window.
        self.requests = deque()
        self.lock = threading.Lock()

    def can_make_request(self) -> bool:
        """Check if a request can be made within the rate limit.

        Returns True (and records the request) when a slot is free.
        """
        now = time.monotonic()
        with self.lock:
            # Drop timestamps that have aged out of the window.
            while self.requests and now - self.requests[0] > self.time_window_seconds:
                self.requests.popleft()

            # Check if we can make a new request
            if len(self.requests) < self.max_requests:
                self.requests.append(now)
                return True
            return False

    def wait_for_slot(self):
        """Block until a request slot is available."""
        while not self.can_make_request():
            time.sleep(1)  # Wait 1 second before checking again
--------------------------------------------------------------------------------
/mcp_server/utils/tool_decorator.py:
--------------------------------------------------------------------------------
1 | """
2 | Decorator utilities for creating MCP tools.
3 | """
4 | import os
5 | import json
6 | import functools
7 | import logging
8 | from typing import Any, Dict, List, Optional, Type, get_type_hints
9 | from dataclasses import dataclass, field
10 | from pydantic import BaseModel, create_model
11 | from mcp import types as mcp_types
12 |
13 | logger = logging.getLogger(__name__)
14 |
@dataclass
class ToolMetadata:
    """Tool metadata configuration.

    Built by the @mcp_tool decorator and attached to the wrapped tool
    function as ``_tool_metadata``.
    """
    name: str
    description: str
    # Environment variables that must be set for the tool to run.
    required_env_vars: List[str] = field(default_factory=list)
    # Default configuration values, merged under caller-supplied config.
    config_defaults: Dict[str, Any] = field(default_factory=dict)
    # Max requests per window; None disables rate limiting.
    rate_limit: Optional[int] = None
    # Rate-limit window length in seconds.
    rate_limit_window: int = 60
24 |
def mcp_tool(
    name: str,
    description: str,
    *,
    input_model: Optional[Type[BaseModel]] = None,
    required_env_vars: Optional[List[str]] = None,
    config_defaults: Optional[Dict[str, Any]] = None,
    rate_limit: Optional[int] = None,
    rate_limit_window: int = 60
):
    """
    Decorator to create an MCP tool from a function.

    The wrapper awaits the decorated function, so it must be async. Metadata
    is attached to the returned wrapper (``_tool_metadata``, ``TOOL_NAME``,
    ``TOOL_DESCRIPTION``, ``TOOL_SCHEMA``, ``REQUIRED_ENV_VARS``) along with a
    ``register_tool(server, config)`` factory that builds the request handler.

    Args:
        name: Public tool name exposed to MCP clients.
        description: Human-readable tool description.
        input_model: Pydantic model validating tool arguments; when omitted,
            one is generated from the function's type hints (excluding
            'config' and the return annotation).
        required_env_vars: Environment variables the tool needs at runtime.
        config_defaults: Defaults merged under caller config in register_tool.
        rate_limit: Max requests per window; None disables rate limiting.
        rate_limit_window: Rate-limit window in seconds.

    Example:
        ```python
        from pydantic import BaseModel

        class WebSearchInput(BaseModel):
            query: str = Field(description="The search query to process")

        @mcp_tool(
            name="web_search",
            description="Search the web for information",
            input_model=WebSearchInput,
            required_env_vars=["API_KEY"],
            config_defaults={
                "max_results": 10,
                "timeout": 5
            },
            rate_limit=100,
            rate_limit_window=60
        )
        async def search_web(query: str, config: Dict[str, Any]) -> Dict:
            # Your implementation here
            pass
        ```
    """
    def decorator(func):
        # Create input model from function signature if not provided
        nonlocal input_model
        if input_model is None:
            hints = get_type_hints(func)
            # Remove 'config' and 'return' from hints
            hints = {k: v for k, v in hints.items()
                    if k not in ('config', 'return')}
            # Build a pydantic model from the remaining annotated parameters.
            input_model = create_model(
                f"{func.__name__.title()}Input",
                **hints
            )

        # Store metadata
        metadata = ToolMetadata(
            name=name,
            description=description,
            required_env_vars=required_env_vars or [],
            config_defaults=config_defaults or {},
            rate_limit=rate_limit,
            rate_limit_window=rate_limit_window
        )

        @functools.wraps(func)
        async def wrapped_func(*args, **kwargs):
            # NOTE(review): assumes func is a coroutine function — awaiting a
            # plain sync function here would fail at call time.
            return await func(*args, **kwargs)

        # Transfer metadata to wrapped function
        wrapped_func._tool_metadata = metadata
        wrapped_func.TOOL_NAME = name
        wrapped_func.TOOL_DESCRIPTION = description
        wrapped_func.TOOL_SCHEMA = input_model.model_json_schema()
        wrapped_func.REQUIRED_ENV_VARS = required_env_vars or []

        def register_tool(server, config: Dict[str, Any]):
            """Create and return a handler for this tool."""
            # Merge defaults with provided config (caller config wins).
            tool_config = {
                **metadata.config_defaults,
                **(config or {})
            }

            # Create the handler function
            async def handle_tool(tool_name: str, arguments: dict) -> list[mcp_types.TextContent]:
                """Validate arguments, invoke the tool, and wrap the result as TextContent."""
                logger.debug(f"Tool handler called for {tool_name}")

                try:
                    # Validate input using the model
                    validated_input = input_model(**arguments)

                    # Call the tool function with validated input and config
                    result = await wrapped_func(**validated_input.model_dump(), config=tool_config)

                    # Handle different return types: scalars and lists are
                    # wrapped into dicts so the payload is always a JSON object.
                    if isinstance(result, (str, int, float, bool)):
                        result = {"result": result}
                    elif isinstance(result, list):
                        result = {"results": result}

                    # Return as MCP TextContent
                    return [mcp_types.TextContent(
                        type="text",
                        text=json.dumps(result)
                    )]
                except Exception as e:
                    # Any failure (validation or execution) is returned to the
                    # client as a structured error payload rather than raised.
                    logger.error(f"Error in tool {tool_name}: {str(e)}")
                    return [mcp_types.TextContent(
                        type="text",
                        text=json.dumps({
                            "status": "error",
                            "error": str(e)
                        })
                    )]

            # Just return the handler, don't register it
            return handle_tool

        # Attach registration function to wrapped function
        wrapped_func.register_tool = register_tool

        return wrapped_func

    return decorator
--------------------------------------------------------------------------------
/requirements.txt:
--------------------------------------------------------------------------------
1 | fastapi>=0.68.0,<1.0.0
2 | uvicorn>=0.34.0,<1.0.0
3 | sse-starlette>=2.2.1,<3.0.0
4 | watchdog>=6.0.0,<7.0.0
5 | python-dotenv>=1.0.1,<2.0.0
6 | google-generativeai>=0.3.0,<1.0.0
7 | httpx>=0.28.1,<1.0.0
8 | mcpadapt>=0.0.15,<1.0.0
--------------------------------------------------------------------------------
/tests/test_mcp_client.py:
--------------------------------------------------------------------------------
1 | """
2 | Test client for the Web Search MCP server.
3 | """
4 | import asyncio
5 | import json
6 | import os
7 | import traceback
8 | from mcp import ClientSession, types
9 | from mcp.client.sse import sse_client
10 | from urllib.parse import urljoin
11 |
def tool_to_dict(tool):
    """Convert MCP tool object to a dictionary (for JSON serialization)."""
    return dict(
        name=tool.name,
        description=tool.description,
        inputSchema=tool.inputSchema,
    )
19 |
async def test_web_search():
    """
    Test web search tools by connecting to an existing MCP server using SSE transport.
    """
    # Resolve the SSE endpoint from the environment, defaulting to localhost.
    base_url = os.getenv("MCP_SERVER_URL", "http://localhost:32823")
    sse_url = urljoin(base_url, "/sse")  # Connect to the SSE endpoint
    print(f"\nConnecting to MCP server at {sse_url}...")

    # Queries to exercise against the search tool.
    queries = [
        "What is the latest version of Python?",
        "Who won the most recent Super Bowl?",
        "What are the key features of the Rust programming language?"
    ]

    try:
        async with sse_client(sse_url) as (reader, writer):
            async with ClientSession(reader, writer) as session:
                # Initialize the connection
                await session.initialize()

                # Show what the server exposes before exercising it.
                tools = await session.list_tools()
                print("\nAvailable tools:")
                print(json.dumps([tool_to_dict(t) for t in tools.tools], indent=2))

                for query in queries:
                    print(f"\nTesting query: {query}")
                    print("-" * 50)

                    print("\nWeb Search Results:")
                    print("-" * 25)
                    try:
                        result = await session.call_tool(
                            "gemini_web_search",
                            arguments={"query": query}
                        )
                        print("Result:")
                        # Extract and pretty-print the first text payload.
                        if result.content and len(result.content) > 0:
                            print(json.dumps(json.loads(result.content[0].text), indent=2))
                        else:
                            print("No content returned")
                    except Exception as e:
                        print(f"Error during tool call: {e}")
                        traceback.print_exc()

    except ConnectionRefusedError:
        print(f"Error: Could not connect to MCP server at {sse_url}")
        print("Make sure the server is running and the connection details are correct.")
    except Exception as e:
        print(f"Error during connection: {str(e)}")
        print("\nFull traceback:")
        traceback.print_exc()
80 |
async def main():
    """Entry point: run the web-search test and report any fatal error."""
    try:
        await test_web_search()
    except Exception as exc:
        print(f"Fatal error: {str(exc)}")
        print("\nFull traceback:")
        traceback.print_exc()

if __name__ == "__main__":
    asyncio.run(main())
--------------------------------------------------------------------------------
/tests/test_smolagents.py:
--------------------------------------------------------------------------------
"""Smoke test: drive a smolagents CodeAgent against the remote MCP SSE server."""
import time
from smolagents import CodeAgent
from smolagents import LiteLLMModel
from mcpadapt.core import MCPAdapt
from mcpadapt.smolagents_adapter import SmolAgentsAdapter

from dotenv import load_dotenv
import os

# Pull GEMINI_API_KEY (and friends) from a local .env file, if present.
load_dotenv()

model = LiteLLMModel(
    model_id="gemini/gemini-2.0-flash",
    # Read the key from the environment instead of hard-coding a placeholder:
    # the original loaded .env but then passed the literal "YOUR_API_KEY".
    api_key=os.getenv("GEMINI_API_KEY", "YOUR_API_KEY")
)

REMOTE_SSE_SERVER = "http://127.0.0.1:32823/sse"


if __name__ == "__main__":

    # Create List of Requested Tools
    remote_tool_names = ["gemini_web_search"]

    with MCPAdapt({"url": REMOTE_SSE_SERVER}, SmolAgentsAdapter()) as tools:
        # Keep only the remote tools we asked for by name.
        selected_tools = [tool for tool in tools if tool.name in remote_tool_names]
        selected_tools.extend([])  # add your local tools here
        agent = CodeAgent(tools=selected_tools, model=model, add_base_tools=True)
        agent.run("Please find a remedy for hangover.")
/tools/__init__.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/batteryshark/agent_construct/db8609dec16188c764074032c311f977fd28a075/tools/__init__.py
--------------------------------------------------------------------------------
/tools/gemini_web_search.py:
--------------------------------------------------------------------------------
1 | """
2 | Gemini Web Search Tool
3 | """
4 | import os
5 | import json
6 | import time
7 | import aiohttp
8 | import logging
9 | import requests
10 | import re
11 | from typing import Dict, List, Optional, Any
12 | from pydantic import BaseModel, Field
13 | from google import genai
14 | from google.genai import types
15 | from mcp_server.utils.tool_decorator import mcp_tool
16 |
17 | # Configure logging
18 | logger = logging.getLogger(__name__)
19 |
class WebSearchInput(BaseModel):
    """Input model for web search."""
    # Free-text query forwarded verbatim to Gemini's grounded generation.
    query: str = Field(description="The search query to process")
23 |
def extract_title_from_html(html_content: str) -> Optional[str]:
    """Extract the <title> text from HTML content using regex.

    Returns the stripped title, or None when no title tag is found.
    """
    # Match an opening <title ...> tag (case-insensitive) and capture its text
    # up to the closing tag. The previous pattern had lost its "<title" prefix
    # and so matched the text following the first '>' anywhere in the document.
    title_match = re.search(r'<title[^>]*>([^<]+)</title>', html_content, re.IGNORECASE)
    return title_match.group(1).strip() if title_match else None
28 |
def follow_redirect(url: str, timeout: int = 5, follow_redirects: bool = True) -> tuple[str, Optional[str]]:
    """Follow a URL redirect and return the final URL and page title.

    Best-effort: any network or parse failure returns ``(url, None)``.

    Args:
        url: URL to resolve.
        timeout: Per-request timeout in seconds.
        follow_redirects: When False, only the original URL is returned.
    """
    try:
        head_response = requests.head(url, allow_redirects=follow_redirects, timeout=timeout)
        final_url = head_response.url if follow_redirects else url

        if not follow_redirects:
            return final_url, None

        response = requests.get(final_url, stream=True, timeout=timeout)
        # Read only the first chunk; defaulting to b"" avoids StopIteration
        # on an empty response body.
        first_chunk = next(response.iter_content(8192), b"")
        content = first_chunk.decode('utf-8', errors='ignore')
        response.close()

        title = extract_title_from_html(content)

        # Return None for title if it's a Cloudflare attention page
        if title and ("Attention Required! | Cloudflare" in title or
                     "Just a moment..." in title or
                     "Security check" in title):
            return final_url, None

        return final_url, title
    except Exception:
        # Narrowed from a bare `except:` so KeyboardInterrupt/SystemExit are
        # no longer swallowed.
        return url, None
53 |
def extract_references(response, max_references: int = 10, include_confidence: bool = True) -> List[Dict]:
    """Extract detailed references from Gemini response.

    Walks the grounding metadata, resolving each web chunk's redirect URL and
    page title, and returns at most ``max_references`` entries. Returns an
    empty list on any failure.
    """
    try:
        # Round-trip through JSON to reach the raw grounding metadata.
        raw = json.loads(response.model_dump_json())
        meta = raw["candidates"][0]["grounding_metadata"]

        refs: List[Dict] = []

        for support in meta["grounding_supports"]:
            if len(refs) >= max_references:
                break

            for chunk_index in support["grounding_chunk_indices"]:
                chunk = meta["grounding_chunks"][chunk_index]
                if "web" not in chunk:
                    continue

                # Resolve the redirect and fetch the real page title.
                final_url, page_title = follow_redirect(chunk["web"]["uri"])

                entry = {
                    "content": support["segment"]["text"],
                    "url": final_url,
                    # Fall back to the Gemini-provided title when none was scraped.
                    "title": page_title or chunk["web"].get("title", "")
                }

                if include_confidence:
                    scores = support["confidence_scores"]
                    entry["confidence"] = scores[0] if scores else None

                refs.append(entry)

                if len(refs) >= max_references:
                    break

        return refs
    except Exception as e:
        logger.error(f"Error extracting references: {e}")
        return []
92 |
@mcp_tool(
    name="gemini_web_search",
    description="Web search tool powered by Gemini Search API",
    input_model=WebSearchInput,
    required_env_vars=[],  # API key is supplied via tool config, not the environment
    config_defaults={
        "max_retries": 3,
        "gemini_model": "gemini-2.0-flash",
        "max_references": 10,
        "include_confidence_scores": True,
        "timeout": 5,
        "follow_redirects": True,
        "gemini_api_key": None  # Must be provided in config
    },
    rate_limit=100,
    rate_limit_window=60
)
async def search_web(query: str, config: Dict[str, Any]) -> Dict:
    """Perform a web search using Gemini API.

    Args:
        query: Free-text search query.
        config: Tool configuration (see config_defaults); must include a
            non-empty "gemini_api_key".

    Returns:
        {"status": "success", "data": {...}} on success, or
        {"status": "error", "error": <message>} when the key is missing,
        retries are exhausted, or max_retries is not positive.
    """
    import asyncio  # local import: stdlib, needed only for the retry pause

    if not config.get("gemini_api_key"):
        logger.error("Gemini API key not provided in tool configuration")
        return {
            "status": "error",
            "error": "Gemini API key not provided in tool configuration"
        }

    for attempt in range(config["max_retries"]):
        try:
            # Initialize Gemini client with config API key
            client = genai.Client(api_key=config["gemini_api_key"])

            # Generate content using Gemini with Google Search grounding enabled
            response = client.models.generate_content(
                model=config["gemini_model"],
                contents=f"{query}",
                config=types.GenerateContentConfig(
                    tools=[types.Tool(google_search=types.GoogleSearch())]
                )
            )

            # Extract all metadata from response
            raw_response = json.loads(response.model_dump_json())
            grounding_metadata = raw_response["candidates"][0]["grounding_metadata"]

            # Extract references with detailed information
            references = extract_references(
                response,
                max_references=config["max_references"],
                include_confidence=config["include_confidence_scores"]
            )

            # Return structured response
            return {
                "status": "success",
                "data": {
                    "prompt": query,
                    "search_query": grounding_metadata["web_search_queries"],
                    "response": response.text,
                    "references": references
                }
            }
        except Exception as e:
            logger.error(f"Search attempt {attempt + 1} failed: {str(e)}")
            if attempt == config["max_retries"] - 1:
                return {
                    "status": "error",
                    "error": str(e)
                }
            # Non-blocking pause before retrying; the original used
            # time.sleep(1), which stalls the whole event loop inside
            # this async tool.
            await asyncio.sleep(1)

    # Reached only when max_retries <= 0 (the original fell off the end
    # and implicitly returned None in that case).
    return {
        "status": "error",
        "error": "No search attempts were made (max_retries <= 0)"
    }
162 |
--------------------------------------------------------------------------------
/tools/url_scraper.py:
--------------------------------------------------------------------------------
1 | """
2 | URL Scraper Tool - Converts webpage content to markdown, with support for JavaScript-rendered content
3 | """
4 | import re
5 | import json
6 | import logging
7 | import asyncio
8 | import requests
9 | import urllib3
10 | from typing import Dict, Any, List
11 | from markdownify import markdownify
12 | from requests.exceptions import RequestException
13 | from http.client import HTTPException
14 | from pydantic import BaseModel, Field
15 | from mcp_server.utils.tool_decorator import mcp_tool
16 | from mcp import types as mcp_types
17 | from playwright.async_api import async_playwright, TimeoutError as PlaywrightTimeout
18 |
19 | # Configure logging
20 | logger = logging.getLogger(__name__)
21 |
# Define tool name as a constant (reused in the @mcp_tool registration below)
TOOL_NAME = "url_scraper"

class URLScraperInput(BaseModel):
    """Input model for URL scraping."""
    # NOTE(review): the URL is not validated here — presumably the scraping
    # helpers' exception handling catches malformed URLs; confirm.
    url: str = Field(description="The URL of the webpage to scrape")
    render_js: bool = Field(
        default=False,
        description="Whether to render JavaScript before scraping (slower but more accurate for dynamic content)"
    )
32 |
async def scrape_with_playwright(url: str, config: Dict[str, Any]) -> Dict:
    """Scrape content using Playwright with JavaScript rendering.

    Args:
        url: Page to load.
        config: Must provide "user_agent" and "timeout" (seconds).

    Returns:
        {"status": "success", "content": <markdown>} on success, or
        {"status": "error", "error": <message>} on failure.
    """
    try:
        async with async_playwright() as p:
            # Launch browser with custom user agent
            browser = await p.chromium.launch(headless=True)
            try:
                context = await browser.new_context(
                    user_agent=config["user_agent"],
                    viewport={'width': 1920, 'height': 1080}  # Set a standard viewport
                )

                # Create new page and navigate
                page = await context.new_page()

                try:
                    # First try with load event
                    await page.goto(
                        url,
                        wait_until="load",
                        timeout=config["timeout"] * 1000
                    )
                except PlaywrightTimeout:
                    logger.warning("Initial page load timed out, trying with domcontentloaded")
                    # If that fails, try with just DOM content loaded
                    await page.goto(
                        url,
                        wait_until="domcontentloaded",
                        timeout=config["timeout"] * 1000
                    )

                try:
                    # Wait for the page to become mostly stable
                    await page.wait_for_load_state("networkidle", timeout=5000)
                except PlaywrightTimeout:
                    logger.warning("Network idle wait timed out, proceeding with current state")

                # Additional wait for any lazy-loaded content
                try:
                    # Scroll to bottom to trigger lazy loading
                    await page.evaluate("window.scrollTo(0, document.body.scrollHeight)")
                    await asyncio.sleep(2)  # Brief pause for content to load
                except Exception as e:
                    logger.warning(f"Scroll attempt failed: {e}")

                # Get the rendered HTML
                content = await page.content()
            finally:
                # Always close the browser: the original only closed it on the
                # success path, leaking it when navigation raised.
                await browser.close()

        # Convert to markdown and collapse runs of 3+ blank lines
        markdown_content = markdownify(content).strip()
        markdown_content = re.sub(r"\n{3,}", "\n\n", markdown_content)

        return {
            "status": "success",
            "content": markdown_content
        }

    except Exception as e:
        logger.error(f"Error scraping with Playwright: {str(e)}")
        return {
            "status": "error",
            "error": f"Error scraping with Playwright: {str(e)}"
        }
97 |
def scrape_with_urllib3(url: str, config: Dict[str, Any]) -> Dict:
    """Fallback scraping implementation using urllib3 for cases where requests fails."""
    try:
        pool = urllib3.PoolManager(
            headers={"User-Agent": config["user_agent"]},
            timeout=urllib3.Timeout(connect=config["timeout"], read=config["timeout"])
        )
        response = pool.request("GET", url)

        # Treat any 4xx/5xx status as a failure.
        if response.status >= 400:
            raise urllib3.exceptions.HTTPError(f"HTTP {response.status}")

        # HTML -> markdown, then collapse runs of 3+ newlines.
        body = markdownify(response.data.decode('utf-8')).strip()
        body = re.sub(r"\n{3,}", "\n\n", body)

        return {"status": "success", "content": body}
    except Exception as e:
        logger.error(f"Error fetching webpage with urllib3: {str(e)}")
        return {
            "status": "error",
            "error": f"Error fetching webpage with urllib3: {str(e)}"
        }
123 |
def scrape_with_requests(url: str, config: Dict[str, Any]) -> Dict:
    """Simple scraping implementation using requests."""
    try:
        response = requests.get(
            url,
            headers={"User-Agent": config["user_agent"]},
            timeout=config["timeout"],
        )
        response.raise_for_status()

        # HTML -> markdown, then collapse runs of 3+ newlines.
        body = markdownify(response.text).strip()
        body = re.sub(r"\n{3,}", "\n\n", body)

        return {"status": "success", "content": body}
    except (HTTPException, RequestException) as e:
        # If we hit header limits or other request issues, try urllib3
        logger.warning(f"Requests failed, falling back to urllib3: {str(e)}")
        return scrape_with_urllib3(url, config)
    except Exception as e:
        logger.error(f"Unexpected error: {str(e)}")
        return {
            "status": "error",
            "error": f"An unexpected error occurred: {str(e)}"
        }
148 |
@mcp_tool(
    name=TOOL_NAME,
    description="Scrapes a webpage and returns its content as markdown, with optional JavaScript rendering support",
    input_model=URLScraperInput,
    required_env_vars=[],
    config_defaults={
        "user_agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/91.0.4472.124 Safari/537.36",
        "timeout": 30  # Increased timeout for JS rendering
    },
    rate_limit=50,
    rate_limit_window=60
)
async def scrape_url(url: str, render_js: bool = False, config: Dict[str, Any] = None) -> Dict:
    """Scrape content from a URL and convert it to markdown.

    Args:
        url: The URL to scrape
        render_js: Whether to render JavaScript before scraping
        config: Configuration dictionary containing user_agent and timeout settings

    Returns:
        Dictionary containing the scraped content and metadata
    """
    # Defensive defaults in case the tool runner passes no (or partial) config.
    effective = {
        "user_agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/91.0.4472.124 Safari/537.36",
        "timeout": 30,
    }
    effective.update(config or {})

    # JS rendering goes through Playwright; otherwise plain requests suffices.
    if render_js:
        return await scrape_with_playwright(url, effective)
    return scrape_with_requests(url, effective)
--------------------------------------------------------------------------------