├── .env.template ├── .gitignore ├── Dockerfile ├── LICENSE ├── README.md ├── __init__.py ├── app ├── __init__.py ├── toolkit.py ├── toolkit_client.py └── tools │ ├── README │ ├── EXCEL_README.md │ ├── SHOPIFY_README.md │ └── VAPI_README.md │ ├── __init__.py │ ├── brave_search.py │ ├── browser_automation.py │ ├── document_management.py │ ├── excel.py │ ├── filesystem.py │ ├── fred.py │ ├── news_api.py │ ├── ppt.py │ ├── sequential_thinking.py │ ├── shopify.py │ ├── streamlit.py │ ├── time_tools.py │ ├── vapi.py │ ├── worldbank.py │ └── yfinance.py ├── claude_desktop_config.json ├── config_loader.py ├── docker-compose.yml ├── mcp_unified_server.py ├── requirements.txt ├── setup_env.py └── static ├── 111_tools.png ├── 123_tools.png └── 87_tools.png /.env.template: -------------------------------------------------------------------------------- 1 | NEWS_API_KEY= 2 | BRAVE_API_KEY= 3 | FRED_API_KEY=your_fred_api_key_here 4 | MCP_FILESYSTEM_DIRS=/storage 5 | STREAMLIT_APPS_DIR=/path/to/store/streamlit_apps # Optional, defaults to ~/streamlit_apps 6 | VAPI_API_KEY=your_vapi_api_key_here 7 | -------------------------------------------------------------------------------- /.gitignore: -------------------------------------------------------------------------------- 1 | .venv 2 | .env 3 | build_and_upload.py 4 | setup.py 5 | fix_upload.py 6 | ./dnu 7 | app/agents/ 8 | config_ui.py 9 | launcher.py 10 | MANIFEST.in 11 | roadmap.md 12 | -------------------------------------------------------------------------------- /Dockerfile: -------------------------------------------------------------------------------- 1 | FROM python:3.12-slim 2 | 3 | WORKDIR /app 4 | COPY app/ /app/app/ 5 | 6 | # Install system dependencies for Playwright/Chromium 7 | # Install system dependencies for Playwright/Chromium 8 | RUN apt-get update && apt-get install -y --no-install-recommends \ 9 | curl \ 10 | wget \ 11 | gnupg \ 12 | ca-certificates \ 13 | libglib2.0-0 \ 14 | libnss3 \ 15 | libnspr4 \ 16 | libatk1.0-0 \ 17 | libatk-bridge2.0-0 \ 18 | libcups2 \ 19 | libdrm2 \ 20 | libdbus-1-3 \ 21 | libxcb1 \ 22 | libxkbcommon0 \ 23 | libx11-6 \ 24 | libxcomposite1 \ 25 | libxdamage1 \ 26 | libxext6 \ 27 | libxfixes3 \ 28 | libxrandr2 \ 29 | libgbm1 \ 30 | libasound2 \ 31 | libpango-1.0-0 \ 32 | libcairo2 \ 33 | xvfb \ 34 | wget \ 35 | gnupg \ 36 | ca-certificates \ 37 | libglib2.0-0 \ 38 | libnss3 \ 39 | libnspr4 \ 40 | libatk1.0-0 \ 41 | libatk-bridge2.0-0 \ 42 | libcups2 \ 43 | libdrm2 \ 44 | libdbus-1-3 \ 45 | libxcb1 \ 46 | libxkbcommon0 \ 47 | libx11-6 \ 48 | libxcomposite1 \ 49 | libxdamage1 \ 50 | libxext6 \ 51 | libxfixes3 \ 52 | libxrandr2 \ 53 | libgbm1 \ 54 | libasound2 \ 55 | libpango-1.0-0 \ 56 | libcairo2 \ 57 | xvfb \ 58 | && rm -rf /var/lib/apt/lists/* 59 | 60 | # Copy requirements and install dependencies - with proper dependency handling 61 | # Copy requirements and install dependencies - with proper dependency handling 62 | COPY requirements.txt . 63 | RUN pip install --upgrade pip && \ 64 | pip install --no-cache-dir -r requirements.txt 65 | 66 | 67 | # Install Playwright and browsers 68 | RUN playwright install chromium firefox webkit 69 | RUN playwright install-deps chromium 70 | 71 | # Verify MCP and Playwright are installed 72 | # Verify MCP and Playwright are installed 73 | RUN python -c "import mcp; print(f'MCP')" 74 | RUN python -c "import playwright; print(f'Playwright')" 75 | RUN python -c "import playwright; print(f'Playwright')" 76 | 77 | # Copy application code 78 | COPY . . 79 | 80 | # Make the script executable 81 | RUN chmod +x mcp_unified_server.py 82 | 83 | # Create a healthcheck script 84 | RUN echo '#!/bin/bash\npython -c "import mcp" || exit 1\ncurl -f http://localhost:8000/health || exit 1' > healthcheck.sh 85 | RUN chmod +x healthcheck.sh 86 | 87 | # Create entrypoint script with better debugging 88 | RUN echo '#!/bin/bash\necho "DEBUG: Starting server"\necho "Python version: $(python --version)"\necho "Installed packages:"\npip list\necho "Environment variables:"\nenv | grep -v "API_KEY"\necho "Running MCP server..."\npython -u mcp_unified_server.py\necho "DEBUG: Server exited with code $?"' > entrypoint.sh 89 | RUN chmod +x entrypoint.sh 90 | 91 | # Command to run the server with unbuffered output 92 | CMD ["./entrypoint.sh"] 93 | -------------------------------------------------------------------------------- /LICENSE: -------------------------------------------------------------------------------- 1 | MIT License 2 | 3 | Copyright (c) 2025 getfounded 4 | 5 | Permission is hereby granted, free of charge, to any person obtaining a copy 6 | of this software and associated documentation files (the "Software"), to deal 7 | in the Software without restriction, including without limitation the rights 8 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell 9 | copies of the Software, and to permit persons to whom the Software is 10 | furnished to do so, subject to the following conditions: 11 | 12 | The above copyright notice and this permission notice shall be included in all 13 | copies or substantial portions of the Software. 14 | 15 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 16 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 17 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE 18 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 19 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, 20 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE 21 | SOFTWARE. 22 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | # MCP Tool Kit 2 | 3 | A modular server implementation for building high precision vertical AI agents. Intended for use building high precision vertical AI agents, but can deploy to gain access to general tool functionality. 4 | 5 | Uses >=50% less code than the Python MCP SDK alone. 6 | 7 | [![PyPI version](https://img.shields.io/pypi/v/mcptoolkit.svg)](https://pypi.org/project/mcptoolkit/) 8 | [![Python versions](https://img.shields.io/pypi/pyversions/mcptoolkit.svg)](https://pypi.org/project/mcptoolkit/) 9 | [![License](https://img.shields.io/github/license/getfounded/mcp-tool-kit.svg)](https://github.com/getfounded/mcp-tool-kit/blob/main/LICENSE) 10 | 11 | ### Benefits of This Approach 12 | 13 | - **Reduces Cognitive Load on Claude**: Claude doesn't need to reason about the sequence of tool calls 14 | - **Encapsulates Domain Knowledge**: The agent can contain domain-specific logic about how to perform tasks well in a particular vertical 15 | - **Simplifies Error Handling**: The agent can handle errors and retries internally without Claude's involvement 16 | - **Enables Complex Workflows**: Multi-step processes that would be difficult to coordinate through individual tool calls 17 | - **Maintains Conversational Flow**: The user isn't exposed to the complexity of the underlying system 18 | 19 | ### Example Scenario 20 | Here's a concrete example of Claude invoking a vertical agent: 21 | 22 | ``` 23 | User: "I need a comprehensive analysis of the electric vehicle market for a presentation tomorrow." 24 | 25 | Claude: [recognizes this requires multiple tools and domain expertise] 26 | 27 | Claude: "I'll help you with that comprehensive EV market analysis. I'll need to gather the latest market data, news, and trends. This will take a moment..." 28 | 29 | [Behind the scenes, Claude calls a MarketAnalysisAgent] 30 | 31 | Claude -> MarketAnalysisAgent.analyze_market( 32 | sector="electric vehicles", 33 | include_news=True, 34 | include_market_data=True, 35 | create_presentation=True 36 | ) 37 | 38 | [The agent orchestrates multiple tool calls using your toolkit] 39 | - news_search for recent EV news 40 | - brave_web_search for market data 41 | - sequential_thinking for analysis 42 | - write_file to save the report 43 | - ppt_create_presentation to generate slides 44 | 45 | [Agent returns results to Claude] 46 | 47 | Claude: "I've analyzed the electric vehicle market for you. Here are the key findings: 48 | 1. Tesla continues to lead with 65% market share in North America 49 | 2. BYD has overtaken VW in global sales volume 50 | 3. Battery technology breakthroughs are accelerating adoption 51 | 52 | I've also created a presentation with detailed charts and data. You can find it saved as 'EV_Market_Analysis.pptx' in your working directory." 53 | ``` 54 | ## Overview 55 | 56 | The MCP Unified Server provides a unified interface for Claude to interact with various external systems and tools including: 57 | 58 | - **File system operations**: Read, write, and manipulate files 59 | - **Time tools**: Get current time in different timezones, convert between timezones 60 | - **Sequential thinking**: A tool for dynamic and reflective problem-solving 61 | - **Brave Search**: Web and local search capabilities 62 | - **Browser automation**: Complete browser control via Playwright 63 | - **World Bank API**: Access to economic and development data 64 | - **News API**: Access to global news sources and articles 65 | - **PowerPoint**: Create and manipulate PowerPoint presentations 66 | - **Excel**: Create and manipulate Excel spreadsheets 67 | - **Yahoo Finance**: Stock market and financial data 68 | - **FRED**: Federal Reserve Economic Data 69 | - **Agentic capabilities**: Create and deploy autonomous agents that perform complex tasks 70 | - **And many more specialized tools** 71 | 72 | ## 123 Total Tools Available 73 | ![Claude screenshot with tools](./static/123_tools.png) 74 | 75 | ## Quickstart Guide: Deploy Your First MCP Server with Default Tools 76 | Note: ensure that you have git downloaded (https://git-scm.com/downloads) and Docker downloaded (https://www.docker.com/products/docker-desktop/) and running. You also must ensure the git executable file is added to path (instructions towards end of this README). 77 | 78 | ## Docker deployment (recommended & most stable) 79 | 1) Clone the repository: 80 | ```git 81 | clone https://github.com/getfounded/mcp-tool-kit.git 82 | cd mcp-tool-kit 83 | ``` 84 | 2) You can then use Docker in one of two ways: 85 | Option 1 - Using docker-compose: 86 | ``` 87 | docker-compose up 88 | ``` 89 | Option 2 - Direct Docker command: 90 | ``` 91 | docker run -p 8000:8000 -v ~/documents:/app/documents getfounded/mcp-tool-kit:latest 92 | ``` 93 | 94 | The repository includes a sample Claude desktop configuration file (`claude_desktop_config.json`) that you can use: 95 | 96 | ```json 97 | { 98 | "mcpServers": { 99 | "unified": { 100 | "command": "docker", 101 | "args": [ 102 | "exec", 103 | "-i", 104 | "mcp-tool-kit-mcp-server", 105 | "python", 106 | "-u", 107 | "mcp_unified_server.py" 108 | ], 109 | "useStdio": true 110 | } 111 | } 112 | } 113 | ``` 114 | ### Troubleshooting docker 115 | If you are getting errors running docker, it is likely that the image name is incorrect in the Claude desktop configuration file. A common fix is to use the following json for configuration: 116 | 117 | ```json 118 | { 119 | "mcpServers": { 120 | "unified": { 121 | "command": "docker", 122 | "args": [ 123 | "exec", 124 | "-i", 125 | "mcp-tool-kit-mcp-server-1", 126 | "python", 127 | "-u", 128 | "mcp_unified_server.py" 129 | ], 130 | "useStdio": true 131 | } 132 | } 133 | } 134 | ``` 135 | 136 | ## Install via pip 137 | ```bash 138 | # Simple installation 139 | pip install mcptoolkit 140 | 141 | # Launch the server with default configuration 142 | mcptoolkit-server 143 | ``` 144 | 145 | 146 | Local server basic configuration: 147 | 148 | ```json 149 | { 150 | "tools": [ 151 | { 152 | "name": "MCP Toolkit", 153 | "url": "http://localhost:8000" 154 | } 155 | ], 156 | "settings": { 157 | "allowed_directories": ["~/Documents", "~/Downloads"], 158 | "default_tools": ["MCP Toolkit"] 159 | } 160 | } 161 | ``` 162 | 163 | ## Configure Claude Desktop to Access Your Server 164 | 165 | 1. Open Claude Desktop app 166 | 2. Go to File > Settings > Developer > Edit config 167 | 3. Add the 'claude_desktop_configuration.json' file 168 | 4. Save the configuration 169 | 5. Restart the MCP server with your new tool integrated 170 | 6. Restart and Open Claude Desktop app (for windows users you must use task manager to end task for all Claude instances) 171 | 172 | 173 | You can import this configuration in the Claude desktop app or use it as a reference to create your own. 174 | 175 | You now have immediate access to powerful capabilities including file operations, web search, time tools, and more—without requiring any API keys or complex setup. 176 | 177 | ## Setting Up Environment Variables 178 | 179 | After cloning the repository, you have two options to configure your environment variables: 180 | 181 | ### Option 1: Interactive Setup Script 182 | 183 | Run the setup script which will guide you through setting up your environment variables: 184 | 185 | ```bash 186 | python setup_env.py 187 | ``` 188 | 189 | This script will create a `.env` file in the repository with your configuration. 190 | 191 | ### Option 2: Manual Configuration 192 | 193 | Alternatively, you can manually create a `.env` file in the repository root with the following variables: 194 | 195 | ``` 196 | # API Keys for external services 197 | BRAVE_API_KEY=your_brave_api_key 198 | NEWS_API_KEY=your_news_api_key 199 | FRED_API_KEY=your_fred_api_key 200 | 201 | # Application configuration 202 | STREAMLIT_APPS_DIR=/path/to/streamlit/apps 203 | MCP_FILESYSTEM_DIRS=/path/to/allowed/dir1,/path/to/allowed/dir2 204 | MCP_LOG_LEVEL=info 205 | ``` 206 | ### Sample Claude Prompts 207 | 208 | Once set up, you can ask Claude to use the tools with prompts like: 209 | 210 | - "Search the web for the latest AI research papers and summarize the findings." 211 | - "Create a PowerPoint presentation about climate change with three slides." 212 | - "Use the weather_checker agent to tell me the current conditions in Tokyo." 213 | - "Can you use the quick_lookup agent to research quantum computing advances?" 214 | - "Download my QuickBooks invoice data and analyze our revenue for the past quarter." 215 | - "Set up a product on my Shopify store with these details and pricing." 216 | - "Get the current stock price and historical data for Tesla using Yahoo Finance." 217 | - "Analyze inflation trends using FRED economic data for the past 5 years." 218 | - "Use browser automation to fill out this form at [website URL]." 219 | - "Read the text file in my Downloads folder named 'project_notes.txt'." 220 | - "Get the latest news headlines about technology." 221 | 222 | 223 | ## Available Tools 224 | 225 | ### File System Tools 226 | - `read_file`: Read contents of a file 227 | - `read_multiple_files`: Read multiple files simultaneously 228 | - `write_file`: Create or overwrite a file 229 | - `edit_file`: Make line-based edits to a file 230 | - `create_directory`: Create a new directory 231 | - `list_directory`: Get directory contents 232 | - `directory_tree`: Get a recursive tree view 233 | - `move_file`: Move or rename files/directories 234 | - `search_files`: Search for files matching a pattern 235 | - `get_file_info`: Get file metadata 236 | - `list_allowed_directories`: List allowed directories 237 | 238 | 239 | - **Browser_Automation:** 240 | - `playwright_launch_browser`: Launch a new browser instance 241 | - `playwright_navigate`: Navigate to a URL 242 | - `playwright_screenshot`: Take a screenshot 243 | - `playwright_click`: Click on an element 244 | - `playwright_fill`: Fill an input field 245 | - `playwright_evaluate`: Execute JavaScript 246 | - `playwright_get_content`: Get the HTML content of a page 247 | 248 | ### Agent Tools 249 | - `run_agent`: Execute a registered agent with parameters 250 | - `list_agents`: List all available agents and their metadata 251 | 252 | ### Financial Data Tools 253 | - **Yahoo Finance:** 254 | - `yfinance`: Get stock quotes and historical data 255 | - `yfinance_get_quote`: Get current stock quote 256 | - `yfinance_get_history`: Get historical stock data 257 | - `yfinance_get_info`: Get detailed company information 258 | - `yfinance_get_options`: Get options chain data 259 | - `yfinance_get_recommendations`: Get analyst recommendations 260 | 261 | - **FRED (Federal Reserve Economic Data):** 262 | - `fred_get_series`: Get economic data series 263 | - `fred_get_series_info`: Get metadata about a series 264 | - `fred_search`: Search for economic data series 265 | - `fred_get_category`: Browse data by category 266 | - `fred_get_releases`: Get economic data releases 267 | - `fred_get_sources`: Get data sources 268 | 269 | ### Time Tools 270 | - `get_current_time`: Get current time in a specified timezone 271 | - `convert_time`: Convert time between timezones 272 | 273 | ### Sequential Thinking 274 | - `sequentialthinking`: A tool for breaking down complex problems using a step-by-step thinking process 275 | 276 | ### Brave Search 277 | - `brave_web_search`: Perform web searches 278 | - `brave_local_search`: Search for local businesses and places 279 | 280 | ### World Bank API 281 | - `worldbank_get_indicator`: Get indicator data for a country 282 | 283 | ### News API 284 | - `news_top_headlines`: Get top news headlines 285 | - `news_search`: Search for news articles 286 | - `news_sources`: List available news sources 287 | 288 | ### PowerPoint Tools 289 | - `ppt_create_presentation`: Create a new PowerPoint presentation 290 | - `ppt_open_presentation`: Open an existing presentation 291 | - `ppt_save_presentation`: Save a presentation 292 | - `ppt_add_slide`: Add a new slide 293 | - `ppt_add_text`: Add text to a slide 294 | - `ppt_add_image`: Add an image to a slide 295 | - `ppt_add_chart`: Add a chart to a slide 296 | - `ppt_add_table`: Add a table to a slide 297 | - `ppt_analyze_presentation`: Analyze presentation structure 298 | - `ppt_enhance_presentation`: Suggest enhancements 299 | - `ppt_generate_presentation`: Generate a presentation from text 300 | - `ppt_command`: Process natural language commands 301 | 302 | For a complete list of available tools, see the documentation or browse the tools directory. 303 | For a complete list of available tools, see the documentation or browse the tools directory. 304 | 305 | ### Adding a New Tool Module 306 | 307 | 1. Create a new file in the `tools` directory (e.g., `my_tool.py`) 308 | 2. Follow the existing module pattern: 309 | - Create service class 310 | - Define tool functions 311 | - Implement registration functions 312 | 3. Update `mcp_unified_server.py` to import and register your new module 313 | 314 | ### Extending an Existing Tool Module 315 | 316 | 1. Add new methods to the service class 317 | 2. Add new tool functions 318 | 3. Update the registration function to include your new tools 319 | 320 | ### Development with Docker 321 | 322 | You can use Docker for development to ensure a consistent environment: 323 | 324 | ```bash 325 | # Build a development image 326 | docker build -t mcp-tool-kit:dev . 327 | 328 | # Run with source code mounted for development 329 | docker run -p 8000:8000 \ 330 | -v $(pwd):/app \ 331 | -v ~/documents:/app/documents \ 332 | mcp-tool-kit:dev 333 | ``` 334 | 335 | This mounts your local repository into the container, so changes to the code are reflected immediately (for most files). 336 | 337 | ## Philosophical Perspective: The Human-AI Cognitive Partnership 338 | 339 | The MCP Tool Kit represents a paradigm shift in how we conceptualize the relationship between human intelligence and AI systems. Rather than positioning AI as a mere tool for task automation, this framework establishes a cognitive partnership where human strategic thinking and AI operational capabilities complement each other in profound ways. 340 | 341 | The agentic architecture embodies a transformative vision: AI systems that can independently interpret context, make decisions within bounded parameters, and execute complex sequences of actions—all while maintaining human oversight and strategic direction. This represents not merely a technological advance, but a fundamentally new model for human-machine collaboration. 342 | 343 | In this evolving cognitive landscape, the most successful implementations will be those that thoughtfully balance technological potential with human capabilities, creating interfaces that enhance rather than replace human decision-making and creativity. 344 | 345 | ## Troubleshooting 346 | 347 | - **Module not loading**: Check the import path and dependencies 348 | - **API key errors**: Verify your API keys in the `.env` file 349 | - **Permission errors**: Check the allowed directories in `MCP_FILESYSTEM_DIRS` 350 | - **Connection errors**: Ensure the server is running and the port is accessible 351 | - **Agent not detected**: Verify the agent file is in the correct directory and follows the required format 352 | - **Issues with path**: Make sure that git is added to path as per the below instruciton 353 | 354 | #To add Git to the PATH on Windows, follow these steps: 355 | 356 | 1) Locate Git Installation: Determine the path where Git is installed on your system. Common paths include C:\Program Files\Git\bin\git.exe and C:\Program Files\Git\cmd for a standard Git installation, or C:\Users\\AppData\Local\GitHub\PortableGit_\bin and C:\Users\\AppData\Local\GitHub\PortableGit_\cmd if you installed Git through GitHub for Windows or GitHub Desktop. 357 | 358 | 2) Edit Environment Variables: Open the "Edit Environment Variables" app either through the Control Panel or by searching for "Edit the system environment variables" in the Start menu. Under the "System variables" section, find the "Path" variable, click "Edit...", and add the path to the Git executable and command files. Ensure there are no spaces around the semicolons separating paths. 359 | 360 | 3) Save Changes: After adding the Git paths, click "OK" to save your changes. Close and reopen any command prompt windows to apply the new PATH settings. 361 | 362 | 4) Verify Installation: Open a command prompt and run git --version to verify that Git is accessible from the command line. 363 | 364 | ## License 365 | 366 | The MCP Unified Server is licensed under the MIT License. 367 | 368 | ## Acknowledgements 369 | 370 | This project uses several open-source libraries and APIs: 371 | - MCP SDK for Claude AI assistants 372 | - NewsAPI for news access 373 | - Brave Search API for web search 374 | - World Bank API for economic data 375 | - python-pptx for PowerPoint manipulation 376 | - XlsxWriter for Excel spreadsheets 377 | -------------------------------------------------------------------------------- /__init__.py: -------------------------------------------------------------------------------- 1 | """ 2 | MCP Toolkit 3 | =========== 4 | 5 | A modular server implementation for Claude AI assistants with integrated tools. 6 | 7 | This package provides a unified interface for Claude to interact with various 8 | external systems and tools including: 9 | 10 | - File system operations 11 | - Time and timezone tools 12 | - Sequential thinking for problem-solving 13 | - Web search capabilities 14 | - Browser automation 15 | - Access to economic and news data 16 | - Microsoft Office integration (PowerPoint, Excel) 17 | - And many more integrations 18 | 19 | For more information, visit: https://github.com/getfounded/mcp-tool-kit 20 | """ 21 | 22 | __version__ = "0.1.0" 23 | -------------------------------------------------------------------------------- /app/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/getfounded/mcp-tool-kit/f3e383e0d3b9c5a28cb42b023ae367bff1edd552/app/__init__.py -------------------------------------------------------------------------------- /app/toolkit_client.py: -------------------------------------------------------------------------------- 1 | import requests 2 | import json 3 | import logging 4 | from typing import Dict, Any, Optional 5 | 6 | 7 | class MCPClient: 8 | """Custom client for interacting with MCP (Model Context Protocol) server.""" 9 | 10 | def __init__(self, server_url: str = "http://localhost:8000"): 11 | """ 12 | Initialize the MCP client. 13 | 14 | Args: 15 | server_url: URL of the MCP server 16 | """ 17 | self.server_url = server_url.rstrip('/') 18 | self.logger = logging.getLogger("MCPClient") 19 | 20 | def call_tool(self, tool_name: str, params: Dict[str, Any]) -> str: 21 | """ 22 | Call a tool on the MCP server. 23 | 24 | Args: 25 | tool_name: Name of the tool to call 26 | params: Parameters to pass to the tool 27 | 28 | Returns: 29 | Tool execution result as a string 30 | """ 31 | try: 32 | response = requests.post( 33 | f"{self.server_url}/api/tools/{tool_name}", 34 | json=params, 35 | headers={"Content-Type": "application/json"} 36 | ) 37 | 38 | response.raise_for_status() 39 | return response.text 40 | except requests.RequestException as e: 41 | error_msg = f"Error calling tool {tool_name}: {str(e)}" 42 | self.logger.error(error_msg) 43 | return json.dumps({"error": error_msg}) 44 | -------------------------------------------------------------------------------- /app/tools/README/EXCEL_README.md: -------------------------------------------------------------------------------- 1 | # XlsxWriter Tool for MCP 2 | 3 | A comprehensive tool for integrating the XlsxWriter library with MCP Unified Server, enabling Claude to create and manipulate Excel files programmatically. 4 | 5 | ## Overview 6 | 7 | The XlsxWriter Tool provides a set of functions to create Excel spreadsheets with features including: 8 | 9 | - Creating workbooks and worksheets 10 | - Writing data to cells and ranges 11 | - Applying formatting to cells 12 | - Adding charts, images, and formulas 13 | - Creating tables with headers and styling 14 | - Closing and saving workbooks 15 | 16 | This tool maintains state between function calls, allowing for complex Excel file creation across multiple requests. 17 | 18 | ## Installation 19 | 20 | 1. Add the `xlsxwriter.py` file to your `tools` directory in the MCP Unified Server 21 | 2. Install the required dependency with `pip install XlsxWriter` 22 | 3. Update your `requirements.txt` file to include `XlsxWriter>=3.1.0` 23 | 4. Update the `mcp_unified_server.py` file to import and register the XlsxWriter module 24 | 25 | ### Adding to mcp_unified_server.py 26 | 27 | Add the following code to your `mcp_unified_server.py` file: 28 | 29 | ```python 30 | # Initialize XlsxWriter tools 31 | try: 32 | from tools.xlsxwriter import get_xlsx_tools, set_external_mcp, initialize_xlsx_service 33 | 34 | # Pass our MCP instance to the xlsxwriter module 35 | set_external_mcp(mcp) 36 | 37 | # Initialize xlsxwriter tools 38 | initialize_xlsx_service() 39 | 40 | # Register xlsxwriter tools 41 | xlsx_tools = get_xlsx_tools() 42 | for tool_name, tool_func in xlsx_tools.items(): 43 | # Register each xlsxwriter tool with the main MCP instance 44 | mcp.tool(name=tool_name)(tool_func) 45 | 46 | # Add XlsxWriter dependencies to MCP dependencies 47 | mcp.dependencies.extend([ 48 | "XlsxWriter" 49 | ]) 50 | 51 | logging.info("XlsxWriter tools registered successfully.") 52 | except ImportError as e: 53 | logging.warning(f"Could not load XlsxWriter tools: {e}") 54 | ``` 55 | 56 | ## Available Tools 57 | 58 | ### xlsx_create_workbook 59 | 60 | Creates a new Excel workbook. 61 | 62 | ```python 63 | xlsx_create_workbook(filename: str) 64 | ``` 65 | 66 | Parameters: 67 | - `filename`: Path to save the Excel file 68 | 69 | ### xlsx_add_worksheet 70 | 71 | Adds a worksheet to the workbook. 72 | 73 | ```python 74 | xlsx_add_worksheet(filename: str, name: str = None) 75 | ``` 76 | 77 | Parameters: 78 | - `filename`: Path to the Excel file 79 | - `name`: (Optional) Name for the worksheet 80 | 81 | ### xlsx_write_data 82 | 83 | Writes data to a cell in a worksheet. 84 | 85 | ```python 86 | xlsx_write_data(filename: str, worksheet: str, row: int, col: int, data: Any, format: str = None) 87 | ``` 88 | 89 | Parameters: 90 | - `filename`: Path to the Excel file 91 | - `worksheet`: Name of the worksheet 92 | - `row`: Row number (0-based) 93 | - `col`: Column number (0-based) 94 | - `data`: Data to write (string, number, boolean, etc.) 95 | - `format`: (Optional) Name of a predefined format 96 | 97 | ### xlsx_write_matrix 98 | 99 | Writes a matrix of data to a worksheet. 100 | 101 | ```python 102 | xlsx_write_matrix(filename: str, worksheet: str, start_row: int, start_col: int, data: List[List[Any]], formats: List[List[str]] = None) 103 | ``` 104 | 105 | Parameters: 106 | - `filename`: Path to the Excel file 107 | - `worksheet`: Name of the worksheet 108 | - `start_row`: Starting row number (0-based) 109 | - `start_col`: Starting column number (0-based) 110 | - `data`: 2D list of data to write 111 | - `formats`: (Optional) 2D list of format names corresponding to data 112 | 113 | ### xlsx_add_format 114 | 115 | Creates a cell format. 116 | 117 | ```python 118 | xlsx_add_format(filename: str, format_name: str, properties: Dict[str, Any]) 119 | ``` 120 | 121 | Parameters: 122 | - `filename`: Path to the Excel file 123 | - `format_name`: Name to identify the format 124 | - `properties`: Dictionary of format properties (e.g., `{'bold': True, 'font_color': 'red'}`) 125 | 126 | ### xlsx_add_chart 127 | 128 | Adds a chart to a worksheet. 129 | 130 | ```python 131 | xlsx_add_chart(filename: str, worksheet: str, chart_type: str, data_range: List[Dict[str, Any]], position: Dict[str, int], options: Dict[str, Any] = None) 132 | ``` 133 | 134 | Parameters: 135 | - `filename`: Path to the Excel file 136 | - `worksheet`: Name of the worksheet 137 | - `chart_type`: Type of chart (e.g., 'column', 'line', 'pie') 138 | - `data_range`: List of data series specifications 139 | - `position`: Dictionary with 'row' and 'col' keys specifying chart position 140 | - `options`: (Optional) Additional chart options 141 | 142 | ### xlsx_add_image 143 | 144 | Adds an image to a worksheet. 145 | 146 | ```python 147 | xlsx_add_image(filename: str, worksheet: str, image_path: str, position: Dict[str, int], options: Dict[str, Any] = None) 148 | ``` 149 | 150 | Parameters: 151 | - `filename`: Path to the Excel file 152 | - `worksheet`: Name of the worksheet 153 | - `image_path`: Path to the image file 154 | - `position`: Dictionary with 'row' and 'col' keys specifying image position 155 | - `options`: (Optional) Additional image options 156 | 157 | ### xlsx_add_formula 158 | 159 | Adds a formula to a cell. 160 | 161 | ```python 162 | xlsx_add_formula(filename: str, worksheet: str, row: int, col: int, formula: str, format: str = None) 163 | ``` 164 | 165 | Parameters: 166 | - `filename`: Path to the Excel file 167 | - `worksheet`: Name of the worksheet 168 | - `row`: Row number (0-based) 169 | - `col`: Column number (0-based) 170 | - `formula`: Excel formula (e.g., '=SUM(A1:A10)') 171 | - `format`: (Optional) Name of a predefined format 172 | 173 | ### xlsx_add_table 174 | 175 | Adds a table to a worksheet. 176 | 177 | ```python 178 | xlsx_add_table(filename: str, worksheet: str, start_row: int, start_col: int, end_row: int, end_col: int, options: Dict[str, Any] = None) 179 | ``` 180 | 181 | Parameters: 182 | - `filename`: Path to the Excel file 183 | - `worksheet`: Name of the worksheet 184 | - `start_row`: Starting row number (0-based) 185 | - `start_col`: Starting column number (0-based) 186 | - `end_row`: Ending row number (0-based) 187 | - `end_col`: Ending column number (0-based) 188 | - `options`: (Optional) Table options (e.g., `{'header_row': True, 'columns': [{'header': 'Name'}]}`) 189 | 190 | ### xlsx_close_workbook 191 | 192 | Closes and saves the workbook. 193 | 194 | ```python 195 | xlsx_close_workbook(filename: str) 196 | ``` 197 | 198 | Parameters: 199 | - `filename`: Path to the Excel file 200 | 201 | ## Usage Examples 202 | 203 | ### Basic Example: Creating a Simple Spreadsheet 204 | 205 | ```python 206 | # Create a new workbook 207 | result = await mcp.call_tool("xlsx_create_workbook", {"filename": "example.xlsx"}) 208 | 209 | # Add a worksheet 210 | result = await mcp.call_tool("xlsx_add_worksheet", {"filename": "example.xlsx", "name": "Data"}) 211 | 212 | # Write headers with bold formatting 213 | result = await mcp.call_tool("xlsx_add_format", { 214 | "filename": "example.xlsx", 215 | "format_name": "header_format", 216 | "properties": {"bold": True, "bg_color": "#DDDDDD"} 217 | }) 218 | 219 | headers = ["ID", "Name", "Value"] 220 | for i, header in enumerate(headers): 221 | result = await mcp.call_tool("xlsx_write_data", { 222 | "filename": "example.xlsx", 223 | "worksheet": "Data", 224 | "row": 0, 225 | "col": i, 226 | "data": header, 227 | "format": "header_format" 228 | }) 229 | 230 | # Write data 231 | data = [ 232 | [1, "Apple", 100], 233 | [2, "Banana", 150], 234 | [3, "Cherry", 200] 235 | ] 236 | 237 | result = await mcp.call_tool("xlsx_write_matrix", { 238 | "filename": "example.xlsx", 239 | "worksheet": "Data", 240 | "start_row": 1, 241 | "start_col": 0, 242 | "data": data 243 | }) 244 | 245 | # Add a formula for the sum 246 | result = await mcp.call_tool("xlsx_add_formula", { 247 | "filename": "example.xlsx", 248 | "worksheet": "Data", 249 | "row": 4, 250 | "col": 2, 251 | "formula": "=SUM(C2:C4)" 252 | }) 253 | 254 | # Close and save the workbook 255 | result = await mcp.call_tool("xlsx_close_workbook", {"filename": "example.xlsx"}) 256 | ``` 257 | 258 | ### Advanced Example: Creating a Report with Chart 259 | 260 | ```python 261 | # Create workbook and worksheet 262 | await mcp.call_tool("xlsx_create_workbook", {"filename": "sales_report.xlsx"}) 263 | await mcp.call_tool("xlsx_add_worksheet", {"filename": "sales_report.xlsx", "name": "Sales"}) 264 | 265 | # Add formats 266 | await mcp.call_tool("xlsx_add_format", { 267 | "filename": "sales_report.xlsx", 268 | "format_name": "title", 269 | "properties": {"bold": True, "font_size": 16} 270 | }) 271 | 272 | await mcp.call_tool("xlsx_add_format", { 273 | "filename": "sales_report.xlsx", 274 | "format_name": "header", 275 | "properties": {"bold": True, "bg_color": "#D7E4BC", "border": 1} 276 | }) 277 | 278 | # Add title 279 | await mcp.call_tool("xlsx_write_data", { 280 | "filename": "sales_report.xlsx", 281 | "worksheet": "Sales", 282 | "row": 0, 283 | "col": 0, 284 | "data": "Quarterly Sales Report", 285 | "format": "title" 286 | }) 287 | 288 | # Add headers 289 | headers = ["Quarter", "North", "South", "East", "West"] 290 | for i, header in enumerate(headers): 291 | await mcp.call_tool("xlsx_write_data", { 292 | "filename": "sales_report.xlsx", 293 | "worksheet": "Sales", 294 | "row": 2, 295 | "col": i, 296 | "data": header, 297 | "format": "header" 298 | }) 299 | 300 | # Add data 301 | data = [ 302 | ["Q1", 10000, 8000, 12000, 9000], 303 | ["Q2", 12000, 9500, 14000, 8500], 304 | ["Q3", 14500, 10000, 15500, 9500], 305 | ["Q4", 16000, 12000, 17000, 10000] 306 | ] 307 | 308 | await mcp.call_tool("xlsx_write_matrix", { 309 | "filename": "sales_report.xlsx", 310 | "worksheet": "Sales", 311 | "start_row": 3, 312 | "start_col": 0, 313 | "data": data 314 | }) 315 | 316 | # Add chart 317 | chart_data = [ 318 | { 319 | "name": "North", 320 | "categories": "=Sales!$A$4:$A$7", 321 | "values": "=Sales!$B$4:$B$7" 322 | }, 323 | { 324 | "name": "South", 325 | "categories": "=Sales!$A$4:$A$7", 326 | "values": "=Sales!$C$4:$C$7" 327 | }, 328 | { 329 | "name": "East", 330 | "categories": "=Sales!$A$4:$A$7", 331 | "values": "=Sales!$D$4:$D$7" 332 | }, 333 | { 334 | "name": "West", 335 | "categories": "=Sales!$A$4:$A$7", 336 | "values": "=Sales!$E$4:$E$7" 337 | } 338 | ] 339 | 340 | chart_options = { 341 | "title": "Quarterly Sales by Region", 342 | "x_axis": {"name": "Quarter"}, 343 | "y_axis": {"name": "Sales ($)"}, 344 | "style": 10 345 | } 346 | 347 | await mcp.call_tool("xlsx_add_chart", { 348 | "filename": "sales_report.xlsx", 349 | "worksheet": "Sales", 350 | "chart_type": "column", 351 | "data_range": chart_data, 352 | "position": {"row": 8, "col": 1}, 353 | "options": chart_options 354 | }) 355 | 356 | # Add table format 357 | await mcp.call_tool("xlsx_add_table", { 358 | "filename": "sales_report.xlsx", 359 | "worksheet": "Sales", 360 | "start_row": 2, 361 | "start_col": 0, 362 | "end_row": 7, 363 | "end_col": 4, 364 | "options": { 365 | "name": "SalesTable", 366 | "style": "Table Style Medium 2", 367 | "total_row": True 368 | } 369 | }) 370 | 371 | # Close and save 372 | await mcp.call_tool("xlsx_close_workbook", {"filename": "sales_report.xlsx"}) 373 | ``` 374 | 375 | ## Important Notes 376 | 377 | 1. XlsxWriter can only **create new files**, not modify existing ones. This is a limitation of the underlying library. 378 | 379 | 2. Workbooks must be closed with `xlsx_close_workbook()` to be properly saved. Until then, they exist only in memory. 380 | 381 | 3. The tool maintains state between API calls, allowing for complex operations across multiple requests. 382 | 383 | 4. Row and column indices are 0-based (as in Python), not 1-based (as in Excel). 384 | 385 | 5. All functions return JSON strings containing the result or error information. 386 | 387 | ## Troubleshooting 388 | 389 | - **"Workbook not found" error**: Make sure you've created the workbook and are using the correct filename. 390 | 391 | - **"Worksheet not found" error**: Ensure the worksheet has been added to the workbook. 392 | 393 | - **Error during format application**: Check that the format name has been defined with `xlsx_add_format`. 394 | 395 | - **Missing data in saved file**: Ensure you've called `xlsx_close_workbook` to properly save the file. 396 | 397 | - **Installation issues**: Verify that the XlsxWriter library is installed with `pip install XlsxWriter`. 398 | 399 | ## License 400 | 401 | This tool is provided under the same license as the MCP Unified Server. 402 | 403 | # Enhanced Excel Tool Documentation 404 | 405 | The Enhanced Excel Tool extends the existing MCP Excel functionality to include comprehensive support for reading, manipulating, and analyzing Excel and CSV files using pandas. This tool bridges the gap between Excel file management and powerful data analysis capabilities. 406 | 407 | ## New Features 408 | 409 | 1. **File Reading Capabilities** 410 | - Read Excel files (XLSX, XLS) with customizable options 411 | - Read CSV files with delimiter and encoding support 412 | - List available sheets in Excel files 413 | 414 | 2. **DataFrame Management** 415 | - Store DataFrame objects in memory for multi-step operations 416 | - List, inspect, and clear DataFrames 417 | - Convert between DataFrames and Excel/CSV files 418 | 419 | 3. **Data Manipulation and Analysis** 420 | - Filter DataFrames by query or column conditions 421 | - Sort DataFrames by one or multiple columns 422 | - Group DataFrames and apply aggregation functions 423 | - Generate statistical descriptions and correlations 424 | 425 | ## Tool Reference 426 | 427 | ### Reading Files 428 | 429 | #### `xlsx_read_excel` 430 | Read an Excel file into a pandas DataFrame. 431 | 432 | ```python 433 | xlsx_read_excel( 434 | filename: str, # Path to the Excel file 435 | sheet_name: Union[str, int] = 0, # Sheet name or index 436 | output_id: str = None, # ID to store the DataFrame (default: filename) 437 | header: Union[int, List[int], None] = 0, # Row(s) to use as column names 438 | names: List[str] = None, # List of custom column names 439 | skiprows: Union[int, List[int]] = None # Row indices to skip 440 | ) 441 | ``` 442 | 443 | Example: 444 | ``` 445 | xlsx_read_excel("financial_data.xlsx", sheet_name="Q1_Results", output_id="q1_data") 446 | ``` 447 | 448 | #### `xlsx_read_csv` 449 | Read a CSV file into a pandas DataFrame. 450 | 451 | ```python 452 | xlsx_read_csv( 453 | filename: str, # Path to the CSV file 454 | output_id: str = None, # ID to store the DataFrame (default: filename) 455 | delimiter: str = ",", # Delimiter to use 456 | header: Union[int, List[int], None] = 0, # Row(s) to use as column names 457 | names: List[str] = None, # List of custom column names 458 | skiprows: Union[int, List[int]] = None, # Row indices to skip 459 | encoding: str = None # File encoding 460 | ) 461 | ``` 462 | 463 | Example: 464 | ``` 465 | xlsx_read_csv("sales_data.csv", delimiter=";", encoding="utf-8", output_id="sales") 466 | ``` 467 | 468 | #### `xlsx_get_sheet_names` 469 | Get sheet names from an Excel file. 470 | 471 | ```python 472 | xlsx_get_sheet_names( 473 | filename: str # Path to the Excel file 474 | ) 475 | ``` 476 | 477 | Example: 478 | ``` 479 | xlsx_get_sheet_names("financial_data.xlsx") 480 | ``` 481 | 482 | ### DataFrame Management 483 | 484 | #### `xlsx_dataframe_info` 485 | Get information about a DataFrame. 486 | 487 | ```python 488 | xlsx_dataframe_info( 489 | dataframe_id: str # ID of the DataFrame in memory 490 | ) 491 | ``` 492 | 493 | Example: 494 | ``` 495 | xlsx_dataframe_info("sales_data") 496 | ``` 497 | 498 | #### `xlsx_list_dataframes` 499 | List all DataFrames currently in memory. 500 | 501 | ```python 502 | xlsx_list_dataframes() 503 | ``` 504 | 505 | #### `xlsx_clear_dataframe` 506 | Remove a DataFrame from memory. 507 | 508 | ```python 509 | xlsx_clear_dataframe( 510 | dataframe_id: str # ID of the DataFrame to clear 511 | ) 512 | ``` 513 | 514 | Example: 515 | ``` 516 | xlsx_clear_dataframe("old_data") 517 | ``` 518 | 519 | #### `xlsx_get_column_values` 520 | Get values from a specific column in a DataFrame. 521 | 522 | ```python 523 | xlsx_get_column_values( 524 | dataframe_id: str, # ID of the DataFrame 525 | column: str, # Name of the column 526 | unique: bool = False, # Whether to return only unique values 527 | count: bool = False # Whether to count occurrences of each value 528 | ) 529 | ``` 530 | 531 | Example: 532 | ``` 533 | xlsx_get_column_values("customer_data", "country", unique=True, count=True) 534 | ``` 535 | 536 | ### Data Manipulation 537 | 538 | #### `xlsx_filter_dataframe` 539 | Filter a DataFrame by query or column condition. 540 | 541 | ```python 542 | xlsx_filter_dataframe( 543 | dataframe_id: str, # ID of the DataFrame to filter 544 | query: str = None, # Query string for filtering 545 | column: str = None, # Column name to filter by (alternative to query) 546 | value: Any = None, # Value to compare with 547 | operator: str = "==", # Comparison operator 548 | output_id: str = None # ID to store the filtered DataFrame 549 | ) 550 | ``` 551 | 552 | Examples: 553 | ``` 554 | xlsx_filter_dataframe("sales", query="revenue > 1000 and region == 'North'") 555 | xlsx_filter_dataframe("customers", column="age", value=30, operator=">") 556 | ``` 557 | 558 | #### `xlsx_sort_dataframe` 559 | Sort a DataFrame by columns. 560 | 561 | ```python 562 | xlsx_sort_dataframe( 563 | dataframe_id: str, # ID of the DataFrame to sort 564 | by: Union[str, List[str]], # Column name(s) to sort by 565 | ascending: Union[bool, List[bool]] = True, # Sort order 566 | output_id: str = None # ID to store the sorted DataFrame 567 | ) 568 | ``` 569 | 570 | Example: 571 | ``` 572 | xlsx_sort_dataframe("products", by=["category", "price"], ascending=[True, False]) 573 | ``` 574 | 575 | #### `xlsx_group_dataframe` 576 | Group a DataFrame and apply aggregation. 577 | 578 | ```python 579 | xlsx_group_dataframe( 580 | dataframe_id: str, # ID of the DataFrame to group 581 | by: Union[str, List[str]], # Column name(s) to group by 582 | agg_func: Union[str, Dict[str, str]] = "mean", # Aggregation function(s) 583 | output_id: str = None # ID to store the grouped DataFrame 584 | ) 585 | ``` 586 | 587 | Examples: 588 | ``` 589 | xlsx_group_dataframe("sales", by="region", agg_func="sum") 590 | xlsx_group_dataframe("orders", by=["product", "region"], 591 | agg_func={"quantity": "sum", "price": "mean"}) 592 | ``` 593 | 594 | #### `xlsx_describe_dataframe` 595 | Get statistical description of a DataFrame. 596 | 597 | ```python 598 | xlsx_describe_dataframe( 599 | dataframe_id: str, # ID of the DataFrame to describe 600 | include: Union[str, List[str]] = None, # Types of columns to include 601 | exclude: Union[str, List[str]] = None, # Types of columns to exclude 602 | percentiles: List[float] = None # List of percentiles to include 603 | ) 604 | ``` 605 | 606 | Example: 607 | ``` 608 | xlsx_describe_dataframe("measurements", include=["number"]) 609 | ``` 610 | 611 | #### `xlsx_get_correlation` 612 | Get correlation matrix for a DataFrame. 613 | 614 | ```python 615 | xlsx_get_correlation( 616 | dataframe_id: str, # ID of the DataFrame 617 | method: str = "pearson" # Correlation method 618 | ) 619 | ``` 620 | 621 | Example: 622 | ``` 623 | xlsx_get_correlation("stock_prices", method="spearman") 624 | ``` 625 | 626 | ### Exporting Data 627 | 628 | #### `xlsx_dataframe_to_excel` 629 | Export a DataFrame to an Excel file. 630 | 631 | ```python 632 | xlsx_dataframe_to_excel( 633 | dataframe_id: str, # ID of the DataFrame in memory 634 | filename: str, # Path to save the Excel file 635 | sheet_name: str = "Sheet1", # Name of the sheet 636 | index: bool = True # Whether to include the DataFrame index 637 | ) 638 | ``` 639 | 640 | Example: 641 | ``` 642 | xlsx_dataframe_to_excel("filtered_sales", "filtered_sales_report.xlsx") 643 | ``` 644 | 645 | #### `xlsx_dataframe_to_csv` 646 | Export a DataFrame to a CSV file. 647 | 648 | ```python 649 | xlsx_dataframe_to_csv( 650 | dataframe_id: str, # ID of the DataFrame in memory 651 | filename: str, # Path to save the CSV file 652 | index: bool = True, # Whether to include the DataFrame index 653 | encoding: str = "utf-8", # File encoding 654 | sep: str = "," # Delimiter to use 655 | ) 656 | ``` 657 | 658 | Example: 659 | ``` 660 | xlsx_dataframe_to_csv("quarterly_data", "quarterly_report.csv", sep=";") 661 | ``` 662 | 663 | ## Common Workflows 664 | 665 | ### Reading and Analyzing Sales Data 666 | 667 | ``` 668 | # Read the Excel file 669 | xlsx_read_excel("sales_data.xlsx", sheet_name="2023", output_id="sales_2023") 670 | 671 | # Get information about the DataFrame 672 | xlsx_dataframe_info("sales_2023") 673 | 674 | # Filter to specific region 675 | xlsx_filter_dataframe("sales_2023", column="region", value="North", output_id="north_sales") 676 | 677 | # Group by product category and calculate total sales 678 | xlsx_group_dataframe("north_sales", by="category", 679 | agg_func={"revenue": "sum", "units": "sum"}, 680 | output_id="north_by_category") 681 | 682 | # Export the grouped data 683 | xlsx_dataframe_to_excel("north_by_category", "north_region_sales_by_category.xlsx") 684 | ``` 685 | 686 | ### Reading CSV and Finding Correlations 687 | 688 | ``` 689 | # Read the CSV file 690 | xlsx_read_csv("stock_prices.csv", output_id="stocks") 691 | 692 | # Calculate correlation between stock prices 693 | xlsx_get_correlation("stocks", method="pearson") 694 | 695 | # Filter to relevant time period 696 | xlsx_filter_dataframe("stocks", query="date >= '2023-01-01' and date <= '2023-12-31'", 697 | output_id="stocks_2023") 698 | 699 | # Calculate new correlation on filtered data 700 | xlsx_get_correlation("stocks_2023") 701 | ``` 702 | 703 | ## Dependencies 704 | 705 | This tool requires the following Python packages: 706 | - xlsxwriter (required for Excel writing) 707 | - pandas (required for all functionality) 708 | - openpyxl (recommended for Excel reading) 709 | - xlrd (recommended for reading older Excel formats) 710 | 711 | ## Technical Notes 712 | 713 | - DataFrames are stored in memory with a unique ID for reference in subsequent operations 714 | - File paths should be absolute or relative to the current working directory 715 | - Large DataFrames will be kept in memory until explicitly cleared with `xlsx_clear_dataframe` 716 | -------------------------------------------------------------------------------- /app/tools/README/SHOPIFY_README.md: -------------------------------------------------------------------------------- 1 | # Shopify API Tool for MCP 2 | 3 | A comprehensive tool for integrating with the Shopify API, enabling Claude to manage products, orders, customers, inventory, and collections in Shopify stores. 4 | 5 | ## Features 6 | 7 | - **Products Management**: List, create, update, and delete products 8 | - **Orders Management**: List, create, update, and cancel orders 9 | - **Customers Management**: List, create, and update customers 10 | - **Inventory Management**: Get and adjust inventory levels 11 | - **Collections Management**: List, create, and update collections (both custom and smart) 12 | 13 | ## Installation 14 | 15 | 1. Add the `shopify_api.py` file to your `tools` directory in the MCP Unified Server 16 | 2. Add `httpx>=0.25.0` to your `requirements.txt` file 17 | 3. Update the `mcp_unified_server.py` file to import and register the Shopify module 18 | 4. Add Shopify API credentials to your `.env` file 19 | 20 | ## Configuration 21 | 22 | The Shopify API tool requires the following environment variables: 23 | 24 | ```env 25 | # Shopify API Credentials 26 | SHOPIFY_SHOP_DOMAIN=your-store.myshopify.com 27 | SHOPIFY_API_VERSION=2023-10 28 | # Either use API Key & Password 29 | SHOPIFY_API_KEY=your_api_key 30 | SHOPIFY_API_PASSWORD=your_api_password 31 | # OR use Access Token (preferred) 32 | SHOPIFY_ACCESS_TOKEN=your_access_token 33 | -------------------------------------------------------------------------------- /app/tools/README/VAPI_README.md: -------------------------------------------------------------------------------- 1 | # VAPI API Tool for MCP 2 | 3 | A comprehensive tool for integrating with the VAPI API, enabling Claude to make phone calls, read call logs, manage call recordings, and interact with ongoing calls. 4 | 5 | ## Overview 6 | 7 | VAPI is a powerful API for making and managing AI-powered voice calls. This integration allows Claude to: 8 | 9 | - Initiate outbound calls with AI assistants 10 | - List and retrieve details about calls 11 | - End, pause, and resume active calls 12 | - Access call recordings 13 | - Add human participants to calls 14 | - Send custom events to calls 15 | 16 | ## Installation 17 | 18 | 1. Add the `vapi.py` file to your `tools` directory in the MCP Unified Server 19 | 2. Add `vapi` to your `requirements.txt` file 20 | 3. Update the `mcp_unified_server.py` file to import and register the VAPI module 21 | 4. Add VAPI API credentials to your `.env` file 22 | 23 | ## Configuration 24 | 25 | The VAPI API tool requires the following environment variable: 26 | 27 | ```env 28 | # VAPI API Credentials 29 | VAPI_API_KEY=your_vapi_api_key_here 30 | ``` 31 | 32 | ## Adding to mcp_unified_server.py 33 | 34 | Add the following code to your `mcp_unified_server.py` file: 35 | 36 | ```python 37 | # Initialize VAPI tools 38 | try: 39 | from app.tools.vapi import get_vapi_tools, set_external_mcp, initialize_vapi_service 40 | 41 | # Pass our MCP instance to the VAPI module 42 | set_external_mcp(mcp) 43 | 44 | # Initialize VAPI tools 45 | if initialize_vapi_service(): 46 | # Register VAPI tools 47 | vapi_tools = get_vapi_tools() 48 | for tool_name, tool_func in vapi_tools.items(): 49 | # Register each VAPI tool with the main MCP instance 50 | tool_name_str = tool_name if isinstance(tool_name, str) else tool_name.value 51 | mcp.tool(name=tool_name_str)(tool_func) 52 | 53 | # Add VAPI dependencies to MCP dependencies 54 | mcp.dependencies.extend(["vapi"]) 55 | 56 | logging.info("VAPI tools registered successfully.") 57 | else: 58 | logging.warning("Failed to initialize VAPI tools.") 59 | except ImportError as e: 60 | logging.warning(f"Could not load VAPI tools: {e}") 61 | ``` 62 | 63 | ## Available Tools 64 | 65 | ### vapi_make_call 66 | 67 | Initiates a call to a phone number using a VAPI assistant. 68 | 69 | ```python 70 | vapi_make_call( 71 | to: str, # Phone number to call (E.164 format, e.g., +12125551234) 72 | assistant_id: str, # ID of the assistant to use for the call 73 | from_number: Optional[str] = None, # Phone number to display as caller ID 74 | assistant_options: Optional[Dict] = None, # Assistant configuration options 75 | server_url: Optional[str] = None # Server URL for call events 76 | ) 77 | ``` 78 | 79 | ### vapi_list_calls 80 | 81 | Lists calls with optional filtering. 82 | 83 | ```python 84 | vapi_list_calls( 85 | limit: int = 10, # Maximum number of calls to return 86 | before: Optional[str] = None, # Return calls created before this cursor 87 | after: Optional[str] = None, # Return calls created after this cursor 88 | status: Optional[str] = None # Filter by call status 89 | ) 90 | ``` 91 | 92 | ### vapi_get_call 93 | 94 | Gets detailed information about a specific call. 95 | 96 | ```python 97 | vapi_get_call( 98 | call_id: str # ID of the call to retrieve 99 | ) 100 | ``` 101 | 102 | ### vapi_end_call 103 | 104 | Terminates an active call. 105 | 106 | ```python 107 | vapi_end_call( 108 | call_id: str # ID of the call to end 109 | ) 110 | ``` 111 | 112 | ### vapi_get_recordings 113 | 114 | Retrieves recordings associated with a call. 115 | 116 | ```python 117 | vapi_get_recordings( 118 | call_id: str # ID of the call to get recordings for 119 | ) 120 | ``` 121 | 122 | ### vapi_add_human 123 | 124 | Adds a human participant to an ongoing call. 125 | 126 | ```python 127 | vapi_add_human( 128 | call_id: str, # ID of the call to add the human to 129 | phone_number: str = None, # Phone number of the human to add 130 | transfer: bool = False # Whether to transfer the call to the human 131 | ) 132 | ``` 133 | 134 | ### vapi_pause_call 135 | 136 | Temporarily pauses an active call. 137 | 138 | ```python 139 | vapi_pause_call( 140 | call_id: str # ID of the call to pause 141 | ) 142 | ``` 143 | 144 | ### vapi_resume_call 145 | 146 | Continues a previously paused call. 147 | 148 | ```python 149 | vapi_resume_call( 150 | call_id: str # ID of the call to resume 151 | ) 152 | ``` 153 | 154 | ### vapi_send_event 155 | 156 | Sends a custom event to a call. 157 | 158 | ```python 159 | vapi_send_event( 160 | call_id: str, # ID of the call to send the event to 161 | event_type: str, # Type of event to send 162 | data: Optional[Dict] = None # Optional data payload for the event 163 | ) 164 | ``` 165 | 166 | ## Usage Examples 167 | 168 | ### Initiating a Call 169 | 170 | ```python 171 | # Make a call to a phone number using a VAPI assistant 172 | response = await mcp.call_tool("vapi_make_call", { 173 | "to": "+12125551234", 174 | "assistant_id": "asst_123456789", 175 | "from_number": "+18005551000" # Optional caller ID 176 | }) 177 | 178 | # Parse the response to get the call ID 179 | call_info = json.loads(response) 180 | call_id = call_info.get("id") 181 | ``` 182 | 183 | ### Checking Call Status 184 | 185 | ```python 186 | # Get details about a specific call 187 | response = await mcp.call_tool("vapi_get_call", { 188 | "call_id": "call_123456789" 189 | }) 190 | 191 | # Parse the response to check call status 192 | call_info = json.loads(response) 193 | call_status = call_info.get("status") 194 | ``` 195 | 196 | ### Ending a Call 197 | 198 | ```python 199 | # End an active call 200 | response = await mcp.call_tool("vapi_end_call", { 201 | "call_id": "call_123456789" 202 | }) 203 | ``` 204 | 205 | ### Getting Call Recordings 206 | 207 | ```python 208 | # Get recordings for a completed call 209 | response = await mcp.call_tool("vapi_get_recordings", { 210 | "call_id": "call_123456789" 211 | }) 212 | 213 | # Parse the response to get recording URLs 214 | recordings = json.loads(response) 215 | for recording in recordings.get("data", []): 216 | recording_url = recording.get("url") 217 | recording_duration = recording.get("duration") 218 | ``` 219 | 220 | ### Inviting a Human to Join a Call 221 | 222 | ```python 223 | # Add a human participant to an ongoing call 224 | response = await mcp.call_tool("vapi_add_human", { 225 | "call_id": "call_123456789", 226 | "phone_number": "+13105551234" 227 | }) 228 | ``` 229 | 230 | ## Working with Claude 231 | 232 | ### Example Prompts for Claude 233 | 234 | Here are some example prompts to help Claude use the VAPI tools effectively: 235 | 236 | 1. **Making an Outbound Call**: 237 | ``` 238 | Please make a call to +1 (212) 555-1234 using the VAPI assistant "asst_123456789". Let me know when the call has been initiated. 239 | ``` 240 | 241 | 2. **Checking Call Logs**: 242 | ``` 243 | Please list the most recent 5 calls made through the VAPI system and tell me their status. 244 | ``` 245 | 246 | 3. **Managing an Ongoing Call**: 247 | ``` 248 | The current call (call_123456789) needs to be paused while I gather some information. Please pause it and let me know when I can resume it. 249 | ``` 250 | 251 | 4. **Getting Call Recordings**: 252 | ``` 253 | Can you get the recordings from my last call with John (call_123456789) and provide the URLs? 254 | ``` 255 | 256 | ## Error Handling 257 | 258 | The VAPI tools include robust error handling: 259 | 260 | - If the VAPI API key is not configured, the tools will return appropriate error messages 261 | - Invalid parameters are caught and reported in a user-friendly format 262 | - API errors from VAPI are captured and returned with context 263 | 264 | ## Dependencies 265 | 266 | This tool requires the following Python packages: 267 | - `vapi`: The official VAPI Python SDK 268 | 269 | ## Technical Notes 270 | 271 | - All phone numbers should use E.164 format (+12125551234) for best compatibility 272 | - Assistant IDs must be valid VAPI assistant IDs 273 | - Call IDs are returned from the `vapi_make_call` tool and must be used for subsequent operations 274 | - Some operations (e.g., ending calls) can only be performed on active calls 275 | - Recordings are only available for completed calls -------------------------------------------------------------------------------- /app/tools/__init__.py: -------------------------------------------------------------------------------- 1 | 2 | -------------------------------------------------------------------------------- /app/tools/brave_search.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python3 2 | import os 3 | from dataclasses import dataclass, field 4 | from datetime import datetime 5 | import logging 6 | 7 | # Ensure compatibility with mcp server 8 | from mcp.server.fastmcp import FastMCP, Context 9 | 10 | # External MCP reference for tool registration 11 | external_mcp = None 12 | 13 | 14 | def set_external_mcp(mcp): 15 | """Set the external MCP reference for tool registration""" 16 | global external_mcp 17 | external_mcp = mcp 18 | logging.info("Brave Search tools MCP reference set") 19 | 20 | 21 | @dataclass 22 | class BraveSearchService: 23 | """Service to handle Brave Search API calls""" 24 | api_key: str 25 | rate_limit_per_second: int = 1 # Updated to match subscription 26 | rate_limit_per_month: int = 15000 27 | # Initialize request_count as a field with a default factory 28 | request_count: dict = field(default_factory=lambda: { 29 | "second": 0, 30 | "month": 0, 31 | "last_reset": datetime.now().timestamp() 32 | }) 33 | 34 | def check_rate_limit(self): 35 | """Check if we've hit the rate limit""" 36 | now = datetime.now().timestamp() 37 | # Reset counter after 1 second 38 | if now - self.request_count["last_reset"] > 1: # 1 second window 39 | self.request_count["second"] = 0 40 | self.request_count["last_reset"] = now 41 | 42 | if (self.request_count["second"] >= self.rate_limit_per_second or 43 | self.request_count["month"] >= self.rate_limit_per_month): 44 | raise ValueError("Rate limit exceeded") 45 | 46 | self.request_count["second"] += 1 47 | self.request_count["month"] += 1 48 | 49 | async def perform_web_search(self, query: str, count: int = 10, offset: int = 0) -> str: 50 | """Execute a web search using Brave Search API""" 51 | import httpx 52 | 53 | self.check_rate_limit() 54 | url = "https://api.search.brave.com/res/v1/web/search" 55 | 56 | params = { 57 | "q": query, 58 | "count": min(count, 20), 59 | "offset": offset 60 | } 61 | 62 | headers = { 63 | "Accept": "application/json", 64 | "Accept-Encoding": "gzip", 65 | "X-Subscription-Token": self.api_key 66 | } 67 | 68 | async with httpx.AsyncClient() as client: 69 | response = await client.get(url, params=params, headers=headers) 70 | 71 | if not response.is_success: 72 | return f"Brave API error: {response.status_code} {response.reason_phrase}\n{response.text}" 73 | 74 | data = response.json() 75 | 76 | # Extract web results 77 | results = [] 78 | for result in data.get("web", {}).get("results", []): 79 | results.append({ 80 | "title": result.get("title", ""), 81 | "description": result.get("description", ""), 82 | "url": result.get("url", "") 83 | }) 84 | 85 | # Format results 86 | formatted_results = [] 87 | for r in results: 88 | formatted_results.append( 89 | f"Title: {r['title']}\nDescription: {r['description']}\nURL: {r['url']}" 90 | ) 91 | 92 | return "\n\n".join(formatted_results) 93 | 94 | async def perform_local_search(self, query: str, count: int = 5) -> str: 95 | """Execute a local search using Brave Search API""" 96 | import httpx 97 | 98 | self.check_rate_limit() 99 | url = "https://api.search.brave.com/res/v1/web/search" 100 | 101 | params = { 102 | "q": query, 103 | "search_lang": "en", 104 | "result_filter": "locations", 105 | "count": min(count, 20) 106 | } 107 | 108 | headers = { 109 | "Accept": "application/json", 110 | "Accept-Encoding": "gzip", 111 | "X-Subscription-Token": self.api_key 112 | } 113 | 114 | async with httpx.AsyncClient() as client: 115 | web_response = await client.get(url, params=params, headers=headers) 116 | 117 | if not web_response.is_success: 118 | return f"Brave API error: {web_response.status_code} {web_response.reason_phrase}\n{web_response.text}" 119 | 120 | web_data = web_response.json() 121 | location_ids = [] 122 | 123 | for location in web_data.get("locations", {}).get("results", []): 124 | if "id" in location: 125 | location_ids.append(location["id"]) 126 | 127 | if not location_ids: 128 | return await self.perform_web_search(query, count) 129 | 130 | # Get POI details and descriptions 131 | pois_data = await self._get_pois_data(location_ids, client, headers) 132 | descriptions_data = await self._get_descriptions_data(location_ids, client, headers) 133 | 134 | return self._format_local_results(pois_data, descriptions_data) 135 | 136 | async def _get_pois_data(self, ids, client, headers): 137 | """Get details for local places/businesses""" 138 | self.check_rate_limit() 139 | url = "https://api.search.brave.com/res/v1/local/pois" 140 | 141 | params = {} 142 | for id in ids: 143 | if id: # Skip empty IDs 144 | params.setdefault("ids", []).append(id) 145 | 146 | response = await client.get(url, params=params, headers=headers) 147 | 148 | if not response.is_success: 149 | raise ValueError( 150 | f"Brave API error: {response.status_code} {response.reason_phrase}") 151 | 152 | return response.json() 153 | 154 | async def _get_descriptions_data(self, ids, client, headers): 155 | """Get descriptions for local places/businesses""" 156 | self.check_rate_limit() 157 | url = "https://api.search.brave.com/res/v1/local/descriptions" 158 | 159 | params = {} 160 | for id in ids: 161 | if id: # Skip empty IDs 162 | params.setdefault("ids", []).append(id) 163 | 164 | response = await client.get(url, params=params, headers=headers) 165 | 166 | if not response.is_success: 167 | raise ValueError( 168 | f"Brave API error: {response.status_code} {response.reason_phrase}") 169 | 170 | return response.json() 171 | 172 | def _format_local_results(self, pois_data, desc_data): 173 | """Format local search results into a readable string""" 174 | results = [] 175 | 176 | for poi in pois_data.get("results", []): 177 | # Extract address components 178 | address_parts = [ 179 | poi.get("address", {}).get("streetAddress", ""), 180 | poi.get("address", {}).get("addressLocality", ""), 181 | poi.get("address", {}).get("addressRegion", ""), 182 | poi.get("address", {}).get("postalCode", "") 183 | ] 184 | address = ", ".join( 185 | [part for part in address_parts if part]) or "N/A" 186 | 187 | # Extract rating 188 | rating_value = poi.get("rating", {}).get("ratingValue", "N/A") 189 | rating_count = poi.get("rating", {}).get("ratingCount", 0) 190 | 191 | # Format result 192 | formatted_result = f"""Name: {poi.get('name', 'Unknown')} 193 | Address: {address} 194 | Phone: {poi.get('phone', 'N/A')} 195 | Rating: {rating_value} ({rating_count} reviews) 196 | Price Range: {poi.get('priceRange', 'N/A')} 197 | Hours: {', '.join(poi.get('openingHours', [])) or 'N/A'} 198 | Description: {desc_data.get('descriptions', {}).get(poi.get('id', ''), 'No description available')} 199 | """ 200 | results.append(formatted_result) 201 | 202 | if not results: 203 | return "No local results found" 204 | 205 | return "\n---\n".join(results) 206 | 207 | # Tool function definitions that will be registered with MCP 208 | 209 | 210 | async def brave_web_search(query: str, count: int = 10, offset: int = 0, ctx: Context = None) -> str: 211 | """Performs a web search using the Brave Search API, ideal for general queries, news, articles, and online content. 212 | 213 | Use this for broad information gathering, recent events, or when you need diverse web sources. 214 | Supports pagination, content filtering, and freshness controls. 215 | Maximum 20 results per request, with offset for pagination. 216 | """ 217 | brave_search = _get_brave_search_service() 218 | if not brave_search: 219 | return "Brave Search API key not configured. Please set the BRAVE_API_KEY environment variable." 220 | 221 | try: 222 | return await brave_search.perform_web_search(query, count, offset) 223 | except Exception as e: 224 | return f"Error: {str(e)}" 225 | 226 | 227 | async def brave_local_search(query: str, count: int = 5, ctx: Context = None) -> str: 228 | """Searches for local businesses and places using Brave's Local Search API. 229 | 230 | Best for queries related to physical locations, businesses, restaurants, services, etc. 231 | Returns detailed information including business names, addresses, ratings, phone numbers and opening hours. 232 | Use this when the query implies 'near me' or mentions specific locations. 233 | Automatically falls back to web search if no local results are found. 234 | """ 235 | brave_search = _get_brave_search_service() 236 | if not brave_search: 237 | return "Brave Search API key not configured. Please set the BRAVE_API_KEY environment variable." 238 | 239 | try: 240 | return await brave_search.perform_local_search(query, count) 241 | except Exception as e: 242 | return f"Error: {str(e)}" 243 | 244 | # Tool registration and initialization 245 | _brave_search_service = None 246 | 247 | 248 | def initialize_brave_search(api_key=None): 249 | """Initialize the Brave Search service""" 250 | global _brave_search_service 251 | 252 | if api_key is None: 253 | api_key = os.environ.get("BRAVE_API_KEY") 254 | 255 | if not api_key: 256 | logging.warning( 257 | "Brave Search API key not configured. Please set the BRAVE_API_KEY environment variable.") 258 | return None 259 | 260 | _brave_search_service = BraveSearchService(api_key=api_key) 261 | return _brave_search_service 262 | 263 | 264 | def _get_brave_search_service(): 265 | """Get or initialize the Brave Search service""" 266 | global _brave_search_service 267 | if _brave_search_service is None: 268 | _brave_search_service = initialize_brave_search() 269 | return _brave_search_service 270 | 271 | 272 | def get_brave_search_tools(): 273 | """Get a dictionary of all Brave Search tools for registration with MCP""" 274 | return { 275 | "brave_web_search": brave_web_search, 276 | "brave_local_search": brave_local_search 277 | } 278 | -------------------------------------------------------------------------------- /app/tools/filesystem.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python3 2 | import os 3 | import sys 4 | from pathlib import Path 5 | import json 6 | import shutil 7 | import difflib 8 | from typing import List, Dict, Any, Optional, Union, Tuple 9 | from dataclasses import dataclass, field 10 | import fnmatch 11 | import logging 12 | from datetime import datetime 13 | 14 | # Ensure compatibility with mcp server 15 | from mcp.server.fastmcp import FastMCP, Context 16 | from mcp.types import Tool, TextContent 17 | 18 | logging.basicConfig( 19 | level=logging.INFO, 20 | format='%(asctime)s [%(levelname)s] %(message)s', 21 | stream=sys.stderr 22 | ) 23 | 24 | # External MCP reference for tool registration 25 | external_mcp = None 26 | 27 | def set_external_mcp(mcp): 28 | """Set the external MCP reference for tool registration""" 29 | global external_mcp 30 | external_mcp = mcp 31 | logging.info("Filesystem tools MCP reference set") 32 | 33 | # Security utilities 34 | class FilesystemSecurity: 35 | """Handles security validation for file paths""" 36 | 37 | def __init__(self, allowed_directories: List[str]): 38 | # Normalize all allowed directories to absolute paths 39 | self.allowed_directories = [os.path.normpath(os.path.abspath(self._expand_home(d))) 40 | for d in allowed_directories] 41 | logging.info(f"Filesystem security initialized with allowed directories: {self.allowed_directories}") 42 | 43 | def _expand_home(self, path: str) -> str: 44 | """Expand ~ to user's home directory""" 45 | if path.startswith('~'): 46 | return os.path.expanduser(path) 47 | return path 48 | 49 | async def validate_path(self, requested_path: str) -> str: 50 | """ 51 | Validate that a path is within allowed directories 52 | Returns the absolute, normalized path if valid, otherwise raises an exception 53 | """ 54 | expanded_path = self._expand_home(requested_path) 55 | absolute_path = os.path.abspath(expanded_path) 56 | normalized_path = os.path.normpath(absolute_path) 57 | 58 | # Check if path is within allowed directories 59 | is_allowed = any(normalized_path.startswith(allowed_dir) 60 | for allowed_dir in self.allowed_directories) 61 | 62 | if not is_allowed: 63 | raise ValueError(f"Access denied - path outside allowed directories: {normalized_path} not in {self.allowed_directories}") 64 | 65 | # Handle symlinks by checking their real path 66 | try: 67 | real_path = os.path.realpath(normalized_path) 68 | normalized_real = os.path.normpath(real_path) 69 | 70 | is_real_path_allowed = any(normalized_real.startswith(allowed_dir) 71 | for allowed_dir in self.allowed_directories) 72 | 73 | if not is_real_path_allowed: 74 | raise ValueError("Access denied - symlink target outside allowed directories") 75 | 76 | return real_path 77 | except FileNotFoundError: 78 | # For new files that don't exist yet, verify parent directory 79 | parent_dir = os.path.dirname(normalized_path) 80 | 81 | try: 82 | real_parent = os.path.realpath(parent_dir) 83 | normalized_parent = os.path.normpath(real_parent) 84 | 85 | is_parent_allowed = any(normalized_parent.startswith(allowed_dir) 86 | for allowed_dir in self.allowed_directories) 87 | 88 | if not is_parent_allowed: 89 | raise ValueError("Access denied - parent directory outside allowed directories") 90 | 91 | return normalized_path 92 | except FileNotFoundError: 93 | raise ValueError(f"Parent directory does not exist: {parent_dir}") 94 | 95 | 96 | # File utility functions 97 | class FilesystemTools: 98 | """Implements file operation tools that can be exposed via MCP""" 99 | 100 | def __init__(self, security: FilesystemSecurity): 101 | self.security = security 102 | 103 | async def read_file(self, path: str) -> str: 104 | """Read a file with path validation""" 105 | valid_path = await self.security.validate_path(path) 106 | 107 | try: 108 | with open(valid_path, 'r', encoding='utf-8') as f: 109 | return f.read() 110 | except UnicodeDecodeError: 111 | # Try different encodings 112 | try: 113 | with open(valid_path, 'r', encoding='latin-1') as f: 114 | return f.read() 115 | except Exception as e: 116 | raise ValueError(f"Failed to read file with alternative encoding: {str(e)}") 117 | except Exception as e: 118 | raise ValueError(f"Failed to read file: {str(e)}") 119 | 120 | async def read_multiple_files(self, paths: List[str]) -> str: 121 | """Read multiple files and return their contents""" 122 | results = [] 123 | 124 | for file_path in paths: 125 | try: 126 | content = await self.read_file(file_path) 127 | results.append(f"{file_path}:\n{content}\n") 128 | except Exception as e: 129 | results.append(f"{file_path}: Error - {str(e)}") 130 | 131 | return "\n---\n".join(results) 132 | 133 | async def write_file(self, path: str, content: str) -> str: 134 | """Write content to a file with path validation""" 135 | valid_path = await self.security.validate_path(path) 136 | 137 | try: 138 | # Create directory if it doesn't exist 139 | os.makedirs(os.path.dirname(valid_path), exist_ok=True) 140 | 141 | with open(valid_path, 'w', encoding='utf-8') as f: 142 | f.write(content) 143 | 144 | return f"Successfully wrote to {path}" 145 | except Exception as e: 146 | raise ValueError(f"Failed to write file: {str(e)}") 147 | 148 | def _normalize_line_endings(self, text: str) -> str: 149 | """Normalize line endings to \n""" 150 | return text.replace('\r\n', '\n') 151 | 152 | def _create_unified_diff(self, original: str, modified: str, filepath: str = 'file') -> str: 153 | """Create a unified diff between two texts""" 154 | original_norm = self._normalize_line_endings(original) 155 | modified_norm = self._normalize_line_endings(modified) 156 | 157 | original_lines = original_norm.splitlines(keepends=True) 158 | modified_lines = modified_norm.splitlines(keepends=True) 159 | 160 | diff = difflib.unified_diff( 161 | original_lines, 162 | modified_lines, 163 | fromfile=f"a/{filepath}", 164 | tofile=f"b/{filepath}", 165 | lineterm='' 166 | ) 167 | 168 | return ''.join(diff) 169 | 170 | async def edit_file(self, path: str, edits: List[Dict[str, str]], dry_run: bool = False) -> str: 171 | """Apply edits to a file and return a diff of changes""" 172 | valid_path = await self.security.validate_path(path) 173 | 174 | try: 175 | content = await self.read_file(valid_path) 176 | content_norm = self._normalize_line_endings(content) 177 | 178 | # Apply edits sequentially 179 | modified_content = content_norm 180 | for edit in edits: 181 | old_text = self._normalize_line_endings(edit.get('oldText', '')) 182 | new_text = self._normalize_line_endings(edit.get('newText', '')) 183 | 184 | # If exact match exists, use it 185 | if old_text in modified_content: 186 | modified_content = modified_content.replace(old_text, new_text) 187 | continue 188 | 189 | # Try line-by-line matching with flexibility for whitespace 190 | old_lines = old_text.split('\n') 191 | content_lines = modified_content.split('\n') 192 | match_found = False 193 | 194 | for i in range(len(content_lines) - len(old_lines) + 1): 195 | potential_match = content_lines[i:i+len(old_lines)] 196 | 197 | # Compare lines with normalized whitespace 198 | is_match = all(ol.strip() == pl.strip() for ol, pl in zip(old_lines, potential_match)) 199 | 200 | if is_match: 201 | # Preserve original indentation of first line 202 | original_indent = '' 203 | match = content_lines[i].lstrip() 204 | if match and len(content_lines[i]) > len(match): 205 | original_indent = content_lines[i][:-len(match)] 206 | 207 | # Create new lines with preserved indentation 208 | new_lines = [] 209 | for j, line in enumerate(new_text.split('\n')): 210 | if j == 0: 211 | new_lines.append(original_indent + line.lstrip()) 212 | else: 213 | new_lines.append(line) 214 | 215 | # Replace lines in content 216 | content_lines[i:i+len(old_lines)] = new_lines 217 | modified_content = '\n'.join(content_lines) 218 | match_found = True 219 | break 220 | 221 | if not match_found: 222 | raise ValueError(f"Could not find exact match for edit: {old_text}") 223 | 224 | # Create unified diff 225 | diff = self._create_unified_diff(content, modified_content, path) 226 | 227 | # Format diff with appropriate number of backticks 228 | num_backticks = 3 229 | while '`' * num_backticks in diff: 230 | num_backticks += 1 231 | 232 | formatted_diff = f"{'`' * num_backticks}diff\n{diff}{'`' * num_backticks}\n\n" 233 | 234 | if not dry_run: 235 | await self.write_file(path, modified_content) 236 | 237 | return formatted_diff 238 | except Exception as e: 239 | raise ValueError(f"Failed to edit file: {str(e)}") 240 | 241 | async def create_directory(self, path: str) -> str: 242 | """Create a directory with path validation""" 243 | valid_path = await self.security.validate_path(path) 244 | 245 | try: 246 | os.makedirs(valid_path, exist_ok=True) 247 | return f"Successfully created directory {path}" 248 | except Exception as e: 249 | raise ValueError(f"Failed to create directory: {str(e)}") 250 | 251 | async def list_directory(self, path: str) -> str: 252 | """List contents of a directory with path validation""" 253 | valid_path = await self.security.validate_path(path) 254 | 255 | try: 256 | entries = os.listdir(valid_path) 257 | formatted = [] 258 | 259 | for entry in entries: 260 | entry_path = os.path.join(valid_path, entry) 261 | entry_type = "[DIR]" if os.path.isdir(entry_path) else "[FILE]" 262 | formatted.append(f"{entry_type} {entry}") 263 | 264 | return "\n".join(formatted) 265 | except Exception as e: 266 | raise ValueError(f"Failed to list directory: {str(e)}") 267 | 268 | async def directory_tree(self, path: str) -> str: 269 | """Generate a directory tree structure as JSON""" 270 | valid_path = await self.security.validate_path(path) 271 | 272 | try: 273 | def build_tree(current_path): 274 | entries = os.listdir(current_path) 275 | result = [] 276 | 277 | for entry in entries: 278 | entry_path = os.path.join(current_path, entry) 279 | entry_data = { 280 | "name": entry, 281 | "type": "directory" if os.path.isdir(entry_path) else "file" 282 | } 283 | 284 | if os.path.isdir(entry_path): 285 | entry_data["children"] = build_tree(entry_path) 286 | 287 | result.append(entry_data) 288 | 289 | return result 290 | 291 | tree_data = build_tree(valid_path) 292 | return json.dumps(tree_data, indent=2) 293 | except Exception as e: 294 | raise ValueError(f"Failed to create directory tree: {str(e)}") 295 | 296 | async def move_file(self, source: str, destination: str) -> str: 297 | """Move a file or directory with path validation""" 298 | valid_source = await self.security.validate_path(source) 299 | valid_dest = await self.security.validate_path(destination) 300 | 301 | try: 302 | # Create parent directories if they don't exist 303 | os.makedirs(os.path.dirname(valid_dest), exist_ok=True) 304 | 305 | shutil.move(valid_source, valid_dest) 306 | return f"Successfully moved {source} to {destination}" 307 | except Exception as e: 308 | raise ValueError(f"Failed to move file: {str(e)}") 309 | 310 | async def search_files(self, path: str, pattern: str, exclude_patterns: List[str] = None) -> str: 311 | """Recursively search for files matching a pattern""" 312 | if exclude_patterns is None: 313 | exclude_patterns = [] 314 | 315 | valid_root = await self.security.validate_path(path) 316 | results = [] 317 | 318 | try: 319 | for root, dirs, files in os.walk(valid_root): 320 | # Check if the directory should be excluded 321 | rel_dir = os.path.relpath(root, valid_root) 322 | if rel_dir == '.': 323 | rel_dir = '' 324 | 325 | should_skip = False 326 | for exclude in exclude_patterns: 327 | if fnmatch.fnmatch(rel_dir, exclude): 328 | should_skip = True 329 | break 330 | 331 | if should_skip: 332 | continue 333 | 334 | # Process directories 335 | for i, dir_name in enumerate(dirs): 336 | if any(fnmatch.fnmatch(dir_name, pat) for pat in exclude_patterns): 337 | dirs.pop(i) # Don't traverse this directory 338 | continue 339 | 340 | if pattern.lower() in dir_name.lower(): 341 | results.append(os.path.join(root, dir_name)) 342 | 343 | # Process files 344 | for file_name in files: 345 | if any(fnmatch.fnmatch(file_name, pat) for pat in exclude_patterns): 346 | continue 347 | 348 | if pattern.lower() in file_name.lower(): 349 | results.append(os.path.join(root, file_name)) 350 | 351 | return "\n".join(results) if results else "No matches found" 352 | except Exception as e: 353 | raise ValueError(f"Failed to search files: {str(e)}") 354 | 355 | async def get_file_info(self, path: str) -> str: 356 | """Get detailed metadata about a file or directory""" 357 | valid_path = await self.security.validate_path(path) 358 | 359 | try: 360 | stats = os.stat(valid_path) 361 | 362 | info = { 363 | "size": stats.st_size, 364 | "created": datetime.fromtimestamp(stats.st_ctime).isoformat(), 365 | "modified": datetime.fromtimestamp(stats.st_mtime).isoformat(), 366 | "accessed": datetime.fromtimestamp(stats.st_atime).isoformat(), 367 | "isDirectory": os.path.isdir(valid_path), 368 | "isFile": os.path.isfile(valid_path), 369 | "permissions": oct(stats.st_mode)[-3:], # Last 3 digits of octal representation 370 | "absolutePath": os.path.abspath(valid_path), 371 | "filename": os.path.basename(valid_path) 372 | } 373 | 374 | return "\n".join(f"{key}: {value}" for key, value in info.items()) 375 | except Exception as e: 376 | raise ValueError(f"Failed to get file info: {str(e)}") 377 | 378 | async def list_allowed_directories(self) -> str: 379 | """List all allowed directories""" 380 | return "Allowed directories:\n" + "\n".join(self.security.allowed_directories) 381 | 382 | 383 | # Tool registration functions with proper MCP integration 384 | class FileSystemTools: 385 | """Class containing tools for file system operations""" 386 | 387 | def __init__(self, allowed_dirs=None): 388 | # Default to user's home directory if no dirs specified 389 | if allowed_dirs is None: 390 | allowed_dirs = [os.path.expanduser("~")] 391 | 392 | self.security = FilesystemSecurity(allowed_dirs) 393 | self.tools = FilesystemTools(self.security) 394 | logging.info(f"Filesystem tools initialized with allowed directories: {allowed_dirs}") 395 | 396 | 397 | # Tool function definitions that will be registered with MCP 398 | async def read_file(path: str, ctx: Context = None) -> str: 399 | """Read the complete contents of a file from the file system. 400 | 401 | Handles various text encodings and provides detailed error messages 402 | if the file cannot be read. Use this tool when you need to examine 403 | the contents of a single file. Only works within allowed directories. 404 | """ 405 | try: 406 | return await _get_fs_tools().tools.read_file(path) 407 | except Exception as e: 408 | return f"Error reading file: {str(e)}" 409 | 410 | 411 | async def read_multiple_files(paths: List[str], ctx: Context = None) -> str: 412 | """Read the contents of multiple files simultaneously. 413 | 414 | This is more efficient than reading files one by one when you need to analyze 415 | or compare multiple files. Each file's content is returned with its 416 | path as a reference. Failed reads for individual files won't stop 417 | the entire operation. Only works within allowed directories. 418 | """ 419 | try: 420 | return await _get_fs_tools().tools.read_multiple_files(paths) 421 | except Exception as e: 422 | return f"Error reading multiple files: {str(e)}" 423 | 424 | 425 | async def write_file(path: str, content: str, ctx: Context = None) -> str: 426 | """Create a new file or completely overwrite an existing file with new content. 427 | 428 | Use with caution as it will overwrite existing files without warning. 429 | Handles text content with proper encoding. Only works within allowed directories. 430 | """ 431 | try: 432 | return await _get_fs_tools().tools.write_file(path, content) 433 | except Exception as e: 434 | return f"Error writing file: {str(e)}" 435 | 436 | 437 | async def edit_file(path: str, edits: List[Dict[str, str]], dry_run: bool = False, ctx: Context = None) -> str: 438 | """Make line-based edits to a text file. 439 | 440 | Each edit replaces exact line sequences with new content. Returns a git-style diff 441 | showing the changes made. Only works within allowed directories. 442 | 443 | Parameters: 444 | - path: Path to the file to edit 445 | - edits: List of edit operations, each with 'oldText' and 'newText' properties 446 | - dry_run: If True, returns diff without actually modifying the file 447 | """ 448 | try: 449 | return await _get_fs_tools().tools.edit_file(path, edits, dry_run) 450 | except Exception as e: 451 | return f"Error editing file: {str(e)}" 452 | 453 | 454 | async def create_directory(path: str, ctx: Context = None) -> str: 455 | """Create a new directory or ensure a directory exists. 456 | 457 | Can create multiple nested directories in one operation. If the directory already exists, 458 | this operation will succeed silently. Perfect for setting up directory 459 | structures for projects or ensuring required paths exist. Only works within allowed directories. 460 | """ 461 | try: 462 | return await _get_fs_tools().tools.create_directory(path) 463 | except Exception as e: 464 | return f"Error creating directory: {str(e)}" 465 | 466 | 467 | async def list_directory(path: str, ctx: Context = None) -> str: 468 | """Get a detailed listing of all files and directories in a specified path. 469 | 470 | Results clearly distinguish between files and directories with [FILE] and [DIR] 471 | prefixes. This tool is essential for understanding directory structure and 472 | finding specific files within a directory. Only works within allowed directories. 473 | """ 474 | try: 475 | return await _get_fs_tools().tools.list_directory(path) 476 | except Exception as e: 477 | return f"Error listing directory: {str(e)}" 478 | 479 | 480 | async def directory_tree(path: str, ctx: Context = None) -> str: 481 | """Get a recursive tree view of files and directories as a JSON structure. 482 | 483 | Each entry includes 'name', 'type' (file/directory), and 'children' for directories. 484 | Files have no children array, while directories always have a children array (which may be empty). 485 | The output is formatted with 2-space indentation for readability. Only works within allowed directories. 486 | """ 487 | try: 488 | return await _get_fs_tools().tools.directory_tree(path) 489 | except Exception as e: 490 | return f"Error creating directory tree: {str(e)}" 491 | 492 | 493 | async def move_file(source: str, destination: str, ctx: Context = None) -> str: 494 | """Move or rename files and directories. 495 | 496 | Can move files between directories and rename them in a single operation. 497 | If the destination exists, the operation will fail. Works across different 498 | directories and can be used for simple renaming within the same directory. 499 | Both source and destination must be within allowed directories. 500 | """ 501 | try: 502 | return await _get_fs_tools().tools.move_file(source, destination) 503 | except Exception as e: 504 | return f"Error moving file: {str(e)}" 505 | 506 | 507 | async def search_files(path: str, pattern: str, exclude_patterns: List[str] = None, ctx: Context = None) -> str: 508 | """Recursively search for files and directories matching a pattern. 509 | 510 | Searches through all subdirectories from the starting path. The search 511 | is case-insensitive and matches partial names. Returns full paths to all 512 | matching items. Great for finding files when you don't know their exact location. 513 | Only searches within allowed directories. 514 | """ 515 | if exclude_patterns is None: 516 | exclude_patterns = [] 517 | try: 518 | return await _get_fs_tools().tools.search_files(path, pattern, exclude_patterns) 519 | except Exception as e: 520 | return f"Error searching files: {str(e)}" 521 | 522 | 523 | async def get_file_info(path: str, ctx: Context = None) -> str: 524 | """Retrieve detailed metadata about a file or directory. 525 | 526 | Returns comprehensive information including size, creation time, last modified time, 527 | permissions, and type. This tool is perfect for understanding file characteristics 528 | without reading the actual content. Only works within allowed directories. 529 | """ 530 | try: 531 | return await _get_fs_tools().tools.get_file_info(path) 532 | except Exception as e: 533 | return f"Error getting file info: {str(e)}" 534 | 535 | 536 | async def list_allowed_directories(ctx: Context = None) -> str: 537 | """Returns the list of directories that this server is allowed to access. 538 | 539 | Use this to understand which directories are available before trying to access files. 540 | """ 541 | try: 542 | return await _get_fs_tools().tools.list_allowed_directories() 543 | except Exception as e: 544 | return f"Error listing allowed directories: {str(e)}" 545 | 546 | 547 | # Tool registration and initialization 548 | _fs_tools_instance = None 549 | 550 | def initialize_fs_tools(allowed_dirs=None): 551 | """Initialize the filesystem tools with specified allowed directories""" 552 | global _fs_tools_instance 553 | _fs_tools_instance = FileSystemTools(allowed_dirs) 554 | return _fs_tools_instance 555 | 556 | def _get_fs_tools(): 557 | """Get or initialize the filesystem tools""" 558 | global _fs_tools_instance 559 | if _fs_tools_instance is None: 560 | _fs_tools_instance = initialize_fs_tools() 561 | return _fs_tools_instance 562 | 563 | def get_filesystem_tools(allowed_dirs=None): 564 | """Get a dictionary of all filesystem tools for registration with MCP""" 565 | # Initialize with allowed dirs if specified 566 | if allowed_dirs: 567 | initialize_fs_tools(allowed_dirs) 568 | 569 | return { 570 | "read_file": read_file, 571 | "read_multiple_files": read_multiple_files, 572 | "write_file": write_file, 573 | "edit_file": edit_file, 574 | "create_directory": create_directory, 575 | "list_directory": list_directory, 576 | "directory_tree": directory_tree, 577 | "move_file": move_file, 578 | "search_files": search_files, 579 | "get_file_info": get_file_info, 580 | "list_allowed_directories": list_allowed_directories 581 | } 582 | 583 | # If this file is run directly, print the list of tools 584 | if __name__ == "__main__": 585 | if len(sys.argv) < 2: 586 | print("Usage: python filesystem.py [additional-directories...]") 587 | sys.exit(1) 588 | 589 | allowed_dirs = [os.path.abspath(d) for d in sys.argv[1:]] 590 | initialize_fs_tools(allowed_dirs) 591 | 592 | print(f"Filesystem tools initialized with allowed directories: {allowed_dirs}") 593 | print("Available tools:") 594 | for tool_name in get_filesystem_tools().keys(): 595 | print(f"- {tool_name}") 596 | -------------------------------------------------------------------------------- /app/tools/fred.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python3 2 | import os 3 | import logging 4 | import json 5 | import pandas as pd 6 | from typing import Dict, List, Any, Optional, Union 7 | 8 | # Ensure compatibility with mcp server 9 | from mcp.server.fastmcp import FastMCP, Context 10 | 11 | # External MCP reference for tool registration 12 | external_mcp = None 13 | 14 | 15 | def set_external_mcp(mcp): 16 | """Set the external MCP reference for tool registration""" 17 | global external_mcp 18 | external_mcp = mcp 19 | logging.info("FRED API tools MCP reference set") 20 | 21 | 22 | class FREDAPIService: 23 | """Service to handle FRED API operations""" 24 | 25 | def __init__(self, api_key): 26 | self.api_key = api_key 27 | try: 28 | from fredapi import Fred 29 | self.client = Fred(api_key=api_key) 30 | logging.info("FRED API client initialized successfully") 31 | except ImportError: 32 | logging.error( 33 | "fredapi module not installed. Please install it with 'pip install fredapi'") 34 | raise ImportError("fredapi module is required") 35 | except Exception as e: 36 | logging.error(f"Failed to initialize FRED API client: {str(e)}") 37 | raise 38 | 39 | def get_series(self, series_id, **kwargs): 40 | """Get data for a FRED series""" 41 | try: 42 | data = self.client.get_series(series_id, **kwargs) 43 | return self._format_series_data(data, series_id) 44 | except Exception as e: 45 | return {"error": str(e)} 46 | 47 | def search(self, search_text, **kwargs): 48 | """Search for FRED series""" 49 | try: 50 | data = self.client.search(search_text, **kwargs) 51 | return self._format_search_results(data) 52 | except Exception as e: 53 | return {"error": str(e)} 54 | 55 | def get_series_info(self, series_id): 56 | """Get metadata about a FRED series""" 57 | try: 58 | info = self.client.get_series_info(series_id) 59 | return self._format_series_info(info) 60 | except Exception as e: 61 | return {"error": str(e)} 62 | 63 | def get_release(self, release_id): 64 | """Get information about a FRED release""" 65 | try: 66 | release = self.client.get_release(release_id) 67 | return self._format_release(release) 68 | except Exception as e: 69 | return {"error": str(e)} 70 | 71 | def get_category(self, category_id=0): 72 | """Get information about a FRED category""" 73 | try: 74 | category = self.client.get_category(category_id) 75 | return self._format_category(category) 76 | except Exception as e: 77 | return {"error": str(e)} 78 | 79 | def _format_series_data(self, data, series_id): 80 | """Format pandas Series data into a dict for JSON serialization""" 81 | if isinstance(data, pd.Series): 82 | # Convert the pandas Series to a list of date/value pairs 83 | # First reset the index to make the dates a column 84 | df = data.reset_index() 85 | # Convert to list of dicts 86 | data_list = df.to_dict(orient='records') 87 | # Convert dates to strings 88 | for item in data_list: 89 | if 'index' in item and hasattr(item['index'], 'strftime'): 90 | item['date'] = item['index'].strftime('%Y-%m-%d') 91 | del item['index'] 92 | elif 'DATE' in item and hasattr(item['DATE'], 'strftime'): 93 | item['date'] = item['DATE'].strftime('%Y-%m-%d') 94 | del item['DATE'] 95 | 96 | # Get series info for the title 97 | try: 98 | series_info = self.client.get_series_info(series_id) 99 | title = series_info.get('title', f'Series {series_id}') 100 | except: 101 | title = f'Series {series_id}' 102 | 103 | return { 104 | "series_id": series_id, 105 | "title": title, 106 | "observation_start": data.index.min().strftime('%Y-%m-%d') if not data.empty else None, 107 | "observation_end": data.index.max().strftime('%Y-%m-%d') if not data.empty else None, 108 | "data": data_list, 109 | "count": len(data_list) 110 | } 111 | return {"error": "Unexpected data format returned from FRED API"} 112 | 113 | def _format_search_results(self, data): 114 | """Format search results from DataFrame to dict""" 115 | if isinstance(data, pd.DataFrame): 116 | results = data.to_dict(orient='records') 117 | return { 118 | "results": results, 119 | "count": len(results) 120 | } 121 | return {"error": "Unexpected data format returned from FRED API"} 122 | 123 | def _format_series_info(self, info): 124 | """Format series info for JSON serialization""" 125 | if isinstance(info, dict): 126 | return info 127 | return {"error": "Unexpected data format returned from FRED API"} 128 | 129 | def _format_release(self, release): 130 | """Format release info for JSON serialization""" 131 | if isinstance(release, dict): 132 | return release 133 | return {"error": "Unexpected data format returned from FRED API"} 134 | 135 | def _format_category(self, category): 136 | """Format category info for JSON serialization""" 137 | if isinstance(category, dict): 138 | return category 139 | return {"error": "Unexpected data format returned from FRED API"} 140 | 141 | 142 | # Tool function definitions that will be registered with MCP 143 | 144 | async def fred_get_series( 145 | series_id: str, 146 | observation_start: str = None, 147 | observation_end: str = None, 148 | frequency: str = None, 149 | units: str = None, 150 | ctx: Context = None 151 | ) -> str: 152 | """Get data for a FRED series. 153 | 154 | Retrieves time series data for a specific economic indicator. 155 | 156 | Parameters: 157 | - series_id: The FRED series ID (e.g., 'GDP', 'UNRATE', 'CPIAUCSL') 158 | - observation_start: Start date in YYYY-MM-DD format (optional) 159 | - observation_end: End date in YYYY-MM-DD format (optional) 160 | - frequency: Data frequency ('d', 'w', 'm', 'q', 'sa', 'a') (optional) 161 | - units: Units transformation ('lin', 'chg', 'ch1', 'pch', 'pc1', 'pca', 'cch', 'cca', 'log') (optional) 162 | """ 163 | fred_api = _get_fred_api_service() 164 | if not fred_api: 165 | return "FRED API key not configured. Please set the FRED_API_KEY environment variable." 166 | 167 | # Build params dict, excluding None values 168 | params = {} 169 | if observation_start: 170 | params['observation_start'] = observation_start 171 | if observation_end: 172 | params['observation_end'] = observation_end 173 | if frequency: 174 | params['frequency'] = frequency 175 | if units: 176 | params['units'] = units 177 | 178 | # Get series data 179 | response = fred_api.get_series(series_id, **params) 180 | 181 | if "error" in response: 182 | return f"Error: {response['error']}" 183 | 184 | return json.dumps(response, indent=2) 185 | 186 | 187 | async def fred_search( 188 | search_text: str, 189 | limit: int = 10, 190 | order_by: str = 'search_rank', 191 | sort_order: str = 'desc', 192 | ctx: Context = None 193 | ) -> str: 194 | """Search for FRED series. 195 | 196 | Searches for economic data series by keywords/text. 197 | 198 | Parameters: 199 | - search_text: The words to match against economic data series 200 | - limit: Maximum number of results to return (default: 10) 201 | - order_by: Order results by values of the specified attribute (default: 'search_rank') 202 | - sort_order: Sort results in ascending or descending order ('asc' or 'desc', default: 'desc') 203 | """ 204 | fred_api = _get_fred_api_service() 205 | if not fred_api: 206 | return "FRED API key not configured. Please set the FRED_API_KEY environment variable." 207 | 208 | # Build params dict 209 | params = { 210 | 'limit': limit, 211 | 'order_by': order_by, 212 | 'sort_order': sort_order 213 | } 214 | 215 | # Get search results 216 | response = fred_api.search(search_text, **params) 217 | 218 | if "error" in response: 219 | return f"Error: {response['error']}" 220 | 221 | # Format the response 222 | result_count = response.get("count", 0) 223 | results = response.get("results", []) 224 | 225 | formatted_results = [] 226 | for result in results: 227 | formatted_results.append(f"""ID: {result.get('id', 'N/A')} 228 | Title: {result.get('title', 'N/A')} 229 | Units: {result.get('units', 'N/A')} 230 | Frequency: {result.get('frequency', 'N/A')} 231 | Seasonal Adjustment: {result.get('seasonal_adjustment', 'N/A')} 232 | Last Updated: {result.get('last_updated', 'N/A')} 233 | """) 234 | 235 | if not formatted_results: 236 | return "No series found matching your search criteria." 237 | 238 | return f"Found {result_count} series. Showing top {len(formatted_results)} results:\n\n" + "\n---\n".join(formatted_results) 239 | 240 | 241 | async def fred_get_series_info( 242 | series_id: str, 243 | ctx: Context = None 244 | ) -> str: 245 | """Get metadata about a FRED series. 246 | 247 | Retrieves detailed information about a specific economic data series. 248 | 249 | Parameters: 250 | - series_id: The FRED series ID (e.g., 'GDP', 'UNRATE', 'CPIAUCSL') 251 | """ 252 | fred_api = _get_fred_api_service() 253 | if not fred_api: 254 | return "FRED API key not configured. Please set the FRED_API_KEY environment variable." 255 | 256 | # Get series info 257 | response = fred_api.get_series_info(series_id) 258 | 259 | if "error" in response: 260 | return f"Error: {response['error']}" 261 | 262 | # Format the response 263 | info = {} 264 | for key, value in response.items(): 265 | if value is not None: 266 | info[key] = value 267 | 268 | return json.dumps(info, indent=2) 269 | 270 | 271 | async def fred_get_category( 272 | category_id: int = 0, 273 | ctx: Context = None 274 | ) -> str: 275 | """Get information about a FRED category. 276 | 277 | Retrieves details about a category of economic data series. 278 | 279 | Parameters: 280 | - category_id: The FRED category ID (default: 0, which is the root category) 281 | """ 282 | fred_api = _get_fred_api_service() 283 | if not fred_api: 284 | return "FRED API key not configured. Please set the FRED_API_KEY environment variable." 285 | 286 | # Get category 287 | response = fred_api.get_category(category_id) 288 | 289 | if "error" in response: 290 | return f"Error: {response['error']}" 291 | 292 | # Format the response 293 | return json.dumps(response, indent=2) 294 | 295 | 296 | # Tool registration and initialization 297 | _fred_api_service = None 298 | 299 | 300 | def initialize_fred_api_service(api_key=None): 301 | """Initialize the FRED API service""" 302 | global _fred_api_service 303 | 304 | if api_key is None: 305 | api_key = os.environ.get("FRED_API_KEY") 306 | 307 | if not api_key: 308 | logging.warning( 309 | "FRED API key not configured. Please set the FRED_API_KEY environment variable.") 310 | return None 311 | 312 | try: 313 | _fred_api_service = FREDAPIService(api_key=api_key) 314 | return _fred_api_service 315 | except ImportError: 316 | logging.error( 317 | "fredapi module is required. Install with 'pip install fredapi'") 318 | return None 319 | except Exception as e: 320 | logging.error(f"Failed to initialize FRED API service: {str(e)}") 321 | return None 322 | 323 | 324 | def _get_fred_api_service(): 325 | """Get or initialize the FRED API service""" 326 | global _fred_api_service 327 | if _fred_api_service is None: 328 | _fred_api_service = initialize_fred_api_service() 329 | return _fred_api_service 330 | 331 | 332 | def get_fred_api_tools(): 333 | """Get a dictionary of all FRED API tools for registration with MCP""" 334 | return { 335 | "fred_get_series": fred_get_series, 336 | "fred_search": fred_search, 337 | "fred_get_series_info": fred_get_series_info, 338 | "fred_get_category": fred_get_category 339 | } 340 | 341 | 342 | # This function will be called by the unified server to initialize the module 343 | def initialize(mcp=None): 344 | """Initialize the FRED API module with MCP reference and API key""" 345 | if mcp: 346 | set_external_mcp(mcp) 347 | 348 | # Initialize the service 349 | service = initialize_fred_api_service() 350 | if service: 351 | logging.info("FRED API service initialized successfully") 352 | else: 353 | logging.warning("Failed to initialize FRED API service") 354 | 355 | return service is not None 356 | 357 | 358 | # When running standalone for testing 359 | if __name__ == "__main__": 360 | logging.basicConfig(level=logging.INFO) 361 | 362 | # Create a local MCP instance for testing 363 | local_mcp = FastMCP( 364 | "FRED API Tools", 365 | dependencies=["fredapi", "pandas"] 366 | ) 367 | 368 | # Register tools with the local MCP 369 | fred_tools = get_fred_api_tools() 370 | for tool_name, tool_func in fred_tools.items(): 371 | local_mcp.tool(name=tool_name)(tool_func) 372 | 373 | print("FRED API tools registered with local MCP instance") 374 | print("Available tools:") 375 | for tool_name in fred_tools.keys(): 376 | print(f"- {tool_name}") 377 | -------------------------------------------------------------------------------- /app/tools/news_api.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python3 2 | import os 3 | import logging 4 | from newsapi import NewsApiClient 5 | 6 | # Ensure compatibility with mcp server 7 | from mcp.server.fastmcp import FastMCP, Context 8 | 9 | # External MCP reference for tool registration 10 | external_mcp = None 11 | 12 | 13 | def set_external_mcp(mcp): 14 | """Set the external MCP reference for tool registration""" 15 | global external_mcp 16 | external_mcp = mcp 17 | logging.info("News API tools MCP reference set") 18 | 19 | 20 | class NewsAPIService: 21 | """Service to handle NewsAPI operations""" 22 | 23 | def __init__(self, api_key): 24 | self.api_key = api_key 25 | self.client = NewsApiClient(api_key=api_key) 26 | 27 | def get_top_headlines(self, **kwargs): 28 | """Get top headlines""" 29 | try: 30 | return self.client.get_top_headlines(**kwargs) 31 | except Exception as e: 32 | return {"error": str(e)} 33 | 34 | def get_everything(self, **kwargs): 35 | """Search for news articles""" 36 | try: 37 | return self.client.get_everything(**kwargs) 38 | except Exception as e: 39 | return {"error": str(e)} 40 | 41 | def get_sources(self, **kwargs): 42 | """Get news sources""" 43 | try: 44 | return self.client.get_sources(**kwargs) 45 | except Exception as e: 46 | return {"error": str(e)} 47 | 48 | def format_articles(self, articles): 49 | """Format articles into a readable string""" 50 | if not articles or len(articles) == 0: 51 | return "No articles found." 52 | 53 | formatted = [] 54 | for article in articles: 55 | source = article.get("source", {}).get("name", "Unknown Source") 56 | title = article.get("title", "No Title") 57 | description = article.get("description", "No Description") 58 | url = article.get("url", "") 59 | published_at = article.get("publishedAt", "") 60 | 61 | formatted.append(f"""Source: {source} 62 | Title: {title} 63 | Published: {published_at} 64 | Description: {description} 65 | URL: {url} 66 | """) 67 | 68 | return "\n---\n".join(formatted) 69 | 70 | # Tool function definitions that will be registered with MCP 71 | 72 | 73 | def news_top_headlines( 74 | country: str = None, 75 | category: str = None, 76 | sources: str = None, 77 | q: str = None, 78 | page_size: int = 5, 79 | page: int = 1, 80 | ctx: Context = None 81 | ) -> str: 82 | """Get top headlines from NewsAPI. 83 | 84 | Returns the latest headlines from sources, countries, or categories. 85 | 86 | Parameters: 87 | - country: The 2-letter ISO 3166-1 code of the country (e.g., 'us', 'gb') 88 | - category: The category to get headlines for (e.g., 'business', 'technology') 89 | - sources: Comma-separated string of source IDs 90 | - q: Keywords or phrases to search for 91 | - page_size: Number of results per page (max 100) 92 | - page: Page number to fetch 93 | 94 | Note: 'sources' cannot be mixed with 'country' or 'category' parameters. 95 | """ 96 | news_api = _get_news_api_service() 97 | if not news_api: 98 | return "NewsAPI key not configured. Please set the NEWS_API_KEY environment variable." 99 | 100 | # Build params dict, excluding None values 101 | params = {} 102 | if country: 103 | params['country'] = country 104 | if category: 105 | params['category'] = category 106 | if sources: 107 | params['sources'] = sources 108 | if q: 109 | params['q'] = q 110 | if page_size: 111 | params['page_size'] = min(page_size, 100) 112 | if page: 113 | params['page'] = page 114 | 115 | # Get headlines 116 | response = news_api.get_top_headlines(**params) 117 | 118 | if "error" in response: 119 | return f"Error: {response['error']}" 120 | 121 | # Format articles 122 | articles = response.get("articles", []) 123 | total_results = response.get("totalResults", 0) 124 | 125 | formatted = news_api.format_articles(articles) 126 | return f"Found {total_results} articles. Showing {len(articles)} results.\n\n{formatted}" 127 | 128 | 129 | def news_search( 130 | q: str, 131 | sources: str = None, 132 | domains: str = None, 133 | from_param: str = None, 134 | to: str = None, 135 | language: str = "en", 136 | sort_by: str = "publishedAt", 137 | page_size: int = 5, 138 | page: int = 1, 139 | ctx: Context = None 140 | ) -> str: 141 | """Search for news articles using NewsAPI. 142 | 143 | Search through millions of articles from over 80,000 large and small news sources and blogs. 144 | 145 | Parameters: 146 | - q: Keywords or phrases to search for in the article title and body 147 | - sources: Comma-separated string of source IDs 148 | - domains: Comma-separated string of domains to restrict the search to 149 | - from_param: A date in ISO 8601 format (e.g., '2023-12-01') to get articles from 150 | - to: A date in ISO 8601 format (e.g., '2023-12-31') to get articles until 151 | - language: The 2-letter ISO-639-1 code of the language (default: 'en') 152 | - sort_by: The order to sort articles ('relevancy', 'popularity', 'publishedAt') 153 | - page_size: Number of results per page (max 100) 154 | - page: Page number to fetch 155 | """ 156 | news_api = _get_news_api_service() 157 | if not news_api: 158 | return "NewsAPI key not configured. Please set the NEWS_API_KEY environment variable." 159 | 160 | # Build params dict, excluding None values 161 | params = {'q': q} 162 | if sources: 163 | params['sources'] = sources 164 | if domains: 165 | params['domains'] = domains 166 | if from_param: 167 | params['from_param'] = from_param 168 | if to: 169 | params['to'] = to 170 | if language: 171 | params['language'] = language 172 | if sort_by: 173 | params['sort_by'] = sort_by 174 | if page_size: 175 | params['page_size'] = min(page_size, 100) 176 | if page: 177 | params['page'] = page 178 | 179 | # Get articles 180 | response = news_api.get_everything(**params) 181 | 182 | if "error" in response: 183 | return f"Error: {response['error']}" 184 | 185 | # Format articles 186 | articles = response.get("articles", []) 187 | total_results = response.get("totalResults", 0) 188 | 189 | formatted = news_api.format_articles(articles) 190 | return f"Found {total_results} articles. Showing {len(articles)} results.\n\n{formatted}" 191 | 192 | 193 | def news_sources( 194 | category: str = None, 195 | language: str = None, 196 | country: str = None, 197 | ctx: Context = None 198 | ) -> str: 199 | """Get available news sources from NewsAPI. 200 | 201 | Returns the subset of news publishers that are available through NewsAPI. 202 | 203 | Parameters: 204 | - category: Find sources that display news of this category (e.g., 'business', 'technology') 205 | - language: Find sources that display news in a specific language (e.g., 'en', 'fr') 206 | - country: Find sources that display news in a specific country (e.g., 'us', 'gb') 207 | """ 208 | news_api = _get_news_api_service() 209 | if not news_api: 210 | return "NewsAPI key not configured. Please set the NEWS_API_KEY environment variable." 211 | 212 | # Build params dict, excluding None values 213 | params = {} 214 | if category: 215 | params['category'] = category 216 | if language: 217 | params['language'] = language 218 | if country: 219 | params['country'] = country 220 | 221 | # Get sources 222 | response = news_api.get_sources(**params) 223 | 224 | if "error" in response: 225 | return f"Error: {response['error']}" 226 | 227 | # Format sources 228 | sources = response.get("sources", []) 229 | 230 | if not sources: 231 | return "No sources found matching the criteria." 232 | 233 | formatted = [] 234 | for source in sources: 235 | formatted.append(f"""ID: {source.get('id', 'No ID')} 236 | Name: {source.get('name', 'No Name')} 237 | Description: {source.get('description', 'No Description')} 238 | Category: {source.get('category', 'None')} 239 | Language: {source.get('language', 'None')} 240 | Country: {source.get('country', 'None')} 241 | URL: {source.get('url', 'No URL')} 242 | """) 243 | 244 | return f"Found {len(sources)} sources:\n\n" + "\n---\n".join(formatted) 245 | 246 | 247 | # Tool registration and initialization 248 | _news_api_service = None 249 | 250 | 251 | def initialize_news_api_service(api_key=None): 252 | """Initialize the NewsAPI service""" 253 | global _news_api_service 254 | 255 | if api_key is None: 256 | api_key = os.environ.get("NEWS_API_KEY") 257 | 258 | if not api_key: 259 | logging.warning( 260 | "NewsAPI key not configured. Please set the NEWS_API_KEY environment variable.") 261 | return None 262 | 263 | _news_api_service = NewsAPIService(api_key=api_key) 264 | return _news_api_service 265 | 266 | 267 | def _get_news_api_service(): 268 | """Get or initialize the NewsAPI service""" 269 | global _news_api_service 270 | if _news_api_service is None: 271 | _news_api_service = initialize_news_api_service() 272 | return _news_api_service 273 | 274 | 275 | def get_news_api_tools(): 276 | """Get a dictionary of all NewsAPI tools for registration with MCP""" 277 | return { 278 | "news_top_headlines": news_top_headlines, 279 | "news_search": news_search, 280 | "news_sources": news_sources 281 | } 282 | -------------------------------------------------------------------------------- /app/tools/sequential_thinking.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python3 2 | import json 3 | from typing import List, Dict, Optional 4 | from pydantic import BaseModel 5 | import logging 6 | 7 | # Ensure compatibility with mcp server 8 | from mcp.server.fastmcp import FastMCP, Context 9 | 10 | # External MCP reference for tool registration 11 | external_mcp = None 12 | 13 | 14 | def set_external_mcp(mcp): 15 | """Set the external MCP reference for tool registration""" 16 | global external_mcp 17 | external_mcp = mcp 18 | logging.info("Sequential Thinking tools MCP reference set") 19 | 20 | 21 | class ThoughtData(BaseModel): 22 | thought: str 23 | thoughtNumber: int 24 | totalThoughts: int 25 | nextThoughtNeeded: bool 26 | isRevision: Optional[bool] = None 27 | revisesThought: Optional[int] = None 28 | branchFromThought: Optional[int] = None 29 | branchId: Optional[str] = None 30 | needsMoreThoughts: Optional[bool] = None 31 | 32 | 33 | class SequentialThinkingService: 34 | """Service to handle sequential thinking operations""" 35 | 36 | def __init__(self): 37 | self.thought_history = [] 38 | self.branches = {} 39 | 40 | def format_thought(self, thought_data: ThoughtData) -> str: 41 | """Format a thought with nice visual indicators""" 42 | t = thought_data 43 | 44 | if t.isRevision: 45 | prefix = "🔄 Revision" 46 | context = f" (revising thought {t.revisesThought})" 47 | elif t.branchFromThought: 48 | prefix = "🌿 Branch" 49 | context = f" (from thought {t.branchFromThought}, ID: {t.branchId})" 50 | else: 51 | prefix = "💭 Thought" 52 | context = "" 53 | 54 | header = f"{prefix} {t.thoughtNumber}/{t.totalThoughts}{context}" 55 | border = "─" * max(len(header), len(t.thought) + 4) 56 | 57 | return f""" 58 | ┌{border}┐ 59 | │ {header} │ 60 | ├{border}┤ 61 | │ {t.thought.ljust(len(border) - 2)} │ 62 | └{border}┘""" 63 | 64 | async def process_thought(self, 65 | thought: str, 66 | thoughtNumber: int, 67 | totalThoughts: int, 68 | nextThoughtNeeded: bool, 69 | isRevision: Optional[bool] = None, 70 | revisesThought: Optional[int] = None, 71 | branchFromThought: Optional[int] = None, 72 | branchId: Optional[str] = None, 73 | needsMoreThoughts: Optional[bool] = None, 74 | ctx: Context = None 75 | ) -> str: 76 | """Process a thought""" 77 | try: 78 | # Create thought data 79 | thought_data = ThoughtData( 80 | thought=thought, 81 | thoughtNumber=thoughtNumber, 82 | totalThoughts=max(thoughtNumber, totalThoughts), 83 | nextThoughtNeeded=nextThoughtNeeded, 84 | isRevision=isRevision, 85 | revisesThought=revisesThought, 86 | branchFromThought=branchFromThought, 87 | branchId=branchId, 88 | needsMoreThoughts=needsMoreThoughts 89 | ) 90 | 91 | # Add to history 92 | self.thought_history.append(thought_data) 93 | 94 | # Handle branches 95 | if thought_data.branchFromThought and thought_data.branchId: 96 | if thought_data.branchId not in self.branches: 97 | self.branches[thought_data.branchId] = [] 98 | self.branches[thought_data.branchId].append(thought_data) 99 | 100 | # Print pretty formatted thought to stderr (useful for debugging) 101 | if ctx: 102 | ctx.info(self.format_thought(thought_data)) 103 | 104 | # Return result 105 | return json.dumps({ 106 | "thoughtNumber": thought_data.thoughtNumber, 107 | "totalThoughts": thought_data.totalThoughts, 108 | "nextThoughtNeeded": thought_data.nextThoughtNeeded, 109 | "branches": list(self.branches.keys()), 110 | "thoughtHistoryLength": len(self.thought_history) 111 | }, indent=2) 112 | 113 | except Exception as e: 114 | return json.dumps({ 115 | "error": str(e), 116 | "status": "failed" 117 | }, indent=2) 118 | 119 | # Tool function definitions that will be registered with MCP 120 | 121 | 122 | async def sequential_thinking( 123 | thought: str, 124 | thoughtNumber: int, 125 | totalThoughts: int, 126 | nextThoughtNeeded: bool, 127 | isRevision: Optional[bool] = None, 128 | revisesThought: Optional[int] = None, 129 | branchFromThought: Optional[int] = None, 130 | branchId: Optional[str] = None, 131 | needsMoreThoughts: Optional[bool] = None, 132 | ctx: Context = None 133 | ) -> str: 134 | """A detailed tool for dynamic and reflective problem-solving through thoughts. 135 | 136 | This tool helps analyze problems through a flexible thinking process that can adapt and evolve. 137 | Each thought can build on, question, or revise previous insights as understanding deepens. 138 | 139 | When to use this tool: 140 | - Breaking down complex problems into steps 141 | - Planning and design with room for revision 142 | - Analysis that might need course correction 143 | - Problems where the full scope might not be clear initially 144 | """ 145 | try: 146 | return await _get_thinking_service().process_thought( 147 | thought, 148 | thoughtNumber, 149 | totalThoughts, 150 | nextThoughtNeeded, 151 | isRevision, 152 | revisesThought, 153 | branchFromThought, 154 | branchId, 155 | needsMoreThoughts, 156 | ctx 157 | ) 158 | except Exception as e: 159 | return json.dumps({ 160 | "error": str(e), 161 | "status": "failed" 162 | }, indent=2) 163 | 164 | # Tool registration and initialization 165 | _thinking_service_instance = None 166 | 167 | 168 | def initialize_thinking_service(): 169 | """Initialize the sequential thinking service""" 170 | global _thinking_service_instance 171 | _thinking_service_instance = SequentialThinkingService() 172 | return _thinking_service_instance 173 | 174 | 175 | def _get_thinking_service(): 176 | """Get or initialize the sequential thinking service""" 177 | global _thinking_service_instance 178 | if _thinking_service_instance is None: 179 | _thinking_service_instance = initialize_thinking_service() 180 | return _thinking_service_instance 181 | 182 | 183 | def get_sequential_thinking_tools(): 184 | """Get a dictionary of all sequential thinking tools for registration with MCP""" 185 | return { 186 | "sequentialthinking": sequential_thinking 187 | } 188 | -------------------------------------------------------------------------------- /app/tools/shopify.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python3 2 | import os 3 | import json 4 | import logging 5 | import time 6 | import asyncio 7 | from enum import Enum 8 | from typing import List, Dict, Optional, Any, Union 9 | from urllib.parse import urljoin 10 | 11 | # Ensure compatibility with mcp server 12 | from mcp.server.fastmcp import FastMCP, Context 13 | 14 | # External MCP reference for tool registration 15 | external_mcp = None 16 | 17 | 18 | def set_external_mcp(mcp): 19 | """Set the external MCP reference for tool registration""" 20 | global external_mcp 21 | external_mcp = mcp 22 | logging.info("Shopify API tools MCP reference set") 23 | 24 | 25 | class ShopifyTools(str, Enum): 26 | """Enum of Shopify tool names""" 27 | GET_PRODUCTS = "shopify_get_products" 28 | GET_PRODUCT = "shopify_get_product" 29 | CREATE_PRODUCT = "shopify_create_product" 30 | UPDATE_PRODUCT = "shopify_update_product" 31 | DELETE_PRODUCT = "shopify_delete_product" 32 | 33 | GET_ORDERS = "shopify_get_orders" 34 | GET_ORDER = "shopify_get_order" 35 | CREATE_ORDER = "shopify_create_order" 36 | UPDATE_ORDER = "shopify_update_order" 37 | CANCEL_ORDER = "shopify_cancel_order" 38 | 39 | GET_CUSTOMERS = "shopify_get_customers" 40 | GET_CUSTOMER = "shopify_get_customer" 41 | CREATE_CUSTOMER = "shopify_create_customer" 42 | UPDATE_CUSTOMER = "shopify_update_customer" 43 | 44 | GET_INVENTORY = "shopify_get_inventory" 45 | UPDATE_INVENTORY = "shopify_update_inventory" 46 | 47 | GET_COLLECTIONS = "shopify_get_collections" 48 | CREATE_COLLECTION = "shopify_create_collection" 49 | UPDATE_COLLECTION = "shopify_update_collection" 50 | 51 | 52 | class ShopifyService: 53 | """Service to handle Shopify API operations""" 54 | 55 | def __init__(self, shop_domain, api_version, api_key=None, api_password=None, access_token=None): 56 | """Initialize Shopify service with credentials""" 57 | self.shop_domain = shop_domain 58 | self.api_version = api_version 59 | self.api_key = api_key 60 | self.api_password = api_password 61 | self.access_token = access_token 62 | 63 | # Set base URL for API calls 64 | self.base_url = f"https://{self.shop_domain}/admin/api/{self.api_version}/" 65 | 66 | # For rate limiting 67 | self.last_request_time = 0 68 | self.min_request_interval = 0.5 # 500ms minimum between requests 69 | 70 | # Request headers 71 | self.headers = self._get_headers() 72 | 73 | def _get_headers(self): 74 | """Generate appropriate headers based on authentication method""" 75 | headers = { 76 | "Content-Type": "application/json", 77 | "Accept": "application/json" 78 | } 79 | 80 | if self.access_token: 81 | headers["X-Shopify-Access-Token"] = self.access_token 82 | elif self.api_key and self.api_password: 83 | # This will be used in the auth parameter, not in headers 84 | pass 85 | else: 86 | raise ValueError( 87 | "Either access_token or (api_key and api_password) must be provided") 88 | 89 | return headers 90 | 91 | def _get_auth(self): 92 | """Return appropriate auth tuple if using API key""" 93 | if self.api_key and self.api_password: 94 | return (self.api_key, self.api_password) 95 | return None 96 | 97 | async def _make_request(self, method, endpoint, params=None, data=None, json_data=None): 98 | """Make a rate-limited request to Shopify API""" 99 | # Basic rate limiting 100 | current_time = time.time() 101 | time_since_last = current_time - self.last_request_time 102 | if time_since_last < self.min_request_interval: 103 | await asyncio.sleep(self.min_request_interval - time_since_last) 104 | 105 | url = urljoin(self.base_url, endpoint) 106 | auth = self._get_auth() 107 | 108 | import httpx 109 | async with httpx.AsyncClient() as client: 110 | self.last_request_time = time.time() 111 | 112 | if method.lower() == "get": 113 | response = await client.get(url, params=params, headers=self.headers, auth=auth) 114 | elif method.lower() == "post": 115 | response = await client.post(url, params=params, json=json_data, headers=self.headers, auth=auth) 116 | elif method.lower() == "put": 117 | response = await client.put(url, params=params, json=json_data, headers=self.headers, auth=auth) 118 | elif method.lower() == "delete": 119 | response = await client.delete(url, params=params, headers=self.headers, auth=auth) 120 | else: 121 | raise ValueError(f"Unsupported HTTP method: {method}") 122 | 123 | # Check for Shopify API response errors 124 | if response.status_code >= 400: 125 | error_msg = f"Shopify API error: {response.status_code}" 126 | try: 127 | error_detail = response.json() 128 | error_msg += f" - {json.dumps(error_detail)}" 129 | except: 130 | error_msg += f" - {response.text}" 131 | raise Exception(error_msg) 132 | 133 | # Parse response if it has content 134 | if response.status_code != 204 and response.content: # No content 135 | return response.json() 136 | return None 137 | 138 | # Product operations 139 | async def get_products(self, limit=50, page_info=None, collection_id=None, product_type=None, vendor=None): 140 | """Get a list of products""" 141 | params = {"limit": limit} 142 | 143 | if page_info: 144 | params["page_info"] = page_info 145 | 146 | if collection_id: 147 | params["collection_id"] = collection_id 148 | 149 | if product_type: 150 | params["product_type"] = product_type 151 | 152 | if vendor: 153 | params["vendor"] = vendor 154 | 155 | return await self._make_request("get", "products.json", params=params) 156 | 157 | async def get_product(self, product_id): 158 | """Get a specific product by ID""" 159 | return await self._make_request("get", f"products/{product_id}.json") 160 | 161 | async def create_product(self, product_data): 162 | """Create a new product""" 163 | return await self._make_request("post", "products.json", json_data={"product": product_data}) 164 | 165 | async def update_product(self, product_id, product_data): 166 | """Update an existing product""" 167 | return await self._make_request("put", f"products/{product_id}.json", json_data={"product": product_data}) 168 | 169 | async def delete_product(self, product_id): 170 | """Delete a product""" 171 | return await self._make_request("delete", f"products/{product_id}.json") 172 | 173 | # Order operations 174 | async def get_orders(self, limit=50, page_info=None, status=None, financial_status=None, fulfillment_status=None): 175 | """Get a list of orders""" 176 | params = {"limit": limit} 177 | 178 | if page_info: 179 | params["page_info"] = page_info 180 | 181 | if status: 182 | params["status"] = status 183 | 184 | if financial_status: 185 | params["financial_status"] = financial_status 186 | 187 | if fulfillment_status: 188 | params["fulfillment_status"] = fulfillment_status 189 | 190 | return await self._make_request("get", "orders.json", params=params) 191 | 192 | async def get_order(self, order_id): 193 | """Get a specific order by ID""" 194 | return await self._make_request("get", f"orders/{order_id}.json") 195 | 196 | async def create_order(self, order_data): 197 | """Create a new order""" 198 | return await self._make_request("post", "orders.json", json_data={"order": order_data}) 199 | 200 | async def update_order(self, order_id, order_data): 201 | """Update an existing order""" 202 | return await self._make_request("put", f"orders/{order_id}.json", json_data={"order": order_data}) 203 | 204 | async def cancel_order(self, order_id, reason=None): 205 | """Cancel an order""" 206 | data = {} 207 | if reason: 208 | data["reason"] = reason 209 | 210 | return await self._make_request("post", f"orders/{order_id}/cancel.json", json_data=data) 211 | 212 | # Customer operations 213 | async def get_customers(self, limit=50, page_info=None, query=None): 214 | """Get a list of customers""" 215 | params = {"limit": limit} 216 | 217 | if page_info: 218 | params["page_info"] = page_info 219 | 220 | if query: 221 | params["query"] = query 222 | 223 | return await self._make_request("get", "customers.json", params=params) 224 | 225 | async def get_customer(self, customer_id): 226 | """Get a specific customer by ID""" 227 | return await self._make_request("get", f"customers/{customer_id}.json") 228 | 229 | async def create_customer(self, customer_data): 230 | """Create a new customer""" 231 | return await self._make_request("post", "customers.json", json_data={"customer": customer_data}) 232 | 233 | async def update_customer(self, customer_id, customer_data): 234 | """Update an existing customer""" 235 | return await self._make_request("put", f"customers/{customer_id}.json", json_data={"customer": customer_data}) 236 | 237 | # Inventory operations 238 | async def get_inventory_levels(self, inventory_item_ids=None, location_id=None): 239 | """Get inventory levels""" 240 | params = {} 241 | 242 | if inventory_item_ids: 243 | params["inventory_item_ids"] = ",".join( 244 | str(id) for id in inventory_item_ids) 245 | 246 | if location_id: 247 | params["location_id"] = location_id 248 | 249 | return await self._make_request("get", "inventory_levels.json", params=params) 250 | 251 | async def adjust_inventory(self, inventory_item_id, location_id, adjustment): 252 | """Adjust inventory level""" 253 | data = { 254 | "inventory_item_id": inventory_item_id, 255 | "location_id": location_id, 256 | "available_adjustment": adjustment 257 | } 258 | 259 | return await self._make_request("post", "inventory_levels/adjust.json", json_data=data) 260 | 261 | # Collections operations 262 | async def get_collections(self, limit=50, page_info=None): 263 | """Get a list of custom collections""" 264 | params = {"limit": limit} 265 | 266 | if page_info: 267 | params["page_info"] = page_info 268 | 269 | # First get custom collections 270 | custom = await self._make_request("get", "custom_collections.json", params=params) 271 | 272 | # Then get smart collections 273 | smart = await self._make_request("get", "smart_collections.json", params=params) 274 | 275 | # Combine them 276 | result = {"custom_collections": custom.get("custom_collections", [])} 277 | result["smart_collections"] = smart.get("smart_collections", []) 278 | 279 | return result 280 | 281 | async def create_collection(self, collection_data, collection_type="custom"): 282 | """Create a new collection""" 283 | if collection_type == "custom": 284 | return await self._make_request("post", "custom_collections.json", 285 | json_data={"custom_collection": collection_data}) 286 | else: 287 | return await self._make_request("post", "smart_collections.json", 288 | json_data={"smart_collection": collection_data}) 289 | 290 | async def update_collection(self, collection_id, collection_data, collection_type="custom"): 291 | """Update an existing collection""" 292 | if collection_type == "custom": 293 | return await self._make_request("put", f"custom_collections/{collection_id}.json", 294 | json_data={"custom_collection": collection_data}) 295 | else: 296 | return await self._make_request("put", f"smart_collections/{collection_id}.json", 297 | json_data={"smart_collection": collection_data}) 298 | 299 | # Tool function implementations 300 | 301 | 302 | async def shopify_get_products(limit: int = 50, page_info: str = None, 303 | collection_id: str = None, product_type: str = None, 304 | vendor: str = None, ctx: Context = None) -> str: 305 | """Get a list of products from Shopify store 306 | 307 | Parameters: 308 | - limit: Maximum number of products to return (default: 50, max: 250) 309 | - page_info: Pagination parameter (from previous response) 310 | - collection_id: Filter by collection ID 311 | - product_type: Filter by product type 312 | - vendor: Filter by vendor name 313 | """ 314 | shopify = _get_shopify_service() 315 | if not shopify: 316 | return "Shopify API is not configured. Please set the required environment variables." 317 | 318 | try: 319 | result = await shopify.get_products(limit, page_info, collection_id, product_type, vendor) 320 | return json.dumps(result, indent=2) 321 | except Exception as e: 322 | return f"Error retrieving products: {str(e)}" 323 | 324 | 325 | async def shopify_get_product(product_id: str, ctx: Context = None) -> str: 326 | """Get a specific product by ID 327 | 328 | Parameters: 329 | - product_id: The ID of the product to retrieve 330 | """ 331 | shopify = _get_shopify_service() 332 | if not shopify: 333 | return "Shopify API is not configured. Please set the required environment variables." 334 | 335 | try: 336 | result = await shopify.get_product(product_id) 337 | return json.dumps(result, indent=2) 338 | except Exception as e: 339 | return f"Error retrieving product: {str(e)}" 340 | 341 | 342 | async def shopify_create_product(title: str, product_type: str = None, 343 | vendor: str = None, body_html: str = None, 344 | variants: List[Dict] = None, images: List[Dict] = None, 345 | tags: str = None, ctx: Context = None) -> str: 346 | """Create a new product in the Shopify store 347 | 348 | Parameters: 349 | - title: Product title (required) 350 | - product_type: Type of product 351 | - vendor: Vendor name 352 | - body_html: Product description in HTML format 353 | - variants: List of variant objects 354 | - images: List of image objects 355 | - tags: Comma-separated list of tags 356 | """ 357 | shopify = _get_shopify_service() 358 | if not shopify: 359 | return "Shopify API is not configured. Please set the required environment variables." 360 | 361 | try: 362 | product_data = { 363 | "title": title 364 | } 365 | 366 | if product_type: 367 | product_data["product_type"] = product_type 368 | 369 | if vendor: 370 | product_data["vendor"] = vendor 371 | 372 | if body_html: 373 | product_data["body_html"] = body_html 374 | 375 | if variants: 376 | product_data["variants"] = variants 377 | 378 | if images: 379 | product_data["images"] = images 380 | 381 | if tags: 382 | product_data["tags"] = tags 383 | 384 | result = await shopify.create_product(product_data) 385 | return json.dumps(result, indent=2) 386 | except Exception as e: 387 | return f"Error creating product: {str(e)}" 388 | 389 | # Remaining tool function implementations follow the same pattern 390 | # I've included just a few examples for brevity - in a real implementation, 391 | # you would implement all functions from the ShopifyTools enum 392 | 393 | # Tool registration and initialization 394 | _shopify_service = None 395 | 396 | 397 | def initialize_shopify_service(shop_domain=None, api_version=None, api_key=None, api_password=None, access_token=None): 398 | """Initialize the Shopify service with credentials""" 399 | global _shopify_service 400 | 401 | # Use environment variables as fallback 402 | if shop_domain is None: 403 | shop_domain = os.environ.get("SHOPIFY_SHOP_DOMAIN") 404 | 405 | if api_version is None: 406 | # Default to recent version 407 | api_version = os.environ.get("SHOPIFY_API_VERSION", "2023-10") 408 | 409 | if api_key is None: 410 | api_key = os.environ.get("SHOPIFY_API_KEY") 411 | 412 | if api_password is None: 413 | api_password = os.environ.get("SHOPIFY_API_PASSWORD") 414 | 415 | if access_token is None: 416 | access_token = os.environ.get("SHOPIFY_ACCESS_TOKEN") 417 | 418 | # Validate required credentials 419 | if not shop_domain: 420 | logging.warning( 421 | "Shopify shop domain not configured. Please set SHOPIFY_SHOP_DOMAIN environment variable.") 422 | return None 423 | 424 | if not api_version: 425 | logging.warning( 426 | "Shopify API version not configured. Using default version.") 427 | api_version = "2023-10" # Default to recent version 428 | 429 | if not ((api_key and api_password) or access_token): 430 | logging.warning( 431 | "Shopify credentials not configured. Please set either SHOPIFY_ACCESS_TOKEN or both SHOPIFY_API_KEY and SHOPIFY_API_PASSWORD environment variables.") 432 | return None 433 | 434 | _shopify_service = ShopifyService( 435 | shop_domain, api_version, api_key, api_password, access_token) 436 | return _shopify_service 437 | 438 | 439 | def _get_shopify_service(): 440 | """Get or initialize the Shopify service""" 441 | global _shopify_service 442 | if _shopify_service is None: 443 | _shopify_service = initialize_shopify_service() 444 | return _shopify_service 445 | 446 | 447 | def get_shopify_tools(): 448 | """Get a dictionary of all Shopify tools for registration with MCP""" 449 | return { 450 | ShopifyTools.GET_PRODUCTS: shopify_get_products, 451 | ShopifyTools.GET_PRODUCT: shopify_get_product, 452 | ShopifyTools.CREATE_PRODUCT: shopify_create_product, 453 | # Add all other tool functions here 454 | } 455 | 456 | # This function will be called by the unified server to initialize the module 457 | 458 | 459 | def initialize(mcp=None): 460 | """Initialize the Shopify module with MCP reference and credentials""" 461 | if mcp: 462 | set_external_mcp(mcp) 463 | 464 | # Initialize the service 465 | service = initialize_shopify_service() 466 | if service: 467 | logging.info("Shopify API service initialized successfully") 468 | else: 469 | logging.warning("Failed to initialize Shopify API service") 470 | 471 | return service is not None 472 | -------------------------------------------------------------------------------- /app/tools/streamlit.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python3 2 | import os 3 | import sys 4 | import json 5 | import logging 6 | import tempfile 7 | import subprocess 8 | import signal 9 | import time 10 | import re 11 | import psutil 12 | from pathlib import Path 13 | from typing import List, Dict, Optional, Any, Union 14 | from enum import Enum 15 | import threading 16 | 17 | # Ensure compatibility with mcp server 18 | from mcp.server.fastmcp import FastMCP, Context 19 | 20 | # External MCP reference for tool registration 21 | external_mcp = None 22 | 23 | 24 | def set_external_mcp(mcp): 25 | """Set the external MCP reference for tool registration""" 26 | global external_mcp 27 | external_mcp = mcp 28 | logging.info("Streamlit tools MCP reference set") 29 | 30 | 31 | class StreamlitTools(str, Enum): 32 | """Enum of Streamlit tool names""" 33 | CREATE_APP = "streamlit_create_app" 34 | RUN_APP = "streamlit_run_app" 35 | STOP_APP = "streamlit_stop_app" 36 | LIST_APPS = "streamlit_list_apps" 37 | GET_APP_URL = "streamlit_get_app_url" 38 | MODIFY_APP = "streamlit_modify_app" 39 | CHECK_DEPS = "streamlit_check_deps" 40 | 41 | 42 | class StreamlitService: 43 | """Service to manage Streamlit applications via MCP""" 44 | 45 | def __init__(self, apps_dir=None, port_range=(8501, 8599)): 46 | """Initialize the Streamlit service""" 47 | self.apps_dir = apps_dir or os.path.expanduser("~/streamlit_apps") 48 | self.port_range = port_range 49 | self.running_apps = {} # app_id -> {process, port, url} 50 | self.app_logs = {} # app_id -> log_content 51 | 52 | # Create apps directory if it doesn't exist 53 | os.makedirs(self.apps_dir, exist_ok=True) 54 | 55 | # Track used ports 56 | self.used_ports = set() 57 | 58 | # Add lock for thread safety 59 | self.lock = threading.Lock() 60 | 61 | logging.info(f"Initialized Streamlit service at {self.apps_dir}") 62 | 63 | def _find_available_port(self): 64 | """Find an available port for a Streamlit app""" 65 | with self.lock: 66 | for port in range(self.port_range[0], self.port_range[1] + 1): 67 | if port not in self.used_ports: 68 | # Also check if port is really free (in case an external process is using it) 69 | import socket 70 | sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM) 71 | result = sock.connect_ex(('localhost', port)) 72 | sock.close() 73 | if result != 0: # Port is available 74 | self.used_ports.add(port) 75 | return port 76 | 77 | raise ValueError(f"No available ports in range {self.port_range}") 78 | 79 | def _release_port(self, port): 80 | """Release a port when an app is stopped""" 81 | with self.lock: 82 | if port in self.used_ports: 83 | self.used_ports.remove(port) 84 | 85 | def validate_app_id(self, app_id): 86 | """Validate that an app ID is safe and suitable for a filename""" 87 | if not re.match(r'^[a-zA-Z0-9_-]+$', app_id): 88 | raise ValueError( 89 | "App ID must contain only letters, numbers, underscores, and hyphens") 90 | return app_id 91 | 92 | def get_app_path(self, app_id): 93 | """Get the file path for a Streamlit app""" 94 | safe_app_id = self.validate_app_id(app_id) 95 | return os.path.join(self.apps_dir, f"{safe_app_id}.py") 96 | 97 | async def create_app(self, app_id, code, overwrite=False): 98 | """Create a new Streamlit app with the given code""" 99 | safe_app_id = self.validate_app_id(app_id) 100 | app_path = self.get_app_path(safe_app_id) 101 | 102 | # Check if app already exists 103 | if os.path.exists(app_path) and not overwrite: 104 | raise ValueError( 105 | f"App {app_id} already exists. Use overwrite=True to replace it.") 106 | 107 | # Write the app code to the file 108 | with open(app_path, 'w') as f: 109 | f.write(code) 110 | 111 | logging.info(f"Created Streamlit app '{app_id}' at {app_path}") 112 | 113 | return { 114 | "app_id": safe_app_id, 115 | "path": app_path, 116 | "status": "created" 117 | } 118 | 119 | async def run_app(self, app_id, port=None, browser=False): 120 | """Run a Streamlit app as a background process""" 121 | safe_app_id = self.validate_app_id(app_id) 122 | app_path = self.get_app_path(safe_app_id) 123 | 124 | # Check if app exists 125 | if not os.path.exists(app_path): 126 | raise ValueError(f"App {app_id} does not exist.") 127 | 128 | # Check if app is already running 129 | if safe_app_id in self.running_apps: 130 | return { 131 | "app_id": safe_app_id, 132 | "status": "already_running", 133 | "port": self.running_apps[safe_app_id]["port"], 134 | "url": self.running_apps[safe_app_id]["url"] 135 | } 136 | 137 | # Find an available port if not specified 138 | if port is None: 139 | port = self._find_available_port() 140 | 141 | # Create a log file 142 | log_path = os.path.join(self.apps_dir, f"{safe_app_id}.log") 143 | log_file = open(log_path, 'w') 144 | 145 | # Build the command 146 | cmd = [ 147 | sys.executable, "-m", "streamlit", "run", 148 | app_path, 149 | "--server.port", str(port), 150 | "--server.headless", "true", 151 | "--server.enableCORS", "false", 152 | "--server.enableXsrfProtection", "false" 153 | ] 154 | 155 | if browser: 156 | cmd.extend(["--server.headless", "false"]) 157 | 158 | # Launch the process 159 | try: 160 | process = subprocess.Popen( 161 | cmd, 162 | stdout=log_file, 163 | stderr=subprocess.STDOUT, 164 | text=True 165 | ) 166 | 167 | # Wait a bit to ensure process starts correctly 168 | time.sleep(2) 169 | 170 | # Check if process is running 171 | if process.poll() is not None: 172 | # Process failed to start 173 | log_file.close() 174 | with open(log_path, 'r') as f: 175 | error_log = f.read() 176 | 177 | self._release_port(port) 178 | raise RuntimeError( 179 | f"Failed to start Streamlit app. Error: {error_log}") 180 | 181 | # Store process info 182 | url = f"http://localhost:{port}" 183 | self.running_apps[safe_app_id] = { 184 | "process": process, 185 | "port": port, 186 | "url": url, 187 | "log_path": log_path, 188 | "log_file": log_file 189 | } 190 | 191 | logging.info(f"Started Streamlit app '{app_id}' on port {port}") 192 | 193 | return { 194 | "app_id": safe_app_id, 195 | "status": "running", 196 | "port": port, 197 | "url": url 198 | } 199 | 200 | except Exception as e: 201 | # Clean up if process failed to start 202 | log_file.close() 203 | self._release_port(port) 204 | raise Exception(f"Error starting Streamlit app: {str(e)}") 205 | 206 | async def stop_app(self, app_id): 207 | """Stop a running Streamlit app""" 208 | safe_app_id = self.validate_app_id(app_id) 209 | 210 | # Check if app is running 211 | if safe_app_id not in self.running_apps: 212 | return { 213 | "app_id": safe_app_id, 214 | "status": "not_running" 215 | } 216 | 217 | # Get process info 218 | app_info = self.running_apps[safe_app_id] 219 | process = app_info["process"] 220 | log_file = app_info["log_file"] 221 | port = app_info["port"] 222 | 223 | # Terminate the process (and all child processes) 224 | try: 225 | parent = psutil.Process(process.pid) 226 | for child in parent.children(recursive=True): 227 | child.terminate() 228 | parent.terminate() 229 | 230 | # Wait for termination (with timeout) 231 | process.wait(timeout=10) 232 | 233 | # Close the log file 234 | if not log_file.closed: 235 | log_file.close() 236 | 237 | # Release the port 238 | self._release_port(port) 239 | 240 | # Remove from running apps 241 | del self.running_apps[safe_app_id] 242 | 243 | logging.info(f"Stopped Streamlit app '{app_id}'") 244 | 245 | return { 246 | "app_id": safe_app_id, 247 | "status": "stopped" 248 | } 249 | 250 | except Exception as e: 251 | logging.error(f"Error stopping Streamlit app '{app_id}': {str(e)}") 252 | 253 | # If process didn't terminate gracefully, kill it 254 | try: 255 | process.kill() 256 | self._release_port(port) 257 | 258 | # Close the log file 259 | if not log_file.closed: 260 | log_file.close() 261 | 262 | # Remove from running apps 263 | del self.running_apps[safe_app_id] 264 | 265 | return { 266 | "app_id": safe_app_id, 267 | "status": "killed" 268 | } 269 | except Exception as kill_error: 270 | return { 271 | "app_id": safe_app_id, 272 | "status": "error", 273 | "error": f"Failed to kill app: {str(kill_error)}" 274 | } 275 | 276 | async def list_apps(self): 277 | """List all available Streamlit apps""" 278 | # Get all .py files in the apps directory 279 | app_files = [f for f in os.listdir(self.apps_dir) if f.endswith('.py')] 280 | 281 | # Sort apps by timestamp (newest first) 282 | app_files.sort(key=lambda f: os.path.getmtime( 283 | os.path.join(self.apps_dir, f)), reverse=True) 284 | 285 | # Format results 286 | apps = [] 287 | for app_file in app_files: 288 | app_id = app_file[:-3] # Remove .py extension 289 | app_path = os.path.join(self.apps_dir, app_file) 290 | 291 | # Get file stats 292 | stats = os.stat(app_path) 293 | 294 | # Check if app is running 295 | is_running = app_id in self.running_apps 296 | 297 | app_info = { 298 | "app_id": app_id, 299 | "path": app_path, 300 | "size_bytes": stats.st_size, 301 | "modified": time.ctime(stats.st_mtime), 302 | "running": is_running 303 | } 304 | 305 | if is_running: 306 | app_info["port"] = self.running_apps[app_id]["port"] 307 | app_info["url"] = self.running_apps[app_id]["url"] 308 | 309 | apps.append(app_info) 310 | 311 | return { 312 | "apps": apps, 313 | "count": len(apps), 314 | "apps_dir": self.apps_dir 315 | } 316 | 317 | async def get_app_url(self, app_id): 318 | """Get the URL for a running Streamlit app""" 319 | safe_app_id = self.validate_app_id(app_id) 320 | 321 | # Check if app is running 322 | if safe_app_id not in self.running_apps: 323 | return { 324 | "app_id": safe_app_id, 325 | "status": "not_running" 326 | } 327 | 328 | # Get app URL 329 | app_info = self.running_apps[safe_app_id] 330 | 331 | return { 332 | "app_id": safe_app_id, 333 | "status": "running", 334 | "port": app_info["port"], 335 | "url": app_info["url"] 336 | } 337 | 338 | async def modify_app(self, app_id, code_updates=None, append_code=None): 339 | """Modify an existing Streamlit app""" 340 | safe_app_id = self.validate_app_id(app_id) 341 | app_path = self.get_app_path(safe_app_id) 342 | 343 | # Check if app exists 344 | if not os.path.exists(app_path): 345 | raise ValueError(f"App {app_id} does not exist.") 346 | 347 | # Read current code 348 | with open(app_path, 'r') as f: 349 | current_code = f.read() 350 | 351 | # Apply code updates if provided 352 | if code_updates: 353 | for old_text, new_text in code_updates: 354 | current_code = current_code.replace(old_text, new_text) 355 | 356 | # Append code if provided 357 | if append_code: 358 | current_code += "\n\n" + append_code 359 | 360 | # Write the updated code back to the file 361 | with open(app_path, 'w') as f: 362 | f.write(current_code) 363 | 364 | logging.info(f"Modified Streamlit app '{app_id}'") 365 | 366 | # Restart the app if it's running 367 | was_running = safe_app_id in self.running_apps 368 | result = {"app_id": safe_app_id, 369 | "status": "modified", "was_running": was_running} 370 | 371 | if was_running: 372 | port = self.running_apps[safe_app_id]["port"] 373 | await self.stop_app(safe_app_id) 374 | restart_result = await self.run_app(safe_app_id, port=port) 375 | result["restart"] = restart_result 376 | 377 | return result 378 | 379 | async def check_dependencies(self): 380 | """Check if Streamlit and required dependencies are installed""" 381 | # Check for Streamlit 382 | try: 383 | # Run streamlit version command 384 | process = subprocess.run( 385 | [sys.executable, "-m", "streamlit", "--version"], 386 | capture_output=True, 387 | text=True, 388 | check=False 389 | ) 390 | 391 | if process.returncode != 0: 392 | return { 393 | "status": "error", 394 | "streamlit_installed": False, 395 | "error": process.stderr.strip() or "Streamlit is not installed correctly", 396 | "install_command": "pip install streamlit" 397 | } 398 | 399 | streamlit_version = process.stdout.strip() 400 | 401 | # Check for other dependencies 402 | dependencies = ["pandas", "numpy", 403 | "matplotlib", "altair", "plotly"] 404 | installed_deps = {} 405 | 406 | for dep in dependencies: 407 | try: 408 | # Try to import the module 409 | __import__(dep) 410 | installed_deps[dep] = True 411 | except ImportError: 412 | installed_deps[dep] = False 413 | 414 | return { 415 | "status": "success", 416 | "streamlit_installed": True, 417 | "streamlit_version": streamlit_version, 418 | "dependencies": installed_deps, 419 | "missing_dependencies": [dep for dep, installed in installed_deps.items() if not installed] 420 | } 421 | 422 | except Exception as e: 423 | return { 424 | "status": "error", 425 | "streamlit_installed": False, 426 | "error": str(e), 427 | "install_command": "pip install streamlit" 428 | } 429 | 430 | # Tool function definitions that will be registered with MCP 431 | 432 | 433 | async def streamlit_create_app(app_id: str, code: str, overwrite: bool = False, ctx: Context = None) -> str: 434 | """Create a new Streamlit app with the provided code 435 | 436 | Parameters: 437 | - app_id: Unique identifier for the app (letters, numbers, underscores, and hyphens only) 438 | - code: Python code for the Streamlit app 439 | - overwrite: Whether to overwrite an existing app with the same ID 440 | """ 441 | streamlit = _get_streamlit_service() 442 | 443 | try: 444 | result = await streamlit.create_app(app_id, code, overwrite) 445 | return json.dumps(result, indent=2) 446 | except Exception as e: 447 | return json.dumps({"status": "error", "error": str(e)}, indent=2) 448 | 449 | 450 | async def streamlit_run_app(app_id: str, port: int = None, browser: bool = False, ctx: Context = None) -> str: 451 | """Run a Streamlit app as a background process 452 | 453 | Parameters: 454 | - app_id: Identifier of the app to run 455 | - port: Optional port number (if not specified, an available port will be used) 456 | - browser: Whether to open the app in a browser window 457 | """ 458 | streamlit = _get_streamlit_service() 459 | 460 | try: 461 | result = await streamlit.run_app(app_id, port, browser) 462 | return json.dumps(result, indent=2) 463 | except Exception as e: 464 | return json.dumps({"status": "error", "error": str(e)}, indent=2) 465 | 466 | 467 | async def streamlit_stop_app(app_id: str, ctx: Context = None) -> str: 468 | """Stop a running Streamlit app 469 | 470 | Parameters: 471 | - app_id: Identifier of the app to stop 472 | """ 473 | streamlit = _get_streamlit_service() 474 | 475 | try: 476 | result = await streamlit.stop_app(app_id) 477 | return json.dumps(result, indent=2) 478 | except Exception as e: 479 | return json.dumps({"status": "error", "error": str(e)}, indent=2) 480 | 481 | 482 | async def streamlit_list_apps(ctx: Context = None) -> str: 483 | """List all available Streamlit apps""" 484 | streamlit = _get_streamlit_service() 485 | 486 | try: 487 | result = await streamlit.list_apps() 488 | return json.dumps(result, indent=2) 489 | except Exception as e: 490 | return json.dumps({"status": "error", "error": str(e)}, indent=2) 491 | 492 | 493 | async def streamlit_get_app_url(app_id: str, ctx: Context = None) -> str: 494 | """Get the URL for a running Streamlit app 495 | 496 | Parameters: 497 | - app_id: Identifier of the app 498 | """ 499 | streamlit = _get_streamlit_service() 500 | 501 | try: 502 | result = await streamlit.get_app_url(app_id) 503 | return json.dumps(result, indent=2) 504 | except Exception as e: 505 | return json.dumps({"status": "error", "error": str(e)}, indent=2) 506 | 507 | 508 | async def streamlit_modify_app(app_id: str, code_updates: List[tuple] = None, append_code: str = None, ctx: Context = None) -> str: 509 | """Modify an existing Streamlit app 510 | 511 | Parameters: 512 | - app_id: Identifier of the app to modify 513 | - code_updates: List of tuples (old_text, new_text) for text replacements 514 | - append_code: Code to append to the end of the app 515 | """ 516 | streamlit = _get_streamlit_service() 517 | 518 | try: 519 | result = await streamlit.modify_app(app_id, code_updates, append_code) 520 | return json.dumps(result, indent=2) 521 | except Exception as e: 522 | return json.dumps({"status": "error", "error": str(e)}, indent=2) 523 | 524 | 525 | async def streamlit_check_deps(ctx: Context = None) -> str: 526 | """Check if Streamlit and required dependencies are installed""" 527 | streamlit = _get_streamlit_service() 528 | 529 | try: 530 | result = await streamlit.check_dependencies() 531 | return json.dumps(result, indent=2) 532 | except Exception as e: 533 | return json.dumps({"status": "error", "error": str(e)}, indent=2) 534 | 535 | # Tool registration and initialization 536 | _streamlit_service = None 537 | 538 | 539 | def initialize_streamlit_service(apps_dir=None, port_range=(8501, 8599)): 540 | """Initialize the Streamlit service""" 541 | global _streamlit_service 542 | 543 | # Check if streamlit is installed 544 | try: 545 | import streamlit 546 | streamlit_version = streamlit.__version__ 547 | logging.info(f"Found Streamlit version {streamlit_version}") 548 | except ImportError: 549 | logging.error( 550 | "Streamlit is not installed. Please install it with 'pip install streamlit'") 551 | return None 552 | 553 | # Rest of initialization code... 554 | 555 | # Use environment variables as fallback 556 | if apps_dir is None: 557 | apps_dir = os.environ.get("STREAMLIT_APPS_DIR") 558 | 559 | if apps_dir is None: 560 | apps_dir = os.path.expanduser("~/streamlit_apps") 561 | 562 | _streamlit_service = StreamlitService(apps_dir, port_range) 563 | return _streamlit_service 564 | 565 | 566 | def _get_streamlit_service(): 567 | """Get or initialize the Streamlit service""" 568 | global _streamlit_service 569 | if _streamlit_service is None: 570 | _streamlit_service = initialize_streamlit_service() 571 | return _streamlit_service 572 | 573 | 574 | def get_streamlit_tools(): 575 | """Get a dictionary of all Streamlit tools for registration with MCP""" 576 | return { 577 | StreamlitTools.CREATE_APP: streamlit_create_app, 578 | StreamlitTools.RUN_APP: streamlit_run_app, 579 | StreamlitTools.STOP_APP: streamlit_stop_app, 580 | StreamlitTools.LIST_APPS: streamlit_list_apps, 581 | StreamlitTools.GET_APP_URL: streamlit_get_app_url, 582 | StreamlitTools.MODIFY_APP: streamlit_modify_app, 583 | StreamlitTools.CHECK_DEPS: streamlit_check_deps 584 | } 585 | 586 | # This function will be called by the unified server to initialize the module 587 | 588 | 589 | def initialize(mcp=None): 590 | """Initialize the Streamlit module with MCP reference""" 591 | if mcp: 592 | set_external_mcp(mcp) 593 | 594 | # Initialize the service 595 | service = initialize_streamlit_service() 596 | if service: 597 | logging.info("Streamlit service initialized successfully") 598 | else: 599 | logging.warning("Failed to initialize Streamlit service") 600 | 601 | return service is not None 602 | 603 | 604 | if __name__ == "__main__": 605 | print("Streamlit service module - use with MCP Unified Server") 606 | -------------------------------------------------------------------------------- /app/tools/time_tools.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python3 2 | import json 3 | from datetime import datetime, timedelta 4 | from enum import Enum 5 | from typing import Optional 6 | from zoneinfo import ZoneInfo 7 | from pydantic import BaseModel 8 | import logging 9 | 10 | # Ensure compatibility with mcp server 11 | from mcp.server.fastmcp import FastMCP, Context 12 | 13 | # External MCP reference for tool registration 14 | external_mcp = None 15 | 16 | 17 | def set_external_mcp(mcp): 18 | """Set the external MCP reference for tool registration""" 19 | global external_mcp 20 | external_mcp = mcp 21 | logging.info("Time tools MCP reference set") 22 | 23 | 24 | class TimeTools(str, Enum): 25 | GET_CURRENT_TIME = "get_current_time" 26 | CONVERT_TIME = "convert_time" 27 | 28 | 29 | class TimeResult(BaseModel): 30 | timezone: str 31 | datetime: str 32 | is_dst: bool 33 | 34 | 35 | class TimeConversionResult(BaseModel): 36 | source: TimeResult 37 | target: TimeResult 38 | time_difference: str 39 | 40 | 41 | class TimeToolsService: 42 | """Service to handle time operations""" 43 | 44 | def get_local_tz(self, local_tz_override: str | None = None) -> ZoneInfo: 45 | if local_tz_override: 46 | return ZoneInfo(local_tz_override) 47 | 48 | # Get local timezone from datetime.now() 49 | tzinfo = datetime.now().astimezone(tz=None).tzinfo 50 | if tzinfo is not None: 51 | return ZoneInfo(str(tzinfo)) 52 | raise ValueError("Could not determine local timezone - tzinfo is None") 53 | 54 | def get_zoneinfo(self, timezone_name: str) -> ZoneInfo: 55 | try: 56 | return ZoneInfo(timezone_name) 57 | except Exception as e: 58 | raise ValueError(f"Invalid timezone: {str(e)}") 59 | 60 | async def get_current_time(self, timezone: str) -> str: 61 | """Get current time in specified timezone""" 62 | try: 63 | timezone_obj = self.get_zoneinfo(timezone) 64 | current_time = datetime.now(timezone_obj) 65 | 66 | result = TimeResult( 67 | timezone=timezone, 68 | datetime=current_time.isoformat(timespec="seconds"), 69 | is_dst=bool(current_time.dst()), 70 | ) 71 | 72 | return json.dumps(result.model_dump(), indent=2) 73 | except Exception as e: 74 | return f"Error processing time query: {str(e)}" 75 | 76 | async def convert_time(self, source_timezone: str, time: str, target_timezone: str) -> str: 77 | """Convert time between timezones""" 78 | try: 79 | source_timezone_obj = self.get_zoneinfo(source_timezone) 80 | target_timezone_obj = self.get_zoneinfo(target_timezone) 81 | 82 | try: 83 | parsed_time = datetime.strptime(time, "%H:%M").time() 84 | except ValueError: 85 | raise ValueError( 86 | "Invalid time format. Expected HH:MM [24-hour format]") 87 | 88 | now = datetime.now(source_timezone_obj) 89 | source_time = datetime( 90 | now.year, 91 | now.month, 92 | now.day, 93 | parsed_time.hour, 94 | parsed_time.minute, 95 | tzinfo=source_timezone_obj, 96 | ) 97 | 98 | target_time = source_time.astimezone(target_timezone_obj) 99 | source_offset = source_time.utcoffset() or timedelta() 100 | target_offset = target_time.utcoffset() or timedelta() 101 | hours_difference = ( 102 | target_offset - source_offset).total_seconds() / 3600 103 | 104 | if hours_difference.is_integer(): 105 | time_diff_str = f"{hours_difference:+.1f}h" 106 | else: 107 | # For fractional hours like Nepal's UTC+5:45 108 | time_diff_str = f"{hours_difference:+.2f}".rstrip( 109 | "0").rstrip(".") + "h" 110 | 111 | result = TimeConversionResult( 112 | source=TimeResult( 113 | timezone=source_timezone, 114 | datetime=source_time.isoformat(timespec="seconds"), 115 | is_dst=bool(source_time.dst()), 116 | ), 117 | target=TimeResult( 118 | timezone=target_timezone, 119 | datetime=target_time.isoformat(timespec="seconds"), 120 | is_dst=bool(target_time.dst()), 121 | ), 122 | time_difference=time_diff_str, 123 | ) 124 | 125 | return json.dumps(result.model_dump(), indent=2) 126 | except Exception as e: 127 | return f"Error processing time conversion: {str(e)}" 128 | 129 | # Tool function definitions that will be registered with MCP 130 | 131 | 132 | async def get_current_time(timezone: str, ctx: Context = None) -> str: 133 | """Get current time in specified timezone""" 134 | try: 135 | return await _get_time_tools().get_current_time(timezone) 136 | except Exception as e: 137 | return f"Error processing time query: {str(e)}" 138 | 139 | 140 | async def convert_time(source_timezone: str, time: str, target_timezone: str, ctx: Context = None) -> str: 141 | """Convert time between timezones""" 142 | try: 143 | return await _get_time_tools().convert_time(source_timezone, time, target_timezone) 144 | except Exception as e: 145 | return f"Error processing time conversion: {str(e)}" 146 | 147 | # Tool registration and initialization 148 | _time_tools_instance = None 149 | 150 | 151 | def initialize_time_tools(): 152 | """Initialize the time tools""" 153 | global _time_tools_instance 154 | _time_tools_instance = TimeToolsService() 155 | return _time_tools_instance 156 | 157 | 158 | def _get_time_tools(): 159 | """Get or initialize the time tools""" 160 | global _time_tools_instance 161 | if _time_tools_instance is None: 162 | _time_tools_instance = initialize_time_tools() 163 | return _time_tools_instance 164 | 165 | 166 | def get_time_tools(): 167 | """Get a dictionary of all time tools for registration with MCP""" 168 | return { 169 | TimeTools.GET_CURRENT_TIME: get_current_time, 170 | TimeTools.CONVERT_TIME: convert_time 171 | } 172 | -------------------------------------------------------------------------------- /app/tools/vapi.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python3 2 | import os 3 | import json 4 | import logging 5 | from typing import List, Dict, Optional, Any, Union, Tuple 6 | from enum import Enum 7 | import asyncio 8 | 9 | # Ensure compatibility with mcp server 10 | from mcp.server.fastmcp import FastMCP, Context 11 | 12 | # External MCP reference for tool registration 13 | external_mcp = None 14 | 15 | 16 | def set_external_mcp(mcp): 17 | """Set the external MCP reference for tool registration""" 18 | global external_mcp 19 | external_mcp = mcp 20 | logging.info("VAPI tools MCP reference set") 21 | 22 | 23 | class VAPITools(str, Enum): 24 | """Enum of VAPI tool names""" 25 | MAKE_CALL = "vapi_make_call" 26 | LIST_CALLS = "vapi_list_calls" 27 | GET_CALL = "vapi_get_call" 28 | END_CALL = "vapi_end_call" 29 | GET_RECORDINGS = "vapi_get_recordings" 30 | ADD_HUMAN = "vapi_add_human" 31 | PAUSE_CALL = "vapi_pause_call" 32 | RESUME_CALL = "vapi_resume_call" 33 | SEND_EVENT = "vapi_send_event" 34 | 35 | 36 | class VAPIService: 37 | """Service to handle VAPI operations""" 38 | 39 | def __init__(self, api_key=None): 40 | """Initialize the VAPI service with API key""" 41 | self.api_key = api_key or os.environ.get("VAPI_API_KEY") 42 | if not self.api_key: 43 | raise ValueError("VAPI API key is required") 44 | 45 | try: 46 | # Import the VAPI client SDK 47 | from vapi import Client 48 | self.client = Client(api_key=self.api_key) 49 | self.initialized = True 50 | logging.info("VAPI client initialized successfully") 51 | except ImportError: 52 | logging.error("VAPI library not installed. Please install with 'pip install vapi'") 53 | self.initialized = False 54 | self.client = None 55 | 56 | def _is_initialized(self): 57 | """Check if the service is properly initialized""" 58 | if not self.initialized or not self.client: 59 | raise ValueError("VAPI service not properly initialized. Check if vapi library is installed.") 60 | return True 61 | 62 | async def make_call(self, to: str, assistant_id: str, 63 | from_number: Optional[str] = None, 64 | assistant_options: Optional[Dict[str, Any]] = None, 65 | server_url: Optional[str] = None) -> Dict[str, Any]: 66 | """Make a call using VAPI""" 67 | try: 68 | self._is_initialized() 69 | 70 | # Prepare call parameters 71 | params = { 72 | "to": to, 73 | "assistant_id": assistant_id 74 | } 75 | 76 | # Add optional parameters if provided 77 | if from_number: 78 | params["from"] = from_number 79 | if assistant_options: 80 | params["options"] = assistant_options 81 | if server_url: 82 | params["server_url"] = server_url 83 | 84 | # Make the API call 85 | call = self.client.calls.create(**params) 86 | return call 87 | except Exception as e: 88 | error_msg = f"Error making call: {str(e)}" 89 | logging.error(error_msg) 90 | return {"error": error_msg} 91 | 92 | async def list_calls(self, 93 | limit: Optional[int] = 10, 94 | before: Optional[str] = None, 95 | after: Optional[str] = None, 96 | status: Optional[str] = None) -> Dict[str, Any]: 97 | """List calls from VAPI""" 98 | try: 99 | self._is_initialized() 100 | 101 | # Prepare parameters 102 | params = {} 103 | if limit: 104 | params["limit"] = limit 105 | if before: 106 | params["before"] = before 107 | if after: 108 | params["after"] = after 109 | if status: 110 | params["status"] = status 111 | 112 | # Make the API call 113 | calls = self.client.calls.list(**params) 114 | return calls 115 | except Exception as e: 116 | error_msg = f"Error listing calls: {str(e)}" 117 | logging.error(error_msg) 118 | return {"error": error_msg} 119 | 120 | async def get_call(self, call_id: str) -> Dict[str, Any]: 121 | """Get details of a specific call""" 122 | try: 123 | self._is_initialized() 124 | 125 | # Make the API call 126 | call = self.client.calls.get(call_id) 127 | return call 128 | except Exception as e: 129 | error_msg = f"Error getting call details: {str(e)}" 130 | logging.error(error_msg) 131 | return {"error": error_msg} 132 | 133 | async def end_call(self, call_id: str) -> Dict[str, Any]: 134 | """End a call""" 135 | try: 136 | self._is_initialized() 137 | 138 | # Make the API call 139 | result = self.client.calls.end(call_id) 140 | return result 141 | except Exception as e: 142 | error_msg = f"Error ending call: {str(e)}" 143 | logging.error(error_msg) 144 | return {"error": error_msg} 145 | 146 | async def get_recordings(self, call_id: str) -> Dict[str, Any]: 147 | """Get recordings for a call""" 148 | try: 149 | self._is_initialized() 150 | 151 | # Make the API call 152 | recordings = self.client.calls.recordings(call_id) 153 | return recordings 154 | except Exception as e: 155 | error_msg = f"Error getting call recordings: {str(e)}" 156 | logging.error(error_msg) 157 | return {"error": error_msg} 158 | 159 | async def add_human(self, call_id: str, 160 | phone_number: str = None, 161 | transfer: bool = False) -> Dict[str, Any]: 162 | """Add a human to a call""" 163 | try: 164 | self._is_initialized() 165 | 166 | # Prepare parameters 167 | params = {} 168 | if phone_number: 169 | params["phone_number"] = phone_number 170 | if transfer is not None: 171 | params["transfer"] = transfer 172 | 173 | # Make the API call 174 | result = self.client.calls.add_human(call_id, **params) 175 | return result 176 | except Exception as e: 177 | error_msg = f"Error adding human to call: {str(e)}" 178 | logging.error(error_msg) 179 | return {"error": error_msg} 180 | 181 | async def pause_call(self, call_id: str) -> Dict[str, Any]: 182 | """Pause a call""" 183 | try: 184 | self._is_initialized() 185 | 186 | # Make the API call 187 | result = self.client.calls.pause(call_id) 188 | return result 189 | except Exception as e: 190 | error_msg = f"Error pausing call: {str(e)}" 191 | logging.error(error_msg) 192 | return {"error": error_msg} 193 | 194 | async def resume_call(self, call_id: str) -> Dict[str, Any]: 195 | """Resume a paused call""" 196 | try: 197 | self._is_initialized() 198 | 199 | # Make the API call 200 | result = self.client.calls.resume(call_id) 201 | return result 202 | except Exception as e: 203 | error_msg = f"Error resuming call: {str(e)}" 204 | logging.error(error_msg) 205 | return {"error": error_msg} 206 | 207 | async def send_event(self, call_id: str, 208 | event_type: str, 209 | data: Optional[Dict[str, Any]] = None) -> Dict[str, Any]: 210 | """Send an event to a call""" 211 | try: 212 | self._is_initialized() 213 | 214 | # Prepare parameters 215 | params = { 216 | "type": event_type 217 | } 218 | if data: 219 | params["data"] = data 220 | 221 | # Make the API call 222 | result = self.client.calls.send_event(call_id, params) 223 | return result 224 | except Exception as e: 225 | error_msg = f"Error sending event to call: {str(e)}" 226 | logging.error(error_msg) 227 | return {"error": error_msg} 228 | 229 | 230 | # Tool function definitions that will be registered with MCP 231 | 232 | async def vapi_make_call(to: str, assistant_id: str, 233 | from_number: Optional[str] = None, 234 | assistant_options: Optional[Dict[str, Any]] = None, 235 | server_url: Optional[str] = None, 236 | ctx: Context = None) -> str: 237 | """Make a phone call using VAPI. 238 | 239 | Initiates a call to the specified phone number using a VAPI assistant. 240 | 241 | Parameters: 242 | - to: Phone number to call (E.164 format recommended, e.g., +12125551234) 243 | - assistant_id: ID of the assistant to use for the call 244 | - from_number: Optional phone number to display as caller ID 245 | - assistant_options: Optional dictionary of assistant configuration options 246 | - server_url: Optional server URL for call events 247 | 248 | Returns: 249 | - JSON string with call details including call ID, status, and timestamps 250 | """ 251 | vapi = _get_vapi_service() 252 | if not vapi: 253 | return json.dumps({"error": "VAPI service not properly initialized."}) 254 | 255 | try: 256 | result = await vapi.make_call(to, assistant_id, from_number, assistant_options, server_url) 257 | return json.dumps(result, indent=2) 258 | except Exception as e: 259 | return json.dumps({"error": f"Error making call: {str(e)}"}, indent=2) 260 | 261 | 262 | async def vapi_list_calls(limit: int = 10, 263 | before: Optional[str] = None, 264 | after: Optional[str] = None, 265 | status: Optional[str] = None, 266 | ctx: Context = None) -> str: 267 | """List phone calls made through VAPI. 268 | 269 | Retrieves a list of calls with optional filtering. 270 | 271 | Parameters: 272 | - limit: Maximum number of calls to return (default: 10) 273 | - before: Return calls created before this cursor 274 | - after: Return calls created after this cursor 275 | - status: Filter calls by status (e.g., 'queued', 'ringing', 'in-progress', 'completed') 276 | 277 | Returns: 278 | - JSON string with list of calls and pagination details 279 | """ 280 | vapi = _get_vapi_service() 281 | if not vapi: 282 | return json.dumps({"error": "VAPI service not properly initialized."}) 283 | 284 | try: 285 | result = await vapi.list_calls(limit, before, after, status) 286 | return json.dumps(result, indent=2) 287 | except Exception as e: 288 | return json.dumps({"error": f"Error listing calls: {str(e)}"}, indent=2) 289 | 290 | 291 | async def vapi_get_call(call_id: str, ctx: Context = None) -> str: 292 | """Get detailed information about a specific call. 293 | 294 | Retrieves complete information about a call by its ID. 295 | 296 | Parameters: 297 | - call_id: ID of the call to retrieve 298 | 299 | Returns: 300 | - JSON string with detailed call information including status, timestamps, and metadata 301 | """ 302 | vapi = _get_vapi_service() 303 | if not vapi: 304 | return json.dumps({"error": "VAPI service not properly initialized."}) 305 | 306 | try: 307 | result = await vapi.get_call(call_id) 308 | return json.dumps(result, indent=2) 309 | except Exception as e: 310 | return json.dumps({"error": f"Error getting call: {str(e)}"}, indent=2) 311 | 312 | 313 | async def vapi_end_call(call_id: str, ctx: Context = None) -> str: 314 | """End an ongoing call. 315 | 316 | Terminates an active call by its ID. 317 | 318 | Parameters: 319 | - call_id: ID of the call to end 320 | 321 | Returns: 322 | - JSON string with the result of the operation 323 | """ 324 | vapi = _get_vapi_service() 325 | if not vapi: 326 | return json.dumps({"error": "VAPI service not properly initialized."}) 327 | 328 | try: 329 | result = await vapi.end_call(call_id) 330 | return json.dumps(result, indent=2) 331 | except Exception as e: 332 | return json.dumps({"error": f"Error ending call: {str(e)}"}, indent=2) 333 | 334 | 335 | async def vapi_get_recordings(call_id: str, ctx: Context = None) -> str: 336 | """Get recordings for a specific call. 337 | 338 | Retrieves a list of recordings associated with a call. 339 | 340 | Parameters: 341 | - call_id: ID of the call to get recordings for 342 | 343 | Returns: 344 | - JSON string with recording metadata including URLs, durations, and timestamps 345 | """ 346 | vapi = _get_vapi_service() 347 | if not vapi: 348 | return json.dumps({"error": "VAPI service not properly initialized."}) 349 | 350 | try: 351 | result = await vapi.get_recordings(call_id) 352 | return json.dumps(result, indent=2) 353 | except Exception as e: 354 | return json.dumps({"error": f"Error getting recordings: {str(e)}"}, indent=2) 355 | 356 | 357 | async def vapi_add_human(call_id: str, 358 | phone_number: str = None, 359 | transfer: bool = False, 360 | ctx: Context = None) -> str: 361 | """Add a human participant to a call. 362 | 363 | Adds a human to an ongoing call, optionally transferring control. 364 | 365 | Parameters: 366 | - call_id: ID of the call to add the human to 367 | - phone_number: Phone number of the human to add 368 | - transfer: Whether to transfer the call to the human (default: False) 369 | 370 | Returns: 371 | - JSON string with the result of the operation 372 | """ 373 | vapi = _get_vapi_service() 374 | if not vapi: 375 | return json.dumps({"error": "VAPI service not properly initialized."}) 376 | 377 | try: 378 | result = await vapi.add_human(call_id, phone_number, transfer) 379 | return json.dumps(result, indent=2) 380 | except Exception as e: 381 | return json.dumps({"error": f"Error adding human to call: {str(e)}"}, indent=2) 382 | 383 | 384 | async def vapi_pause_call(call_id: str, ctx: Context = None) -> str: 385 | """Pause an ongoing call. 386 | 387 | Temporarily pauses an active call. 388 | 389 | Parameters: 390 | - call_id: ID of the call to pause 391 | 392 | Returns: 393 | - JSON string with the result of the operation 394 | """ 395 | vapi = _get_vapi_service() 396 | if not vapi: 397 | return json.dumps({"error": "VAPI service not properly initialized."}) 398 | 399 | try: 400 | result = await vapi.pause_call(call_id) 401 | return json.dumps(result, indent=2) 402 | except Exception as e: 403 | return json.dumps({"error": f"Error pausing call: {str(e)}"}, indent=2) 404 | 405 | 406 | async def vapi_resume_call(call_id: str, ctx: Context = None) -> str: 407 | """Resume a paused call. 408 | 409 | Continues a previously paused call. 410 | 411 | Parameters: 412 | - call_id: ID of the call to resume 413 | 414 | Returns: 415 | - JSON string with the result of the operation 416 | """ 417 | vapi = _get_vapi_service() 418 | if not vapi: 419 | return json.dumps({"error": "VAPI service not properly initialized."}) 420 | 421 | try: 422 | result = await vapi.resume_call(call_id) 423 | return json.dumps(result, indent=2) 424 | except Exception as e: 425 | return json.dumps({"error": f"Error resuming call: {str(e)}"}, indent=2) 426 | 427 | 428 | async def vapi_send_event(call_id: str, 429 | event_type: str, 430 | data: Optional[Dict[str, Any]] = None, 431 | ctx: Context = None) -> str: 432 | """Send a custom event to a call. 433 | 434 | Sends an event to a call to trigger custom behaviors. 435 | 436 | Parameters: 437 | - call_id: ID of the call to send the event to 438 | - event_type: Type of event to send 439 | - data: Optional data payload for the event 440 | 441 | Returns: 442 | - JSON string with the result of the operation 443 | """ 444 | vapi = _get_vapi_service() 445 | if not vapi: 446 | return json.dumps({"error": "VAPI service not properly initialized."}) 447 | 448 | try: 449 | result = await vapi.send_event(call_id, event_type, data) 450 | return json.dumps(result, indent=2) 451 | except Exception as e: 452 | return json.dumps({"error": f"Error sending event to call: {str(e)}"}, indent=2) 453 | 454 | 455 | # Tool registration and initialization 456 | _vapi_service = None 457 | 458 | 459 | def initialize_vapi_service(api_key=None): 460 | """Initialize the VAPI service with API key""" 461 | global _vapi_service 462 | 463 | if api_key is None: 464 | api_key = os.environ.get("VAPI_API_KEY") 465 | 466 | if not api_key: 467 | logging.warning("VAPI API key not configured. Please set the VAPI_API_KEY environment variable.") 468 | return None 469 | 470 | try: 471 | _vapi_service = VAPIService(api_key=api_key) 472 | return _vapi_service 473 | except Exception as e: 474 | logging.error(f"Failed to initialize VAPI service: {str(e)}") 475 | return None 476 | 477 | 478 | def _get_vapi_service(): 479 | """Get or initialize the VAPI service""" 480 | global _vapi_service 481 | if _vapi_service is None: 482 | _vapi_service = initialize_vapi_service() 483 | return _vapi_service 484 | 485 | 486 | def get_vapi_tools(): 487 | """Get a dictionary of all VAPI tools for registration with MCP""" 488 | return { 489 | VAPITools.MAKE_CALL: vapi_make_call, 490 | VAPITools.LIST_CALLS: vapi_list_calls, 491 | VAPITools.GET_CALL: vapi_get_call, 492 | VAPITools.END_CALL: vapi_end_call, 493 | VAPITools.GET_RECORDINGS: vapi_get_recordings, 494 | VAPITools.ADD_HUMAN: vapi_add_human, 495 | VAPITools.PAUSE_CALL: vapi_pause_call, 496 | VAPITools.RESUME_CALL: vapi_resume_call, 497 | VAPITools.SEND_EVENT: vapi_send_event 498 | } 499 | 500 | 501 | # This function will be called by the unified server to initialize the module 502 | def initialize(mcp=None): 503 | """Initialize the VAPI module with MCP reference and API key""" 504 | if mcp: 505 | set_external_mcp(mcp) 506 | 507 | # Initialize the service 508 | service = initialize_vapi_service() 509 | if service and service.initialized: 510 | logging.info("VAPI service initialized successfully") 511 | return True 512 | else: 513 | logging.warning("Failed to initialize VAPI service. Please ensure vapi is installed and API key is configured.") 514 | return False 515 | 516 | 517 | if __name__ == "__main__": 518 | print("VAPI service module - use with MCP Unified Server") -------------------------------------------------------------------------------- /app/tools/worldbank.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python3 2 | import requests 3 | import pandas as pd 4 | import logging 5 | 6 | # Ensure compatibility with mcp server 7 | from mcp.server.fastmcp import FastMCP, Context 8 | 9 | # External MCP reference for tool registration 10 | external_mcp = None 11 | 12 | 13 | def set_external_mcp(mcp): 14 | """Set the external MCP reference for tool registration""" 15 | global external_mcp 16 | external_mcp = mcp 17 | logging.info("World Bank tools MCP reference set") 18 | 19 | 20 | class WorldBankService: 21 | """Service to handle World Bank API operations""" 22 | 23 | def __init__(self): 24 | self.base_url = "https://api.worldbank.org/v2" 25 | 26 | def get_countries(self): 27 | """Get list of countries from World Bank API""" 28 | try: 29 | url = f"{self.base_url}/country?format=json&per_page=1000" 30 | response = requests.get(url) 31 | return response.json() 32 | except Exception as e: 33 | return {"error": str(e)} 34 | 35 | def get_indicators(self): 36 | """Get list of indicators from World Bank API""" 37 | try: 38 | url = f"{self.base_url}/indicator?format=json&per_page=50000" 39 | response = requests.get(url) 40 | return response.json() 41 | except Exception as e: 42 | return {"error": str(e)} 43 | 44 | def get_indicator_for_country(self, country_id, indicator_id): 45 | """Get values for an indicator for a specific country""" 46 | try: 47 | url = f"{self.base_url}/country/{country_id}/indicator/{indicator_id}?format=json&per_page=20000" 48 | response = requests.get(url) 49 | data = response.json() 50 | 51 | # Handle case where API returns error 52 | if not isinstance(data, list) or len(data) < 2: 53 | return {"error": "Invalid API response format"} 54 | 55 | indicator_values = data[1] 56 | return pd.json_normalize(indicator_values).to_csv() 57 | except Exception as e: 58 | return {"error": str(e)} 59 | 60 | # Resource function definitions 61 | 62 | 63 | def get_worldbank_countries(): 64 | """Get list of countries from World Bank API""" 65 | wb_service = _get_worldbank_service() 66 | countries = wb_service.get_countries() 67 | 68 | if "error" in countries: 69 | return f"Error fetching countries: {countries['error']}" 70 | 71 | try: 72 | if isinstance(countries, list) and len(countries) >= 2: 73 | country_data = countries[1] 74 | return pd.json_normalize(country_data).to_csv() 75 | return "No country data available" 76 | except Exception as e: 77 | return f"Error processing country data: {str(e)}" 78 | 79 | 80 | def get_worldbank_indicators(): 81 | """Get list of indicators from World Bank API""" 82 | wb_service = _get_worldbank_service() 83 | indicators = wb_service.get_indicators() 84 | 85 | if "error" in indicators: 86 | return f"Error fetching indicators: {indicators['error']}" 87 | 88 | try: 89 | if isinstance(indicators, list) and len(indicators) >= 2: 90 | indicator_data = indicators[1] 91 | return pd.json_normalize(indicator_data).to_csv() 92 | return "No indicator data available" 93 | except Exception as e: 94 | return f"Error processing indicator data: {str(e)}" 95 | 96 | # Tool function definitions that will be registered with MCP 97 | 98 | 99 | async def worldbank_get_indicator(country_id: str, indicator_id: str, ctx: Context = None) -> str: 100 | """Get indicator data for a specific country from the World Bank API.""" 101 | if not country_id: 102 | return "Error: country_id is required" 103 | 104 | if not indicator_id: 105 | return "Error: indicator_id is required" 106 | 107 | try: 108 | import httpx 109 | 110 | url = f"https://api.worldbank.org/v2/country/{country_id}/indicator/{indicator_id}?format=json&per_page=20000" 111 | 112 | async with httpx.AsyncClient() as client: 113 | response = await client.get(url) 114 | 115 | # Handle non-200 responses 116 | if response.status_code != 200: 117 | return f"Error: API returned status code {response.status_code}: {response.text}" 118 | 119 | data = response.json() 120 | 121 | # Check data structure 122 | if not isinstance(data, list) or len(data) < 2: 123 | return f"Error: Unexpected API response format: {data}" 124 | 125 | # Get the actual data records 126 | indicator_values = data[1] 127 | 128 | # If no data was returned 129 | if not indicator_values: 130 | return "No data available for the specified country and indicator" 131 | 132 | # Convert to CSV 133 | csv_data = pd.json_normalize(indicator_values).to_csv() 134 | return csv_data 135 | 136 | except Exception as e: 137 | return f"Error processing request: {str(e)}" 138 | 139 | # Tool registration and initialization 140 | _worldbank_service = None 141 | 142 | 143 | def initialize_worldbank_service(): 144 | """Initialize the World Bank service""" 145 | global _worldbank_service 146 | _worldbank_service = WorldBankService() 147 | return _worldbank_service 148 | 149 | 150 | def _get_worldbank_service(): 151 | """Get or initialize the World Bank service""" 152 | global _worldbank_service 153 | if _worldbank_service is None: 154 | _worldbank_service = initialize_worldbank_service() 155 | return _worldbank_service 156 | 157 | 158 | def get_worldbank_tools(): 159 | """Get a dictionary of all World Bank tools for registration with MCP""" 160 | return { 161 | "worldbank_get_indicator": worldbank_get_indicator 162 | } 163 | 164 | 165 | def get_worldbank_resources(): 166 | """Get a dictionary of all World Bank resources for registration with MCP""" 167 | return { 168 | "worldbank://countries": get_worldbank_countries, 169 | "worldbank://indicators": get_worldbank_indicators 170 | } 171 | -------------------------------------------------------------------------------- /claude_desktop_config.json: -------------------------------------------------------------------------------- 1 | { 2 | "mcpServers": { 3 | "unified": { 4 | "command": "docker", 5 | "args": [ 6 | "exec", 7 | "-i", 8 | "mcp-tool-kit-mcp-server-1", 9 | "python", 10 | "-u", 11 | "mcp_unified_server.py" 12 | ], 13 | "useStdio": true 14 | } 15 | } 16 | } 17 | -------------------------------------------------------------------------------- /config_loader.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python3 2 | import os 3 | import yaml 4 | from pathlib import Path 5 | import logging 6 | 7 | 8 | def load_config(config_path=None): 9 | """Load configuration from YAML file""" 10 | if not config_path: 11 | # Default config path is config.yaml in the same directory as this file 12 | config_path = Path(__file__).parent / "config.yaml" 13 | 14 | if not os.path.exists(config_path): 15 | logging.warning( 16 | f"Configuration file not found at {config_path}, using default settings") 17 | return {"enabled_tools": {}, "tool_config": {}} 18 | 19 | try: 20 | with open(config_path, 'r') as f: 21 | config = yaml.safe_load(f) 22 | logging.info(f"Loaded configuration from {config_path}") 23 | return config 24 | except Exception as e: 25 | logging.error(f"Error loading configuration: {str(e)}") 26 | return {"enabled_tools": {}, "tool_config": {}} 27 | 28 | 29 | def is_tool_enabled(config, tool_name): 30 | """Check if a tool is enabled in configuration""" 31 | if not config: 32 | return False 33 | 34 | # If no tools are specifically enabled or disabled, assume all are enabled 35 | if "enabled_tools" not in config or not config["enabled_tools"]: 36 | return True 37 | 38 | return config.get("enabled_tools", {}).get(tool_name, False) 39 | 40 | 41 | def get_tool_config(config, tool_name): 42 | """Get configuration for a specific tool""" 43 | if not config: 44 | return {} 45 | 46 | return config.get("tool_config", {}).get(tool_name, {}) 47 | 48 | # Get all enabled tool names 49 | 50 | 51 | def get_enabled_tools(config): 52 | """Get a list of all enabled tool names""" 53 | if not config or "enabled_tools" not in config: 54 | return [] 55 | 56 | return [tool for tool, enabled in config.get("enabled_tools", {}).items() if enabled] 57 | 58 | 59 | # Example config.yaml file structure 60 | DEFAULT_CONFIG = { 61 | "enabled_tools": { 62 | "filesystem": True, 63 | "time_tools": True, 64 | "sequential_thinking": True, 65 | "brave_search": True, 66 | "worldbank": True, 67 | "news_api": True, 68 | "ppt": True, 69 | "data_analysis": False, 70 | "document_management": True, 71 | "yfinance": True, 72 | "excel": True, 73 | "vapi": False, 74 | }, 75 | "tool_config": { 76 | "brave_search": { 77 | "api_key": "" # Override with env var if present 78 | }, 79 | "filesystem": { 80 | "allowed_directories": ["~/documents", "~/downloads"], 81 | "allow_file_deletion": False 82 | } 83 | } 84 | } 85 | 86 | # Create default config file if it doesn't exist 87 | 88 | 89 | def create_default_config(config_path=None): 90 | """Create a default configuration file if it doesn't exist""" 91 | if not config_path: 92 | config_path = Path(__file__).parent / "config.yaml" 93 | 94 | if not os.path.exists(config_path): 95 | try: 96 | with open(config_path, 'w') as f: 97 | yaml.dump(DEFAULT_CONFIG, f, default_flow_style=False) 98 | logging.info(f"Created default configuration at {config_path}") 99 | return True 100 | except Exception as e: 101 | logging.error(f"Error creating default configuration: {str(e)}") 102 | return False 103 | 104 | return False 105 | -------------------------------------------------------------------------------- /docker-compose.yml: -------------------------------------------------------------------------------- 1 | services: 2 | mcp-server: 3 | build: . 4 | ports: 5 | - "8000:8000" # Maps host port 8000 to container port 8000 6 | volumes: 7 | - ./.env:/app/.env # Mount .env file for environment variables 8 | environment: 9 | - BRAVE_API_KEY=${BRAVE_API_KEY} 10 | - BROWSERBASE_API_KEY=${BROWSERBASE_API_KEY} 11 | - BROWSERBASE_PROJECT_ID=${BROWSERBASE_PROJECT_ID} 12 | - NEWS_API_KEY=${NEWS_API_KEY} 13 | - PYTHONUNBUFFERED=1 # Ensures python output isn't buffered 14 | - MCP_HOST=0.0.0.0 # Host binding for MCP server 15 | - MCP_PORT=8000 # Port for MCP server 16 | - MCP_LOG_LEVEL=debug # Log level 17 | restart: unless-stopped 18 | tty: true 19 | healthcheck: 20 | test: ["CMD", "./healthcheck.sh"] 21 | interval: 30s 22 | timeout: 10s 23 | retries: 3 24 | start_period: 5s 25 | -------------------------------------------------------------------------------- /mcp_unified_server.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python3 2 | import sys 3 | import os 4 | from pathlib import Path 5 | import json 6 | from datetime import datetime 7 | from contextlib import asynccontextmanager 8 | from dotenv import load_dotenv 9 | import logging 10 | import uvicorn 11 | # MCP SDK imports 12 | from mcp.server.fastmcp import FastMCP, Context 13 | 14 | logging.basicConfig( 15 | level=logging.DEBUG if os.environ.get( 16 | "MCP_LOG_LEVEL", "").lower() == "debug" else logging.INFO, 17 | format='%(asctime)s [%(levelname)s] %(message)s', 18 | stream=sys.stderr 19 | ) 20 | 21 | # Add app/tools directory to path to import modules 22 | tools_path = Path(__file__).parent / "app" / "tools" 23 | sys.path.append(str(tools_path)) 24 | 25 | # Load environment variables 26 | load_dotenv() 27 | 28 | # Initialize MCP server 29 | mcp = FastMCP( 30 | "Unified MCP Server", 31 | dependencies=["newsapi-python", "msal", "python-dotenv", 32 | "httpx", "pillow", "requests", "pandas", "python-pptx", "nltk"] 33 | ) 34 | 35 | # Add a health check endpoint 36 | 37 | 38 | @mcp.tool(name="health_check") 39 | async def health_check(ctx: Context): 40 | try: 41 | # Check if all critical components are working 42 | status = { 43 | "status": "ok", 44 | "timestamp": datetime.now().isoformat(), 45 | "python_version": sys.version, 46 | "registered_tools_count": len(mcp.registered_tools), 47 | "uptime_seconds": (datetime.now() - datetime.fromisoformat(mcp.startup_time)).total_seconds() 48 | if hasattr(mcp, 'startup_time') else 0 49 | } 50 | 51 | # Add more detailed component status checks here 52 | return status 53 | except Exception as e: 54 | logging.error(f"Health check failed: {str(e)}") 55 | return { 56 | "status": "error", 57 | "error": str(e), 58 | "timestamp": datetime.now().isoformat() 59 | } 60 | 61 | 62 | # Initialize PowerPoint tools 63 | try: 64 | from app.tools.ppt import get_ppt_tools, PowerPointTools, set_external_mcp 65 | # Pass our MCP instance to the ppt module 66 | set_external_mcp(mcp) 67 | ppt_available = True 68 | 69 | # Register PowerPoint tools 70 | ppt_tools = get_ppt_tools() 71 | 72 | for tool_name, tool_func in ppt_tools.items(): 73 | # Register each PowerPoint tool with the main MCP instance 74 | tool_name_str = tool_name if isinstance( 75 | tool_name, str) else tool_name.value 76 | mcp.tool(name=tool_name_str)(tool_func) 77 | 78 | # Add PowerPoint dependencies to MCP dependencies 79 | mcp.dependencies.extend([ 80 | "python-pptx", 81 | "nltk", 82 | "pillow" 83 | ]) 84 | 85 | logging.info("PowerPoint tools registered successfully.") 86 | except ImportError as e: 87 | ppt_available = False 88 | logging.warning(f"Could not load PowerPoint tools: {e}") 89 | 90 | # Initialize Playwright tools 91 | try: 92 | from app.tools.browser_automation import get_playwright_tools, set_external_mcp, initialize 93 | 94 | # Pass our MCP instance to the playwright module 95 | set_external_mcp(mcp) 96 | 97 | # Initialize playwright tools 98 | initialize() 99 | 100 | # Register playwright tools 101 | playwright_tools = get_playwright_tools() 102 | for tool_name, tool_func in playwright_tools.items(): 103 | # Register each playwright tool with the main MCP instance 104 | tool_name_str = tool_name if isinstance( 105 | tool_name, str) else tool_name.value 106 | mcp.tool(name=tool_name_str)(tool_func) 107 | 108 | # Add Playwright dependencies to MCP dependencies 109 | mcp.dependencies.extend([ 110 | "playwright" 111 | ]) 112 | 113 | logging.info("Playwright tools registered successfully.") 114 | except ImportError as e: 115 | logging.warning(f"Could not load Playwright tools: {e}") 116 | 117 | # Initialize Filesystem tools 118 | try: 119 | from app.tools.filesystem import get_filesystem_tools, set_external_mcp, initialize_fs_tools 120 | 121 | # Pass our MCP instance to the filesystem module 122 | set_external_mcp(mcp) 123 | 124 | # Get allowed directories from environment variable 125 | env_dirs = os.environ.get("MCP_FILESYSTEM_DIRS", "") 126 | allowed_dirs = [os.path.expanduser(d.strip()) 127 | for d in env_dirs.split(",") if d.strip()] 128 | 129 | # Default to user's home directory if no dirs specified 130 | if not allowed_dirs: 131 | allowed_dirs = [os.path.expanduser("~")] 132 | 133 | initialize_fs_tools(allowed_dirs) 134 | 135 | # Register filesystem tools 136 | fs_tools = get_filesystem_tools() 137 | for tool_name, tool_func in fs_tools.items(): 138 | # Register each filesystem tool with the main MCP instance 139 | mcp.tool(name=tool_name)(tool_func) 140 | 141 | logging.info("Filesystem tools registered successfully.") 142 | except ImportError as e: 143 | logging.warning(f"Could not load filesystem tools: {e}") 144 | 145 | # Initialize Time tools 146 | try: 147 | from app.tools.time_tools import get_time_tools, set_external_mcp, initialize_time_tools 148 | 149 | # Pass our MCP instance to the time tools module 150 | set_external_mcp(mcp) 151 | 152 | # Initialize time tools 153 | initialize_time_tools() 154 | 155 | # Register time tools 156 | time_tools = get_time_tools() 157 | for tool_name, tool_func in time_tools.items(): 158 | # Register each time tool with the main MCP instance 159 | tool_name_str = tool_name if isinstance( 160 | tool_name, str) else tool_name.value 161 | mcp.tool(name=tool_name_str)(tool_func) 162 | 163 | logging.info("Time tools registered successfully.") 164 | except ImportError as e: 165 | logging.warning(f"Could not load time tools: {e}") 166 | 167 | # Initialize Sequential Thinking tools 168 | try: 169 | from app.tools.sequential_thinking import get_sequential_thinking_tools, set_external_mcp, initialize_thinking_service 170 | 171 | # Pass our MCP instance to the sequential thinking module 172 | set_external_mcp(mcp) 173 | 174 | # Initialize sequential thinking tools 175 | initialize_thinking_service() 176 | 177 | # Register sequential thinking tools 178 | thinking_tools = get_sequential_thinking_tools() 179 | for tool_name, tool_func in thinking_tools.items(): 180 | # Register each sequential thinking tool with the main MCP instance 181 | mcp.tool(name=tool_name)(tool_func) 182 | 183 | logging.info("Sequential Thinking tools registered successfully.") 184 | except ImportError as e: 185 | logging.warning(f"Could not load Sequential Thinking tools: {e}") 186 | 187 | # Initialize FRED API tools 188 | try: 189 | from app.tools.fred import get_fred_api_tools, set_external_mcp, initialize_fred_api_service, initialize 190 | 191 | # Pass our MCP instance to the FRED module 192 | set_external_mcp(mcp) 193 | 194 | # Initialize FRED tools with API key from environment variable 195 | fred_api_key = os.environ.get("FRED_API_KEY") 196 | if fred_api_key: 197 | # Call the module's initialize function 198 | initialize(mcp) 199 | 200 | # Register FRED tools 201 | fred_tools = get_fred_api_tools() 202 | for tool_name, tool_func in fred_tools.items(): 203 | # Register each FRED tool with the main MCP instance 204 | mcp.tool(name=tool_name)(tool_func) 205 | 206 | # Add FRED dependencies to MCP dependencies 207 | mcp.dependencies.extend(["fredapi", "pandas"]) 208 | 209 | logging.info("FRED API tools registered successfully.") 210 | else: 211 | logging.warning( 212 | "FRED API key not configured. FRED API tools will not be available.") 213 | except ImportError as e: 214 | logging.warning(f"Could not load FRED API tools: {e}") 215 | 216 | # Initialize YFinance tools 217 | try: 218 | from app.tools.yfinance import get_yfinance_tools, set_external_mcp, initialize 219 | 220 | # Pass our MCP instance to the yfinance module 221 | set_external_mcp(mcp) 222 | 223 | # Initialize YFinance tools 224 | if initialize(mcp): 225 | # Register YFinance tools 226 | yfinance_tools = get_yfinance_tools() 227 | for tool_name, tool_func in yfinance_tools.items(): 228 | # Register each YFinance tool with the main MCP instance 229 | tool_name_str = tool_name if isinstance( 230 | tool_name, str) else tool_name.value 231 | mcp.tool(name=tool_name_str)(tool_func) 232 | 233 | # Add YFinance dependencies to MCP dependencies 234 | mcp.dependencies.extend(["yfinance", "pandas", "numpy"]) 235 | 236 | logging.info("YFinance tools registered successfully.") 237 | else: 238 | logging.warning("Failed to initialize YFinance tools.") 239 | except ImportError as e: 240 | logging.warning(f"Could not load YFinance tools: {e}") 241 | 242 | # Initialize Excel tools 243 | try: 244 | from app.tools.excel import get_xlsx_tools, set_external_mcp, initialize_xlsx_service 245 | 246 | # Pass our MCP instance to the xlsx module 247 | set_external_mcp(mcp) 248 | 249 | # Initialize xlsx service 250 | initialize_xlsx_service() 251 | 252 | # Register xlsx tools 253 | xlsx_tools = get_xlsx_tools() 254 | for tool_name, tool_func in xlsx_tools.items(): 255 | # Register each xlsx tool with the main MCP instance 256 | tool_name_str = tool_name if isinstance( 257 | tool_name, str) else tool_name.value 258 | mcp.tool(name=tool_name_str)(tool_func) 259 | 260 | # Add Excel dependencies to MCP dependencies 261 | mcp.dependencies.extend([ 262 | "xlsxwriter", 263 | "pandas", 264 | "openpyxl", 265 | "xlrd" 266 | ]) 267 | 268 | logging.info("Excel tools registered successfully.") 269 | except ImportError as e: 270 | logging.warning(f"Could not load Excel tools: {e}") 271 | 272 | # Initialize Brave Search tools 273 | try: 274 | from app.tools.brave_search import get_brave_search_tools, set_external_mcp, initialize_brave_search 275 | 276 | # Pass our MCP instance to the brave search module 277 | set_external_mcp(mcp) 278 | 279 | # Initialize brave search tools with API key from environment variable 280 | brave_api_key = os.environ.get("BRAVE_API_KEY") 281 | if brave_api_key: 282 | initialize_brave_search(brave_api_key) 283 | 284 | # Register brave search tools 285 | brave_tools = get_brave_search_tools() 286 | for tool_name, tool_func in brave_tools.items(): 287 | # Register each brave search tool with the main MCP instance 288 | mcp.tool(name=tool_name)(tool_func) 289 | 290 | logging.info("Brave Search tools registered successfully.") 291 | else: 292 | logging.warning( 293 | "Brave Search API key not configured. Brave Search tools will not be available.") 294 | except ImportError as e: 295 | logging.warning(f"Could not load Brave Search tools: {e}") 296 | 297 | # Initialize World Bank tools 298 | try: 299 | from app.tools.worldbank import get_worldbank_tools, get_worldbank_resources, set_external_mcp, initialize_worldbank_service 300 | 301 | # Pass our MCP instance to the world bank module 302 | set_external_mcp(mcp) 303 | 304 | # Initialize world bank tools 305 | initialize_worldbank_service() 306 | 307 | # Register world bank tools 308 | worldbank_tools = get_worldbank_tools() 309 | for tool_name, tool_func in worldbank_tools.items(): 310 | # Register each world bank tool with the main MCP instance 311 | mcp.tool(name=tool_name)(tool_func) 312 | 313 | # Register world bank resources 314 | worldbank_resources = get_worldbank_resources() 315 | for resource_path, resource_func in worldbank_resources.items(): 316 | # Register each world bank resource with the main MCP instance 317 | mcp.resource(resource_path)(resource_func) 318 | 319 | logging.info("World Bank tools registered successfully.") 320 | except ImportError as e: 321 | logging.warning(f"Could not load World Bank tools: {e}") 322 | 323 | # Initialize News API tools 324 | try: 325 | from app.tools.news_api import get_news_api_tools, set_external_mcp, initialize_news_api_service 326 | 327 | # Pass our MCP instance to the news api module 328 | set_external_mcp(mcp) 329 | 330 | # Initialize news api tools with API key from environment variable 331 | news_api_key = os.environ.get("NEWS_API_KEY") 332 | if news_api_key: 333 | initialize_news_api_service(news_api_key) 334 | 335 | # Register news api tools 336 | news_api_tools = get_news_api_tools() 337 | for tool_name, tool_func in news_api_tools.items(): 338 | # Register each news api tool with the main MCP instance 339 | mcp.tool(name=tool_name)(tool_func) 340 | 341 | logging.info("News API tools registered successfully.") 342 | else: 343 | logging.warning( 344 | "News API key not configured. News API tools will not be available.") 345 | except ImportError as e: 346 | logging.warning(f"Could not load News API tools: {e}") 347 | 348 | # Initialize VAPI tools 349 | try: 350 | from app.tools.vapi import get_vapi_tools, set_external_mcp, initialize_vapi_service 351 | 352 | # Pass our MCP instance to the VAPI module 353 | set_external_mcp(mcp) 354 | 355 | # Initialize VAPI tools 356 | if initialize_vapi_service(): 357 | # Register VAPI tools 358 | vapi_tools = get_vapi_tools() 359 | for tool_name, tool_func in vapi_tools.items(): 360 | # Register each VAPI tool with the main MCP instance 361 | tool_name_str = tool_name if isinstance( 362 | tool_name, str) else tool_name.value 363 | mcp.tool(name=tool_name_str)(tool_func) 364 | 365 | # Add VAPI dependencies to MCP dependencies 366 | mcp.dependencies.extend(["vapi"]) 367 | 368 | logging.info("VAPI tools registered successfully.") 369 | else: 370 | logging.warning("Failed to initialize VAPI tools.") 371 | except ImportError as e: 372 | logging.warning(f"Could not load VAPI tools: {e}") 373 | 374 | # Initialize Document Management tools 375 | try: 376 | from app.tools.document_management import get_pdf_tools, set_external_mcp, initialize_pdf_service 377 | 378 | # Pass our MCP instance to the document management module 379 | set_external_mcp(mcp) 380 | 381 | # Initialize PDF service 382 | initialize_pdf_service() 383 | 384 | # Register PDF tools 385 | pdf_tools = get_pdf_tools() 386 | for tool_name, tool_func in pdf_tools.items(): 387 | # Register each PDF tool with the main MCP instance 388 | mcp.tool(name=tool_name)(tool_func) 389 | 390 | # Add PDF dependencies to MCP dependencies 391 | mcp.dependencies.extend( 392 | ["PyPDF2", "pdf2image", "pytesseract", "Pillow", "reportlab"]) 393 | 394 | logging.info("Document Management tools registered successfully.") 395 | except ImportError as e: 396 | logging.warning(f"Could not load Document Management tools: {e}") 397 | 398 | # Initialize Streamlit tools 399 | try: 400 | from app.tools.streamlit import get_streamlit_tools, set_external_mcp, initialize 401 | 402 | # Pass our MCP instance to the streamlit module 403 | set_external_mcp(mcp) 404 | 405 | # Initialize Streamlit tools 406 | # Get custom apps directory from environment variable if set 407 | apps_dir = os.environ.get("STREAMLIT_APPS_DIR") 408 | 409 | if initialize(mcp): 410 | # Register Streamlit tools 411 | streamlit_tools = get_streamlit_tools() 412 | for tool_name, tool_func in streamlit_tools.items(): 413 | # Register each Streamlit tool with the main MCP instance 414 | tool_name_str = tool_name if isinstance( 415 | tool_name, str) else tool_name.value 416 | mcp.tool(name=tool_name_str)(tool_func) 417 | 418 | # Add Streamlit dependencies to MCP dependencies 419 | mcp.dependencies.extend( 420 | ["streamlit", "pandas", "numpy", "matplotlib", "plotly"]) 421 | 422 | logging.info("Streamlit tools registered successfully.") 423 | else: 424 | logging.warning( 425 | "Failed to initialize Streamlit tools. Make sure streamlit is installed.") 426 | except ImportError as e: 427 | logging.warning(f"Could not load Streamlit tools: {e}") 428 | 429 | 430 | # Validate required environment variables 431 | REQUIRED_ENV_VARS = { 432 | "BRAVE_API_KEY": "For Brave Search functionality", 433 | "NEWS_API_KEY": "For NewsAPI functionality", 434 | "FRED_API_KEY": "your_fred_api_key", 435 | "STREAMLIT_APPS_DIR": "/path/to/streamlit/apps", 436 | "MCP_FILESYSTEM_DIRS": "/path/to/allowed/dir1,/path/to/allowed/dir2", 437 | "MCP_LOG_LEVEL": "info", 438 | "VAPID_API_KEY": "your_vapid_api_key", 439 | } 440 | 441 | missing_vars = [var for var in REQUIRED_ENV_VARS if not os.environ.get(var)] 442 | if missing_vars: 443 | logging.warning("The following environment variables are missing:") 444 | for var in missing_vars: 445 | logging.warning(f" - {var}: {REQUIRED_ENV_VARS[var]}") 446 | logging.warning("Some functionality may be limited.") 447 | 448 | # Initialize JSON-RPC method for tool discovery 449 | 450 | 451 | @mcp.tool(name="initialize") 452 | async def initialize(ctx: Context): 453 | """Return initialization information including available tools.""" 454 | tool_list = [] 455 | 456 | # Extract registered tools from the MCP instance 457 | for tool_name, tool_func in mcp.registered_tools.items(): 458 | tool_info = { 459 | "name": tool_name, 460 | "description": getattr(tool_func, "__doc__", "No description available"), 461 | "parameters": getattr(tool_func, "__annotations__", {}) 462 | } 463 | tool_list.append(tool_info) 464 | 465 | return { 466 | "status": "ok", 467 | "server_name": mcp.name, 468 | "version": getattr(mcp, "version", "1.0.1"), 469 | "tools": tool_list 470 | } 471 | 472 | # Start the server 473 | host = os.environ.get("SERVER_HOST", "0.0.0.0") 474 | port = int(os.environ.get("SERVER_PORT", "8000")) 475 | 476 | # Server Lifespan and Startup 477 | 478 | 479 | @asynccontextmanager 480 | async def server_lifespan(server: FastMCP): 481 | """Server lifespan manager - initialize and cleanup resources""" 482 | try: 483 | # Log startup message 484 | logging.info("Starting Unified MCP Server...") 485 | 486 | # Initialize any services that need async initialization 487 | # (none in our current implementation) 488 | 489 | # Pass any shared context to the request handlers 490 | yield { 491 | "startup_time": datetime.now().isoformat() 492 | } 493 | finally: 494 | # Cleanup on shutdown 495 | logging.info("Shutting down Unified MCP Server...") 496 | # Close any open resources or connections 497 | 498 | # Set lifespan context manager 499 | mcp.lifespan = server_lifespan 500 | 501 | if __name__ == "__main__": 502 | # Add debugging info 503 | logging.info("Starting MCP Unified Server...") 504 | logging.debug(f"Python version: {sys.version}") 505 | 506 | # Use configuration from environment variables if available 507 | # Must be 0.0.0.0 for containers 508 | host = os.environ.get("MCP_HOST", "0.0.0.0") 509 | # Check both PORT and MCP_PORT 510 | port = int(os.environ.get("PORT", os.environ.get("MCP_PORT", "8000"))) 511 | # Default to info instead of debug 512 | log_level = os.environ.get("MCP_LOG_LEVEL", "info") 513 | 514 | # Enable detailed logging for troubleshooting 515 | if log_level.lower() == "debug": 516 | logging.info("Debug logging enabled") 517 | logging.debug( 518 | f"Environment variables: {json.dumps({k: v for k, v in os.environ.items() if not k.startswith('_')}, indent=2)}") 519 | 520 | # Update configuration 521 | mcp.config = { 522 | "host": host, 523 | "port": port, 524 | "log_level": log_level 525 | } 526 | 527 | # Run the server using the MCP's own method instead of direct uvicorn 528 | logging.info(f"Starting server at http://{host}:{port}") 529 | mcp.run() 530 | -------------------------------------------------------------------------------- /requirements.txt: -------------------------------------------------------------------------------- 1 | mcp>=0.4.1 2 | newsapi-python>=0.2.7 3 | python-dotenv>=1.0.1 4 | httpx>=0.27.0 5 | pillow>=10.2.0 6 | pydantic>=2.7.3 7 | pandas>=2.2.1 8 | requests>=2.32.3 9 | python-pptx>=0.6.22 10 | nltk>=3.8.1 11 | psutil>=5.9.8 12 | simple_salesforce>=1.12.5 13 | seaborn>=0.13.2 14 | matplotlib>=3.8.4 15 | pdf2image>=1.17.0 16 | PyPDF2>=3.0.1 17 | pytesseract>=0.3.10 18 | aiohttp>=3.9.5 19 | playwright>=1.43.1 20 | yfinance>=0.2.42 21 | fredapi>=0.5.1 22 | streamlit>=1.32.2 23 | pyyaml>=6.0.1 24 | setuptools>=69.2.0 25 | uvicorn>=0.34.0 26 | watchdog>=3.0.0 27 | reportlab>=3.6.15 28 | scipy>=1.12.2 29 | scikit-learn>=1.3.2 30 | fastapi>=0.106.0 31 | xlsxwriter 32 | openpyxl 33 | xlrd 34 | 35 | -------------------------------------------------------------------------------- /setup_env.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python3 2 | """ 3 | Environment Setup Script for MCP Tool Kit 4 | 5 | This script helps you set up environment variables for the MCP Tool Kit 6 | by creating or updating a .env file in the repository. 7 | 8 | Usage: 9 | python setup_env.py 10 | """ 11 | import os 12 | import sys 13 | from pathlib import Path 14 | 15 | 16 | def main(): 17 | print("MCP Tool Kit Environment Setup") 18 | print("==============================") 19 | print("This script will help you set up environment variables for the MCP Tool Kit.") 20 | print("It will create or update a .env file in the repository root.") 21 | print() 22 | 23 | # Check if .env file exists 24 | env_file = Path(".env") 25 | existing_vars = {} 26 | 27 | if env_file.exists(): 28 | print(f"Found existing .env file at {env_file.absolute()}") 29 | # Parse existing variables 30 | with open(env_file, 'r') as f: 31 | for line in f: 32 | line = line.strip() 33 | if line and not line.startswith('#') and '=' in line: 34 | key, value = line.split('=', 1) 35 | existing_vars[key.strip()] = value.strip() 36 | print(f"Found {len(existing_vars)} existing variables.") 37 | print() 38 | 39 | update = input( 40 | "Do you want to update the existing .env file? (y/n): ").lower() 41 | if update != 'y': 42 | print("Setup canceled. Existing .env file was not modified.") 43 | return 44 | 45 | # Define required environment variables with descriptions 46 | env_vars = { 47 | "BRAVE_API_KEY": { 48 | "description": "API key for Brave Search functionality", 49 | "required": True, 50 | "default": existing_vars.get("BRAVE_API_KEY", ""), 51 | "example": "YOUR_BRAVE_API_KEY", 52 | "url": "https://brave.com/search/api/" 53 | }, 54 | "NEWS_API_KEY": { 55 | "description": "API key for NewsAPI functionality", 56 | "required": True, 57 | "default": existing_vars.get("NEWS_API_KEY", ""), 58 | "example": "YOUR_NEWS_API_KEY", 59 | "url": "https://newsapi.org/" 60 | }, 61 | "FRED_API_KEY": { 62 | "description": "API key for FRED economic data API", 63 | "required": True, 64 | "default": existing_vars.get("FRED_API_KEY", ""), 65 | "example": "YOUR_FRED_API_KEY", 66 | "url": "https://fred.stlouisfed.org/docs/api/api_key.html" 67 | }, 68 | "STREAMLIT_APPS_DIR": { 69 | "description": "Directory for Streamlit applications", 70 | "required": False, 71 | "default": existing_vars.get("STREAMLIT_APPS_DIR", os.path.expanduser("~/streamlit_apps")), 72 | "example": "/path/to/streamlit/apps" 73 | }, 74 | "MCP_FILESYSTEM_DIRS": { 75 | "description": "Comma-separated list of directories that can be accessed by filesystem tools", 76 | "required": False, 77 | "default": existing_vars.get("MCP_FILESYSTEM_DIRS", os.path.expanduser("~")), 78 | "example": "/path/to/dir1,/path/to/dir2" 79 | }, 80 | } 81 | 82 | # Collect values from user 83 | new_values = {} 84 | print("\nPlease enter values for the following environment variables:") 85 | print("(Press Enter to use default or existing value shown in brackets)") 86 | print() 87 | 88 | for key, info in env_vars.items(): 89 | default = info["default"] 90 | default_display = f"[{default}]" if default else "" 91 | 92 | # Show URL for API keys that need to be obtained 93 | url_info = f" (Get it from: {info['url']})" if "url" in info else "" 94 | 95 | while True: 96 | prompt = f"{key}: {info['description']}{url_info} {default_display}: " 97 | value = input(prompt).strip() 98 | 99 | # Use default if empty 100 | if not value and default: 101 | value = default 102 | 103 | # Validate required fields 104 | if info["required"] and not value: 105 | print(f"Error: {key} is required.") 106 | continue 107 | 108 | new_values[key] = value 109 | break 110 | 111 | # Write to .env file 112 | print("\nWriting environment variables to .env file...") 113 | 114 | with open(env_file, 'w') as f: 115 | f.write("# Environment variables for MCP Tool Kit\n") 116 | f.write("# Generated by setup_env.py\n\n") 117 | 118 | for key, info in env_vars.items(): 119 | if key in new_values and new_values[key]: 120 | f.write(f"# {info['description']}\n") 121 | f.write(f"{key}={new_values[key]}\n\n") 122 | 123 | print( 124 | f"Environment setup complete. Configuration saved to {env_file.absolute()}") 125 | print("\nYou can manually edit this file at any time to update your configuration.") 126 | print("Remember to restart the MCP server after changing environment variables.") 127 | 128 | 129 | if __name__ == "__main__": 130 | main() 131 | -------------------------------------------------------------------------------- /static/111_tools.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/getfounded/mcp-tool-kit/f3e383e0d3b9c5a28cb42b023ae367bff1edd552/static/111_tools.png -------------------------------------------------------------------------------- /static/123_tools.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/getfounded/mcp-tool-kit/f3e383e0d3b9c5a28cb42b023ae367bff1edd552/static/123_tools.png -------------------------------------------------------------------------------- /static/87_tools.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/getfounded/mcp-tool-kit/f3e383e0d3b9c5a28cb42b023ae367bff1edd552/static/87_tools.png --------------------------------------------------------------------------------