├── .gitignore
├── .pre-commit-config.yaml
├── .python-version
├── .trunk
├── .gitignore
├── configs
│ ├── .isort.cfg
│ ├── .markdownlint.yaml
│ └── ruff.toml
└── trunk.yaml
├── CONTRIBUTING.md
├── LICENSE
├── README.md
├── config
├── mcp_cline_settings_for_Cline_and_RooCode.json
└── mcp_servers.example.json
├── docker-dev
├── .devcontainer
│ └── devcontainer.json
├── Dockerfile
└── docker-compose.yml
├── docker
├── .dockerignore
├── Dockerfile
├── README.md
├── docker-compose.yml
└── entrypoint.sh
├── package.json
├── pyproject.toml
├── requirements.txt
├── scripts
├── container_startup.py
├── detect_host_ip.py
├── integrate_config_generator.py
├── manage_mcp.py
├── mcp_manager
│ ├── __init__.py
│ ├── commands.py
│ ├── config.py
│ └── process_utils.py
└── setup_env.py
└── uv.lock
/.gitignore:
--------------------------------------------------------------------------------
1 | # Python-generated files
2 | __pycache__/
3 | *.py[oc]
4 | build/
5 | dist/
6 | wheels/
7 | *.egg-info
8 | # Test caches, virtualenvs, and Poetry lockfile
9 | .pytest_cache/
10 | .venv/
11 | poetry.lock
12 |
13 | # Virtual environments
14 | .venv
15 | .venv/
16 | venv/
17 | env/
18 |
19 |
20 | # Config files
21 | config/*
22 | !config/mcp_servers.example.json
23 | !config/mcp_cline_settings_for_Cline_and_RooCode.json
24 | backup/
25 |
26 | # Node.js
27 | package-lock.json
28 | .idea/
29 | yarn.lock
30 | .npm/
31 | .yarn/
32 | node_modules/
33 |
34 | # Misc
35 | logs/
36 | pids/
37 | .vscode/
38 |
39 | .roo/
40 | config/client_configs/
41 | mcp-data/
42 | npm-global/
43 |
--------------------------------------------------------------------------------
/.pre-commit-config.yaml:
--------------------------------------------------------------------------------
1 | repos:
2 | - repo: https://github.com/psf/black
3 | rev: 23.3.0
4 | hooks:
5 | - id: black
6 | - repo: https://github.com/PyCQA/isort
7 | rev: 5.12.0
8 | hooks:
9 | - id: isort
10 | - repo: https://github.com/charliermarsh/ruff-pre-commit
11 | rev: v0.0.270
12 | hooks:
13 | - id: ruff
14 |
--------------------------------------------------------------------------------
/.python-version:
--------------------------------------------------------------------------------
1 | 3.12
2 |
--------------------------------------------------------------------------------
/.trunk/.gitignore:
--------------------------------------------------------------------------------
1 | *out
2 | *logs
3 | *actions
4 | *notifications
5 | *tools
6 | plugins
7 | user_trunk.yaml
8 | user.yaml
9 | tmp
10 |
--------------------------------------------------------------------------------
/.trunk/configs/.isort.cfg:
--------------------------------------------------------------------------------
1 | [settings]
2 | profile=black
3 |
--------------------------------------------------------------------------------
/.trunk/configs/.markdownlint.yaml:
--------------------------------------------------------------------------------
1 | # Prettier friendly markdownlint config (all formatting rules disabled)
2 | extends: markdownlint/style/prettier
3 |
--------------------------------------------------------------------------------
/.trunk/configs/ruff.toml:
--------------------------------------------------------------------------------
1 | # Generic, formatter-friendly config.
2 | select = ["B", "D3", "E", "F"]
3 |
4 | # Never enforce `E501` (line length violations). This should be handled by formatters.
5 | ignore = ["E501"]
6 |
--------------------------------------------------------------------------------
/.trunk/trunk.yaml:
--------------------------------------------------------------------------------
1 | # This file controls the behavior of Trunk: https://docs.trunk.io/cli
2 | # To learn more about the format of this file, see https://docs.trunk.io/reference/trunk-yaml
3 | version: 0.1
4 | cli:
5 | version: 1.22.12
6 | # Trunk provides extensibility via plugins. (https://docs.trunk.io/plugins)
7 | plugins:
8 | sources:
9 | - id: trunk
10 | ref: v1.6.7
11 | uri: https://github.com/trunk-io/plugins
12 | # Many linters and tools depend on runtimes - configure them here. (https://docs.trunk.io/runtimes)
13 | runtimes:
14 | enabled:
15 | - node@18.20.5
16 | - python@3.10.8
17 | # This is the section where you manage your linters. (https://docs.trunk.io/check/configuration)
18 | lint:
19 | disabled:
20 | - isort
21 | enabled:
22 | - bandit@1.8.3
23 | - black@25.1.0
24 | - checkov@3.2.396
25 | - git-diff-check
26 | - markdownlint@0.44.0
27 | - prettier@3.5.3
28 | - ruff@0.11.2
29 | - taplo@0.9.3
30 | - trufflehog@3.88.20
31 | actions:
32 | disabled:
33 | - trunk-announce
34 | - trunk-check-pre-push
35 | - trunk-fmt-pre-commit
36 | enabled:
37 | - trunk-upgrade-available
38 |
--------------------------------------------------------------------------------
/CONTRIBUTING.md:
--------------------------------------------------------------------------------
1 | # Contributing Guidelines | 贡献指南
2 |
3 | [English](#contributing-guidelines-english) | [中文](#贡献指南-中文)
4 |
5 |
6 |
7 | # Contributing Guidelines (English)
8 |
9 | Thank you for considering contributing to the MCP-Server-Unified-Deployment project! Here are some guidelines to help you participate in the project development.
10 |
11 | ## How to Contribute
12 |
13 | ### Reporting Issues
14 |
15 | If you find a bug or have a suggestion for a new feature, please submit it through GitHub Issues. When submitting an issue, please include the following information:
16 |
17 | - Detailed description of the issue
18 | - Steps to reproduce (if applicable)
19 | - Expected behavior vs. actual behavior
20 | - Environment information (operating system, Python version, etc.)
21 | - Possible solution (if any)
22 |
23 | ### Submitting Code
24 |
25 | 1. Fork this repository
26 | 2. Create your feature branch (`git checkout -b feature/amazing-feature`)
27 | 3. Commit your changes (`git commit -m 'Add some amazing feature'`)
28 | - Please follow the [Conventional Commits](https://www.conventionalcommits.org/) specification
29 | 4. Push to the branch (`git push origin feature/amazing-feature`)
30 | 5. Open a Pull Request
31 |
32 | ### Pull Request Process
33 |
34 | 1. Ensure your PR description clearly explains the changes you've made
35 | 2. If your PR resolves an Issue, reference that Issue in the PR description (e.g., `Fixes #123`)
36 | 3. Make sure your code passes all tests
37 | 4. Project maintainers will review your PR and may request some changes
38 | 5. Once the PR is approved, it will be merged into the main branch
39 |
40 | ## Development Guidelines
41 |
42 | ### Code Style
43 |
44 | This project uses the following tools to maintain code quality and consistency:
45 |
46 | - **Black**: Python code formatter
47 | - **isort**: Import statement sorter
48 | - **Ruff**: Python linter
49 |
50 | Please ensure your code complies with these tools' standards. You can use pre-commit hooks to automatically check and format your code:
51 |
52 | ```bash
53 | # Install pre-commit hooks
54 | pip install pre-commit
55 | pre-commit install
56 | ```
57 |
58 | ### Testing
59 |
60 | Before submitting a PR, please make sure your code passes all tests. If you add new functionality, please add corresponding tests as well.
61 |
62 | ### Documentation
63 |
64 | If your changes affect the user experience or API, please update the relevant documentation. Good documentation is crucial for the project's usability.
65 |
66 | ## Code of Conduct
67 |
68 | Please refer to the [Code of Conduct](CODE_OF_CONDUCT.md) document to understand our expectations for community members.
69 |
70 | ## License
71 |
72 | By contributing code, you agree that your contributions will be licensed under the project's [MIT License](LICENSE).
73 |
74 |
75 |
76 | # 贡献指南 (中文)
77 |
78 | 感谢您考虑为MCP-Server-Unified-Deployment项目做出贡献!以下是一些指导原则,帮助您参与项目开发。
79 |
80 | ## 如何贡献
81 |
82 | ### 报告问题
83 |
84 | 如果您发现了bug或有新功能建议,请通过GitHub Issues提交。提交问题时,请包含以下信息:
85 |
86 | - 问题的详细描述
87 | - 复现步骤(如适用)
88 | - 预期行为与实际行为
89 | - 环境信息(操作系统、Python版本等)
90 | - 可能的解决方案(如有)
91 |
92 | ### 提交代码
93 |
94 | 1. Fork本仓库
95 | 2. 创建您的特性分支 (`git checkout -b feature/amazing-feature`)
96 | 3. 提交您的更改 (`git commit -m 'Add some amazing feature'`)
97 | - 请遵循[约定式提交](https://www.conventionalcommits.org/)规范
98 | 4. 推送到分支 (`git push origin feature/amazing-feature`)
99 | 5. 打开Pull Request
100 |
101 | ### Pull Request流程
102 |
103 | 1. 确保您的PR描述清楚地说明了您所做的更改
104 | 2. 如果您的PR解决了某个Issue,请在PR描述中引用该Issue(例如 `Fixes #123`)
105 | 3. 确保您的代码通过了所有测试
106 | 4. 项目维护者将审查您的PR,可能会要求进行一些更改
107 | 5. 一旦PR被批准,它将被合并到主分支
108 |
109 | ## 开发指南
110 |
111 | ### 代码风格
112 |
113 | 本项目使用以下工具来保持代码质量和一致性:
114 |
115 | - **Black**:Python代码格式化工具
116 | - **isort**:导入语句排序工具
117 | - **Ruff**:Python linter
118 |
119 | 请确保您的代码符合这些工具的规范。您可以使用pre-commit钩子来自动检查和格式化代码:
120 |
121 | ```bash
122 | # 安装pre-commit钩子
123 | pip install pre-commit
124 | pre-commit install
125 | ```
126 |
127 | ### 测试
128 |
129 | 在提交PR之前,请确保您的代码通过了所有测试。如果您添加了新功能,请同时添加相应的测试。
130 |
131 | ### 文档
132 |
133 | 如果您的更改影响了用户体验或API,请更新相应的文档。良好的文档对于项目的可用性至关重要。
134 |
135 | ## 行为准则
136 |
137 | 请参阅[行为准则](CODE_OF_CONDUCT.md)文档,了解我们对社区成员的期望。
138 |
139 | ## 许可证
140 |
141 | 通过贡献代码,您同意您的贡献将根据项目的[MIT许可证](LICENSE)进行许可。
142 |
--------------------------------------------------------------------------------
/LICENSE:
--------------------------------------------------------------------------------
1 | MIT License
2 |
3 | Copyright (c) 2023-2024 BigUncle
4 |
5 | Permission is hereby granted, free of charge, to any person obtaining a copy
6 | of this software and associated documentation files (the "Software"), to deal
7 | in the Software without restriction, including without limitation the rights
8 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
9 | copies of the Software, and to permit persons to whom the Software is
10 | furnished to do so, subject to the following conditions:
11 |
12 | The above copyright notice and this permission notice shall be included in all
13 | copies or substantial portions of the Software.
14 |
15 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
18 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
20 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
21 | SOFTWARE.
--------------------------------------------------------------------------------
/README.md:
--------------------------------------------------------------------------------
1 | ## MCP Server Unified Deployment
2 |
3 | [English](#mcp-server-unified-deployment) | [中文](#mcp服务器统一部署工具)
4 |
5 | A unified deployment and management tool for MCP (Model Context Protocol) servers. This project converts MCP servers deployed in various forms (uvx, npx, etc.) into a standardized SSE (Server-Sent Events) deployment, facilitating unified invocation by different tools.
6 |
7 | **Table of Contents**
8 |
9 | - [Features](#features)
10 | - [Prerequisites](#prerequisites)
11 | - [Installation](#installation)
12 | - [Configuration](#configuration)
13 | - [Server Types](#server-types)
14 | - [Environment Variables](#environment-variables)
15 | - [Usage](#usage)
16 | - [Basic Commands](#basic-commands)
17 | - [Example](#example)
18 | - [Integrate Configuration Generator Script](#integrate-configuration-generator-script)
19 | - [Monitoring](#monitoring)
20 | - [Client Configuration](#client-configuration)
21 | - [Directory Structure](#directory-structure)
22 | - [Docker Support](#docker-support)
23 | - [Docker Deployment Options](#docker-deployment-options)
24 | - [Using Docker for Development](#using-docker-for-development)
25 | - [Production Deployment with Pre-Built Images](#production-deployment-with-pre-built-images)
26 | - [Production Deployment with Docker Compose](#production-deployment-with-docker-compose)
27 | - [Contributing](#contributing)
28 | - [License](#license)
29 |
30 | **中文导航**
31 |
32 | - [MCP服务器统一部署工具](#mcp服务器统一部署工具)
33 | - [特性](#特性)
34 | - [前提条件](#前提条件)
35 | - [安装](#安装)
36 | - [配置](#配置)
37 | - [服务器类型](#服务器类型)
38 | - [环境变量](#环境变量)
39 | - [使用方法](#使用方法)
40 | - [基本命令](#基本命令)
41 | - [示例](#示例)
42 | - [客户端配置](#客户端配置)
43 | - [目录结构](#目录结构)
44 | - [Docker支持](#docker支持)
45 | - [贡献](#贡献)
46 | - [许可证](#许可证)
47 |
48 | ### Features
49 |
50 | - **Unified Management**: Manage multiple MCP servers from a single interface
51 | - **SSE Standardization**: Convert various MCP server implementations to SSE protocol
52 | - **Cross-Platform**: Works on Windows, macOS, and Linux
53 | - **Flexible Configuration**: Easy configuration for different server types and environments
54 |
55 |
56 | # MCP Server Unified Deployment
57 |
58 | ## Features
59 |
60 | - **Unified Management**: Manage multiple MCP servers from a single interface
61 | - **SSE Standardization**: Convert various MCP server implementations to SSE protocol
62 | - **Cross-Platform**: Works on Windows, macOS, and Linux
63 | - **Flexible Configuration**: Easy configuration for different server types and environments
64 | - **Process Management**: Start, stop, restart, and check status of MCP servers
65 | - **Docker Support**: Comprehensive Docker deployment and management options
66 | - **GitHub Workflow Integration**: CI/CD pipeline for automated testing and deployment
67 |
68 | ## Prerequisites
69 |
70 | - Python 3.12+
71 | - Git (for source code type servers)
72 | - Node.js and npm (for Node.js based servers)
73 | - uv (for dependency management)
74 | - pipx (recommended for installing `mcp-proxy`)
75 | - uvx (for uvx based servers)
76 |
77 | - Docker and Docker Compose (optional, for containerized deployment)
78 |
79 | ## Installation
80 |
81 | 1. Clone this repository:
82 |
83 | ```bash
84 | git clone https://github.com/BigUncle/MCP-Server-Unified-Deployment.git
85 | cd MCP-Server-Unified-Deployment
86 | ```
87 |
88 | 2. Set up a virtual environment and install dependencies using uv:
89 |
90 | ```bash
91 | # Install uv if you don't have it
92 | pip install uv
93 |
94 |
95 | # Create a virtual environment
96 | uv venv --python=3.12
97 |
98 | # install from requirements.txt
99 | uv pip install -r requirements.txt
100 | # OR install dependencies with uv
101 | # uv pip install -e .
102 |
103 |
104 | # Activate the virtual environment (Windows)
105 | .venv\Scripts\activate
106 | # OR Activate the virtual environment (Linux/MacOS)
107 | # source .venv/bin/activate
108 | ```
109 |
110 | Alternatively, you can use our setup script:
111 |
112 | ```bash
113 | python scripts/setup_env.py
114 | ```
115 |
116 | 3. Install mcp-proxy using pipx (recommended):
117 |
118 | ```bash
119 | # Install pipx if you don't have it
120 | pip install pipx
121 | pipx ensurepath
122 |
123 | # Install mcp-proxy
124 | pipx install mcp-proxy
125 | ```
126 |
127 | 4. Create your configuration file:
128 |
129 | ```bash
130 | cp config/mcp_servers.example.json config/mcp_servers.json
131 | ```
132 |
133 | 5. Edit the configuration file to match your requirements.
134 |
135 | ## Configuration
136 |
137 | The configuration file (`config/mcp_servers.json`) contains settings for all MCP servers you want to manage. Each server entry includes:
138 |
139 | ```json
140 | {
141 | "name": "server-name", // Unique name for the server
142 | "enabled": true, // Whether the server is enabled
143 | "type": "uvx", // Server type (uvx, node, source_code, etc.)
144 | "sse_host": "localhost", // Host for SSE endpoint
145 | "sse_port": 23001, // Port for SSE endpoint
146 | "allow_origin": "*", // CORS setting for SSE endpoint
147 | "install_commands": [ // Commands to install the server
148 | "uvx -v mcp-server-fetch"
149 | ],
150 | "sse_start_command": "mcp-proxy {start_command} --sse-host={sse_host} --sse-port={sse_port} --allow-origin='{allow_origin}' ", // Command template for SSE mode
151 | "start_command": "uvx mcp-server-fetch", // Original start command
152 | "env": {}, // Environment variables for the server
153 | "working_directory": "", // Optional working directory for running commands
154 | "repo_url": "", // For source_code type, git repository URL
155 | "branch": "main" // For source_code type, git branch to use
156 | }
157 | ```
158 |
159 | ### Server Types
160 |
161 | - **uvx**: Servers deployed using uvx
162 | - **node**: Node.js based servers
163 | - **source_code**: Servers that need to be built from source code
164 | - **docker**: Servers that run in Docker containers
165 |
166 | ### Environment Variables
167 |
168 | You can specify environment variables for each server in the `env` section:
169 |
170 | ```json
171 | "env": {
172 | "NODE_ENV": "production",
173 | "DEBUG": "true"
174 | }
175 | ```
176 |
177 | ## Usage
178 |
179 | ### Basic Commands
180 |
181 | ```bash
182 | # Start all enabled servers
183 | python scripts/manage_mcp.py start
184 |
185 | # Start a specific server
186 | python scripts/manage_mcp.py start <server_name>
187 |
188 | # Stop all servers
189 | python scripts/manage_mcp.py stop
190 |
191 | # Stop a specific server
192 | python scripts/manage_mcp.py stop <server_name>
193 |
194 | # Restart a specific server
195 | python scripts/manage_mcp.py restart <server_name>
196 |
197 | # Check status of all servers
198 | python scripts/manage_mcp.py status
199 | ```
200 |
201 | ### Example
202 |
203 | To start the fetch server:
204 |
205 | ```bash
206 | python scripts/manage_mcp.py start fetch
207 | ```
208 |
209 | ## Integrate Configuration Generator Script
210 |
211 | The `integrate_config_generator.py` script is used to generate client-specific configuration files based on the `mcp_servers.json` file. It reads the `mcp_servers.json` file and generates client-specific configuration files in the `config/client_configs/` directory.
212 |
213 | ### Usage
214 |
215 | ```bash
216 | python scripts/integrate_config_generator.py
217 | ```
218 |
219 | This script will create configuration files for each client found in the `mcp_servers.json` file and place them in the `config/client_configs/` directory. The generated configuration files are named `mcp_<client>_*.json`, where `<client>` is the name of the client. You can then use these configuration files to configure your client.
220 |
221 |
222 | ## Directory Structure
223 |
224 | ```
225 | .
226 | ├── config
227 | │ ├── host_info.json # Host info cache (auto-generated, used for network/config assist)
228 | │ ├── mcp_servers.example.json # Example MCP server configuration
229 | │ ├── mcp_servers.json # Main MCP server configuration
230 | │ └── client_configs/ # Client configuration files generated by integrate_config_generator.py
231 | | └── mcp_<client>_*.json # Client configuration files
232 | ├── docker
233 | │ ├── docker-compose.yml # Production Docker Compose configuration
234 | │ ├── Dockerfile # Production Docker image build file
235 | │ ├── entrypoint.sh # Container entrypoint script
236 | │ └── README.md # Docker-related documentation
237 | ├── docker-dev
238 | │ ├── docker-compose.yml # Development Docker Compose configuration
239 | │ ├── Dockerfile # Development Docker image build file
240 | │ └── .devcontainer # Devcontainer directory
241 | │ └─ devcontainer.json # VS Code devcontainer configuration
242 | ├── docs/ # Project documentation
243 | ├── logs/ # Server runtime logs
244 | ├── mcp-data/ # Runtime data storage (if needed)
245 | ├── mcp-servers/ # MCP server source code (if any)
246 | ├── node-modules/ # Node.js dependencies (if needed)
247 | ├── npm-global # Global npm dependencies and cache
248 | │ ├── bin
249 | │ ├── _cacache
250 | │ ├── lib
251 | │ ├── _logs
252 | │ ├── _npx
253 | ├── pids/ # Process ID files
254 | ├── scripts
255 | │ ├── mcp_manager # Management modules (commands, config, process utils, etc.)
256 | │ │ ├── commands.py # Command modules
257 | │ │ ├── config.py # Configuration modules
258 | │ │ └── process_utils.py # Process utilities
259 | │ ├── container_startup.py # Container startup helper script
260 | │ ├── detect_host_ip.py # Host IP detection script
261 | │ ├── integrate_config_generator.py # Client config generator script
262 | │ ├── manage_mcp.py # Main MCP management script
263 | │ └── setup_env.py # Environment setup script
264 | ├── uv-cache/ # Python dependency cache (auto-generated by uv)
265 | ├── README.md # Project documentation
266 | └── requirements.txt # Python dependency list
267 | ```
268 |
269 | **Notes:**
270 | - `config/client_configs/`: Stores client-specific configuration files generated by `integrate_config_generator.py`.
271 | - `config/host_info.json`: Auto-generated host info cache, used for network configuration and automation.
272 | - `docker/` and `docker-dev/`: Production and development Docker configurations for easy environment switching.
273 | - `mcp-servers/`: Place your custom or extended MCP server source code here if needed.
274 | - `scripts/`: All management, automation, and configuration scripts. The main entry point is `manage_mcp.py`.
275 | - Other directories like `logs/`, `pids/`, `mcp-data/`, `uv-cache/` are for runtime or cache data and do not require manual maintenance.
276 |
277 |
278 | ## Docker Support
279 |
280 | This project provides comprehensive Docker support for both development and deployment environments.
281 |
282 | ### Docker Deployment Options
283 |
284 | 1. **Development Environment**:
285 | - A development container with all necessary tools pre-installed
286 | - Visual Studio Code integration via devcontainer configuration
287 |
288 | 2. **Production Deployment**:
289 | - Multi-container setup with Docker Compose
290 | - Individual server containers with proper isolation
291 | - Persistent volume management for data and configurations
292 |
293 | ### Using Docker for Development
294 |
295 | To start the development environment:
296 |
297 | ```bash
298 | # Start the development container
299 | docker compose -f docker-dev/docker-compose.yml up -d
300 |
301 | # Connect to the container
302 | docker exec -it mcp-dev zsh
303 | ```
304 | ### Production Deployment with pre-built images
305 |
306 | To deploy in a production environment, you can use pre-built images from Docker Hub.
307 | ```bash
308 | docker pull biguncle2018/mcp-server-unified:latest
309 |
310 | # Start the production container
311 | docker run -d --name mcp-server-unified biguncle2018/mcp-server-unified:latest
312 |
313 | # View logs
314 | docker logs -f mcp-server-unified
315 |
316 | # Connect to the container
317 | docker exec -it mcp-server-unified zsh
318 | ```
319 |
320 | ### Production Deployment with Docker Compose
321 |
322 | To deploy in a production environment:
323 | #### Configuration
324 | Copy the example configuration file:
325 |
326 | ```bash
327 | cp config/mcp_servers.example.json config/mcp_servers.json
328 | ```
329 | Or edit the configuration file as needed.
330 |
331 | #### Modify Dockerfile
332 | If necessary, modify the Dockerfile or `docker-compose.yml` in the `docker/` directory to suit your needs.
333 | For example, you may need to adjust the `ENTRYPOINT` or `REAL_HOST_IP` variables or `TIME ZONE` variables.
334 |
335 | #### Build and Start Containers
336 |
337 | ```bash
338 | # Build and start all containers
339 | docker compose -f docker/docker-compose.yml up -d
340 |
341 | # View logs
342 | docker compose -f docker/docker-compose.yml logs -f
343 |
344 | ```
345 |
346 |
347 | ## Contributing
348 |
349 | Contributions are welcome! Please see [CONTRIBUTING.md](CONTRIBUTING.md) for details.
350 |
351 | ## License
352 |
353 | This project is licensed under the MIT License - see the [LICENSE](LICENSE) file for details.
354 |
355 | ---
356 |
357 | # MCP服务器统一部署工具
358 |
359 | [English](#mcp-server-unified-deployment) | [中文](#mcp服务器统一部署工具)
360 |
361 | 这是一个用于统一部署和管理MCP(Model Context Protocol)服务器的工具。该项目将以不同形式(uvx、npx等)部署的MCP服务器统一转换为标准化的SSE(Server-Sent Events)部署方式,方便不同工具的统一调用。
362 |
363 | ## 特性
364 |
365 | - **统一管理**:通过单一界面管理多个MCP服务器
366 | - **SSE标准化**:将各种MCP服务器实现转换为SSE协议
367 | - **跨平台**:支持Windows、macOS和Linux
368 | - **灵活配置**:轻松配置不同类型和环境的服务器
369 | - **进程管理**:启动、停止、重启和检查MCP服务器状态
370 | - **Docker支持**:全面的Docker部署和管理选项
371 | - **GitHub工作流集成**:自动化测试和部署的CI/CD管道
372 |
373 | ## 前提条件
374 |
375 | - Python 3.12+
376 | - Git(用于源代码类型服务器)
377 | - Node.js和npm(用于基于Node.js的服务器)
378 | - uv(用于依赖管理)
379 | - pipx(用于安装mcp-proxy)
380 | - uvx(用于基于uvx的服务器)
381 | - Docker和Docker Compose(可选,用于容器化部署)
382 |
383 | ## 安装
384 |
385 | 1. 克隆此仓库:
386 |
387 | ```bash
388 | git clone https://github.com/BigUncle/MCP-Server-Unified-Deployment.git
389 | cd MCP-Server-Unified-Deployment
390 | ```
391 |
392 | 2. 使用uv设置虚拟环境并安装所需的Python依赖:
393 |
394 | ```bash
395 | # 如果没有安装uv,先安装uv
396 | pip install uv
397 |
398 | # 创建虚拟环境
399 | uv venv --python=3.12
400 |
401 | # 使用uv安装依赖
402 | uv pip install -r requirements.txt
403 |
404 | # 激活虚拟环境(Windows)
405 | .venv\Scripts\activate
406 | # 或激活虚拟环境(Linux/MacOS)
407 | # source .venv/bin/activate
408 | ```
409 |
410 | 或者,您可以使用我们的设置脚本:
411 |
412 | ```bash
413 | python scripts/setup_env.py
414 | ```
415 |
416 | 3. 使用pipx安装mcp-proxy(推荐):
417 |
418 | ```bash
419 | # 如果没有安装pipx,先安装pipx
420 | pip install pipx
421 | pipx ensurepath
422 |
423 | # 安装mcp-proxy
424 | pipx install mcp-proxy
425 | ```
426 |
427 | 4. 创建配置文件:
428 |
429 | ```bash
430 | cp config/mcp_servers.example.json config/mcp_servers.json
431 | ```
432 |
433 | 5. 编辑配置文件以满足您的需求。
434 |
435 | ## 配置
436 |
437 | 配置文件(`config/mcp_servers.json`)包含您想要管理的所有MCP服务器的设置。每个服务器条目包括:
438 |
439 | ```json
440 | {
441 | "name": "server-name", // 服务器的唯一名称
442 | "enabled": true, // 服务器是否启用
443 | "type": "uvx", // 服务器类型(uvx、node、source_code等)
444 | "sse_host": "localhost", // SSE端点的主机
445 | "sse_port": 23001, // SSE端点的端口
446 | "allow_origin": "*", // SSE端点的CORS设置
447 | "install_commands": [ // 安装服务器的命令
448 | "uvx -v mcp-server-fetch"
449 | ],
450 | "sse_start_command": "mcp-proxy {start_command} --sse-host={sse_host} --sse-port={sse_port} --allow-origin='{allow_origin}' ", // SSE模式的命令模板
451 | "start_command": "uvx mcp-server-fetch", // 原始启动命令
452 | "env": {}, // 服务器的环境变量
453 | "working_directory": "", // 运行命令的可选工作目录
454 | "repo_url": "", // 对于source_code类型,git仓库URL
455 | "branch": "main" // 对于source_code类型,使用的git分支
456 | }
457 | ```
458 |
459 | ### 服务器类型
460 |
461 | - **uvx**:使用uvx部署的服务器
462 | - **node**:基于Node.js的服务器
463 | - **source_code**:需要从源代码构建的服务器
464 | - **docker**:在Docker容器中运行的服务器
465 |
466 | ### 环境变量
467 |
468 | 您可以在`env`部分为每个服务器指定环境变量:
469 |
470 | ```json
471 | "env": {
472 | "NODE_ENV": "production",
473 | "DEBUG": "true"
474 | }
475 | ```
476 |
477 | ## 使用方法
478 |
479 | ### 基本命令
480 |
481 | ```bash
482 | # 启动所有已启用的服务器
483 | python scripts/manage_mcp.py start
484 |
485 | # 启动特定服务器
486 | python scripts/manage_mcp.py start <server_name>
487 |
488 | # 停止所有服务器
489 | python scripts/manage_mcp.py stop
490 |
491 | # 停止特定服务器
492 | python scripts/manage_mcp.py stop <server_name>
493 |
494 | # 重启特定服务器
495 | python scripts/manage_mcp.py restart <server_name>
496 |
497 | # 检查所有服务器的状态
498 | python scripts/manage_mcp.py status
499 | ```
500 |
501 | ### 示例
502 |
503 | 启动fetch服务器:
504 |
505 | ```bash
506 | python scripts/manage_mcp.py start fetch
507 | ```
508 |
509 | ## 目录结构
510 |
511 | ```
512 | .
513 | ├── config/ # 配置文件
514 | │ ├── host_info.json # 主机信息缓存文件(自动生成/用于辅助配置)
515 | │ ├── mcp_servers.example.json # MCP服务器配置示例
516 | │ ├── mcp_servers.json # MCP服务器主配置文件
517 | | └── client_configs/ # 由integrate_config_generator.py生成的客户端配置
518 | | └── mcp_<client>_*.json # 客户端配置文件
519 | ├── docker
520 | │ ├── docker-compose.yml # 生产环境 Docker Compose 配置
521 | │ ├── Dockerfile # 生产环境 Docker 镜像构建文件
522 | │ ├── entrypoint.sh # 容器入口脚本
523 | │ └── README.md # Docker 相关说明文档
524 | ├── docker-dev
525 | │ ├── docker-compose.yml # 开发环境 Docker Compose 配置
526 | │ ├── Dockerfile # 开发环境 Docker 镜像构建文件
527 | │ └── .devcontainer # 容器目录
528 | │ └─ devcontainer.json # VS Code 开发容器配置
529 | ├── docs/ # 项目文档目录
530 | ├── logs/ # 服务器运行日志目录
531 | ├── mcp-data/ # 运行时数据存储目录(如有需要)
532 | ├── mcp-servers/ # MCP服务器源代码目录(如有需要)
533 | ├── node-modules/ # Node.js 依赖目录(如有需要)
534 | ├── npm-global # 全局 npm 依赖及缓存目录
535 | │ ├── bin
536 | │ ├── _cacache
537 | │ ├── lib
538 | │ ├── _logs
539 | │ ├── _npx
540 | ├── pids/ # 进程ID文件目录
541 | ├── scripts
542 | │ ├── mcp_manager/ # 管理模块(如命令、配置、进程工具等)
543 | │ │ ├── commands.py # 命令模块
544 | │ │ ├── config.py # 配置管理
545 | │ │ └── process_utils.py # 进程工具
546 | │ ├── container_startup.py # 容器启动辅助脚本
547 | │ ├── detect_host_ip.py # 主机IP检测脚本
548 | │ ├── integrate_config_generator.py # 客户端配置生成脚本
549 | │ ├── manage_mcp.py # MCP统一管理主脚本
550 | │ └── setup_env.py # 环境初始化脚本
551 | ├── uv-cache/ # Python依赖缓存目录(uv工具自动生成)
552 | ├── README.md # 项目说明文档
553 | └── requirements.txt # Python依赖清单
554 | ```
555 |
556 | ## Docker支持
557 |
558 | 本项目为开发和部署环境提供全面的Docker支持。
559 |
560 | ### Docker部署选项
561 |
562 | 1. **开发环境**:
563 | - 预装所有必要工具的开发容器
564 | - 通过devcontainer配置集成Visual Studio Code
565 |
566 | 2. **生产部署**:
567 | - 使用Docker Compose的多容器设置
568 | - 具有适当隔离的单独服务器容器
569 | - 用于数据和配置的持久卷管理
570 |
571 | ### 使用Docker进行开发
572 |
573 | 启动开发环境:
574 |
575 | ```bash
576 | # 启动开发容器
577 | docker compose -f docker-dev/docker-compose.yml up -d
578 |
579 | # 连接到容器
580 | docker exec -it mcp-dev zsh
581 | ```
582 | ### 拉取项目镜像生产部署
583 |
584 | 见 [镜像部署推荐流程](docs/镜像部署推荐流程.md)
585 |
586 | ### 使用Docker compose 进行生产部署
587 |
588 | 在生产环境中部署:
589 |
590 | #### 配置
591 |
592 | ```bash
593 | cp config/mcp_servers.example.json config/mcp_servers.json
594 | ```
595 | 或者根据需求编辑配置文件。
596 |
597 | #### 修改Dockerfile
598 | 有需要时,修改`docker/`目录中的`Dockerfile`或`docker-compose.yml`以适应您的需求。
599 | 比如,您可能需要调整`ENTRYPOINT`或`REAL_HOST_IP`变量或`TIME ZONE`变量。
600 |
601 | #### 构建和启动容器
602 | ```bash
603 | # 构建并启动所有容器
604 | docker compose -f docker/docker-compose.yml up -d
605 |
606 | # 查看日志
607 | docker compose -f docker/docker-compose.yml logs -f
608 | ```
609 |
610 | ## 贡献
611 |
612 | 欢迎贡献!详情请参阅[CONTRIBUTING.md](CONTRIBUTING.md)。
613 |
614 | ## 许可证
615 |
616 | 本项目采用MIT许可证 - 详情请参阅[LICENSE](LICENSE)文件。
617 |
--------------------------------------------------------------------------------
/config/mcp_cline_settings_for_Cline_and_RooCode.json:
--------------------------------------------------------------------------------
1 | {
2 | "mcpServers": {
3 | "fetch": {
4 | "url": "http://127.0.0.1:23001/sse"
5 | },
6 | "filesystem": {
7 | "url": "http://127.0.0.1:23002/sse"
8 | },
9 | "git": {
10 | "url": "http://127.0.0.1:23003/sse"
11 | },
12 | "github": {
13 | "url": "http://127.0.0.1:23005/sse"
14 | },
15 | "duckduckgo": {
16 | "url": "http://127.0.0.1:23008/sse",
17 | "alwaysAllow": ["search", "search_repositories", "get_file_contents"]
18 | }
19 | }
20 | }
21 |
--------------------------------------------------------------------------------
/config/mcp_servers.example.json:
--------------------------------------------------------------------------------
1 | {
2 | "servers": [
3 | {
4 | "name": "fetch",
5 | "package": "mcp-server-fetch",
6 | "enabled": true,
7 | "type": "uvx",
8 | "repo": "https://github.com/modelcontextprotocol/servers.git",
9 | "transport_type": "sse",
10 | "sse_host": "0.0.0.0",
11 | "sse_port": 23001,
12 | "allow_origin": "*",
13 | "install_commands": [
14 | "uvx mcp-server-fetch"
15 | ],
16 | "sse_start_command": "mcp-proxy {start_command} --sse-host={sse_host} --sse-port={sse_port} --allow-origin='{allow_origin}' ",
17 | "start_command": "uvx mcp-server-fetch ",
18 | "env": {},
19 | "autoApprove": "*"
20 | },
21 | {
22 | "name": "filesystem",
23 | "package": "@modelcontextprotocol/server-filesystem",
24 | "enabled": true,
25 | "type": "npx",
26 | "repo": "https://github.com/modelcontextprotocol/servers.git",
27 | "sse_host": "0.0.0.0",
28 | "sse_port": 23002,
29 | "allow_origin": "*",
30 | "install_commands": [
31 | "npx -y @modelcontextprotocol/server-filesystem"
32 | ],
33 | "sse_start_command": "mcp-proxy {start_command} --sse-host={sse_host} --sse-port={sse_port} --allow-origin='{allow_origin}' ",
34 | "start_command": "npx @modelcontextprotocol/server-filesystem ~ ",
35 | "env": {},
36 | "autoApprove": [
37 | "read_file",
38 | "read_multiple_files",
39 | "write_file",
40 | "edit_file",
41 | "create_directory",
42 | "list_directory",
43 | "directory_tree",
44 | "move_file",
45 | "search_files",
46 | "get_file_info",
47 | "list_allowed_directories"
48 | ]
49 | },
50 | {
51 | "name": "git",
52 | "package": "mcp-server-git",
53 | "enabled": true,
54 | "type": "uvx",
55 | "repo": "https://github.com/modelcontextprotocol/servers.git",
56 | "sse_host": "0.0.0.0",
57 | "sse_port": 23003,
58 | "allow_origin": "*",
59 | "install_commands": [
60 | "uvx mcp-server-git"
61 | ],
62 | "sse_start_command": "mcp-proxy {start_command} --sse-host={sse_host} --sse-port={sse_port} --allow-origin='{allow_origin}' ",
63 | "start_command": "uvx mcp-server-git --repository .",
64 | "args": [
65 | "--repository",
66 | "."
67 | ],
68 | "env": {},
69 | "autoApprove": []
70 | },
71 | {
72 | "name": "amap",
73 | "package": "@amap/amap-maps-mcp-server",
74 | "enabled": true,
75 | "type": "npx",
76 | "sse_host": "0.0.0.0",
77 | "sse_port": 23004,
78 | "allow_origin": "*",
79 | "install_commands": [
80 | "npx -y @amap/amap-maps-mcp-server"
81 | ],
82 | "sse_start_command": "mcp-proxy {start_command} --sse-host={sse_host} --sse-port={sse_port} --allow-origin='{allow_origin}' ",
83 | "start_command": "npx @amap/amap-maps-mcp-server",
84 | "env": {
85 | "AMAP_MAPS_API_KEY": ""
86 | },
87 | "autoApprove": [
88 | "search",
89 | "fetch_content",
90 | "maps_regeocode",
91 | "maps_geo",
92 | "maps_ip_location",
93 | "maps_weather",
94 | "maps_search_detail",
95 | "maps_bicycling",
96 | "maps_direction_walking",
97 | "maps_direction_driving",
98 | "maps_direction_transit_integrated",
99 | "maps_distance",
100 | "maps_text_search",
101 | "maps_around_search"
102 | ]
103 | },
104 | {
105 | "name": "github",
106 | "package": "@modelcontextprotocol/server-github",
107 | "enabled": true,
108 | "type": "npx",
109 | "repo": "https://github.com/modelcontextprotocol/servers.git",
110 | "sse_host": "0.0.0.0",
111 | "sse_port": 23005,
112 | "allow_origin": "*",
113 | "install_commands": [
114 | "npx -y @modelcontextprotocol/server-github"
115 | ],
116 | "sse_start_command": "mcp-proxy {start_command} --sse-host={sse_host} --sse-port={sse_port} --allow-origin='{allow_origin}' ",
117 | "start_command": "npx @modelcontextprotocol/server-github",
118 | "env": {
119 | "GITHUB_PERSONAL_ACCESS_TOKEN": ""
120 | },
121 | "autoApprove": [
122 | "create_or_update_file",
123 | "search_repositories",
124 | "create_repository",
125 | "get_file_contents",
126 | "push_files",
127 | "create_issue",
128 | "create_pull_request",
129 | "fork_repository",
130 | "create_branch",
131 | "list_commits",
132 | "list_issues",
133 | "update_issue",
134 | "add_issue_comment",
135 | "search_code",
136 | "search_issues",
137 | "search_users",
138 | "get_issue",
139 | "get_pull_request",
140 | "list_pull_requests",
141 | "create_pull_request_review",
142 | "merge_pull_request",
143 | "get_pull_request_files",
144 | "get_pull_request_status",
145 | "update_pull_request_branch",
146 | "get_pull_request_comments",
147 | "get_pull_request_reviews"
148 | ]
149 | },
150 | {
151 | "name": "firecrawl",
152 | "package": "firecrawl-mcp",
153 | "enabled": true,
154 | "type": "npx",
155 | "sse_host": "0.0.0.0",
156 | "sse_port": 23006,
157 | "allow_origin": "*",
158 | "install_commands": [
159 | "npx -y firecrawl-mcp"
160 | ],
161 | "sse_start_command": "mcp-proxy {start_command} --sse-host={sse_host} --sse-port={sse_port} --allow-origin='{allow_origin}' ",
162 | "start_command": "npx firecrawl-mcp",
163 | "env": {
164 | "FIRECRAWL_API_KEY": ""
165 | },
166 | "autoApprove": [
167 | "firecrawl_scrape",
168 | "firecrawl_map",
169 | "firecrawl_crawl",
170 | "firecrawl_batch_scrape",
171 | "firecrawl_check_batch_status",
172 | "firecrawl_check_crawl_status",
173 | "firecrawl_search",
174 | "firecrawl_extract",
175 | "firecrawl_deep_research",
176 | "firecrawl_generate_llmstxt"
177 | ]
178 | },
179 | {
180 | "name": "duckduckgo",
181 | "package": "duckduckgo-mcp-server",
182 | "enabled": true,
183 | "type": "uvx",
184 | "repo": "https://github.com/nickclyde/duckduckgo-mcp-server.git",
185 | "sse_host": "0.0.0.0",
186 | "sse_port": 23008,
187 | "allow_origin": "*",
188 | "install_commands": [
189 | "uv pip install duckduckgo-mcp-server --system"
190 | ],
191 | "sse_start_command": "mcp-proxy {start_command} --sse-host={sse_host} --sse-port={sse_port} --allow-origin='{allow_origin}' ",
192 | "start_command": "uvx duckduckgo-mcp-server",
193 | "env": {},
194 | "autoApprove": [
195 | "search",
196 | "fetch_content"
197 | ]
198 | }
199 | ]
200 | }
201 |
--------------------------------------------------------------------------------
/docker-dev/.devcontainer/devcontainer.json:
--------------------------------------------------------------------------------
1 | {
2 | "name": "MCPDev",
3 | "dockerComposeFile": "../docker-compose.yml",
4 | "service": "mcpdev",
5 | "workspaceFolder": "/workspace",
6 | "remoteUser": "root",
7 | // "overrideCommand": false,
8 | // "mounts": [],
9 |
10 | "customizations": {
11 | "vscode": {
12 | "extensions": [
13 | "ms-python.python",
14 | "ms-python.vscode-pylance",
15 | "ms-python.isort",
16 | "ms-python.debugpy",
17 | "ms-python.autopep8",
18 | "ms-toolsai.jupyter",
19 | "ms-azuretools.vscode-docker",
20 | "GitHub.copilot",
21 | "GitHub.copilot-chat",
22 | "github.vscode-github-actions",
23 | "rooveterinaryinc.roo-cline",
24 | "formulahendry.code-runner",
25 | "mtxr.sqltools",
26 | "tldraw-org.tldraw-vscode",
27 | "cweijan.xmind-viewer",
28 | "alibaba-cloud.tongyi-lingma",
29 | "saoudrizwan.claude-dev",
30 | "aminer.codegeex",
31 | "ms-windows-ai-studio.windows-ai-studio",
32 | "google.geminicodeassist",
33 | "googlecloudtools.cloudcode",
34 | "shd101wyy.markdown-preview-enhanced",
35 | "bierner.markdown-preview-github-styles"
36 | ],
37 | "settings": {
38 | "python.defaultInterpreterPath": "/usr/local/bin/python",
39 | "python.linting.enabled": true,
40 | // using the default terminal in the container
41 | "terminal.integrated.defaultProfile.linux": "zsh",
42 | "terminal.integrated.profiles.linux": {
43 | "zsh": {
44 | "path": "/usr/bin/zsh"
45 | }
46 | }
47 | // "remote.containers.enableWSLg": false,
48 | // "remote.containers.mountWslDistro": false
49 | }
50 | }
51 | },
52 | "forwardPorts": [
53 | 5678, 33001, 33002, 33003, 33004, 33005, 33006, 33007, 33008, 33009, 33010
54 | ],
55 | "remoteEnv": {
56 | "PYTHONUNBUFFERED": "1"
57 | },
58 | "features": {
59 | "ghcr.io/devcontainers/features/github-cli:1": {}
60 | }
61 | }
62 |
--------------------------------------------------------------------------------
/docker-dev/Dockerfile:
--------------------------------------------------------------------------------
1 | FROM python:3.13.3-slim
2 |
3 | # Set working directory
4 | WORKDIR /workspace
5 |
6 | # Install system dependencies
7 | RUN apt-get update && apt-get install -y \
8 | git curl wget vim tmux unzip zip tree htop iputils-ping dnsutils netcat-openbsd connect-proxy ffmpeg locales locales-all zsh \
9 | nodejs npm \
10 | && apt-get clean && rm -rf /var/lib/apt/lists/*
11 |
12 | # Set Chinese locale and timezone
13 | ENV TZ=Asia/Shanghai
14 | RUN ln -snf /usr/share/zoneinfo/$TZ /etc/localtime && echo $TZ > /etc/timezone
15 | RUN echo "zh_CN.UTF-8 UTF-8" > /etc/locale.gen && locale-gen zh_CN.UTF-8 && update-locale LANG=zh_CN.UTF-8 LANGUAGE=zh_CN:zh LC_ALL=zh_CN.UTF-8
16 | ENV LANG=zh_CN.UTF-8
17 | ENV LANGUAGE=zh_CN:zh
18 | ENV LC_ALL=zh_CN.UTF-8
19 |
20 | # Optional: Set pip mirrors
21 | RUN pip config set global.index-url https://mirrors.tuna.tsinghua.edu.cn/pypi/web/simple
22 |
23 | # Upgrade pip
24 | RUN python -m pip install --upgrade pip
25 |
26 | # Install uv, pipx, debugpy
27 | RUN pip install --no-cache-dir uv pipx debugpy && pipx ensurepath
28 |
29 | # Install mcp-proxy
30 | RUN pipx install mcp-proxy
31 |
32 | # Install oh-my-zsh and powerlevel10k
33 | RUN sh -c "$(wget -O- https://raw.githubusercontent.com/ohmyzsh/ohmyzsh/master/tools/install.sh)" || true
34 | RUN git clone --depth=1 https://github.com/romkatv/powerlevel10k.git ${ZSH_CUSTOM:-/root/.oh-my-zsh/custom}/themes/powerlevel10k || true
35 | RUN echo 'ZSH_THEME="powerlevel10k/powerlevel10k"' >> ~/.zshrc
36 |
37 | # Install fonts
38 | RUN apt-get update && apt-get install -y fonts-powerline && apt-get clean && rm -rf /var/lib/apt/lists/*
39 |
40 | COPY requirements.txt requirements.txt
41 |
42 | # Install Python dependencies
43 | RUN if [ -f requirements.txt ]; then uv pip install -r requirements.txt --system; fi
44 |
45 | # Pre-create directories
46 | RUN mkdir -p /workspace/pids /workspace/logs /root/.ssh
47 |
48 | # Common aliases
49 | RUN echo 'alias ls="ls --color=auto"' >> ~/.zshrc && \
50 | echo 'alias ll="ls -al"' >> ~/.zshrc && \
51 | echo 'alias grep="grep --color=auto"' >> ~/.zshrc && \
52 | echo 'alias tmux="tmux attach -t main || tmux new -s main"' >> ~/.zshrc && \
53 | echo 'alias cls="clear"' >> ~/.zshrc
54 |
55 | # Add common aliases
56 | RUN echo 'alias ls="ls --color=auto"' >> ~/.bashrc && \
57 | echo 'alias ll="ls -al"' >> ~/.bashrc && \
58 | echo 'alias grep="grep --color=auto"' >> ~/.bashrc && \
59 | echo 'alias tmux="tmux attach -t main || tmux new -s main"' >> ~/.bashrc && \
60 | echo 'alias cls="clear"' >> ~/.bashrc
61 |
62 | # Default command for easy attach
63 | CMD ["tail", "-f", "/dev/null"]
--------------------------------------------------------------------------------
/docker-dev/docker-compose.yml:
--------------------------------------------------------------------------------
1 | services:
2 | mcpdev:
3 | build:
4 | context: ..
5 | dockerfile: docker-dev/Dockerfile
6 | volumes:
7 | - ../:/workspace:cached
8 | - ~/.ssh:/root/.ssh:cached
9 |       - ~/.gitconfig:/root/.gitconfig:cached
10 | working_dir: /workspace
11 | entrypoint: ["tail", "-f", "/dev/null"]
12 | ports:
13 | - "5678:5678" # debugpy remote debugging
14 | - "33001-33010:33001-33010" # MCP ports SSE ports
15 | extra_hosts:
16 | - "host.docker.internal:host-gateway"
17 | tty: true
18 | # For Chinese users
19 | environment:
20 | TZ: Asia/Shanghai
21 | LANG: zh_CN.UTF-8
22 | LANGUAGE: zh_CN:zh
23 | LC_ALL: zh_CN.UTF-8
24 | no_proxy: "localhost,127.0.0.1"
25 | NO_PROXY: "localhost,127.0.0.1"
26 | # EXTERNAL_HOST: "your-host-ip-or-domain"
27 | EXTERNAL_HOST: "host.docker.internal"
28 | # Explicitly set the real host IP for external client access
29 | # Replace with your actual host machine IP on the network
30 | REAL_HOST_IP: "192.168.1.8"
31 |
32 | # labels:
33 | # devcontainer.metadata: '{"customizations": {"vscode": {"remote.containers.mountWslDistro": false}}}'
34 |
--------------------------------------------------------------------------------
/docker/.dockerignore:
--------------------------------------------------------------------------------
1 | # Docker ignore file for MCP Server production build
2 |
3 | # Version control
4 | .git/
5 | .gitignore
6 | .github/
7 |
8 | # Persistent data
9 | config/
10 | !config/mcp_servers.example.json
11 | !config/mcp_cline_settings_for_Cline_and_RooCode.json
12 | backup/
13 |
14 | # Python cache files
15 | __pycache__/
16 | *.py[cod]
17 | *$py.class
18 | .pytest_cache/
19 | .coverage
20 | .coverage.*
21 | htmlcov/
22 |
23 | # Virtual environments
24 | .env
25 | .venv
26 | env/
27 | venv/
28 | ENV/
29 |
30 | # Development environments
31 | .idea/
32 | .vscode/
33 | *.sublime-*
34 |
35 | # Docker development files
36 | docker-dev/
37 |
38 | # Documentation
39 | docs/
40 |
41 | # Development configuration
42 | .pre-commit-config.yaml
43 | .trunk/
44 |
45 | # Local development files
46 | logs/
47 | *.log
48 | *.sqlite3
49 |
50 | # Distribution / packaging
51 | dist/
52 | build/
53 | *.egg-info/
54 |
55 | # Temporary files
56 | *.swp
57 | *.swo
58 | *~
59 |
60 | # OS specific files
61 | .DS_Store
62 | Thumbs.db
63 |
64 |
65 | .roo/
66 | config/client_configs/
67 | mcp-data/
68 | npm-global/
69 | node_modules/
70 | node-modules/
71 | yarn-global/
72 | yarn-cache/
73 | uvx-global/
74 | mcp-servers/
--------------------------------------------------------------------------------
/docker/Dockerfile:
--------------------------------------------------------------------------------
1 | # Production Dockerfile for MCP Server Unified Deployment
2 | # Uses multi-stage build, non-root user, and installs tools efficiently.
3 |
4 | # ---- Base Stage ----
5 | # Use a specific Python version for reproducibility
6 | FROM python:3.13.3-slim AS base
7 |
8 | # Set environment variables for locale, timezone, Python, and tool paths for non-root user
9 | ENV LANG=zh_CN.UTF-8 \
10 | LANGUAGE=zh_CN:zh \
11 | LC_ALL=zh_CN.UTF-8 \
12 | TZ=Asia/Shanghai \
13 | PYTHONUNBUFFERED=1 \
14 | PYTHONDONTWRITEBYTECODE=1 \
15 | # Paths for tools installed as non-root user 'mcp'
16 | PATH="/home/mcp/.local/bin:/home/mcp/.uv/bin:$PATH" \
17 | PIPX_HOME="/home/mcp/.local/pipx" \
18 | PIPX_BIN_DIR="/home/mcp/.local/bin" \
19 | UV_CACHE_DIR="/home/mcp/.cache/uv" \
20 | NPM_CONFIG_PREFIX="/home/mcp/.npm" \
21 | NODE_PATH="/app/node_modules:/home/mcp/.npm/lib/node_modules"
22 |
23 | # Create a non-root user and group first
24 | RUN groupadd --gid 1000 mcp && \
25 | useradd --uid 1000 --gid 1000 --shell /bin/bash --create-home mcp
26 |
27 | # Install essential OS packages + Node.js + Git + Set timezone/locale
28 | # Use mirrors for faster downloads (optional)
29 | RUN printf "deb http://mirrors.aliyun.com/debian/ bookworm main contrib non-free non-free-firmware\n\
30 | deb http://mirrors.aliyun.com/debian/ bookworm-updates main contrib non-free non-free-firmware\n\
31 | deb http://mirrors.aliyun.com/debian-security bookworm-security main contrib non-free non-free-firmware\n" > /etc/apt/sources.list \
32 | && rm -rf /etc/apt/sources.list.d/* \
33 | && apt-get update && apt-get install -y --no-install-recommends \
34 | curl \
35 | ca-certificates \
36 | git \
37 |     locales iproute2 \
38 | # Install Node.js (e.g., LTS version 22.x)
39 | && curl -fsSL https://deb.nodesource.com/setup_22.x | bash - \
40 | && apt-get install -y nodejs \
41 | # Configure timezone (optional)
42 | && ln -snf /usr/share/zoneinfo/$TZ /etc/localtime && echo $TZ > /etc/timezone \
43 | # Configure locale (optional)
44 | && echo "zh_CN.UTF-8 UTF-8" >> /etc/locale.gen && locale-gen zh_CN.UTF-8 \
45 | # Install gosu for entrypoint privilege drop
46 | && apt-get install -y --no-install-recommends gosu \
47 | # Clean up
48 | && apt-get clean && rm -rf /var/lib/apt/lists/*
49 |
50 | # Copy the entrypoint script early and set permissions as root
51 | COPY docker/entrypoint.sh /usr/local/bin/entrypoint.sh
52 | RUN chmod +x /usr/local/bin/entrypoint.sh
53 |
54 | # Set WORKDIR for user installs first
55 | WORKDIR /home/mcp
56 |
57 | # Switch to mcp only for the commands that need it
58 | USER mcp
59 | # Upgrade pip and set pip mirrors (Optional)
60 | RUN python -m pip install --upgrade pip && \
61 | python -m pip config set global.index-url https://mirrors.tuna.tsinghua.edu.cn/pypi/web/simple
62 |
63 | # Install uv and pipx for the 'mcp' user
64 | # Use --user flag for non-root install
65 | RUN python -m pip install --user --no-cache-dir uv pipx
66 | # Ensure pipx paths are recognised (redundant with ENV PATH but safe)
67 | RUN python -m pipx ensurepath
68 |
69 | # Install mcp-proxy using pipx as the 'mcp' user
70 | # No need for symlink if PATH is correct
71 | RUN pipx install mcp-proxy
72 | # Switch back to root after mcp-specific commands
73 | USER root
74 | # Reset WORKDIR to root's default or /
75 | WORKDIR /
76 |
77 | # ---- Builder Stage (Optional but good practice for deps) ----
78 | FROM base AS builder
79 |
80 | USER mcp
81 | WORKDIR /app
82 |
83 | # Copy only necessary files for dependency installation
84 | COPY --chown=mcp:mcp requirements.txt ./
85 | COPY --chown=mcp:mcp package.json ./
86 |
87 | # Install Python dependencies using uv as 'mcp' user
88 | RUN python -m pip install --user --no-cache-dir -r requirements.txt
89 |
90 | # Install node dependencies if package.json has entries (optional)
91 | # RUN npm install --prefix /app
92 |
93 | # ---- Final Stage ----
94 | FROM base AS final
95 |
96 | # Create directory structure as root first
97 | WORKDIR /app
98 |
99 | # 1. Create app directories with correct ownership
100 | RUN mkdir -p \
101 | /app/config \
102 | /app/logs \
103 | /app/pids \
104 | /app/mcp-data \
105 | /app/mcp-servers \
106 | /app/client_configs
107 |
108 | RUN chown -R mcp:mcp /app && \
109 | chmod 2775 /app && \
110 | find /app -type d -exec chmod 2775 {} \; && \
111 | find /app -type f -exec chmod 0664 {} \;
112 | # 2. Switch to mcp user for application setup
113 | USER mcp
114 |
115 | # Copy installed Python packages from builder stage to user's .local
116 | COPY --from=builder --chown=mcp:mcp /home/mcp/.local /home/mcp/.local
117 | # Copy node_modules if built in builder stage
118 | # COPY --from=builder --chown=mcp:mcp /app/node_modules /app/node_modules
119 |
120 |
121 | # Copy application code
122 | COPY --chown=mcp:mcp scripts /app/scripts
123 | COPY --chown=mcp:mcp scripts/mcp_manager /app/mcp_manager
124 | COPY --chown=mcp:mcp config/mcp_servers.example.json /app/config/mcp_servers.example.json
125 | # Copy requirements.txt again for reference if needed, though installed already
126 | COPY --chown=mcp:mcp requirements.txt /app/requirements.txt
127 |
128 | # Ensure necessary application directories exist and have correct ownership
129 | # These paths should match the volumes in docker-compose.yml
130 | # RUN mkdir -p /app/config /app/logs /app/pids /app/mcp-data /app/mcp-servers /app/client_configs && \
131 | # chown -R mcp:mcp /app
132 |
133 | # Verify key tools are runnable by 'mcp' user
134 | # Use full path if PATH isn't immediately active in RUN layer
135 | RUN /home/mcp/.local/bin/uv --version && \
136 | /home/mcp/.local/bin/pipx --version && \
137 | /home/mcp/.local/bin/mcp-proxy --help && \
138 | node --version && npm --version && npx --version
139 | # Switch back to root before EXPOSE and ENTRYPOINT
140 | USER root
141 |
142 | # Expose the typical MCP port range (adjust if needed)
143 | EXPOSE 23001-23020
144 |
145 | # Entrypoint script is copied and made executable in the base stage as root
146 |
147 | # Define the entrypoint to run the wrapper script
148 | # The wrapper script will handle permissions and then execute the original command as the 'mcp' user
149 | ENTRYPOINT ["/usr/local/bin/entrypoint.sh"]
150 |
151 | # The default command to be executed by the entrypoint script (passed as arguments "$@")
152 | CMD ["/bin/bash", "-c", "python /app/scripts/container_startup.py && python /app/scripts/manage_mcp.py daemon"]
153 |
154 | # CMD can provide default arguments to ENTRYPOINT, but not needed here.
155 | # CMD ["daemon"]
--------------------------------------------------------------------------------
/docker/README.md:
--------------------------------------------------------------------------------
1 | # Production Docker Setup for MCP Server
2 |
3 | **English Navigation** | **英文导航**
4 |
5 | - [Introduction](#production-docker-setup-for-mcp-server)
6 | - [Files](#files)
7 | - [Features](#features)
8 | - [Usage](#usage)
9 | - [Configuration](#configuration)
10 | - [Volumes](#volumes)
11 | - [Environment Variables](#environment-variables)
12 | - [Security Considerations](#security-considerations)
13 | - [Health Checks](#health-checks)
14 |
15 | **Chinese Navigation** | **中文导航**
16 |
17 | - [介绍](#mcp服务器生产环境docker设置)
18 | - [文件](#文件)
19 | - [特性](#特性)
20 | - [使用方法](#使用方法)
21 | - [配置](#配置)
22 | - [卷挂载](#卷挂载)
23 | - [环境变量](#环境变量)
24 | - [安全考虑](#安全考虑)
25 | - [健康检查](#健康检查)
26 |
27 | **Image Deployment Navigation** | **镜像部署导航**
28 |
29 | - [镜像部署](#镜像部署)
30 | - [环境准备](#1-环境准备)
31 | - [拉取镜像](#2-拉取镜像)
32 | - [运行容器](#3-运行容器)
33 | - [配置参数](#4-配置参数)
34 | - [常见问题排查](#5-常见问题排查)
35 | - [参考资料](#6-参考资料)
36 |
37 |
38 | This directory contains Docker configuration files optimized for production deployment of the MCP Server Unified Deployment application.
39 |
40 | ## Files
41 |
42 | - `Dockerfile`: Multi-stage build optimized for production with minimal image size
43 | - `docker-compose.yml`: Production-ready compose configuration with appropriate volumes and security settings
44 |
45 | ## Features
46 |
47 | - **Optimized Image Size**: Uses multi-stage builds to minimize the final image size
48 | - **Security Enhancements**: Runs as non-root user with minimal permissions
49 | - **Production-Ready**: Includes only essential runtime dependencies
50 | - **Performance Optimized**: Configured for stable production operation
51 |
52 | ## Usage
53 |
54 | ### Building the Image
55 |
56 | ```bash
57 | cd /path/to/MCP-Server-Unified-Deployment
58 | docker-compose -f docker/docker-compose.yml build
59 | ```
60 |
61 | ### Starting the Container
62 |
63 | ```bash
64 | docker-compose -f docker/docker-compose.yml up -d
65 | ```
66 |
67 | ### Viewing Logs
68 |
69 | ```bash
70 | docker-compose -f docker/docker-compose.yml logs -f
71 | ```
72 |
73 | ### Stopping the Container
74 |
75 | ```bash
76 | docker-compose -f docker/docker-compose.yml down
77 | ```
78 |
79 | ## Configuration
80 |
81 | The production setup uses the configuration files from the `config` directory. Make sure to create your configuration file before starting the container:
82 |
83 | ```bash
84 | cp config/mcp_servers.example.json config/mcp_servers.json
85 | # Edit config/mcp_servers.json with your settings
86 | ```
87 |
88 | ## Volumes
89 |
90 | The following volumes are mounted:
91 |
92 | - `../config:/app/config`: Configuration files (read-write; client config generation writes here)
93 | - `../logs:/app/logs`: Log files
94 | - `../pids:/app/pids`: Process ID files
95 |
96 |
97 | ## Environment Variables
98 |
99 | You can customize the container by setting environment variables in the `docker-compose.yml` file:
100 |
101 | | Variable | Example Value | Description |
102 | | --------------- | --------------- | ------------------------------------------------------------------------------------------ |
103 | | `TZ` | Asia/Shanghai | Timezone for the container. |
104 | | `LANG` | zh_CN.UTF-8 | Primary locale setting. |
105 | | `LANGUAGE` | zh_CN:zh | Locale preference for messages and text. |
106 | | `LC_ALL` | zh_CN.UTF-8 | Overrides all locale categories. |
107 | | `REAL_HOST_IP` | 192.168.1.8 | Host IP address accessible by external clients. Important for client access. |
108 | | `EXTERNAL_HOST` | host.docker.internal | Alias for REAL_HOST_IP, to access host from container. |
109 | | ... | ... | Other environment variables (Refer to `docker-compose.yml` for full list) |
110 |
111 | > **Note**: Timezone and locale settings are commented out in the Dockerfile to allow users to customize them according to their specific requirements. You can uncomment and modify these settings in the Dockerfile or set them through environment variables in the docker-compose.yml file.
112 |
113 |
114 | ## Security Considerations
115 |
116 | - The application runs as a non-root user (`mcp`), reducing the risk of privilege escalation.
117 | - Container has `no-new-privileges` security option enabled, preventing child processes from gaining additional privileges.
118 | - Only essential packages are installed, minimizing the attack surface.
119 | - Using minimal base images also reduces the attack surface and potential vulnerabilities.
120 | - Consider regularly updating the base image and application dependencies to patch known security vulnerabilities.
121 | - Use the principle of least privilege whenever possible.
122 |
123 | ## Health Checks
124 |
125 | The container includes a health check that verifies the application is running correctly by testing connectivity to port 23001.
126 |
127 | # MCP服务器生产环境Docker设置
128 |
129 | 本目录包含针对MCP服务器统一部署应用程序的生产环境部署优化的Docker配置文件。
130 |
131 | ## 文件
132 |
133 | - `Dockerfile`: 针对生产环境优化的多阶段构建,最小化镜像大小
134 | - `docker-compose.yml`: 具有适当卷挂载和安全设置的生产就绪compose配置
135 |
136 | ## 特性
137 |
138 | - **优化的镜像大小**: 使用多阶段构建最小化最终镜像大小
139 | - **安全增强**: 以非root用户运行,权限最小化
140 | - **生产就绪**: 仅包含必要的运行时依赖
141 | - **性能优化**: 配置为稳定的生产操作
142 |
143 | ## 使用方法
144 |
145 | ### 构建镜像
146 |
147 | ```bash
148 | cd /path/to/MCP-Server-Unified-Deployment
149 | docker-compose -f docker/docker-compose.yml build
150 | ```
151 |
152 | ### 启动容器
153 |
154 | ```bash
155 | docker-compose -f docker/docker-compose.yml up -d
156 | ```
157 |
158 | ### 查看日志
159 |
160 | ```bash
161 | docker-compose -f docker/docker-compose.yml logs -f
162 | ```
163 |
164 | ### 停止容器
165 |
166 | ```bash
167 | docker-compose -f docker/docker-compose.yml down
168 | ```
169 |
170 | ## 配置
171 |
172 | 生产设置使用`config`目录中的配置文件。确保在启动容器前创建您的配置文件:
173 |
174 | ```bash
175 | cp config/mcp_servers.example.json config/mcp_servers.json
176 | # 使用您的设置编辑config/mcp_servers.json
177 | ```
178 |
179 | ## 卷挂载
180 |
181 | 以下卷被挂载:
182 |
183 | - `../config:/app/config`: 配置文件(读写,用于生成客户端配置)
184 | - `../logs:/app/logs`: 日志文件
185 | - `../pids:/app/pids`: 进程ID文件
186 |
187 | ## 环境变量
188 |
189 | 您可以通过在`docker-compose.yml`文件中设置环境变量来自定义容器:
190 |
191 | - `TZ`: 时区(默认:Asia/Shanghai)
192 | - `LANG`, `LANGUAGE`, `LC_ALL`: 语言环境设置
193 |
194 | > **注意**:Dockerfile中的时区和语言环境设置已被注释,以便用户根据自己的特定需求进行自定义。您可以在Dockerfile中取消注释并修改这些设置,或通过docker-compose.yml文件中的环境变量进行设置。
195 |
196 | ## 安全考虑
197 |
198 | - 应用程序以非root用户(`mcp`)运行
199 | - 容器启用了`no-new-privileges`安全选项
200 | - 仅安装必要的软件包
201 |
202 | ## 健康检查
203 |
204 | 容器包含一个健康检查,通过测试与23001端口的连接来验证应用程序是否正常运行。
205 |
206 | # 镜像部署
207 |
208 | 本指南适用于基于 `biguncle2018/mcp-server-unified:latest` 镜像的 MCP Server 统一部署,推荐先克隆 GitHub 项目到本地,再用镜像部署。即使无 Docker 经验也能顺利完成部署。
209 |
210 | ---
211 |
212 | ## 1. 环境准备
213 |
214 | ### 1.1 安装 Docker
215 |
216 | - 推荐使用最新版 Docker。可参考官方文档:[Docker 安装指南](https://docs.docker.com/engine/install/)
217 | - 安装完成后,建议配置国内镜像加速(如阿里云、DaoCloud 等)。
218 |
219 | ### 1.2 克隆项目代码
220 |
221 | 建议先将官方项目仓库克隆到本地,便于获取最新配置、脚本和文档:
222 |
223 | ```bash
224 | git clone https://github.com/BigUncle/MCP-Server-Unified-Deployment.git
225 | cd MCP-Server-Unified-Deployment
226 | ```
227 |
228 | ### 1.3 目录准备
229 |
230 | 在本地项目目录下,确保以下子目录存在(首次克隆后大部分已包含):
231 |
232 | ```bash
233 | mkdir -p config logs pids mcp-data mcp-servers client_configs
234 | ```
235 |
236 | ### 1.4 端口说明
237 |
238 | - 默认开放端口范围:`23001-23020`
239 | - 如有端口冲突,请在运行时调整映射
240 |
241 | ---
242 |
243 | ## 2. 拉取镜像
244 |
245 | ```bash
246 | docker pull biguncle2018/mcp-server-unified:latest
247 | ```
248 |
249 | ---
250 |
251 | ## 3. 运行容器
252 |
253 | ### 3.1 推荐启动命令
254 |
255 | 在项目根目录下运行:
256 |
257 | ```bash
258 | docker run -d \
259 | --name mcp-server \
260 | -p 23001-23020:23001-23020 \
261 | -v $(pwd)/config:/app/config \
262 | -v $(pwd)/logs:/app/logs \
263 | -v $(pwd)/pids:/app/pids \
264 | -v $(pwd)/mcp-data:/app/mcp-data \
265 | -v $(pwd)/mcp-servers:/app/mcp-servers \
266 | -v $(pwd)/client_configs:/app/client_configs \
267 | -e REAL_HOST_IP="你自己主机IP地址" \
268 | biguncle2018/mcp-server-unified:latest
269 | ```
270 |
271 | > **说明:**
272 | > - `-d`:后台运行
273 | > - `--name`:容器名称,可自定义
274 | > - `-p`:端口映射,确保宿主机端口未被占用
275 | > - `-v`:目录挂载,保证数据和配置持久化
276 | > - `$(pwd)` 表示当前项目目录,适合在项目根目录下执行
277 |
278 | ### 3.2 配置文件说明
279 |
280 | - 默认配置文件为 `config/mcp_servers.example.json`,首次启动后可复制为 `mcp_servers.json` 并根据实际需求修改。
281 | - 推荐直接在本地 `config/` 目录下维护 `mcp_servers.json`,容器会自动加载。
282 |
283 | ### 3.3 以非 root 用户运行(推荐)
284 |
285 | 镜像内已创建 `mcp` 用户,默认以该用户运行,安全性更高。无需额外参数。
286 |
287 | ---
288 |
289 | ## 4. 配置参数
290 |
291 | - **环境变量**:如需自定义 Python、Node、时区等参数,可通过 `-e` 传递环境变量。
292 | - **自定义启动命令**:如需覆盖默认启动命令,可在 `docker run` 后追加命令参数。
293 |
294 | ---
295 |
296 | ## 5. 常见问题排查
297 |
298 | ### 5.1 权限问题
299 |
300 | - 挂载目录建议归属当前用户,避免 root 权限导致读写失败。
301 | - 如遇权限报错,可尝试 `sudo chown -R $(id -u):$(id -g) .`(在项目根目录下执行)
302 |
303 | ### 5.2 端口冲突
304 |
305 | - 若 `23001-23020` 端口被占用,可修改 `-p` 参数映射到其他端口。
306 |
307 | ### 5.3 配置未生效
308 |
309 | - 确认 `mcp_servers.json` 已正确挂载到 `/app/config/`,并格式无误。
310 |
311 | ### 5.4 日志查看
312 |
313 | - 容器日志:`docker logs mcp-server`
314 | - 应用日志:本地 `logs/` 目录下
315 |
316 | ### 5.5 镜像更新
317 |
318 | - 更新镜像:`docker pull biguncle2018/mcp-server-unified:latest`
319 | - 重启容器:`docker stop mcp-server && docker rm mcp-server && [重新运行上方命令]`
320 |
321 | ### 5.6 容器内缺少 ip 命令导致 IP 检测失败
322 |
323 | **现象:**
324 | - 日志出现如下报错:
325 | ```
326 | Error getting Docker gateway IP: [Errno 2] No such file or directory: 'ip'
327 | Could not determine a suitable host IP. Falling back to 'localhost'. External access will likely fail.
328 | ```
329 | - 这会导致客户端无法通过宿主机 IP 访问服务。
330 |
331 | **解决方法:**
332 | 1. **推荐修正 Dockerfile**
333 | 在 Dockerfile 的 apt 安装部分添加:
334 | ```dockerfile
335 | apt-get install -y iproute2
336 | ```
337 | 重新构建并推送镜像后再部署。
338 |
339 | 2. **临时解决办法**
340 | 启动容器时手动指定宿主机 IP,例如:
341 | ```bash
342 | docker run -d \
343 | ...(其余参数同上) \
344 | -e REAL_HOST_IP=你的宿主机IP \
345 | biguncle2018/mcp-server-unified:latest
346 | ```
347 | 这样脚本会优先使用该 IP,避免 fallback 到 localhost。
348 |
349 | ---
350 |
351 | ## 6. 参考资料
352 |
353 | - [Docker 官方文档](https://docs.docker.com/)
354 | - [MCP Server 项目仓库](https://github.com/BigUncle/MCP-Server-Unified-Deployment)
355 | - [MCP Server 项目文档/README](../README.md)
356 |
357 | ---
358 |
359 | 如有其他问题,请先查阅日志和配置文件,或在 GitHub 项目仓库提交 issue 获取支持。
--------------------------------------------------------------------------------
/docker/docker-compose.yml:
--------------------------------------------------------------------------------
1 | services:
2 | mcp-server:
3 | build:
4 | context: ..
5 | dockerfile: docker/Dockerfile
6 | image: mcp-server-unified:latest # Use the same tag as built
7 | container_name: mcp-server-unified
8 | restart: unless-stopped
9 | ports:
10 | - "23001-23020:23001-23020" # Match Dockerfile EXPOSE
11 |
12 | volumes:
13 | # Configuration is read-write (client config generation)
14 | - ../config:/app/config
15 | - ../logs:/app/logs
16 | - ../pids:/app/pids
17 | - ../mcp-data:/app/mcp-data # Generic data persistence
18 | - ../mcp-servers:/app/mcp-servers # Source code persistence
19 |
20 | # --- Persistent volumes for runtime installations (MATCH NON-ROOT USER) ---
21 | # Ensure these directories exist on the host and have correct permissions (UID/GID 1000)
22 | # Example host commands:
23 | # mkdir -p npm-global uvx-global uv-cache
24 | # sudo chown -R 1000:1000 npm-global uvx-global uv-cache
25 | - ../npm-global:/home/mcp/.npm # NPM global cache/install for 'mcp' user
26 | - ../uvx-global:/home/mcp/.uvx # UVX install location for 'mcp' user (assuming ~/.uvx)
27 | - ../uv-cache:/home/mcp/.cache/uv # UV cache persistence for 'mcp' user
28 |
29 | # --- Volumes NOT typically needed for production ---
30 | # - ../scripts:/app/scripts # Scripts are copied in Dockerfile
31 | # - ../pipx-home:/home/mcp/.local/pipx # mcp-proxy installed in image
32 | # - ../node-modules:/app/node_modules # Only if project-local node deps needed & not installed in image
33 |
34 | environment:
35 | # Keep locale and timezone settings (match Dockerfile)
36 | TZ: Asia/Shanghai
37 | LANG: zh_CN.UTF-8
38 | LANGUAGE: zh_CN:zh
39 | LC_ALL: zh_CN.UTF-8
40 | # Host resolution - REAL_HOST_IP is critical for client access from outside Docker
41 | EXTERNAL_HOST: "host.docker.internal" # Docker Desktop default, might vary
42 | # !!! IMPORTANT: Replace with your host machine's actual IP accessible by clients !!!
43 | REAL_HOST_IP: "192.168.1.8" # <---- UPDATE THIS IP ADDRESS
44 | # Environment variables needed by the container/tools (match Dockerfile ENV for non-root)
45 | MCP_DAEMON_MODE: "true" # Set explicitly, though entrypoint uses 'daemon' command
46 | MCP_DATA_DIR: "/app/mcp-data"
47 | NPM_CONFIG_PREFIX: "/home/mcp/.npm"
48 | NODE_PATH: "/app/node_modules:/home/mcp/.npm/lib/node_modules" # Include global path
49 | PIPX_HOME: "/home/mcp/.local/pipx"
50 | PIPX_BIN_DIR: "/home/mcp/.local/bin"
51 | UV_CACHE_DIR: "/home/mcp/.cache/uv"
52 |
53 | # Entrypoint is defined in the Dockerfile, no need to override here unless necessary
54 |
55 | # Security: Already handled by non-root user. `no-new-privileges` is still good.
56 | security_opt:
57 | - no-new-privileges:true
58 |
59 | # Healthcheck (adapt port/command if needed, uses localhost inside container)
60 | healthcheck:
61 | # Test connection to the primary fetch server port (adjust if different)
62 | test: ["CMD", "python", "-c", "import socket; s=socket.create_connection(('localhost', 23001), timeout=5)"]
63 | interval: 30s
64 | timeout: 10s
65 | retries: 3
66 | start_period: 45s # Increased start period slightly
--------------------------------------------------------------------------------
/docker/entrypoint.sh:
--------------------------------------------------------------------------------
#!/bin/bash
# Container entrypoint: fix ownership of volume mount points, then drop
# privileges and run the requested command as the 'mcp' user via gosu.
set -e  # Exit immediately if a command exits with a non-zero status.

# Target user/group IDs; overridable from the environment.
USER_ID=${LOCAL_USER_ID:-1000}
GROUP_ID=${LOCAL_GROUP_ID:-1000}

echo "Entrypoint: Running as $(id)"
echo "Entrypoint: Ensuring user 'mcp' (UID: $USER_ID, GID: $GROUP_ID) owns required directories..."

# Mount points (see docker-compose volumes) that must be writable by 'mcp'.
# /home/mcp itself should already be owned by mcp due to useradd in Dockerfile.
declare -a DIRS_TO_FIX=(
    "/app/config"
    "/app/logs"
    "/app/pids"
    "/app/mcp-data"
    "/app/mcp-servers"
    "/home/mcp/.npm"
    "/home/mcp/.local" # Ensure .local and subdirs are correct, pipx/pip install here
    "/home/mcp/.cache" # Ensure .cache and subdirs are correct, uv cache here
    # Add /home/mcp/.uvx if it's a separate volume mount target
)

for dir in "${DIRS_TO_FIX[@]}"; do
    # Create the directory when missing. With `set -e`, a failed mkdir aborts
    # the script, so past this point the directory is guaranteed to exist
    # (the previous "not found, skipping chown" branch was unreachable).
    if [ ! -d "$dir" ]; then
        mkdir -p "$dir"
        echo "Entrypoint: Created missing directory $dir"
    fi

    current_uid=$(stat -c '%u' "$dir")
    current_gid=$(stat -c '%g' "$dir")

    # Only chown when ownership actually differs: recursive chown on a large
    # mounted volume is expensive. A chown failure is downgraded to a warning
    # so a read-only mount does not kill the container.
    if [ "$current_uid" != "$USER_ID" ] || [ "$current_gid" != "$GROUP_ID" ]; then
        echo "Entrypoint: Correcting permissions for $dir (UID:$current_uid->$USER_ID, GID:$current_gid->$GROUP_ID)..."
        chown -R "$USER_ID:$GROUP_ID" "$dir" || echo "Entrypoint: Warning - Failed to chown $dir"
    else
        echo "Entrypoint: Permissions OK for $dir (UID:$USER_ID, GID:$GROUP_ID)"
    fi
done

# Ensure the primary user's home directory itself is correct (non-recursive).
echo "Entrypoint: Verifying ownership for /home/mcp..."
chown "$USER_ID:$GROUP_ID" /home/mcp || echo "Entrypoint: Warning - Failed to chown /home/mcp"

echo "Entrypoint: Permissions check complete."
echo "Entrypoint: Switching to user 'mcp' (UID: $USER_ID) to execute command: $@"

# Execute CMD as the 'mcp' user, replacing this shell process.
# NOTE(review): gosu switches to the 'mcp' passwd entry; if LOCAL_USER_ID
# differs from that user's UID, files chowned above will not match the
# effective UID — confirm the Dockerfile remaps the user accordingly.
exec gosu mcp "$@"
--------------------------------------------------------------------------------
/package.json:
--------------------------------------------------------------------------------
1 | {
2 | "dependencies": {
3 | "node-fetch": "3.3.2"
4 | }
5 | }
6 |
--------------------------------------------------------------------------------
/pyproject.toml:
--------------------------------------------------------------------------------
[project]
name = "mcp-server-unified-deployment"
version = "0.0.1"
description = "A MCP Server Unified Deployment"
readme = "README.md"
requires-python = ">=3.12"
dependencies = [
    "mcp>=1.6.0",
    "psutil>=5.9.0",
    "httpx>=0.28.0",
    "httpx-sse>=0.4.0",
    "typing-extensions>=4.9.0"
]
[[project.authors]]
name = "BigUncle"
email = "biguncle2017@gmail.com"

[tool.hatch.build.targets.wheel]
packages = ["scripts/mcp_manager"]

[build-system]
requires = ["hatchling"]
build-backend = "hatchling.build"

[project.scripts]
# NOTE(review): this entry point imports module 'mcp_server_unified_deployment',
# but the wheel above only packages 'scripts/mcp_manager' — confirm the module
# exists, otherwise the console script will fail at runtime.
mcp-server-unified-deployment = "mcp_server_unified_deployment:main"


[tool.black]
line-length = 120
# NOTE(review): 'py38' conflicts with requires-python = ">=3.12" above —
# confirm the intended floor and align the black/ruff target versions.
target-version = ['py38']

[tool.isort]
profile = "black"
known_first_party = ["mcp_manager"]

[tool.ruff]
# NOTE(review): recent ruff releases expect 'select'/'ignore' under
# [tool.ruff.lint]; top-level keys are deprecated — keep in mind when upgrading.
select = ["E", "F", "W", "I", "B", "Q"]
ignore = ["E203"]
# NOTE(review): see the black note above — 'py38' vs requires-python >=3.12.
target-version = "py38"
--------------------------------------------------------------------------------
/requirements.txt:
--------------------------------------------------------------------------------
1 | # MCP Server Unified Deployment dependencies
2 | mcp>=1.6.0
3 | psutil>=5.9.0
4 | httpx>=0.28.0
5 | httpx-sse>=0.4.0
6 | typing-extensions>=4.9.0
7 | requests
--------------------------------------------------------------------------------
/scripts/container_startup.py:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env python3
2 | # -*- coding: utf-8 -*-
3 |
4 | """
5 | Container Startup Script for MCP Server
6 | This script runs during container initialization to:
7 | 1. Verify essential directories exist and have correct permissions.
8 | 2. Verify essential tools (installed in Dockerfile) are accessible.
9 | 3. Check and potentially copy the default MCP server configuration.
10 | 4. Detect the real host IP address for external client access.
11 | 5. Set up appropriate environment variables based on detected IP.
12 | 6. Generate client configurations with correct IP addresses.
13 | """
14 |
15 | import os
16 | import socket
17 | import sys
18 | import json
19 | from pathlib import Path
20 | import subprocess
21 | import time
22 | import logging
23 | import stat
24 | import shutil
25 |
26 | # Configure logging
27 | logging.basicConfig(
28 | level=logging.INFO,
29 | format='%(asctime)s - %(name)s - %(levelname)s - [%(module)s:%(lineno)d] - %(message)s'
30 | )
31 | logger = logging.getLogger('container_startup')
32 |
33 | # Constants - Define paths relative to the application root (/app)
34 | APP_DIR = Path('/app')
35 | CONFIG_DIR = APP_DIR / 'config'
36 | LOGS_DIR = APP_DIR / 'logs'
37 | PIDS_DIR = APP_DIR / 'pids'
38 | MCP_DATA_DIR = APP_DIR / 'mcp-data'
39 | MCP_SERVERS_DIR = APP_DIR / 'mcp-servers'
40 | CLIENT_CONFIGS_DIR = CONFIG_DIR / 'client_configs'
41 |
42 | CONFIG_FILE = CONFIG_DIR / 'mcp_servers.json'
43 | EXAMPLE_CONFIG_FILE = CONFIG_DIR / 'mcp_servers.example.json'
44 |
45 | SCRIPTS_DIR = APP_DIR / 'scripts'
46 | HOST_DETECTOR_SCRIPT = SCRIPTS_DIR / 'detect_host_ip.py'
47 |
48 | # --- Helper Functions ---
49 |
def ensure_directory(dir_path: Path) -> bool:
    """Ensure *dir_path* exists and is writable by the current user.

    Creates the directory (with parents) when missing. When it exists but is
    not writable, attempts to add the owner-write bit before giving up.

    Args:
        dir_path: Directory to create/verify.

    Returns:
        True when the directory exists and is writable (possibly after the
        chmod fix), False otherwise.
    """
    try:
        if not dir_path.exists():
            logger.info(f"Creating directory: {dir_path}")
            # Create directory with default permissions (should be usable by user mcp)
            dir_path.mkdir(parents=True, exist_ok=True)
        else:
            logger.debug(f"Directory already exists: {dir_path}")

        # Fast path: already writable by the current user.
        if os.access(dir_path, os.W_OK):
            return True

        # Not writable: try to add the owner-write bit before declaring
        # failure. (Previously a FATAL error was logged *before* this fix was
        # attempted, even when the fix then succeeded.)
        try:
            current_mode = dir_path.stat().st_mode
            os.chmod(dir_path, current_mode | stat.S_IWUSR)
        except Exception as e:
            logger.error(f"Error attempting to fix permissions for {dir_path}: {e}")
            return False

        if os.access(dir_path, os.W_OK):
            logger.info(f"Added write permission for user to {dir_path}.")
            return True

        # chmod "succeeded" but the directory is still unwritable — this is
        # likely a host volume permission issue that cannot be fixed here.
        logger.error(f"FATAL: Directory {dir_path} is not writable by user {os.geteuid()}. Check volume permissions on the host.")
        return False

    except Exception as e:
        logger.error(f"Failed to ensure directory {dir_path}: {e}")
        return False
81 |
def check_essential_tools():
    """Verify that essential command-line tools (expected in PATH) are available."""
    # Tools expected to be installed in the Docker image for user 'mcp'
    required = ["python", "node", "npm", "npx", "git", "uv", "pipx", "mcp-proxy"]
    logger.info("Checking for essential tools in PATH...")

    missing = []
    for name in required:
        location = shutil.which(name)
        if location is None:
            logger.error(f"  [MISSING] Tool not found in PATH: {name}")
            missing.append(name)
        else:
            logger.info(f"  [OK] Found: {name} at {location}")

    if missing:
        logger.critical("One or more essential tools are missing from PATH. Check Dockerfile installation and PATH environment variable.")
        # Consider exiting if critical tools like python or mcp-proxy are missing
        # sys.exit(1)
    return not missing
101 |
def check_mcp_config():
    """Check that the MCP server configuration file exists and is valid JSON.

    When no config file is present, copies the bundled example config into
    place. Then validates the structure: a JSON object with a 'servers' list.

    Returns:
        True when a structurally valid configuration is available,
        False otherwise.
    """
    logger.info(f"Checking MCP configuration file: {CONFIG_FILE}...")
    if not CONFIG_FILE.exists():
        # Include the path in the message (previously a placeholder-less
        # f-string that omitted the location).
        logger.warning(f"Configuration file not found: {CONFIG_FILE}")
        if EXAMPLE_CONFIG_FILE.exists():
            try:
                logger.info(f"Copying example configuration from {EXAMPLE_CONFIG_FILE} to {CONFIG_FILE}")
                shutil.copy(EXAMPLE_CONFIG_FILE, CONFIG_FILE)
                logger.info("Example configuration copied. Please review and customize it as needed.")
                # Re-check existence after copy
                if not CONFIG_FILE.exists():
                    logger.error("Failed to create config file from example.")
                    return False
            except Exception as e:
                logger.error(f"Failed to copy example configuration: {e}")
                return False
        else:
            logger.error(f"No configuration file found at {CONFIG_FILE}, and no example file available at {EXAMPLE_CONFIG_FILE}.")
            return False

    # Now check the content of the config file (whether copied or pre-existing)
    try:
        with CONFIG_FILE.open('r', encoding='utf-8') as f:
            config = json.load(f)

        # Basic structure validation: top-level object carrying a 'servers' list.
        if not isinstance(config, dict) or 'servers' not in config:
            logger.error(f"Invalid format in {CONFIG_FILE}: Must be a JSON object with a 'servers' key (list).")
            return False
        if not isinstance(config['servers'], list):
            logger.error(f"Invalid format in {CONFIG_FILE}: 'servers' key must be a JSON list.")
            return False

        # Warn (but do not fail) when every server is disabled.
        enabled_servers = [s for s in config.get('servers', []) if s.get('enabled', True)]
        if not enabled_servers:
            logger.warning(f"No enabled MCP servers found in configuration: {CONFIG_FILE}")
        else:
            logger.info(f"Found {len(enabled_servers)} enabled MCP servers in configuration.")

        logger.info(f"MCP configuration file {CONFIG_FILE} is valid.")
        return True
    except json.JSONDecodeError as e:
        logger.error(f"Invalid JSON in configuration file {CONFIG_FILE}: {e}")
        return False
    except Exception as e:
        logger.error(f"Error reading or parsing configuration file {CONFIG_FILE}: {e}")
        return False
151 |
def detect_host_ip():
    """Detect the real host IP address using the detection script or fallbacks.

    Resolution order:
      1. Run scripts/detect_host_ip.py and parse its "Detected host IP:" line.
      2. REAL_HOST_IP env var (trusted verbatim, no validation).
      3. EXTERNAL_HOST env var (must be a valid IPv4 address).
      4. Default gateway from `ip route show default`.
      5. Addresses from `hostname -I`, preferring non-172./non-127. ones.
      6. 'localhost' as a last resort (external access will likely fail).

    Returns:
        str: the chosen IP address (or 'localhost' on total failure).
    """
    logger.info("Attempting to detect host IP...")
    detected_ip = None

    if HOST_DETECTOR_SCRIPT.exists():
        logger.info(f"Running host IP detector script: {HOST_DETECTOR_SCRIPT}")
        try:
            result = subprocess.run(
                [sys.executable, str(HOST_DETECTOR_SCRIPT)],
                capture_output=True, text=True, check=False, timeout=10
            )
            if result.returncode == 0:
                # The script prints exactly "Detected host IP: <ip>"; parse it.
                for line in result.stdout.splitlines():
                    if "Detected host IP:" in line:
                        ip = line.split("Detected host IP:")[1].strip()
                        try:
                            socket.inet_aton(ip) # Basic IPv4 check
                            logger.info(f"Detected host IP via script: {ip}")
                            detected_ip = ip
                            break
                        except socket.error:
                            logger.warning(f"IP detected by script is invalid: {ip}")
                if not detected_ip:
                    logger.warning(f"Host IP detector script ran but did not output a valid IP. Output:\n{result.stdout}\n{result.stderr}")
            else:
                logger.error(f"Host IP detector script failed (exit code {result.returncode}):\n{result.stderr}")
        except subprocess.TimeoutExpired:
            logger.error("Host IP detector script timed out.")
        except Exception as e:
            logger.error(f"Error running host IP detector script: {e}")
    else:
        logger.warning(f"Host IP detector script not found at {HOST_DETECTOR_SCRIPT}. Using fallback methods.")

    # Fallback methods if script fails or doesn't exist
    if not detected_ip:
        logger.info("Attempting fallback IP detection methods...")
        try:
            # Method 1: Check REAL_HOST_IP env var (set externally, highest priority fallback)
            # Note: returned without validation, unlike EXTERNAL_HOST below.
            env_ip = os.environ.get('REAL_HOST_IP')
            if env_ip:
                logger.info(f"Using IP from REAL_HOST_IP environment variable: {env_ip}")
                return env_ip # Trust externally set IP

            # Method 2: Check EXTERNAL_HOST env var
            env_ip = os.environ.get('EXTERNAL_HOST')
            if env_ip:
                try:
                    socket.inet_aton(env_ip)
                    logger.info(f"Using IP from EXTERNAL_HOST environment variable: {env_ip}")
                    return env_ip
                except socket.error:
                    logger.warning(f"EXTERNAL_HOST value '{env_ip}' is not a valid IP. Ignoring.")

            # Method 3: Try default gateway (often the host in bridge network)
            # NOTE(review): tokenizes the entire stdout rather than a single
            # route line; fine for `ip route show default` output, but verify
            # if the command ever changes.
            result = subprocess.run(["ip", "route", "show", "default"], capture_output=True, text=True, check=False)
            if result.returncode == 0:
                parts = result.stdout.split()
                if 'via' in parts:
                    gateway_ip = parts[parts.index('via') + 1]
                    try:
                        socket.inet_aton(gateway_ip)
                        logger.info(f"Using default gateway IP as potential host IP: {gateway_ip}")
                        return gateway_ip
                    except socket.error:
                        logger.debug(f"Default gateway value '{gateway_ip}' is not a valid IP.")

            # Method 4: Use `hostname -I` (may give multiple IPs)
            # NOTE(review): the '172.' prefix filter also skips legitimate
            # 172.16-31.x.x LAN addresses, not just Docker's 172.17-20 ranges.
            result = subprocess.run(["hostname", "-I"], capture_output=True, text=True, check=False)
            if result.returncode == 0:
                ips = result.stdout.strip().split()
                if ips:
                    for ip in ips: # Prefer non-internal IPs if possible
                        if not ip.startswith('172.') and not ip.startswith('127.'):
                            try:
                                socket.inet_aton(ip)
                                logger.info(f"Using first non-internal IP from 'hostname -I': {ip}")
                                return ip
                            except socket.error: continue
                    # If only internal IPs found, return the first valid one
                    for ip in ips:
                        try:
                            socket.inet_aton(ip)
                            logger.info(f"Using first valid IP from 'hostname -I': {ip}")
                            return ip
                        except socket.error: continue
        except Exception as e:
            logger.warning(f"Internal host IP detection methods failed: {e}")

    # Final fallback if no IP found
    if not detected_ip:
        logger.error("Could not determine a suitable host IP. Falling back to 'localhost'. External access will likely fail.")
        return 'localhost'
    else:
        return detected_ip
247 |
def update_environment_variables(host_ip):
    """Update environment variables REAL_HOST_IP and EXTERNAL_HOST."""
    if not host_ip:
        logger.error("Cannot update environment variables: No host IP provided.")
        return

    # Propagate the final IP to both variables for this process and children.
    logger.info(f"Setting container environment: REAL_HOST_IP={host_ip}, EXTERNAL_HOST={host_ip}")
    os.environ['REAL_HOST_IP'] = host_ip
    os.environ['EXTERNAL_HOST'] = host_ip

    # Best-effort: also drop a shell-sourceable file for other processes.
    env_file = Path('/tmp/mcp_environment')
    try:
        content = (
            f'export REAL_HOST_IP="{host_ip}"\n'
            f'export EXTERNAL_HOST="{host_ip}"\n'
        )
        env_file.write_text(content)
        logger.debug(f"Environment variables also saved to {env_file}")
    except Exception as e:
        logger.warning(f"Failed to write environment file {env_file}: {e}")
268 |
def generate_client_configs():
    """Generate client configurations using the dedicated script.

    Runs scripts/integrate_config_generator.py in a subprocess after making
    sure the output directory is writable.

    Returns:
        True on success; False on missing script, unwritable output
        directory, non-zero exit code, timeout, or any other error.
    """
    config_generator_script = SCRIPTS_DIR / "integrate_config_generator.py"
    logger.info(f"Attempting to generate client configurations using {config_generator_script}...")

    if not config_generator_script.exists():
        # Include the path in the message (previously a placeholder-less
        # f-string that omitted the location).
        logger.error(f"Client config generator script not found at {config_generator_script}.")
        return False

    try:
        # Ensure output directory exists and is writable
        if not ensure_directory(CLIENT_CONFIGS_DIR):
            logger.error(f"Cannot generate client configs: Output directory {CLIENT_CONFIGS_DIR} is not writable.")
            return False

        # Run the config generator script with the same interpreter.
        result = subprocess.run(
            [sys.executable, str(config_generator_script)],
            capture_output=True, text=True, check=False, timeout=30
        )

        if result.returncode != 0:
            logger.error(f"Client configuration generator script failed (exit code {result.returncode}):\nSTDOUT:\n{result.stdout}\nSTDERR:\n{result.stderr}")
            return False

        logger.info("Client configurations generated successfully.")
        logger.debug(f"Generator output:\n{result.stdout}")
        return True
    except subprocess.TimeoutExpired:
        logger.error("Client config generator script timed out.")
        return False
    except Exception as e:
        logger.error(f"Error running client config generator: {e}")
        return False
303 |
304 | # --- Main Execution ---
305 |
def main():
    """Main container initialization sequence."""
    started_at = time.time()
    logger.info("--- Starting MCP Server Container Initialization ---")

    # 0. Log basic environment info for debugging permission/PATH issues.
    uid = os.geteuid()
    gid = os.getegid()
    home = os.environ.get('HOME', '/root')  # Default to /root if HOME not set
    logger.info(f"Running initialization as UID={uid}, GID={gid}, HOME={home}")
    logger.info(f"Current PATH: {os.environ.get('PATH', 'Not Set')}")

    # 1. Ensure essential directories exist and are writable (fatal on failure).
    logger.info("Step 1: Ensuring essential directories...")
    required_dirs = [
        CONFIG_DIR,
        LOGS_DIR,
        PIDS_DIR,
        CLIENT_CONFIGS_DIR,
        MCP_DATA_DIR,
        MCP_SERVERS_DIR,
    ]
    # Evaluate every directory (no short-circuit) so all problems get logged.
    results = [ensure_directory(d) for d in required_dirs]
    if not all(results):
        logger.critical("One or more essential directories are not writable. Aborting initialization.")
        return 1

    # 2. Check for essential tools (non-fatal: log and continue).
    logger.info("Step 2: Checking for essential tools...")
    if not check_essential_tools():
        logger.warning("Essential tool check failed. Continuing, but functionality may be impaired.")
        # return 1  # Or exit

    # 3. Check MCP server configuration file (fatal on failure).
    logger.info("Step 3: Checking MCP server configuration...")
    if not check_mcp_config():
        logger.critical("MCP server configuration check failed. Aborting initialization.")
        return 1

    # 4. Detect host IP (all fallbacks handled inside).
    logger.info("Step 4: Detecting host IP...")
    host_ip = detect_host_ip()

    # 5. Publish the detected IP through environment variables.
    logger.info("Step 5: Updating environment variables...")
    update_environment_variables(host_ip)
    final_ip_for_clients = os.environ.get('REAL_HOST_IP', 'Not Set')

    # 6. Generate client configurations (errors logged inside, non-fatal).
    logger.info("Step 6: Generating client configurations...")
    generate_client_configs()

    elapsed = time.time() - started_at
    logger.info(f"--- Container initialization complete ({elapsed:.2f} seconds). Effective host IP for clients: {final_ip_for_clients} ---")
    return 0  # Indicate success to the entrypoint script

if __name__ == "__main__":
    sys.exit(main())
--------------------------------------------------------------------------------
/scripts/detect_host_ip.py:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env python3
2 | # -*- coding: utf-8 -*-
3 |
4 | """
5 | Host IP Detection Script
6 | ------------------------
7 | This script provides reliable methods to detect the real host IP address
8 | that is accessible from external clients in containerized environments.
9 | """
10 |
11 | import os
12 | import socket
13 | import subprocess
14 | import logging
15 | import json
16 | import time
17 | from pathlib import Path
18 | import ipaddress
19 |
20 | # Configure logging
21 | logging.basicConfig(
22 | level=logging.INFO,
23 | format='%(asctime)s - %(name)s - %(levelname)s - %(message)s'
24 | )
25 | logger = logging.getLogger('host_ip_detection')
26 |
def is_valid_ip(ip):
    """
    Validate if a string is a valid IP address

    Args:
        ip (str): IP address to validate

    Returns:
        bool: True if valid IP address
    """
    # ipaddress.ip_address accepts both IPv4 and IPv6 textual forms and
    # raises ValueError on anything else.
    try:
        ipaddress.ip_address(ip)
    except ValueError:
        return False
    return True
42 |
def get_docker_gateway_ip():
    """
    Get the Docker gateway IP address (usually host IP from container's perspective)

    Returns:
        str: IP address or None if not found
    """
    try:
        # Preferred: parse `ip route show default` for the "via <gateway>" token.
        route = subprocess.run(
            ["ip", "route", "show", "default"],
            capture_output=True,
            text=True,
            check=False
        )

        if route.returncode == 0:
            for line in route.stdout.splitlines():
                if "default via" not in line:
                    continue
                tokens = line.split()
                # Look at every "via" token that has a following token.
                for idx, token in enumerate(tokens[:-1]):
                    if token == "via" and is_valid_ip(tokens[idx + 1]):
                        return tokens[idx + 1]

        # Fallback: scan `netstat -rn` default-route lines for a usable IP.
        netstat = subprocess.run(
            ["netstat", "-rn"],
            capture_output=True,
            text=True,
            check=False
        )

        if netstat.returncode == 0:
            for line in netstat.stdout.splitlines():
                if "0.0.0.0" in line or "default" in line:
                    for token in line.split():
                        if is_valid_ip(token) and not token.startswith("0.0.0.0"):
                            return token

        return None
    except Exception as e:
        logger.error(f"Error getting Docker gateway IP: {e}")
        return None
90 |
def get_local_ips():
    """
    Get all non-loopback local network interface IPs

    Returns:
        list: List of IP addresses (deduplicated; order not guaranteed)
    """
    local_ips = []

    try:
        # Method 1: Resolve our own hostname and collect its addresses.
        hostname = socket.gethostname()
        for info in socket.getaddrinfo(hostname, None):
            ip = info[4][0]
            if is_valid_ip(ip) and not ip.startswith("127."):
                local_ips.append(ip)

        # Method 2: Connect a UDP socket toward an arbitrary address; the
        # kernel selects the outgoing interface, whose address we read back.
        # No packets are actually sent.
        if not local_ips:
            s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
            try:
                s.connect(('10.255.255.255', 1))
                ip = s.getsockname()[0]
                if is_valid_ip(ip) and not ip.startswith("127."):
                    local_ips.append(ip)
            except OSError:
                # Best-effort only. (Was a bare `except:` — narrowed so
                # KeyboardInterrupt/SystemExit are no longer swallowed.)
                pass
            finally:
                s.close()

        # Method 3: Parse `hostname -I` output (Linux-specific).
        try:
            result = subprocess.run(
                ["hostname", "-I"],
                capture_output=True,
                text=True,
                check=False
            )

            if result.returncode == 0:
                for ip in result.stdout.split():
                    if is_valid_ip(ip) and not ip.startswith("127."):
                        local_ips.append(ip)
        except (OSError, subprocess.SubprocessError):
            # `hostname` may be missing; best-effort only (was a bare except).
            pass

    except Exception as e:
        logger.error(f"Error getting local IPs: {e}")

    # Filter duplicate IPs
    return list(set(local_ips))
142 |
def check_environment_variables():
    """
    Check environment variables for explicitly configured host IP

    Returns:
        str: IP address from environment or None
    """
    # Highest priority: an explicit REAL_HOST_IP that is a valid address.
    real_host_ip = os.environ.get('REAL_HOST_IP')
    if real_host_ip and is_valid_ip(real_host_ip):
        logger.info(f"Using explicitly configured REAL_HOST_IP: {real_host_ip}")
        return real_host_ip

    # Next: EXTERNAL_HOST, which may be an IP literal or a hostname.
    external_host = os.environ.get('EXTERNAL_HOST')
    if not external_host:
        return None

    if is_valid_ip(external_host):
        logger.info(f"Using EXTERNAL_HOST IP: {external_host}")
        return external_host

    # Treat it as a hostname and attempt DNS resolution.
    try:
        ip = socket.gethostbyname(external_host)
    except socket.gaierror:
        logger.warning(f"Could not resolve EXTERNAL_HOST: {external_host}")
        return None

    logger.info(f"Resolved EXTERNAL_HOST {external_host} to {ip}")
    return ip
173 |
def is_docker_internal_ip(ip):
    """
    Check if an IP is likely a Docker internal network IP

    Args:
        ip (str): IP address to check

    Returns:
        bool: True if likely Docker internal IP; False for empty/None or
        unparseable input
    """
    if not ip:
        return False

    # Ranges Docker commonly uses for internal addressing: the default
    # bridge (172.17/16), typical user-defined bridges (172.18-20/16),
    # and Docker Desktop's 198.18.0.0/16.
    docker_networks = (
        ipaddress.ip_network('172.17.0.0/16'),
        ipaddress.ip_network('172.18.0.0/16'),
        ipaddress.ip_network('172.19.0.0/16'),
        ipaddress.ip_network('172.20.0.0/16'),
        ipaddress.ip_network('198.18.0.0/16'),
    )

    try:
        ip_obj = ipaddress.ip_address(ip)
    except (ValueError, TypeError):
        # Not a parseable address. (Was a bare `except:` — narrowed so
        # KeyboardInterrupt/SystemExit are no longer swallowed.)
        return False

    return any(ip_obj in net for net in docker_networks)
210 |
def find_best_host_ip():
    """
    Find the best IP address for external clients to connect to the host

    This function tries multiple strategies to determine the most appropriate
    IP address for external clients to connect to services running in the container.

    Strategy order: (1) env vars (if not a Docker-internal IP), (2) Docker
    gateway IP, (3) a local interface IP preferring common LAN subnets,
    (4) the env-var IP even if Docker-internal, (5) "localhost".

    Returns:
        str: Best IP address to use
    """
    # Strategy 1: Check environment variables for explicit configuration
    env_ip = check_environment_variables()
    if env_ip and not is_docker_internal_ip(env_ip):
        return env_ip

    # Strategy 2: Get the Docker gateway IP (often the host's IP)
    gateway_ip = get_docker_gateway_ip()
    if gateway_ip and not is_docker_internal_ip(gateway_ip):
        logger.info(f"Using Docker gateway IP: {gateway_ip}")
        return gateway_ip

    # Strategy 3: Try to find a suitable local IP
    local_ips = get_local_ips()

    # Filter out Docker internal IPs
    external_ips = [ip for ip in local_ips if not is_docker_internal_ip(ip)]

    if external_ips:
        # Prefer IPs in common LAN subnets
        for ip in external_ips:
            ip_obj = ipaddress.ip_address(ip)

            # 192.168.x.x range (common home/office networks)
            if ip_obj in ipaddress.ip_network('192.168.0.0/16'):
                logger.info(f"Using LAN IP from 192.168.x.x range: {ip}")
                return ip

            # 10.x.x.x range (common for larger networks)
            if ip_obj in ipaddress.ip_network('10.0.0.0/8'):
                logger.info(f"Using LAN IP from 10.x.x.x range: {ip}")
                return ip

            # 172.16.x.x range (excluding Docker ranges we already checked)
            # NOTE(review): 172.16.0.0/16 covers only 172.16.x.x; the full
            # RFC1918 block is 172.16.0.0/12, so 172.21-31.x.x addresses fall
            # through to the generic fallback below — confirm this is intended.
            if ip_obj in ipaddress.ip_network('172.16.0.0/16'):
                logger.info(f"Using LAN IP from 172.16.x.x range: {ip}")
                return ip

        # If no preferred subnet, use the first external IP
        logger.info(f"Using first available external IP: {external_ips[0]}")
        return external_ips[0]

    # Strategy 4: Fall back to environment IP even if it's a Docker internal IP
    if env_ip:
        logger.warning(f"No suitable external IP found, using configured IP: {env_ip}")
        return env_ip

    # Last resort: use localhost, which isn't externally accessible
    logger.error("Could not determine a suitable host IP, falling back to localhost")
    return "localhost"
270 |
def write_host_info(ip):
    """
    Write the detected host IP to a file for other processes to use

    Args:
        ip (str): The detected host IP
    """
    try:
        # Record the IP together with metadata about how and when it was found.
        host_info = {
            "host_ip": ip,
            "timestamp": time.time(),
            "detected_at": time.strftime("%Y-%m-%d %H:%M:%S"),
            "is_docker_internal": is_docker_internal_ip(ip),
            "detection_method": "detect_host_ip.py script"
        }

        # Prefer the app config dir; fall back to $HOME outside the container.
        target_dir = Path("/app/config")
        if not target_dir.exists():
            target_dir = Path.home()

        # Persist the record as JSON for other processes to consume.
        info_file = target_dir / "host_info.json"
        with info_file.open("w") as f:
            json.dump(host_info, f, indent=2)

        logger.info(f"Host IP information written to {info_file}")

        # Expose the result to the current process environment as well.
        os.environ["DETECTED_HOST_IP"] = ip

    except Exception as e:
        logger.error(f"Error writing host info: {e}")
304 |
if __name__ == "__main__":
    # When run directly, find and print the best host IP
    # NOTE: container_startup.py parses this script's stdout for the exact
    # prefix "Detected host IP:" — keep the print format stable.
    detected_ip = find_best_host_ip()
    print(f"Detected host IP: {detected_ip}")
    write_host_info(detected_ip)
--------------------------------------------------------------------------------
/scripts/integrate_config_generator.py:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env python3
2 | # -*- coding: utf-8 -*-
3 |
4 | """
5 | MCP Configuration Generator
6 | Generate different client format configuration files based on mcp_servers.json
7 | Supported formats:
8 | - Cline
9 | - Roo Code
10 | - Cherry Studio
11 | - GitHub Copilot
12 | """
13 |
14 | import json
15 | import os
16 | import random
17 | import string
18 | import time
19 | from pathlib import Path
20 |
21 | # Import functions from config.py to avoid duplication
22 | from mcp_manager.config import load_config, get_server_ip_port
23 |
# Configuration file paths (resolved relative to the repository root,
# i.e. the parent of the scripts/ directory containing this file)
CONFIG_FILE = Path(__file__).parent.parent / "config" / "mcp_servers.json"
CONFIG_OUTPUT_DIR = Path(__file__).parent.parent / "config" / "client_configs"

# Ensure output directory exists (side effect at import time)
CONFIG_OUTPUT_DIR.mkdir(parents=True, exist_ok=True)

# Default configurations for different clients
# NOTE(review): CLIENT_DEFAULTS is not referenced by the generator functions
# visible in this file (they hard-code e.g. timeout=60) — confirm whether it
# is used elsewhere or should be wired into the generators.
CLIENT_DEFAULTS = {
    "cline": {
        "timeout": 60,
        "transportType": "sse"
    },
    "roo_code": {},
    "cherry_studio": {
        "isActive": True,
        "description": ""
    },
    "github_copilot": {
        "type": "sse"
    }
}
46 |
def generate_random_id(length=20):
    """Generate random ID for Cherry Studio configuration"""
    # Alphanumeric alphabet: [a-zA-Z0-9]
    alphabet = string.ascii_letters + string.digits
    return ''.join(random.choice(alphabet) for _ in range(length))
51 |
def generate_cline_config(servers_config):
    """Generate Cline format configuration file"""
    result = {"mcpServers": {}}

    for server in servers_config["servers"]:
        host, port = get_server_ip_port(server)
        entry = {
            "disabled": False,
            "timeout": 60,
            "url": f"http://{host}:{port}/sse",
            "transportType": "sse"
        }

        if not server.get("enabled", True):
            # Disabled servers keep their endpoint but are flagged off;
            # no autoApprove list is emitted for them.
            entry["disabled"] = True
            result["mcpServers"][server["name"]] = entry
            continue

        # Propagate the auto-approve list; "*" is currently mapped to an
        # empty list (could be enhanced to fetch all available functions).
        if "autoApprove" in server:
            entry["autoApprove"] = [] if server["autoApprove"] == "*" else server["autoApprove"]

        result["mcpServers"][server["name"]] = entry

    return result
84 |
def generate_roo_code_config(servers_config):
    """Generate Roo Code format configuration file"""
    result = {"mcpServers": {}}

    for server in servers_config["servers"]:
        name = server["name"]

        if not server.get("enabled", True):
            # Disabled servers only carry the disabled flag
            result["mcpServers"][name] = {"disabled": True}
            continue

        host, port = get_server_ip_port(server)
        entry = {"url": f"http://{host}:{port}/sse"}

        # Roo Code names the auto-approval list "alwaysAllow"
        if "autoApprove" in server:
            approve = server["autoApprove"]
            # "*" means "all functions"; emitted as an empty list for now
            entry["alwaysAllow"] = [] if approve == "*" else approve

        result["mcpServers"][name] = entry

    return result
116 |
def generate_cherry_studio_config(servers_config):
    """Generate Cherry Studio format configuration file"""
    mcp_servers = {
        # Fixed mcp-auto-install entry expected by Cherry Studio
        "cPqOEdSHLwBLnukhxTppp": {
            "isActive": True,
            "name": "mcp-auto-install",
            "description": "Automatically install MCP services (Beta version)",
            "baseUrl": "",
            "command": "npx",
            "args": ["-y", "@mcpmarket/mcp-auto-install", "connect", "--json"],
            "registryUrl": "https://registry.npmmirror.com",
            "env": {}
        }
    }

    for server in servers_config["servers"]:
        host, port = get_server_ip_port(server)
        # Disabled servers are kept in the file but marked inactive
        mcp_servers[generate_random_id()] = {
            "isActive": server.get("enabled", True),
            "name": server["name"],
            "description": server.get("description", server["name"]),
            "baseUrl": f"http://{host}:{port}/sse",
        }

    return {"mcpServers": mcp_servers}
152 |
def generate_github_copilot_config(servers_config):
    """Generate GitHub Copilot format configuration file"""
    servers = {}

    for server in servers_config["servers"]:
        # This format omits disabled servers entirely
        if not server.get("enabled", True):
            continue

        host, port = get_server_ip_port(server)
        servers[server["name"]] = {
            # Transport type from config; defaults to "sse" when unspecified
            "type": server.get("transport_type", "sse"),
            "url": f"http://{host}:{port}/sse",
        }

        # GitHub Copilot format currently has no autoApprove field. If one is
        # released, map server["autoApprove"] here (omitting it for "*",
        # which means "allow all").

    return {"mcp": {"servers": servers}}
184 |
def save_config_to_file(config, filename):
    """Write config as pretty-printed UTF-8 JSON under CONFIG_OUTPUT_DIR.

    Returns the full path of the written file.
    """
    destination = CONFIG_OUTPUT_DIR / filename
    payload = json.dumps(config, ensure_ascii=False, indent=2)
    with open(destination, "w", encoding="utf-8") as f:
        f.write(payload)
    return destination
191 |
def generate_all_configs():
    """Generate every client configuration file (timestamped + latest copies).

    Returns a dict mapping client name -> timestamped file path, plus a
    "latest" sub-dict with the non-timestamped copies; None if the server
    configuration could not be loaded.
    """
    servers_config = load_config()
    if not servers_config:
        print("Failed to load server configuration")
        return None

    # One generator per supported client format
    generators = {
        "cline": generate_cline_config,
        "roo_code": generate_roo_code_config,
        "cherry_studio": generate_cherry_studio_config,
        "github_copilot": generate_github_copilot_config,
    }

    timestamp = time.strftime("%Y%m%d%H%M%S")

    result = {}
    latest = {}
    for client, build in generators.items():
        cfg = build(servers_config)
        # Timestamped snapshot plus a stable "latest" copy
        result[client] = str(save_config_to_file(cfg, f"mcp_{client}_{timestamp}.json"))
        latest[client] = str(save_config_to_file(cfg, f"mcp_{client}_latest.json"))
    result["latest"] = latest

    return result
233 |
if __name__ == "__main__":
    # Generate all configuration files when executed from the command line.
    # (This was previously a bare string literal, which is a no-op statement,
    # not a docstring — converted to a real comment.)
    result = generate_all_configs()
    if result:
        print("Client configuration files generation completed:")
        # Timestamped files first; the "latest" entry holds its own dict
        for client_type, path in result.items():
            if client_type != "latest":
                print(f"- {client_type}: {path}")
        print("\nLatest configuration files:")
        for client_type, path in result["latest"].items():
            print(f"- {client_type}: {path}")
    else:
        print("Failed to generate client configuration files")
--------------------------------------------------------------------------------
/scripts/manage_mcp.py:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env python3
2 |
3 | # -*- coding: utf-8 -*-
4 |
5 | import json
6 | import os
7 | import platform
8 | import signal
9 | import subprocess
10 | import sys
11 | import time
12 | from pathlib import Path
13 |
14 | import psutil
15 | from mcp_manager.config import get_server_ip_port
# Configuration file paths (resolved relative to the project root, one level
# above this script's directory)
CONFIG_FILE = Path(__file__).parent.parent / "config" / "mcp_servers.json"
PID_DIR = Path(__file__).parent.parent / "pids"
LOG_DIR = Path(__file__).parent.parent / "logs"

# Ensure directories exist (created at import time; one PID file and one
# timestamped log file per managed server are written below)
PID_DIR.mkdir(exist_ok=True)
LOG_DIR.mkdir(exist_ok=True)
24 |
25 |
26 | # Load configuration
def load_config():
    """Read and parse the MCP server configuration JSON file."""
    raw = CONFIG_FILE.read_text(encoding="utf-8")
    return json.loads(raw)
30 |
31 |
32 | # Save PID to file
def save_pid(name, pid):
    """Record a server's process ID in its PID file."""
    (PID_DIR / f"{name}.pid").write_text(str(pid))
36 |
37 |
38 | # Load PID from file
def load_pid(name):
    """Return the recorded PID for a server, or None if no PID file exists."""
    pid_file = PID_DIR / f"{name}.pid"
    if not pid_file.exists():
        return None
    return int(pid_file.read_text().strip())
45 |
46 |
47 | # Remove PID file
def remove_pid_file(name):
    """Delete a server's PID file if it exists (no-op otherwise)."""
    (PID_DIR / f"{name}.pid").unlink(missing_ok=True)
52 |
53 |
# Check if process is running
def is_running(pid):
    """Return True if a process with the given PID currently exists.

    psutil.pid_exists() does not raise psutil.NoSuchProcess (it returns a
    bool), so the original handler for that exception was unreachable and has
    been removed. Any other error during the check is reported and treated as
    "not running".
    """
    try:
        return psutil.pid_exists(pid)
    except Exception as e:  # Defensive: assume not running if the check fails
        print(f"Error checking PID {pid}: {e}")
        return False
64 |
65 |
# Check if port is in use
def is_port_in_use(port):
    """Return the PID of a process bound to `port`, or None if the port is free.

    NOTE: psutil may report the owning PID as None (e.g. insufficient
    permissions), in which case callers see the port as free even though it
    is occupied.
    """
    for conn in psutil.net_connections():
        # conn.laddr can be an empty tuple for some sockets; accessing .port
        # on it would raise AttributeError, so guard first.
        if conn.laddr and conn.laddr.port == port:
            return conn.pid
    return None
72 |
73 |
# Start server
def start_server(server):
    """Launch one MCP server as a background subprocess.

    Skips startup when a PID previously recorded by this script is still
    alive; warns (but does not abort) if the configured port is already
    occupied. The command line is assembled from the server config, the
    child's stdout/stderr are redirected to a fresh timestamped log file,
    and the child PID is written to the PID directory.
    """
    name = server["name"]

    # Skip if a previously recorded PID is still alive
    pid = load_pid(name)
    if pid and is_running(pid):
        print(f"Server '{name}' is already running (PID: {pid})")
        return

    # Warn (but continue) if the configured port is already in use
    port = server.get("sse_port", server.get("port"))
    if port:
        existing_pid = is_port_in_use(port)
        if existing_pid:
            print(f"Warning: Port {port} is already in use by process {existing_pid}")

    # Child environment = current environment + per-server overrides
    env = os.environ.copy()
    for key, value in server.get("env", {}).items():
        env[key] = value

    # Build the start command
    if "sse_host" in server and "sse_port" in server:
        print(f"Starting server '{name}' (SSE mode)")
        # SSE mode: start from the SSE wrapper command template
        cmd = server["sse_start_command"]
        # Substitute {key} placeholders with values from the server config
        for key, value in server.items():
            # Always try to replace, and convert value to string to ensure compatibility
            cmd = cmd.replace("{" + key + "}", str(value))
        # Append env vars as "-e KEY VALUE" flags
        # NOTE(review): assumes the SSE wrapper CLI accepts "-e" flags and the
        # values need no shell quoting — confirm against the wrapper's docs.
        for key, value in server.get("env", {}).items():
            # cmd = cmd.replace("{" + key + "}", value)
            cmd += f" -e {key} {value}"
        # Splice the inner start command into the wrapper template
        start_cmd = server["start_command"]
        cmd = cmd.replace("{start_command}", start_cmd)
    else:
        # Non-SSE mode: run the configured start command directly
        cmd = server["start_command"]
        if "port" in server:
            cmd = cmd.replace("{port}", str(server["port"]))

    # Create a fresh timestamped log file for this run
    # NOTE(review): the parent never closes this handle; the child inherits it.
    log_file = open(LOG_DIR / f"{name}_{time.strftime('%Y%m%d%H%M%S')}.log", "a")

    # Log header: start time and the exact command being run
    log_file.write(f"=== Service Start {time.ctime()} ===\n")
    log_file.write(f"Execute command: {cmd}\n\n")

    # Print startup information for debugging
    print(f"Starting server '{name}' with command: {cmd}")

    # Launch the child and record its PID
    try:
        # Bandit B602: shell=True is a security risk if cmd contains untrusted input.
        print(f"Start command: {cmd}")

        process = subprocess.Popen(cmd, shell=True, env=env, stdout=log_file, stderr=log_file)
        save_pid(name, process.pid)
        print(f"Server '{name}' started (PID: {process.pid})")
    except Exception as e:
        print(f"Failed to start server '{name}': {e}")
138 |
139 |
# Stop server
def stop_server(server):
    """Stop a server: first via its recorded PID, else via the port owner.

    If this script recorded a live PID, that process is terminated (taskkill
    on Windows; SIGTERM then SIGKILL after a 1s grace period elsewhere) and
    the PID file removed. Otherwise, if some other process occupies the
    configured port, it is terminated the same way.
    """
    name = server["name"]
    pid = load_pid(name)

    # Case 1: a PID recorded by this script is still alive
    if pid and is_running(pid):
        try:
            # On Windows, use taskkill to kill the whole process tree
            if platform.system() == "Windows":
                subprocess.run(["taskkill", "/F", "/T", "/PID", str(pid)], check=False)
            else:
                os.kill(pid, signal.SIGTERM)
            # Grace period, then force-kill if still alive.
            # NOTE(review): this escalation also runs on Windows, where
            # signal.SIGKILL does not exist — it would raise AttributeError
            # if the process survived taskkill. Confirm intended.
            time.sleep(1)
            if is_running(pid):
                os.kill(pid, signal.SIGKILL)
            print(f"Server '{name}' stopped (PID: {pid})")
        except Exception as e:
            print(f"Failed to stop server '{name}': {e}")
        finally:
            remove_pid_file(name)
    else:
        # Case 2: no live recorded PID — try the process occupying the port
        port = server.get("sse_port", server.get("port"))
        if port:
            port_pid = is_port_in_use(port)
            if port_pid:
                try:
                    # On Windows, use taskkill to kill the whole process tree
                    if platform.system() == "Windows":
                        subprocess.run(["taskkill", "/F", "/T", "/PID", str(port_pid)], check=False)
                    else:
                        os.kill(port_pid, signal.SIGTERM)
                    # Grace period, then force-kill if still alive (see note above)
                    time.sleep(1)
                    if is_running(port_pid):
                        os.kill(port_pid, signal.SIGKILL)
                    print(f"Stopped server '{name}' running on port {port} (PID: {port_pid})")
                except Exception as e:
                    print(f"Failed to stop process on port {port}: {e}")
            else:
                print(f"Server '{name}' is not running.")
        else:
            print(f"Server '{name}' is not running under this script instance.")
185 |
186 |
# Restart server
def restart_server(server):
    """Stop a server, pause briefly, then start it again."""
    stop_server(server)
    time.sleep(1)  # Give the old process a moment to shut down completely
    start_server(server)
192 |
193 |
# Check server status
def server_status(server):
    """Build a status report for a single server config entry.

    Returns a dict with keys: name, enabled, type, port, status, pid, url.
    The PID column distinguishes servers started by this script from ones
    started externally (detected via the listening port).
    """
    name = server["name"]
    enabled = server.get("enabled", True)
    server_type = server.get("type", "unknown")
    port = server.get("sse_port", server.get("port", "N/A"))

    # Resolve a reachable host so the URL is valid even when the config
    # specifies a bind-all address
    resolved_host, _ = get_server_ip_port(server)
    url = f"http://{resolved_host}:{port}/sse"

    # PID recorded by this script, if any
    pid = load_pid(name)
    pid_running = pid and is_running(pid)

    # PID of whatever process currently owns the port, if any
    port_pid = None
    if port != "N/A":
        port_pid = is_port_in_use(port)

    # Determine status. The original had two port_pid branches producing the
    # identical status string; they are merged here.
    if not enabled:
        status = "Disabled"
        pid_str = "N/A"
    elif port_pid:
        status = f"Running (port {port} listening)"
        pid_str = str(pid) if (pid_running and port_pid == pid) else "(External start)"
    elif pid_running:
        status = "Running"
        pid_str = str(pid)
    else:
        status = "Stopped"
        pid_str = "N/A"

    return {
        "name": name,
        "enabled": enabled,
        "type": server_type,
        "port": port,
        "status": status,
        "pid": pid_str,
        "url": url,
    }
244 |
245 |
# Get status of all servers
def get_all_status():
    """Return a status report dict for every configured server."""
    return [server_status(server) for server in load_config()["servers"]]
253 |
254 |
# Display status table
def print_status_table():
    """Print a left-aligned status table for all configured servers.

    Fixes the original header, which labeled the last column "Path" even
    though the value printed there is the server URL.
    """
    status_list = get_all_status()

    headers = ["Name", "Enabled", "Type", "Port", "Status", "PID (This Instance)", "Url"]
    col_widths = [20, 10, 15, 10, 30, 20, 70]

    def format_row(values):
        # Left-align each cell to its column width, separated by one space
        return " ".join(f"{str(v):<{w}}" for v, w in zip(values, col_widths))

    print("\n--- MCP Server Status ---")
    print(format_row(headers))
    print("-" * 100)

    for status in status_list:
        print(format_row([
            status["name"],
            str(status["enabled"]),
            status["type"],
            status["port"],
            status["status"],
            status["pid"],
            status["url"],
        ]))
305 |
306 |
# Start all enabled servers
def start_all_servers():
    """Start every server whose config has enabled=True (the default)."""
    for server in load_config()["servers"]:
        if server.get("enabled", True):
            start_server(server)
313 |
314 |
# Stop all servers
def stop_all_servers():
    """Attempt to stop every configured server, enabled or not."""
    for server in load_config()["servers"]:
        stop_server(server)
320 |
321 |
# Daemon supervision loop, shared by "start" (with MCP_DAEMON_MODE=true) and
# the explicit "daemon" command. The original duplicated this loop verbatim
# in both branches.
def _daemon_loop(config):
    """Keep the process alive, health-checking and restarting enabled servers.

    Every 30 seconds: restart a server whose process is alive but whose port
    is not listening, and restart one whose recorded process has died.
    Ctrl+C stops all servers and returns.
    """
    try:
        while True:
            time.sleep(30)  # Health-check interval

            for server in config["servers"]:
                if not server.get("enabled", True):
                    continue
                pid = load_pid(server["name"])
                port = server.get("sse_port", server.get("port"))

                # Double check: process exists and port is listening
                if pid and is_running(pid) and port:
                    if not is_port_in_use(port):
                        print(f"Service '{server['name']}' process exists but port {port} is not listening, restarting...")
                        stop_server(server)
                        start_server(server)
                elif pid and not is_running(pid):
                    print(f"Service '{server['name']}' abnormally stopped, restarting...")
                    start_server(server)
    except KeyboardInterrupt:
        print("Daemon mode interrupted, stopping all servers...")
        stop_all_servers()


# Main function
def main():
    """CLI entry point: dispatch start/stop/restart/status/daemon commands."""
    if len(sys.argv) < 2:
        # Usage now shows the mandatory <command> argument (was missing)
        print("Usage: python manage_mcp.py <command> [server_name]")
        print("Commands: start, stop, restart, status, daemon")
        return

    command = sys.argv[1]
    server_name = sys.argv[2] if len(sys.argv) > 2 else None

    config = load_config()

    if command == "status":
        print_status_table()
        return

    if command == "start" and not server_name:
        # Start all enabled servers
        start_all_servers()
        # In a Docker container the process must stay alive and supervise
        if os.environ.get("MCP_DAEMON_MODE", "false").lower() == "true":
            print("Running in daemon mode, keeping process alive...")
            _daemon_loop(config)
        return

    if command == "daemon":
        # Explicit daemon mode command
        print("Starting all servers in daemon mode...")
        start_all_servers()
        _daemon_loop(config)
        return

    if command == "stop" and not server_name:
        # Stop all servers
        stop_all_servers()
        return

    if not server_name:
        print("Please specify a server name")
        return

    # Find the named server's configuration
    server = next((s for s in config["servers"] if s["name"] == server_name), None)
    if not server:
        print(f"Server '{server_name}' not found")
        return

    # Refuse to operate on disabled servers
    if not server.get("enabled", True):
        print(f"Server '{server_name}' is disabled. To enable it, please modify the configuration file.")
        return

    # Execute the per-server command
    if command == "start":
        start_server(server)
    elif command == "stop":
        stop_server(server)
    elif command == "restart":
        restart_server(server)
    else:
        print(f"Unknown command: {command}")
432 |
433 |
# Run the CLI only when executed directly (not when imported as a module).
if __name__ == "__main__":
    main()
436 |
--------------------------------------------------------------------------------
/scripts/mcp_manager/__init__.py:
--------------------------------------------------------------------------------
1 | # This file makes the 'mcp_manager' directory a Python package.
2 |
--------------------------------------------------------------------------------
/scripts/mcp_manager/commands.py:
--------------------------------------------------------------------------------
1 | # scripts/mcp_manager/commands.py
2 | import json
3 | import os
4 | import signal
5 | import sys
6 | import time
7 |
8 | # Use relative imports
9 | from .config import SERVERS_DIR, load_config
10 | from .process_utils import (RUNNING_PROCESSES, clone_repo, is_port_in_use,
11 | run_command, stop_process, stream_output)
12 |
13 | # --- Command Functions ---
14 |
15 |
def setup_server(server_config: dict):
    """Install dependencies for the specified server.

    For 'source_code' servers the repository is cloned/updated first, then
    each entry in 'install_commands' is executed synchronously (5-minute
    timeout per command) in the server's directory. Setup aborts on the
    first failure.
    """
    import subprocess  # Hoisted out of the per-command loop (was re-imported each iteration)

    name = server_config.get("name", "Unknown Server")
    print(f"\n--- Setting up server: {name} ---")
    if not server_config.get("enabled", True):
        print(f"Server '{name}' is disabled. Skipping setup.")
        return

    server_type = server_config.get("type")
    server_path = server_config.get("path")  # load_config should already have normalized this path

    # 1. Clone/update repository (only for source_code type)
    if server_type == "source_code":
        repo_url = server_config.get("repo")
        if repo_url:
            # Infer the checkout directory name from the repository URL
            repo_name = repo_url.split("/")[-1].replace(".git", "")
            clone_target_dir = os.path.join(SERVERS_DIR, repo_name)
            if not clone_repo(repo_url, clone_target_dir, server_name=name):
                print(f"[{name}] Repository operation failed. Stopping setup.")
                return  # Stop if clone/update fails
        else:
            print(
                f"[{name}] Warning: source_code type server missing 'repo' configuration, cannot automatically clone/update."
            )

    # Check if the final path exists (after clone/update or when manually specified)
    if not server_path or not os.path.isdir(server_path):
        print(
            f"Error: Server path '{server_path}' not found or invalid for '{name}'. Please check configuration or repository cloning step."
        )
        return

    # 2. Run installation commands
    install_commands = server_config.get("install_commands", [])
    if not install_commands:
        print(f"[{name}] No installation commands specified. Skipping installation step.")
    else:
        print(f"[{name}] Executing installation commands...")
        # source_code servers run commands inside their checkout; other types
        # inherit the current working directory
        cwd = server_path if server_type == "source_code" else None

        for command in install_commands:
            process = run_command(command, cwd=cwd, server_name=f"{name}-install")
            if not process:
                print(f"Error: Unable to start installation command '{command}' for '{name}'.")
                return  # Stop if unable to execute command

            try:
                # communicate() waits for completion and collects all output
                stdout, stderr = process.communicate(timeout=300)  # 5 minutes timeout
                stdout_lines = stdout.strip().splitlines()
                stderr_lines = stderr.strip().splitlines()

                if stdout_lines:
                    print(f"[{name}-install-out] {' '.join(stdout_lines)}")
                if process.returncode != 0:
                    print(
                        f"Error: Error executing installation command for '{name}'. Command failed: {command}"
                    )
                    if stderr_lines:
                        print(f"[{name}-install-err] {' '.join(stderr_lines)}")
                    return  # Stop if installation fails
                print(f"[{name}] Command '{command}' completed successfully.")

            except subprocess.TimeoutExpired:
                print(f"Error: Timeout executing installation command '{command}' for '{name}'.")
                stop_process(f"{name}-install", process)  # Try to stop the timed-out process
                return
            except Exception as e:
                print(f"Error: Unexpected error occurred while waiting for installation command '{command}' to complete: {e}")
                # Best-effort drain of remaining output; secondary failures are
                # deliberately ignored (was a bare `except:`)
                try:
                    stdout, stderr = process.communicate()
                except Exception:
                    pass
                if process.poll() is None:  # If still running, try to stop
                    stop_process(f"{name}-install", process)
                return

    print(f"--- Server setup completed: {name} ---")
105 |
106 |
def start_server(server_config: dict, watch: bool = False):
    """Start the specified server.

    Skips disabled servers and servers already tracked by this script whose
    port is listening. Builds the final command (optionally wrapping it in
    the configured SSE command template), launches it via run_command, and
    streams its output. In watch mode, blocks until the process exits
    (Ctrl+C stops it); otherwise verifies shortly after launch that the
    process is alive and, when a port is configured, that it is listening.
    """
    name = server_config.get("name", "Unknown Server")
    if not server_config.get("enabled", True):
        print(f"Server '{name}' is disabled. Skipping startup.")
        return

    # Check if already running in this script instance
    if name in RUNNING_PROCESSES and RUNNING_PROCESSES[name].poll() is None:
        print(
            f"Server '{name}' appears to have been started by this script already (PID: {RUNNING_PROCESSES[name].pid})."
        )
        # Secondary confirmation: is the expected port actually listening?
        port = server_config.get("sse_port") or server_config.get("port")
        if port and is_port_in_use(int(port)):
            print(f"Port {port} is listening. No need to start again.")
            return
        else:
            print(
                f"Warning: Process record exists, but port {port} is not listening. Process may have crashed or not fully started. Attempting to restart..."
            )
            # Clean up old record, allowing restart
            del RUNNING_PROCESSES[name]

    print(f"\n--- Starting server: {name} ---")
    server_type = server_config.get("type")
    server_path = server_config.get("path")  # load_config should have already corrected this path

    # Check path (only for source_code type)
    if server_type == "source_code":
        if not server_path or not os.path.isdir(server_path):
            print(
                f"Error: Server path '{server_path}' not found for '{name}'. Please run 'setup' first."
            )
            return

    # Prepare start command
    start_command = server_config.get("start_command")
    if not start_command:
        print(f"Error: Server '{name}' does not define 'start_command'.")
        return

    # Process SSE wrapper command (if exists)
    sse_start_command_template = server_config.get("sse_start_command")
    final_start_command = start_command  # Default to original command
    if sse_start_command_template:
        sse_host = server_config.get("sse_host", "localhost")
        sse_port = server_config.get("sse_port")
        allow_origin = server_config.get("allow_origin", "*")
        if not sse_port:
            print(
                f"Error: Server '{name}' defines 'sse_start_command' but is missing 'sse_port'."
            )
            return

        # Fill the template placeholders; unknown placeholders raise KeyError
        try:
            final_start_command = sse_start_command_template.format(
                sse_host=sse_host,
                sse_port=sse_port,
                allow_origin=allow_origin,
                start_command=start_command,  # Original command passed as parameter
            )
            print(f"[{name}] Using SSE wrapper command: {final_start_command}")
        except KeyError as e:
            print(
                f"Error: Error replacing placeholder {{{e}}} in 'sse_start_command'. Please check the template."
            )
            return
    else:
        print(f"[{name}] Using start command: {final_start_command}")

    # Get environment variables
    env = server_config.get("env", {})

    # Determine working directory (source checkouts run in their own dir)
    cwd = server_path if server_type == "source_code" else None

    # Start process
    process = run_command(final_start_command, cwd=cwd, env=env, server_name=name)

    if process:
        RUNNING_PROCESSES[name] = process
        port_to_check = server_config.get("sse_port") or server_config.get("port")
        print(
            f"Server '{name}' start command executed (PID: {process.pid})."
            f"{f' Expected listening port: {port_to_check}' if port_to_check else ''}"
        )

        # Start output stream threads
        stdout_thread, stderr_thread = stream_output(process, name)

        if watch:
            print(f"[{name}] Entering watch mode. Press Ctrl+C to stop.")
            try:
                # Block until the child exits
                process.wait()
            except KeyboardInterrupt:
                print(f"\n[{name}] Ctrl+C detected. Stopping server...")
                stop_process(name, process)  # Directly call stop_process
            except Exception as e:
                print(f"\n[{name}] Error occurred while waiting for process: {e}. Attempting to stop...")
                stop_process(name, process)
            finally:
                # Ensure threads end (although they are daemon threads, joining them is safer)
                if stdout_thread.is_alive():
                    stdout_thread.join(timeout=1)
                if stderr_thread.is_alive():
                    stderr_thread.join(timeout=1)
                if name in RUNNING_PROCESSES:
                    del RUNNING_PROCESSES[name]  # Remove from running list
                print(f"--- Server '{name}' has stopped (watch mode ended). ---")
        else:
            # Non-watch mode, run in background
            # Brief wait to catch commands that fail immediately
            time.sleep(3)  # Wait 3 seconds to give the service a chance to start or fail
            if process.poll() is not None:  # Process has exited
                print(
                    f"Error: Server '{name}' (PID: {process.pid}) seems to have exited shortly after starting (exit code: {process.poll()})."
                )
                # Try to read the last error output (may have been read by stream_output thread)
                # Consider having stream_output collect the last few lines of error information
                if name in RUNNING_PROCESSES:
                    del RUNNING_PROCESSES[name]  # Remove from running list
            else:
                print(f"Server '{name}' (PID: {process.pid}) is running in the background.")
                # Check if port is listening as expected (optional but recommended)
                if port_to_check:
                    time.sleep(2)  # Wait a bit longer to ensure service is listening
                    if is_port_in_use(int(port_to_check)):
                        print(f"[{name}] Port {port_to_check} confirmed to be listening.")
                    else:
                        print(
                            f"Warning: Server '{name}' is running, but port {port_to_check} is not listening as expected."
                        )

    else:
        print(f"Error: Unable to start server '{name}'.")
245 |
246 |
def stop_server(server_config: dict):
    """Stop the specified server, but only if this script instance started it.

    This cannot stop processes launched outside the current script; stopping
    an arbitrary port's owner would require PID discovery logic that lives
    elsewhere.
    """
    name = server_config.get("name", "Unknown Server")
    print(f"\n--- Stopping server: {name} ---")

    # Remove from the tracking table regardless of whether stopping succeeds
    process = RUNNING_PROCESSES.pop(name, None)
    if process is None:
        print(f"Server '{name}' is not running under the management of the current script (or has already been stopped).")
        return

    stop_process(name, process)  # Delegate the actual termination
260 |
261 |
def status_servers():
    """Display the status of all configured servers.

    Prints one table row per configured server: enabled flag, type, port,
    listening status, the child PID when this script instance launched it,
    and the SSE URL built from the resolved (externally reachable) host.
    """
    print("\n--- MCP Server Status ---")
    # Import here to avoid circular imports between commands and config
    from .config import load_config, get_server_ip_port

    config = load_config()
    servers = config.get("servers", [])

    if not servers:
        print("No servers defined in the configuration file.")
        return

    print(
        f"{'Name':<20} {'Enabled':<10} {'Type':<15} {'Port':<10} {'Status':<30} {'PID (This Instance)':<20} {'Url'}"
    )
    print("-" * 100)  # Adjust separator line length

    # Clean up processes in RUNNING_PROCESSES that have already ended
    for name, process in list(RUNNING_PROCESSES.items()):
        if process.poll() is not None:
            print(f"[Status Check] Cleaning up ended process record: {name} (PID: {process.pid})")
            del RUNNING_PROCESSES[name]

    for server in servers:
        name = server.get("name", "N/A")
        enabled = str(server.get("enabled", True))
        stype = server.get("type", "N/A")
        # Prioritize sse_port, then port
        port = server.get("sse_port") or server.get("port")
        status = "Unknown"
        pid_str = "N/A"

        # Resolve an externally reachable host/port (handles 0.0.0.0 binds)
        resolved_host, resolved_port = get_server_ip_port(server)
        # A port explicitly present in the config wins over the resolved default
        if port:
            resolved_port = port

        # BUG FIX: the URL previously interpolated the raw `port`, which can be
        # None; use resolved_port so the URL always carries a usable port.
        url = f"http://{resolved_host}:{resolved_port}/sse"

        if enabled == "True":
            if port:
                # Guard against a malformed (non-integer) port in the config
                try:
                    port_listening = is_port_in_use(int(port))
                except (TypeError, ValueError):
                    port_listening = False
                if port_listening:
                    status = f"Running (port {port} listening)"
                    # Distinguish processes this instance started from external ones
                    if name in RUNNING_PROCESSES:
                        pid_str = str(RUNNING_PROCESSES[name].pid)
                    else:
                        pid_str = "(External start)"
                else:
                    status = "Stopped"
                    # A tracked process whose port is closed likely crashed or failed to start
                    if name in RUNNING_PROCESSES:
                        exit_code = RUNNING_PROCESSES[name].poll()
                        status = f"Error/Exited (code: {exit_code})"
                        pid_str = str(RUNNING_PROCESSES[name].pid)
            else:
                status = "No port configured"  # Without a port we can only report tracked processes
                if name in RUNNING_PROCESSES:
                    if RUNNING_PROCESSES[name].poll() is None:
                        status = "Running (no port check)"
                        pid_str = str(RUNNING_PROCESSES[name].pid)
                    else:
                        exit_code = RUNNING_PROCESSES[name].poll()
                        status = f"Exited (code: {exit_code})"
                        pid_str = str(RUNNING_PROCESSES[name].pid)

        else:  # enabled == "False"
            status = "Disabled"

        print(
            f"{name:<20} {enabled:<10} {stype:<15} {str(port):<10} {status:<30} {pid_str:<20} {url}"
        )
345 |
346 |
def stop_all_servers():
    """Stop all servers started by the current script instance"""
    print("\n--- Stopping all managed servers ---")
    if not RUNNING_PROCESSES:
        print("The current script is not managing any running servers.")
        return

    # Snapshot the registry first: stop_process may mutate it while we iterate
    for name, process in list(RUNNING_PROCESSES.items()):
        print(f"Requesting stop: {name} (PID: {process.pid})")
        stop_process(name, process)

    # Sanity check: anything still registered was not fully cleaned up
    leftovers = list(RUNNING_PROCESSES.keys())
    if leftovers:
        print(f"Warning: The following servers may not have been completely stopped or cleaned up: {', '.join(leftovers)}")
    else:
        print("All managed servers have been processed with stop requests.")

    RUNNING_PROCESSES.clear()  # Guarantee the registry ends up empty
369 |
370 |
def list_servers():
    """List all configured servers (print configuration)"""
    print("\n--- Configured MCP Servers ---")
    rendered = json.dumps(load_config(), indent=2, ensure_ascii=False)
    print(rendered)
376 |
377 |
378 | # --- Signal Handling for Graceful Exit ---
379 |
380 |
def setup_signal_handlers():
    """Set up signal handlers to try to gracefully stop all servers"""

    def _handle(sig, frame):
        print(f"\nSignal {signal.Signals(sig).name} detected. Attempting to stop all managed servers...")
        stop_all_servers()
        print("Exiting script.")
        sys.exit(0)

    # Register for SIGINT (Ctrl+C) and SIGTERM (default signal of `kill`)
    try:
        for sig in (signal.SIGINT, signal.SIGTERM):
            signal.signal(sig, _handle)
        # On Windows, SIGBREAK exists but CTRL_BREAK_EVENT is only relevant
        # for child processes, so the main script registers nothing extra.
        if os.name == "nt":
            pass
    except ValueError:
        print("Warning: Running in a non-main thread, cannot set signal handlers.")
    except AttributeError:
        print("Warning: The current environment does not support signal handling (e.g., some Windows environments or restricted environments).")
402 |
--------------------------------------------------------------------------------
/scripts/mcp_manager/config.py:
--------------------------------------------------------------------------------
1 | """MCP Server Configuration Management Module
2 |
3 | This module provides functionality for loading and saving MCP server configurations.
4 | """
5 |
6 | import json
7 | import os
8 | import sys
9 |
10 | # --- Constants ---
11 | BASE_DIR = os.path.abspath(os.path.join(os.path.dirname(__file__), "..", ".."))
12 | CONFIG_FILE = os.path.join(BASE_DIR, "config", "mcp_servers.json")
13 | SOURCE_CODE_SERVERS_DIR = os.path.join(BASE_DIR, "mcp-servers")
14 |
15 |
16 | # --- Helper Functions ---
17 |
18 |
def load_config():
    """Load server configuration and automatically correct paths.

    Reads CONFIG_FILE and, for every ``source_code`` server, derives the
    expected checkout path from ``repo``/``subdir``. When the stored ``path``
    disagrees (or is empty), it is corrected and the file is saved back.

    Returns:
        dict: the (possibly updated) configuration.

    Exits the process with status 1 when the file is missing or invalid JSON.
    """
    try:
        with open(CONFIG_FILE, "r", encoding="utf-8") as f:
            config = json.load(f)
    except FileNotFoundError:
        print(f"Error: Configuration file not found at {CONFIG_FILE}")
        sys.exit(1)
    except json.JSONDecodeError:
        print(f"Error: Configuration file {CONFIG_FILE} is not valid JSON.")
        sys.exit(1)

    updated = False
    for server in config.get("servers", []):
        # Only source_code servers have a derivable local path
        if server.get("type") != "source_code":
            continue
        if server.get("repo") and server.get("subdir") is not None:
            repo_name = server["repo"].split("/")[-1].replace(".git", "")
            repo_base_path = os.path.join(SOURCE_CODE_SERVERS_DIR, repo_name)

            # subdir "." means the server lives at the repository root
            if server["subdir"] == ".":
                expected_path = repo_base_path
            else:
                expected_path = os.path.join(repo_base_path, server["subdir"])

            # Normalize separators so Windows/POSIX paths compare equal
            expected_path_norm = os.path.normpath(expected_path).replace("\\", "/")
            current_path_norm = (
                os.path.normpath(server.get("path", "")).replace("\\", "/")
                if server.get("path")
                else ""
            )

            # If the stored path is wrong or empty, update it to the expected path
            if current_path_norm != expected_path_norm:
                print(
                    f"Updating server '{server['name']}' path to: {expected_path_norm}"
                )
                server["path"] = expected_path_norm
                updated = True
        elif not server.get("path"):
            # BUG FIX: this warning used to hang off the inner path comparison
            # as an unreachable elif (an empty path always differs from the
            # expected path, so the update branch always won). It belongs
            # here: repo/subdir are missing AND no path is configured.
            print(
                f"Warning: source_code server '{server['name']}' is missing 'repo'/'subdir' or 'path' configuration."
            )

    # Persist any corrections
    if updated:
        save_config(config)

    return config
69 |
70 |
def save_config(config):
    """Save server configuration"""
    try:
        serialized = json.dumps(config, indent=2, ensure_ascii=False)
        with open(CONFIG_FILE, "w", encoding="utf-8") as f:
            f.write(serialized)
        print(f"Configuration updated and saved to {CONFIG_FILE}")
    except IOError:
        print(f"Error: Cannot write to configuration file {CONFIG_FILE}")
79 |
80 |
def get_server_ip_port(server_config):
    """
    Extract IP and port from server configuration.

    Handles address resolution for containerized environments:
    - If host is bound to 0.0.0.0 (all interfaces), substitutes an
      externally accessible address.
    - Priority: REAL_HOST_IP env var, then EXTERNAL_HOST, then auto-detection
      (hostname lookup, UDP routing trick, Docker host aliases).

    Args:
        server_config: Dictionary containing server configuration.

    Returns:
        tuple: (host, port) containing the resolved address and port number.
    """
    host = server_config.get("sse_host", "127.0.0.1")

    # 0.0.0.0 is a bind address, not a reachable one: substitute a host
    # that external clients can actually connect to.
    if host == "0.0.0.0":
        import os
        import socket

        real_host_ip = os.environ.get("REAL_HOST_IP")
        external_host = os.environ.get("EXTERNAL_HOST")
        if real_host_ip:
            # Highest priority: explicitly configured real host IP
            host = real_host_ip
        elif external_host:
            # Fall back to the configured external host
            host = external_host
        else:
            # Auto-detect: start from the hostname's address...
            host = socket.gethostbyname(socket.gethostname())
            # ...then refine via a routing lookup (UDP connect sends no packets)
            try:
                s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
                try:
                    s.connect(("8.8.8.8", 80))
                    host = s.getsockname()[0]
                finally:
                    s.close()  # BUG FIX: always release the socket
            except OSError:  # BUG FIX: was a bare `except:`
                pass
            # NOTE: as in the original, a resolvable Docker host alias
            # overrides whatever the detection above produced.
            docker_hosts = ["host.docker.internal", "host.lima.internal"]
            for docker_host in docker_hosts:
                try:
                    socket.gethostbyname(docker_host)
                    host = docker_host
                    break
                except socket.gaierror:
                    continue

    # Last resort if nothing replaced the bind-all address
    if host == "0.0.0.0":
        host = "localhost"

    port = server_config.get("sse_port", 23001)
    return host, port
141 |
--------------------------------------------------------------------------------
/scripts/mcp_manager/process_utils.py:
--------------------------------------------------------------------------------
1 | """MCP Server Management Tool - Process Utility Module
2 |
3 | Contains utility functions for process management, including command execution, port checking, process termination, and other functionalities.
4 | """
5 |
6 | # scripts/mcp_manager/process_utils.py
7 | import os
8 | import signal
9 | import socket
10 | import subprocess
11 | import sys
12 | import threading
13 | import time
14 |
15 | # Used to store process information started by this script {name: Popen_object}
16 | # Note: This only tracks processes started by the current running instance
17 | RUNNING_PROCESSES = {}
18 |
19 | # --- Port Checking ---
20 |
21 |
def is_port_in_use(port: int) -> bool:
    """Check if a local port is being listened on"""
    probe = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    try:
        probe.settimeout(0.2)  # keep the probe quick
        # A successful connect means something is accepting on the port
        probe.connect(("127.0.0.1", port))
        return True
    except (socket.timeout, ConnectionRefusedError):
        return False
    except Exception:
        # Any other failure is treated as "not listening / unverifiable"
        return False
    finally:
        probe.close()
36 |
37 |
38 | # --- Command Execution ---
39 |
40 |
def run_command(
    command: str, cwd: str | None = None, env: dict | None = None, server_name: str = ""
) -> subprocess.Popen | None:
    """Run a command in the specified directory and return a Popen object.

    Args:
        command: Command line to execute, as a single string.
        cwd: Working directory for the child process; current directory if None.
        env: Extra environment variables merged over a copy of os.environ.
        server_name: Label used to prefix log output.

    Returns:
        The started subprocess.Popen (stdout/stderr piped as replace-decoded
        UTF-8 text), or None when the executable is missing or launch fails.
    """
    print(f"[{server_name}] Preparing to execute command: '{command}' in directory '{cwd or os.getcwd()}'")

    current_env = os.environ.copy()
    if env:
        current_env.update(env)
        # print(f"[{server_name}] Using custom environment variables: {env}") # Debug information

    # Ensure PATH exists
    if "PATH" not in current_env or not current_env["PATH"]:
        current_env["PATH"] = os.environ.get("PATH", "")  # Get from os.environ just in case

    shell = False
    args = command  # pass the whole command string through by default

    # Windows-specific command handling
    if os.name == "nt":
        # For commands that need cmd /c (e.g., containing pipes, redirections, or built-in commands)
        if command.lower().startswith("cmd /c") or any(
            op in command for op in ["|", ">", "<", "&", "&&", "||"]
        ):
            args = command  # keep as-is
            shell = True  # cmd /c requires shell=True
            print(f"[DEBUG][{server_name}] Using cmd /c (shell=True) to execute: {args}")
        # For npm/npx, it's better to call the .cmd file directly, avoiding an extra layer of cmd /c
        elif command.lower().startswith("npm ") or command.lower().startswith("npx "):
            parts = command.split(" ", 1)
            cmd_name = parts[0]
            cmd_args = parts[1] if len(parts) > 1 else ""
            # Try to find the full path of npm.cmd or npx.cmd (may be in PATH or node_modules/.bin)
            # Simplified handling here, using cmd /c directly for compatibility, although slightly less efficient
            args = f"cmd /c {cmd_name} {cmd_args}"
            shell = True
            print(
                f"[DEBUG][{server_name}] Using cmd /c (shell=True) to execute {cmd_name}: {args}"
            )
        else:
            # For other simple commands, try to execute directly, may not need shell=True
            try:
                # Try to split the command, if it fails (e.g., path contains spaces and is not quoted), fall back to shell=True
                # NOTE(review): args_list is never used; the call appears to
                # exist only to raise on malformed input — TODO confirm.
                args_list = subprocess.list2cmdline(
                    [command.split()[0]]
                )  # probe whether the first token looks like an executable
                args = command.split()
                shell = False  # attempt non-shell mode
                print(f"[DEBUG][{server_name}] Attempting to execute directly (shell=False): {args}")
            except Exception:
                args = command  # Fall back to passing the entire command as a string
                shell = True
                print(f"[DEBUG][{server_name}] Unable to split command, falling back to shell=True: {args}")

    # Linux/macOS handling
    else:
        # Usually can be split directly, but shell=True better handles complex commands
        args = command
        shell = True  # On non-Windows, using shell=True is usually safer for handling complex commands
        print(f"[DEBUG][{server_name}] On non-Windows using shell=True to execute: {args}")

    try:
        creationflags = 0
        if os.name == "nt":
            # CREATE_NEW_PROCESS_GROUP allows us to reliably terminate child processes later
            creationflags = subprocess.CREATE_NEW_PROCESS_GROUP

        process = subprocess.Popen(
            args,
            cwd=cwd,
            env=current_env,
            stdout=subprocess.PIPE,
            stderr=subprocess.PIPE,
            text=True,
            encoding="utf-8",
            errors="replace",
            shell=shell,
            creationflags=creationflags,
        )
        print(f"[{server_name}] Command started (PID: {process.pid})")
        return process
    except FileNotFoundError:
        # args may be a list (shell=False) or a string (shell=True)
        cmd_to_report = args[0] if isinstance(args, list) else args.split()[0]
        print(f"Error: Command '{cmd_to_report}' not found. Please ensure it is installed and in the system PATH.")
        return None
    except Exception as e:
        print(f"Error: Error executing command '{command}': {e}")
        return None
129 |
130 |
131 | # --- Process Streaming ---
132 |
133 |
def stream_output(process: subprocess.Popen, server_name: str):
    """Print process stdout and stderr in real-time"""

    def _pump(pipe, prefix):
        try:
            if pipe:
                for line in iter(pipe.readline, ""):
                    print(f"[{server_name}-{prefix}] {line.strip()}")
        except ValueError:
            # Raised when the pipe is closed as the process winds down
            print(f"[{server_name}-{prefix}] Error reading pipe (may be closed).")
        finally:
            if pipe:
                try:
                    pipe.close()
                except Exception:
                    pass  # closing errors are not actionable

    workers = []
    for stream, tag in ((process.stdout, "out"), (process.stderr, "err")):
        worker = threading.Thread(target=_pump, args=(stream, tag), daemon=True)
        worker.start()
        workers.append(worker)
    return workers[0], workers[1]
161 |
162 |
163 | # --- Process Termination ---
164 |
165 |
def stop_process(name: str, process: subprocess.Popen):
    """Attempt to stop the specified process.

    Escalation order: CTRL_BREAK_EVENT to the process group (Windows) or
    SIGTERM (elsewhere); if the process has not exited within 10 seconds,
    SIGKILL via process.kill(). Always closes the process's stdout/stderr
    pipes at the end, even if the process was already stopped.

    Args:
        name: Display name used to prefix log output.
        process: The Popen handle to terminate.
    """
    if process.poll() is None:  # Process is still running
        print(f"[{name}] Attempting to stop process (PID: {process.pid})...")
        try:
            # On Windows, sending CTRL_BREAK_EVENT is usually the recommended way to stop console applications
            # But terminate() / kill() is more universal
            if os.name == "nt":
                print(f"[{name}] (Windows) Sending CTRL_BREAK_EVENT to process group...")
                # Note: This will be sent to the entire process group, may affect child processes
                os.kill(process.pid, signal.CTRL_BREAK_EVENT)
            else:
                print(f"[{name}] (Non-Windows) Sending SIGTERM signal...")
                process.terminate()  # Send SIGTERM

            # Wait for a while to let the process respond
            try:
                process.wait(timeout=10)
                print(f"[{name}] Process (PID: {process.pid}) has been successfully stopped.")
            except subprocess.TimeoutExpired:
                print(
                    f"[{name}] Process (PID: {process.pid}) did not respond to SIGTERM/CTRL_BREAK within 10 seconds. Attempting to force terminate (SIGKILL)..."
                )
                process.kill()  # Send SIGKILL
                process.wait(timeout=5)  # Wait for SIGKILL to take effect
                print(f"[{name}] Process (PID: {process.pid}) has been forcibly terminated.")
            except Exception as e:  # Handle other errors that may occur with wait
                print(
                    f"[{name}] Error occurred while waiting for process {process.pid} to stop: {e}. Attempting to force terminate..."
                )
                process.kill()
                print(f"[{name}] Process (PID: {process.pid}) has been forcibly terminated.")

        except ProcessLookupError:
            # The process exited between the poll() above and the signal
            print(
                f"[{name}] Process (PID: {process.pid}) not found when attempting to stop (may have exited on its own)."
            )
        except OSError as e:
            print(f"[{name}] OS error occurred when stopping process {process.pid}: {e}.")
        except Exception as e:
            print(f"[{name}] Unexpected error occurred when stopping process {process.pid}: {e}")
    else:
        print(f"[{name}] Process (PID: {process.pid}) is already stopped.")

    # Clean up standard output/error streams
    try:
        if process.stdout:
            process.stdout.close()
        if process.stderr:
            process.stderr.close()
    except Exception:
        pass  # Ignore closing errors
218 |
219 |
220 | # --- Git Operations ---
221 |
222 |
def clone_repo(repo_url: str, target_dir: str, server_name: str = "") -> bool:
    """Clone a Git repository, or update it with `git pull` if already present.

    Args:
        repo_url: URL of the repository to clone.
        target_dir: Local checkout directory (created by `git clone` if absent).
        server_name: Label used to prefix log output.

    Returns:
        bool: True on success. A failed `git pull` on an existing checkout is
        reported as a warning but still returns True (the checkout remains
        usable); a failed clone or a missing `git` executable returns False.
    """
    # IMPROVEMENT: the clone and pull branches previously duplicated the whole
    # subprocess.run/try-except ladder; they now share one parameterized path.
    target_dir_abs = os.path.abspath(target_dir)

    cloning = not os.path.exists(target_dir_abs)
    if cloning:
        print(
            f"[{server_name}] Repository directory does not exist, cloning {repo_url} to {target_dir_abs}..."
        )
        command = ["git", "clone", repo_url, target_dir_abs]
        cwd = None
    else:
        print(f"[{server_name}] Directory {target_dir_abs} already exists. Attempting to update (git pull)...")
        command = ["git", "pull"]
        cwd = target_dir_abs  # Execute git pull in the target directory

    try:
        subprocess.run(
            command,
            cwd=cwd,
            capture_output=True,
            text=True,
            check=True,
            encoding="utf-8",
            errors="replace",
        )
    except subprocess.CalledProcessError as e:
        if cloning:
            print(f"[{server_name}] Error: Failed to clone repository. Return code: {e.returncode}")
        else:
            print(f"[{server_name}] Warning: Failed to update repository. Return code: {e.returncode}")
        print(f"[{server_name}] Git Stderr:\n{e.stderr}")
        print(f"[{server_name}] Git Stdout:\n{e.stdout}")
        # A failed pull is non-fatal: the existing checkout is still usable
        return not cloning
    except FileNotFoundError:
        print(
            f"[{server_name}] Error: 'git' command not found. Please ensure Git is installed and added to the system PATH."
        )
        return False
    except Exception as e:
        action = "cloning" if cloning else "updating"
        print(f"[{server_name}] Unknown error occurred while {action} repository: {e}")
        return False

    print(f"[{server_name}] Clone successful." if cloning else f"[{server_name}] Update successful.")
    return True
290 |
--------------------------------------------------------------------------------
/scripts/setup_env.py:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env python3
2 | """
3 | Setup virtual environment for MCP Server Unified Deployment Tool
4 |
5 | This script uses uv to create a virtual environment and install required dependencies.
6 | uv is a fast Python package manager and virtual environment tool.
7 | """
8 |
9 | import os
10 | import subprocess
11 | import sys
12 |
def run_command(command):
    """Run command and print output"""
    print(f"Executing: {command}")
    completed = subprocess.run(command, shell=True, check=False)
    if completed.returncode != 0:
        print(f"Command execution failed, exit code: {completed.returncode}")
        sys.exit(completed.returncode)
    return completed
21 |
def main():
    """Create a uv-managed virtual environment and print follow-up instructions.

    Steps: change to the project root, install uv if absent, create a
    Python 3.12 venv, install the project in editable mode, and print
    activation and next-step guidance. Exits (via run_command) if a step fails.
    """
    # Get the parent directory of the script directory (project root directory)
    script_dir = os.path.dirname(os.path.abspath(__file__))
    project_dir = os.path.dirname(script_dir)

    # Change to project root directory
    os.chdir(project_dir)
    print(f"Changed to project directory: {project_dir}")

    # Check if uv is installed
    try:
        subprocess.run(["uv", "--version"], check=True, capture_output=True)
        print("uv is already installed")
    except (subprocess.CalledProcessError, FileNotFoundError):
        print("uv is not installed, installing...")
        run_command("pip install uv")

    # Create virtual environment
    print("\nCreating virtual environment...")
    run_command("uv venv --python=3.12")

    # Display command to activate virtual environment
    venv_activate_cmd = ".venv\\Scripts\\activate" if sys.platform == "win32" else "source .venv/bin/activate"
    print(f"\nPlease use the following command to activate the virtual environment:\n{venv_activate_cmd}")

    # Install dependencies
    print("\nInstalling dependencies...")
    run_command("uv pip install -e .")
    print("\nOr install dependencies using requirements.txt:")
    print("uv pip install -r requirements.txt")

    # Suggest installing mcp-proxy
    print("\nRecommended to install mcp-proxy using pipx:")
    print("pip install pipx")
    print("pipx ensurepath")
    print("pipx install mcp-proxy")

    print("\nEnvironment setup complete! Please follow these steps to continue:")
    print(f"1. Activate virtual environment: {venv_activate_cmd}")
    print("2. Configure MCP server: Edit config/mcp_servers.json file")
    print("3. Start MCP server: python scripts/manage_mcp.py start")
    print("\nFor more information, please refer to the README.md file")

    # NOTE(review): this completion/activation block duplicates the messages
    # printed just above — consider removing one of the two.
    print("\nEnvironment setup complete!")
    print("Use the following command to activate the virtual environment:")
    if os.name == 'nt': # Windows
        print(".venv\\Scripts\\activate")
    else: # Unix/Linux/MacOS
        print("source .venv/bin/activate")
71 |
# Run setup only when executed as a script (not when imported)
if __name__ == "__main__":
    main()
--------------------------------------------------------------------------------
/uv.lock:
--------------------------------------------------------------------------------
1 | version = 1
2 | revision = 1
3 | requires-python = ">=3.12"
4 |
5 | [[package]]
6 | name = "annotated-types"
7 | version = "0.7.0"
8 | source = { registry = "https://pypi.org/simple" }
9 | sdist = { url = "https://files.pythonhosted.org/packages/ee/67/531ea369ba64dcff5ec9c3402f9f51bf748cec26dde048a2f973a4eea7f5/annotated_types-0.7.0.tar.gz", hash = "sha256:aff07c09a53a08bc8cfccb9c85b05f1aa9a2a6f23728d790723543408344ce89", size = 16081 }
10 | wheels = [
11 | { url = "https://files.pythonhosted.org/packages/78/b6/6307fbef88d9b5ee7421e68d78a9f162e0da4900bc5f5793f6d3d0e34fb8/annotated_types-0.7.0-py3-none-any.whl", hash = "sha256:1f02e8b43a8fbbc3f3e0d4f0f4bfc8131bcb4eebe8849b8e5c773f3a1c582a53", size = 13643 },
12 | ]
13 |
14 | [[package]]
15 | name = "anyio"
16 | version = "4.9.0"
17 | source = { registry = "https://pypi.org/simple" }
18 | dependencies = [
19 | { name = "idna" },
20 | { name = "sniffio" },
21 | { name = "typing-extensions", marker = "python_full_version < '3.13'" },
22 | ]
23 | sdist = { url = "https://files.pythonhosted.org/packages/95/7d/4c1bd541d4dffa1b52bd83fb8527089e097a106fc90b467a7313b105f840/anyio-4.9.0.tar.gz", hash = "sha256:673c0c244e15788651a4ff38710fea9675823028a6f08a5eda409e0c9840a028", size = 190949 }
24 | wheels = [
25 | { url = "https://files.pythonhosted.org/packages/a1/ee/48ca1a7c89ffec8b6a0c5d02b89c305671d5ffd8d3c94acf8b8c408575bb/anyio-4.9.0-py3-none-any.whl", hash = "sha256:9f76d541cad6e36af7beb62e978876f3b41e3e04f2c1fbf0884604c0a9c4d93c", size = 100916 },
26 | ]
27 |
28 | [[package]]
29 | name = "certifi"
30 | version = "2025.1.31"
31 | source = { registry = "https://pypi.org/simple" }
32 | sdist = { url = "https://files.pythonhosted.org/packages/1c/ab/c9f1e32b7b1bf505bf26f0ef697775960db7932abeb7b516de930ba2705f/certifi-2025.1.31.tar.gz", hash = "sha256:3d5da6925056f6f18f119200434a4780a94263f10d1c21d032a6f6b2baa20651", size = 167577 }
33 | wheels = [
34 | { url = "https://files.pythonhosted.org/packages/38/fc/bce832fd4fd99766c04d1ee0eead6b0ec6486fb100ae5e74c1d91292b982/certifi-2025.1.31-py3-none-any.whl", hash = "sha256:ca78db4565a652026a4db2bcdf68f2fb589ea80d0be70e03929ed730746b84fe", size = 166393 },
35 | ]
36 |
37 | [[package]]
38 | name = "click"
39 | version = "8.1.8"
40 | source = { registry = "https://pypi.org/simple" }
41 | dependencies = [
42 | { name = "colorama", marker = "sys_platform == 'win32'" },
43 | ]
44 | sdist = { url = "https://files.pythonhosted.org/packages/b9/2e/0090cbf739cee7d23781ad4b89a9894a41538e4fcf4c31dcdd705b78eb8b/click-8.1.8.tar.gz", hash = "sha256:ed53c9d8990d83c2a27deae68e4ee337473f6330c040a31d4225c9574d16096a", size = 226593 }
45 | wheels = [
46 | { url = "https://files.pythonhosted.org/packages/7e/d4/7ebdbd03970677812aac39c869717059dbb71a4cfc033ca6e5221787892c/click-8.1.8-py3-none-any.whl", hash = "sha256:63c132bbbed01578a06712a2d1f497bb62d9c1c0d329b7903a866228027263b2", size = 98188 },
47 | ]
48 |
49 | [[package]]
50 | name = "colorama"
51 | version = "0.4.6"
52 | source = { registry = "https://pypi.org/simple" }
53 | sdist = { url = "https://files.pythonhosted.org/packages/d8/53/6f443c9a4a8358a93a6792e2acffb9d9d5cb0a5cfd8802644b7b1c9a02e4/colorama-0.4.6.tar.gz", hash = "sha256:08695f5cb7ed6e0531a20572697297273c47b8cae5a63ffc6d6ed5c201be6e44", size = 27697 }
54 | wheels = [
55 | { url = "https://files.pythonhosted.org/packages/d1/d6/3965ed04c63042e047cb6a3e6ed1a63a35087b6a609aa3a15ed8ac56c221/colorama-0.4.6-py2.py3-none-any.whl", hash = "sha256:4f1d9991f5acc0ca119f9d443620b77f9d6b33703e51011c16baf57afb285fc6", size = 25335 },
56 | ]
57 |
58 | [[package]]
59 | name = "h11"
60 | version = "0.14.0"
61 | source = { registry = "https://pypi.org/simple" }
62 | sdist = { url = "https://files.pythonhosted.org/packages/f5/38/3af3d3633a34a3316095b39c8e8fb4853a28a536e55d347bd8d8e9a14b03/h11-0.14.0.tar.gz", hash = "sha256:8f19fbbe99e72420ff35c00b27a34cb9937e902a8b810e2c88300c6f0a3b699d", size = 100418 }
63 | wheels = [
64 | { url = "https://files.pythonhosted.org/packages/95/04/ff642e65ad6b90db43e668d70ffb6736436c7ce41fcc549f4e9472234127/h11-0.14.0-py3-none-any.whl", hash = "sha256:e3fe4ac4b851c468cc8363d500db52c2ead036020723024a109d37346efaa761", size = 58259 },
65 | ]
66 |
67 | [[package]]
68 | name = "httpcore"
69 | version = "1.0.7"
70 | source = { registry = "https://pypi.org/simple" }
71 | dependencies = [
72 | { name = "certifi" },
73 | { name = "h11" },
74 | ]
75 | sdist = { url = "https://files.pythonhosted.org/packages/6a/41/d7d0a89eb493922c37d343b607bc1b5da7f5be7e383740b4753ad8943e90/httpcore-1.0.7.tar.gz", hash = "sha256:8551cb62a169ec7162ac7be8d4817d561f60e08eaa485234898414bb5a8a0b4c", size = 85196 }
76 | wheels = [
77 | { url = "https://files.pythonhosted.org/packages/87/f5/72347bc88306acb359581ac4d52f23c0ef445b57157adedb9aee0cd689d2/httpcore-1.0.7-py3-none-any.whl", hash = "sha256:a3fff8f43dc260d5bd363d9f9cf1830fa3a458b332856f34282de498ed420edd", size = 78551 },
78 | ]
79 |
80 | [[package]]
81 | name = "httpx"
82 | version = "0.28.1"
83 | source = { registry = "https://pypi.org/simple" }
84 | dependencies = [
85 | { name = "anyio" },
86 | { name = "certifi" },
87 | { name = "httpcore" },
88 | { name = "idna" },
89 | ]
90 | sdist = { url = "https://files.pythonhosted.org/packages/b1/df/48c586a5fe32a0f01324ee087459e112ebb7224f646c0b5023f5e79e9956/httpx-0.28.1.tar.gz", hash = "sha256:75e98c5f16b0f35b567856f597f06ff2270a374470a5c2392242528e3e3e42fc", size = 141406 }
91 | wheels = [
92 | { url = "https://files.pythonhosted.org/packages/2a/39/e50c7c3a983047577ee07d2a9e53faf5a69493943ec3f6a384bdc792deb2/httpx-0.28.1-py3-none-any.whl", hash = "sha256:d909fcccc110f8c7faf814ca82a9a4d816bc5a6dbfea25d6591d6985b8ba59ad", size = 73517 },
93 | ]
94 |
95 | [[package]]
96 | name = "httpx-sse"
97 | version = "0.4.0"
98 | source = { registry = "https://pypi.org/simple" }
99 | sdist = { url = "https://files.pythonhosted.org/packages/4c/60/8f4281fa9bbf3c8034fd54c0e7412e66edbab6bc74c4996bd616f8d0406e/httpx-sse-0.4.0.tar.gz", hash = "sha256:1e81a3a3070ce322add1d3529ed42eb5f70817f45ed6ec915ab753f961139721", size = 12624 }
100 | wheels = [
101 | { url = "https://files.pythonhosted.org/packages/e1/9b/a181f281f65d776426002f330c31849b86b31fc9d848db62e16f03ff739f/httpx_sse-0.4.0-py3-none-any.whl", hash = "sha256:f329af6eae57eaa2bdfd962b42524764af68075ea87370a2de920af5341e318f", size = 7819 },
102 | ]
103 |
104 | [[package]]
105 | name = "idna"
106 | version = "3.10"
107 | source = { registry = "https://pypi.org/simple" }
108 | sdist = { url = "https://files.pythonhosted.org/packages/f1/70/7703c29685631f5a7590aa73f1f1d3fa9a380e654b86af429e0934a32f7d/idna-3.10.tar.gz", hash = "sha256:12f65c9b470abda6dc35cf8e63cc574b1c52b11df2c86030af0ac09b01b13ea9", size = 190490 }
109 | wheels = [
110 | { url = "https://files.pythonhosted.org/packages/76/c6/c88e154df9c4e1a2a66ccf0005a88dfb2650c1dffb6f5ce603dfbd452ce3/idna-3.10-py3-none-any.whl", hash = "sha256:946d195a0d259cbba61165e88e65941f16e9b36ea6ddb97f00452bae8b1287d3", size = 70442 },
111 | ]
112 |
113 | [[package]]
114 | name = "mcp"
115 | version = "1.6.0"
116 | source = { registry = "https://pypi.org/simple" }
117 | dependencies = [
118 | { name = "anyio" },
119 | { name = "httpx" },
120 | { name = "httpx-sse" },
121 | { name = "pydantic" },
122 | { name = "pydantic-settings" },
123 | { name = "sse-starlette" },
124 | { name = "starlette" },
125 | { name = "uvicorn" },
126 | ]
127 | sdist = { url = "https://files.pythonhosted.org/packages/95/d2/f587cb965a56e992634bebc8611c5b579af912b74e04eb9164bd49527d21/mcp-1.6.0.tar.gz", hash = "sha256:d9324876de2c5637369f43161cd71eebfd803df5a95e46225cab8d280e366723", size = 200031 }
128 | wheels = [
129 | { url = "https://files.pythonhosted.org/packages/10/30/20a7f33b0b884a9d14dd3aa94ff1ac9da1479fe2ad66dd9e2736075d2506/mcp-1.6.0-py3-none-any.whl", hash = "sha256:7bd24c6ea042dbec44c754f100984d186620d8b841ec30f1b19eda9b93a634d0", size = 76077 },
130 | ]
131 |
132 | [[package]]
133 | name = "mcp-server-unified-deployment"
134 | version = "0.0.1"
135 | source = { editable = "." }
136 | dependencies = [
137 | { name = "mcp" },
138 | ]
139 |
140 | [package.metadata]
141 | requires-dist = [{ name = "mcp", specifier = ">=1.6.0" }]
142 |
143 | [[package]]
144 | name = "pydantic"
145 | version = "2.11.1"
146 | source = { registry = "https://pypi.org/simple" }
147 | dependencies = [
148 | { name = "annotated-types" },
149 | { name = "pydantic-core" },
150 | { name = "typing-extensions" },
151 | { name = "typing-inspection" },
152 | ]
153 | sdist = { url = "https://files.pythonhosted.org/packages/93/a3/698b87a4d4d303d7c5f62ea5fbf7a79cab236ccfbd0a17847b7f77f8163e/pydantic-2.11.1.tar.gz", hash = "sha256:442557d2910e75c991c39f4b4ab18963d57b9b55122c8b2a9cd176d8c29ce968", size = 782817 }
154 | wheels = [
155 | { url = "https://files.pythonhosted.org/packages/cc/12/f9221a949f2419e2e23847303c002476c26fbcfd62dc7f3d25d0bec5ca99/pydantic-2.11.1-py3-none-any.whl", hash = "sha256:5b6c415eee9f8123a14d859be0c84363fec6b1feb6b688d6435801230b56e0b8", size = 442648 },
156 | ]
157 |
158 | [[package]]
159 | name = "pydantic-core"
160 | version = "2.33.0"
161 | source = { registry = "https://pypi.org/simple" }
162 | dependencies = [
163 | { name = "typing-extensions" },
164 | ]
165 | sdist = { url = "https://files.pythonhosted.org/packages/b9/05/91ce14dfd5a3a99555fce436318cc0fd1f08c4daa32b3248ad63669ea8b4/pydantic_core-2.33.0.tar.gz", hash = "sha256:40eb8af662ba409c3cbf4a8150ad32ae73514cd7cb1f1a2113af39763dd616b3", size = 434080 }
166 | wheels = [
167 | { url = "https://files.pythonhosted.org/packages/a9/c4/c9381323cbdc1bb26d352bc184422ce77c4bc2f2312b782761093a59fafc/pydantic_core-2.33.0-cp312-cp312-macosx_10_12_x86_64.whl", hash = "sha256:6c32a40712e3662bebe524abe8abb757f2fa2000028d64cc5a1006016c06af43", size = 2025127 },
168 | { url = "https://files.pythonhosted.org/packages/6f/bd/af35278080716ecab8f57e84515c7dc535ed95d1c7f52c1c6f7b313a9dab/pydantic_core-2.33.0-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:8ec86b5baa36f0a0bfb37db86c7d52652f8e8aa076ab745ef7725784183c3fdd", size = 1851687 },
169 | { url = "https://files.pythonhosted.org/packages/12/e4/a01461225809c3533c23bd1916b1e8c2e21727f0fea60ab1acbffc4e2fca/pydantic_core-2.33.0-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:4deac83a8cc1d09e40683be0bc6d1fa4cde8df0a9bf0cda5693f9b0569ac01b6", size = 1892232 },
170 | { url = "https://files.pythonhosted.org/packages/51/17/3d53d62a328fb0a49911c2962036b9e7a4f781b7d15e9093c26299e5f76d/pydantic_core-2.33.0-cp312-cp312-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:175ab598fb457a9aee63206a1993874badf3ed9a456e0654273e56f00747bbd6", size = 1977896 },
171 | { url = "https://files.pythonhosted.org/packages/30/98/01f9d86e02ec4a38f4b02086acf067f2c776b845d43f901bd1ee1c21bc4b/pydantic_core-2.33.0-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:5f36afd0d56a6c42cf4e8465b6441cf546ed69d3a4ec92724cc9c8c61bd6ecf4", size = 2127717 },
172 | { url = "https://files.pythonhosted.org/packages/3c/43/6f381575c61b7c58b0fd0b92134c5a1897deea4cdfc3d47567b3ff460a4e/pydantic_core-2.33.0-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:0a98257451164666afafc7cbf5fb00d613e33f7e7ebb322fbcd99345695a9a61", size = 2680287 },
173 | { url = "https://files.pythonhosted.org/packages/01/42/c0d10d1451d161a9a0da9bbef023b8005aa26e9993a8cc24dc9e3aa96c93/pydantic_core-2.33.0-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:ecc6d02d69b54a2eb83ebcc6f29df04957f734bcf309d346b4f83354d8376862", size = 2008276 },
174 | { url = "https://files.pythonhosted.org/packages/20/ca/e08df9dba546905c70bae44ced9f3bea25432e34448d95618d41968f40b7/pydantic_core-2.33.0-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:1a69b7596c6603afd049ce7f3835bcf57dd3892fc7279f0ddf987bebed8caa5a", size = 2115305 },
175 | { url = "https://files.pythonhosted.org/packages/03/1f/9b01d990730a98833113581a78e595fd40ed4c20f9693f5a658fb5f91eff/pydantic_core-2.33.0-cp312-cp312-musllinux_1_1_aarch64.whl", hash = "sha256:ea30239c148b6ef41364c6f51d103c2988965b643d62e10b233b5efdca8c0099", size = 2068999 },
176 | { url = "https://files.pythonhosted.org/packages/20/18/fe752476a709191148e8b1e1139147841ea5d2b22adcde6ee6abb6c8e7cf/pydantic_core-2.33.0-cp312-cp312-musllinux_1_1_armv7l.whl", hash = "sha256:abfa44cf2f7f7d7a199be6c6ec141c9024063205545aa09304349781b9a125e6", size = 2241488 },
177 | { url = "https://files.pythonhosted.org/packages/81/22/14738ad0a0bf484b928c9e52004f5e0b81dd8dabbdf23b843717b37a71d1/pydantic_core-2.33.0-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:20d4275f3c4659d92048c70797e5fdc396c6e4446caf517ba5cad2db60cd39d3", size = 2248430 },
178 | { url = "https://files.pythonhosted.org/packages/e8/27/be7571e215ac8d321712f2433c445b03dbcd645366a18f67b334df8912bc/pydantic_core-2.33.0-cp312-cp312-win32.whl", hash = "sha256:918f2013d7eadea1d88d1a35fd4a1e16aaf90343eb446f91cb091ce7f9b431a2", size = 1908353 },
179 | { url = "https://files.pythonhosted.org/packages/be/3a/be78f28732f93128bd0e3944bdd4b3970b389a1fbd44907c97291c8dcdec/pydantic_core-2.33.0-cp312-cp312-win_amd64.whl", hash = "sha256:aec79acc183865bad120b0190afac467c20b15289050648b876b07777e67ea48", size = 1955956 },
180 | { url = "https://files.pythonhosted.org/packages/21/26/b8911ac74faa994694b76ee6a22875cc7a4abea3c381fdba4edc6c6bef84/pydantic_core-2.33.0-cp312-cp312-win_arm64.whl", hash = "sha256:5461934e895968655225dfa8b3be79e7e927e95d4bd6c2d40edd2fa7052e71b6", size = 1903259 },
181 | { url = "https://files.pythonhosted.org/packages/79/20/de2ad03ce8f5b3accf2196ea9b44f31b0cd16ac6e8cfc6b21976ed45ec35/pydantic_core-2.33.0-cp313-cp313-macosx_10_12_x86_64.whl", hash = "sha256:f00e8b59e1fc8f09d05594aa7d2b726f1b277ca6155fc84c0396db1b373c4555", size = 2032214 },
182 | { url = "https://files.pythonhosted.org/packages/f9/af/6817dfda9aac4958d8b516cbb94af507eb171c997ea66453d4d162ae8948/pydantic_core-2.33.0-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:1a73be93ecef45786d7d95b0c5e9b294faf35629d03d5b145b09b81258c7cd6d", size = 1852338 },
183 | { url = "https://files.pythonhosted.org/packages/44/f3/49193a312d9c49314f2b953fb55740b7c530710977cabe7183b8ef111b7f/pydantic_core-2.33.0-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:ff48a55be9da6930254565ff5238d71d5e9cd8c5487a191cb85df3bdb8c77365", size = 1896913 },
184 | { url = "https://files.pythonhosted.org/packages/06/e0/c746677825b2e29a2fa02122a8991c83cdd5b4c5f638f0664d4e35edd4b2/pydantic_core-2.33.0-cp313-cp313-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:26a4ea04195638dcd8c53dadb545d70badba51735b1594810e9768c2c0b4a5da", size = 1986046 },
185 | { url = "https://files.pythonhosted.org/packages/11/ec/44914e7ff78cef16afb5e5273d480c136725acd73d894affdbe2a1bbaad5/pydantic_core-2.33.0-cp313-cp313-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:41d698dcbe12b60661f0632b543dbb119e6ba088103b364ff65e951610cb7ce0", size = 2128097 },
186 | { url = "https://files.pythonhosted.org/packages/fe/f5/c6247d424d01f605ed2e3802f338691cae17137cee6484dce9f1ac0b872b/pydantic_core-2.33.0-cp313-cp313-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:ae62032ef513fe6281ef0009e30838a01057b832dc265da32c10469622613885", size = 2681062 },
187 | { url = "https://files.pythonhosted.org/packages/f0/85/114a2113b126fdd7cf9a9443b1b1fe1b572e5bd259d50ba9d5d3e1927fa9/pydantic_core-2.33.0-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:f225f3a3995dbbc26affc191d0443c6c4aa71b83358fd4c2b7d63e2f6f0336f9", size = 2007487 },
188 | { url = "https://files.pythonhosted.org/packages/e6/40/3c05ed28d225c7a9acd2b34c5c8010c279683a870219b97e9f164a5a8af0/pydantic_core-2.33.0-cp313-cp313-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:5bdd36b362f419c78d09630cbaebc64913f66f62bda6d42d5fbb08da8cc4f181", size = 2121382 },
189 | { url = "https://files.pythonhosted.org/packages/8a/22/e70c086f41eebd323e6baa92cc906c3f38ddce7486007eb2bdb3b11c8f64/pydantic_core-2.33.0-cp313-cp313-musllinux_1_1_aarch64.whl", hash = "sha256:2a0147c0bef783fd9abc9f016d66edb6cac466dc54a17ec5f5ada08ff65caf5d", size = 2072473 },
190 | { url = "https://files.pythonhosted.org/packages/3e/84/d1614dedd8fe5114f6a0e348bcd1535f97d76c038d6102f271433cd1361d/pydantic_core-2.33.0-cp313-cp313-musllinux_1_1_armv7l.whl", hash = "sha256:c860773a0f205926172c6644c394e02c25421dc9a456deff16f64c0e299487d3", size = 2249468 },
191 | { url = "https://files.pythonhosted.org/packages/b0/c0/787061eef44135e00fddb4b56b387a06c303bfd3884a6df9bea5cb730230/pydantic_core-2.33.0-cp313-cp313-musllinux_1_1_x86_64.whl", hash = "sha256:138d31e3f90087f42aa6286fb640f3c7a8eb7bdae829418265e7e7474bd2574b", size = 2254716 },
192 | { url = "https://files.pythonhosted.org/packages/ae/e2/27262eb04963201e89f9c280f1e10c493a7a37bc877e023f31aa72d2f911/pydantic_core-2.33.0-cp313-cp313-win32.whl", hash = "sha256:d20cbb9d3e95114325780f3cfe990f3ecae24de7a2d75f978783878cce2ad585", size = 1916450 },
193 | { url = "https://files.pythonhosted.org/packages/13/8d/25ff96f1e89b19e0b70b3cd607c9ea7ca27e1dcb810a9cd4255ed6abf869/pydantic_core-2.33.0-cp313-cp313-win_amd64.whl", hash = "sha256:ca1103d70306489e3d006b0f79db8ca5dd3c977f6f13b2c59ff745249431a606", size = 1956092 },
194 | { url = "https://files.pythonhosted.org/packages/1b/64/66a2efeff657b04323ffcd7b898cb0354d36dae3a561049e092134a83e9c/pydantic_core-2.33.0-cp313-cp313-win_arm64.whl", hash = "sha256:6291797cad239285275558e0a27872da735b05c75d5237bbade8736f80e4c225", size = 1908367 },
195 | { url = "https://files.pythonhosted.org/packages/52/54/295e38769133363d7ec4a5863a4d579f331728c71a6644ff1024ee529315/pydantic_core-2.33.0-cp313-cp313t-macosx_11_0_arm64.whl", hash = "sha256:7b79af799630af263eca9ec87db519426d8c9b3be35016eddad1832bac812d87", size = 1813331 },
196 | { url = "https://files.pythonhosted.org/packages/4c/9c/0c8ea02db8d682aa1ef48938abae833c1d69bdfa6e5ec13b21734b01ae70/pydantic_core-2.33.0-cp313-cp313t-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:eabf946a4739b5237f4f56d77fa6668263bc466d06a8036c055587c130a46f7b", size = 1986653 },
197 | { url = "https://files.pythonhosted.org/packages/8e/4f/3fb47d6cbc08c7e00f92300e64ba655428c05c56b8ab6723bd290bae6458/pydantic_core-2.33.0-cp313-cp313t-win_amd64.whl", hash = "sha256:8a1d581e8cdbb857b0e0e81df98603376c1a5c34dc5e54039dcc00f043df81e7", size = 1931234 },
198 | ]
199 |
200 | [[package]]
201 | name = "pydantic-settings"
202 | version = "2.8.1"
203 | source = { registry = "https://pypi.org/simple" }
204 | dependencies = [
205 | { name = "pydantic" },
206 | { name = "python-dotenv" },
207 | ]
208 | sdist = { url = "https://files.pythonhosted.org/packages/88/82/c79424d7d8c29b994fb01d277da57b0a9b09cc03c3ff875f9bd8a86b2145/pydantic_settings-2.8.1.tar.gz", hash = "sha256:d5c663dfbe9db9d5e1c646b2e161da12f0d734d422ee56f567d0ea2cee4e8585", size = 83550 }
209 | wheels = [
210 | { url = "https://files.pythonhosted.org/packages/0b/53/a64f03044927dc47aafe029c42a5b7aabc38dfb813475e0e1bf71c4a59d0/pydantic_settings-2.8.1-py3-none-any.whl", hash = "sha256:81942d5ac3d905f7f3ee1a70df5dfb62d5569c12f51a5a647defc1c3d9ee2e9c", size = 30839 },
211 | ]
212 |
213 | [[package]]
214 | name = "python-dotenv"
215 | version = "1.1.0"
216 | source = { registry = "https://pypi.org/simple" }
217 | sdist = { url = "https://files.pythonhosted.org/packages/88/2c/7bb1416c5620485aa793f2de31d3df393d3686aa8a8506d11e10e13c5baf/python_dotenv-1.1.0.tar.gz", hash = "sha256:41f90bc6f5f177fb41f53e87666db362025010eb28f60a01c9143bfa33a2b2d5", size = 39920 }
218 | wheels = [
219 | { url = "https://files.pythonhosted.org/packages/1e/18/98a99ad95133c6a6e2005fe89faedf294a748bd5dc803008059409ac9b1e/python_dotenv-1.1.0-py3-none-any.whl", hash = "sha256:d7c01d9e2293916c18baf562d95698754b0dbbb5e74d457c45d4f6561fb9d55d", size = 20256 },
220 | ]
221 |
222 | [[package]]
223 | name = "sniffio"
224 | version = "1.3.1"
225 | source = { registry = "https://pypi.org/simple" }
226 | sdist = { url = "https://files.pythonhosted.org/packages/a2/87/a6771e1546d97e7e041b6ae58d80074f81b7d5121207425c964ddf5cfdbd/sniffio-1.3.1.tar.gz", hash = "sha256:f4324edc670a0f49750a81b895f35c3adb843cca46f0530f79fc1babb23789dc", size = 20372 }
227 | wheels = [
228 | { url = "https://files.pythonhosted.org/packages/e9/44/75a9c9421471a6c4805dbf2356f7c181a29c1879239abab1ea2cc8f38b40/sniffio-1.3.1-py3-none-any.whl", hash = "sha256:2f6da418d1f1e0fddd844478f41680e794e6051915791a034ff65e5f100525a2", size = 10235 },
229 | ]
230 |
231 | [[package]]
232 | name = "sse-starlette"
233 | version = "2.2.1"
234 | source = { registry = "https://pypi.org/simple" }
235 | dependencies = [
236 | { name = "anyio" },
237 | { name = "starlette" },
238 | ]
239 | sdist = { url = "https://files.pythonhosted.org/packages/71/a4/80d2a11af59fe75b48230846989e93979c892d3a20016b42bb44edb9e398/sse_starlette-2.2.1.tar.gz", hash = "sha256:54470d5f19274aeed6b2d473430b08b4b379ea851d953b11d7f1c4a2c118b419", size = 17376 }
240 | wheels = [
241 | { url = "https://files.pythonhosted.org/packages/d9/e0/5b8bd393f27f4a62461c5cf2479c75a2cc2ffa330976f9f00f5f6e4f50eb/sse_starlette-2.2.1-py3-none-any.whl", hash = "sha256:6410a3d3ba0c89e7675d4c273a301d64649c03a5ef1ca101f10b47f895fd0e99", size = 10120 },
242 | ]
243 |
244 | [[package]]
245 | name = "starlette"
246 | version = "0.46.1"
247 | source = { registry = "https://pypi.org/simple" }
248 | dependencies = [
249 | { name = "anyio" },
250 | ]
251 | sdist = { url = "https://files.pythonhosted.org/packages/04/1b/52b27f2e13ceedc79a908e29eac426a63465a1a01248e5f24aa36a62aeb3/starlette-0.46.1.tar.gz", hash = "sha256:3c88d58ee4bd1bb807c0d1acb381838afc7752f9ddaec81bbe4383611d833230", size = 2580102 }
252 | wheels = [
253 | { url = "https://files.pythonhosted.org/packages/a0/4b/528ccf7a982216885a1ff4908e886b8fb5f19862d1962f56a3fce2435a70/starlette-0.46.1-py3-none-any.whl", hash = "sha256:77c74ed9d2720138b25875133f3a2dae6d854af2ec37dceb56aef370c1d8a227", size = 71995 },
254 | ]
255 |
256 | [[package]]
257 | name = "typing-extensions"
258 | version = "4.13.0"
259 | source = { registry = "https://pypi.org/simple" }
260 | sdist = { url = "https://files.pythonhosted.org/packages/0e/3e/b00a62db91a83fff600de219b6ea9908e6918664899a2d85db222f4fbf19/typing_extensions-4.13.0.tar.gz", hash = "sha256:0a4ac55a5820789d87e297727d229866c9650f6521b64206413c4fbada24d95b", size = 106520 }
261 | wheels = [
262 | { url = "https://files.pythonhosted.org/packages/e0/86/39b65d676ec5732de17b7e3c476e45bb80ec64eb50737a8dce1a4178aba1/typing_extensions-4.13.0-py3-none-any.whl", hash = "sha256:c8dd92cc0d6425a97c18fbb9d1954e5ff92c1ca881a309c45f06ebc0b79058e5", size = 45683 },
263 | ]
264 |
265 | [[package]]
266 | name = "typing-inspection"
267 | version = "0.4.0"
268 | source = { registry = "https://pypi.org/simple" }
269 | dependencies = [
270 | { name = "typing-extensions" },
271 | ]
272 | sdist = { url = "https://files.pythonhosted.org/packages/82/5c/e6082df02e215b846b4b8c0b887a64d7d08ffaba30605502639d44c06b82/typing_inspection-0.4.0.tar.gz", hash = "sha256:9765c87de36671694a67904bf2c96e395be9c6439bb6c87b5142569dcdd65122", size = 76222 }
273 | wheels = [
274 | { url = "https://files.pythonhosted.org/packages/31/08/aa4fdfb71f7de5176385bd9e90852eaf6b5d622735020ad600f2bab54385/typing_inspection-0.4.0-py3-none-any.whl", hash = "sha256:50e72559fcd2a6367a19f7a7e610e6afcb9fac940c650290eed893d61386832f", size = 14125 },
275 | ]
276 |
277 | [[package]]
278 | name = "uvicorn"
279 | version = "0.34.0"
280 | source = { registry = "https://pypi.org/simple" }
281 | dependencies = [
282 | { name = "click" },
283 | { name = "h11" },
284 | ]
285 | sdist = { url = "https://files.pythonhosted.org/packages/4b/4d/938bd85e5bf2edeec766267a5015ad969730bb91e31b44021dfe8b22df6c/uvicorn-0.34.0.tar.gz", hash = "sha256:404051050cd7e905de2c9a7e61790943440b3416f49cb409f965d9dcd0fa73e9", size = 76568 }
286 | wheels = [
287 | { url = "https://files.pythonhosted.org/packages/61/14/33a3a1352cfa71812a3a21e8c9bfb83f60b0011f5e36f2b1399d51928209/uvicorn-0.34.0-py3-none-any.whl", hash = "sha256:023dc038422502fa28a09c7a30bf2b6991512da7dcdb8fd35fe57cfc154126f4", size = 62315 },
288 | ]
289 |
--------------------------------------------------------------------------------