├── .github ├── FUNDING.yml ├── dependabot.yml └── workflows │ └── release.yaml ├── .gitignore ├── .vscode └── launch.json ├── Dockerfile ├── LICENSE ├── README.md ├── assets └── owui_example.png ├── charts └── mcp-bridge │ ├── .helmignore │ ├── Chart.yaml │ ├── templates │ ├── NOTES.txt │ ├── _helpers.tpl │ ├── configmap.yaml │ ├── deployment.yaml │ └── service.yaml │ └── values.yaml ├── compose.yml ├── docker-bake.hcl ├── docs ├── README.md ├── config.md ├── terminology.md └── usecases.md ├── mcp_bridge ├── __init__.py ├── auth.py ├── config │ ├── __init__.py │ ├── env_subst.py │ ├── file.py │ ├── final.py │ ├── http.py │ └── initial.py ├── endpoints.py ├── health │ ├── __init__.py │ ├── manager.py │ ├── router.py │ └── types.py ├── lifespan.py ├── main.py ├── mcpManagement │ ├── __init__.py │ ├── prompts.py │ ├── resources.py │ ├── router.py │ ├── server.py │ └── tools.py ├── mcp_clients │ ├── AbstractClient.py │ ├── DockerClient.py │ ├── McpClientManager.py │ ├── SseClient.py │ ├── StdioClient.py │ └── session.py ├── mcp_server │ ├── __init__.py │ ├── server.py │ ├── sse.py │ └── sse_transport.py ├── models │ ├── __init__.py │ ├── chatCompletionStreamResponse.py │ └── mcpServerStatus.py ├── openai_clients │ ├── __init__.py │ ├── chatCompletion.py │ ├── completion.py │ ├── genericHttpxClient.py │ ├── streamChatCompletion.py │ ├── streamCompletion.py │ └── utils.py ├── openapi_tags.py ├── py.typed ├── routers.py ├── sampling │ ├── modelSelector.py │ └── sampler.py ├── telemetry.py └── tool_mappers │ ├── __init__.py │ ├── mcp2openaiConverters.py │ └── openai2mcpConverters.py ├── mypy.ini ├── pyproject.toml └── uv.lock /.github/FUNDING.yml: -------------------------------------------------------------------------------- 1 | github: SecretiveShell 2 | ko_fi: secretiveshell 3 | -------------------------------------------------------------------------------- /.github/dependabot.yml: -------------------------------------------------------------------------------- 1 | 
version: 2 2 | updates: 3 | - package-ecosystem: "pip" 4 | directory: "/" 5 | schedule: 6 | interval: "weekly" 7 | 8 | - package-ecosystem: "docker" 9 | directory: "/" 10 | schedule: 11 | interval: "weekly" 12 | 13 | - package-ecosystem: "github-actions" 14 | directory: ".github/workflows" 15 | schedule: 16 | interval: "daily" 17 | -------------------------------------------------------------------------------- /.github/workflows/release.yaml: -------------------------------------------------------------------------------- 1 | name: Publish Python 🐍 distribution 📦 to PyPI and TestPyPI 2 | 3 | on: 4 | #push: 5 | # branches: 6 | # - main 7 | workflow_dispatch: 8 | inputs: 9 | version_bump: 10 | type: choice 11 | description: 'Choose the type of version bump (major, minor, patch)' 12 | required: true 13 | default: 'minor' 14 | options: 15 | - major 16 | - minor 17 | - patch 18 | - alpha 19 | - beta 20 | - rc 21 | - rev 22 | - post 23 | 24 | jobs: 25 | 26 | build: 27 | name: Build distribution 📦 28 | runs-on: ubuntu-latest 29 | outputs: 30 | new_tag: ${{ steps.set_var.outputs.new_tag }} 31 | permissions: 32 | contents: write # IMPORTANT: mandatory for making GitHub Releases 33 | id-token: write # IMPORTANT: mandatory for trusted publishing 34 | steps: 35 | - uses: actions/checkout@v4 36 | - name: Set up Python 37 | uses: actions/setup-python@v5 38 | with: 39 | python-version: "3.x" 40 | - name: Install pypa/build 41 | run: >- 42 | python3 -m 43 | pip install 44 | hatchling uv 45 | --user 46 | - name: update version tag in pyproject.toml 47 | id: set_var 48 | run: | 49 | hatchling version ${{ github.event.inputs.version_bump }} 50 | echo "new_tag=$(hatchling version)" >> $GITHUB_OUTPUT 51 | git config --global user.email "gha@github.com" 52 | git config --global user.name "github robot" 53 | git commit -am "update package build version gha" 54 | git push origin master 55 | 56 | - name: Build a binary wheel and a source tarball 57 | run: uv build && uv publish 58 | - 
name: Store the distribution packages 59 | uses: actions/upload-artifact@v4 60 | with: 61 | name: python-package-distributions 62 | path: dist/ 63 | 64 | 65 | github-release: 66 | name: >- 67 | Sign the Python 🐍 distribution 📦 with Sigstore 68 | and upload them to GitHub Release 69 | needs: 70 | - build 71 | runs-on: ubuntu-latest 72 | 73 | permissions: 74 | contents: write # IMPORTANT: mandatory for making GitHub Releases 75 | id-token: write # IMPORTANT: mandatory for sigstore 76 | 77 | steps: 78 | - uses: actions/checkout@v4 79 | - name: Download all the dists 80 | uses: actions/download-artifact@v4.3.0 81 | with: 82 | name: python-package-distributions 83 | path: . 84 | - name: Sign the dists with Sigstore 85 | uses: sigstore/gh-action-sigstore-python@v3.0.0 86 | with: 87 | inputs: >- 88 | ./*.tar.gz 89 | ./*.whl 90 | - name: Create GitHub Release 91 | env: 92 | GITHUB_TOKEN: ${{ github.token }} 93 | run: >- 94 | 95 | gh release create 96 | '${{ needs.build.outputs.new_tag }}' 97 | --repo '${{ github.repository }}' 98 | --generate-notes 99 | - name: Upload artifact signatures to GitHub Release 100 | env: 101 | GITHUB_TOKEN: ${{ github.token }} 102 | # Upload to GitHub Release using the `gh` CLI. 103 | # `dist/` contains the built packages, and the 104 | # sigstore-produced signatures and certificates. 
105 | run: >- 106 | gh release upload 107 | '${{ needs.build.outputs.new_tag }}' ./*.tar.gz ./*.whl ./*.tar.gz.sigstore.json ./*.whl.sigstore.json 108 | --repo '${{ github.repository }}' 109 | 110 | push-store-image: 111 | permissions: write-all 112 | runs-on: ubuntu-latest 113 | needs: 114 | - build 115 | steps: 116 | - name: Login to GitHub Container Registry 117 | uses: docker/login-action@v3 118 | with: 119 | registry: ghcr.io 120 | username: ${{ github.actor }} 121 | password: ${{ secrets.GITHUB_TOKEN }} 122 | - name: Checkout code 123 | uses: actions/checkout@v4 124 | - name: 'Build images' 125 | run: | 126 | RELEASE=${{ needs.build.outputs.new_tag }} \ 127 | docker buildx bake mcp-bridge -f docker-bake.hcl --push 128 | 129 | 130 | -------------------------------------------------------------------------------- /.gitignore: -------------------------------------------------------------------------------- 1 | # Byte-compiled / optimized / DLL files 2 | __pycache__/ 3 | *.py[cod] 4 | *$py.class 5 | 6 | # C extensions 7 | *.so 8 | 9 | # Distribution / packaging 10 | .Python 11 | build/ 12 | develop-eggs/ 13 | dist/ 14 | downloads/ 15 | eggs/ 16 | .eggs/ 17 | lib/ 18 | lib64/ 19 | parts/ 20 | sdist/ 21 | var/ 22 | wheels/ 23 | share/python-wheels/ 24 | *.egg-info/ 25 | .installed.cfg 26 | *.egg 27 | MANIFEST 28 | 29 | # PyInstaller 30 | # Usually these files are written by a python script from a template 31 | # before PyInstaller builds the exe, so as to inject date/other infos into it. 
32 | *.manifest 33 | *.spec 34 | 35 | # Installer logs 36 | pip-log.txt 37 | pip-delete-this-directory.txt 38 | 39 | # Unit test / coverage reports 40 | htmlcov/ 41 | .tox/ 42 | .nox/ 43 | .coverage 44 | .coverage.* 45 | .cache 46 | nosetests.xml 47 | coverage.xml 48 | *.cover 49 | *.py,cover 50 | .hypothesis/ 51 | .pytest_cache/ 52 | cover/ 53 | 54 | # Translations 55 | *.mo 56 | *.pot 57 | 58 | # Django stuff: 59 | *.log 60 | local_settings.py 61 | db.sqlite3 62 | db.sqlite3-journal 63 | 64 | # Flask stuff: 65 | instance/ 66 | .webassets-cache 67 | 68 | # Scrapy stuff: 69 | .scrapy 70 | 71 | # Sphinx documentation 72 | docs/_build/ 73 | 74 | # PyBuilder 75 | .pybuilder/ 76 | target/ 77 | 78 | # Jupyter Notebook 79 | .ipynb_checkpoints 80 | 81 | # IPython 82 | profile_default/ 83 | ipython_config.py 84 | 85 | # pyenv 86 | # For a library or package, you might want to ignore these files since the code is 87 | # intended to run in multiple environments; otherwise, check them in: 88 | # .python-version 89 | 90 | # pipenv 91 | # According to pypa/pipenv#598, it is recommended to include Pipfile.lock in version control. 92 | # However, in case of collaboration, if having platform-specific dependencies or dependencies 93 | # having no cross-platform support, pipenv may install dependencies that don't work, or not 94 | # install all needed dependencies. 95 | #Pipfile.lock 96 | 97 | # poetry 98 | # Similar to Pipfile.lock, it is generally recommended to include poetry.lock in version control. 99 | # This is especially recommended for binary packages to ensure reproducibility, and is more 100 | # commonly ignored for libraries. 101 | # https://python-poetry.org/docs/basic-usage/#commit-your-poetrylock-file-to-version-control 102 | #poetry.lock 103 | 104 | # pdm 105 | # Similar to Pipfile.lock, it is generally recommended to include pdm.lock in version control. 
106 | #pdm.lock 107 | # pdm stores project-wide configurations in .pdm.toml, but it is recommended to not include it 108 | # in version control. 109 | # https://pdm.fming.dev/latest/usage/project/#working-with-version-control 110 | .pdm.toml 111 | .pdm-python 112 | .pdm-build/ 113 | 114 | # PEP 582; used by e.g. github.com/David-OConnor/pyflow and github.com/pdm-project/pdm 115 | __pypackages__/ 116 | 117 | # Celery stuff 118 | celerybeat-schedule 119 | celerybeat.pid 120 | 121 | # SageMath parsed files 122 | *.sage.py 123 | 124 | # Environments 125 | .env 126 | .venv 127 | env/ 128 | venv/ 129 | ENV/ 130 | env.bak/ 131 | venv.bak/ 132 | 133 | # Spyder project settings 134 | .spyderproject 135 | .spyproject 136 | 137 | # Rope project settings 138 | .ropeproject 139 | 140 | # mkdocs documentation 141 | /site 142 | 143 | # mypy 144 | .mypy_cache/ 145 | .dmypy.json 146 | dmypy.json 147 | 148 | # Pyre type checker 149 | .pyre/ 150 | 151 | # pytype static type analyzer 152 | .pytype/ 153 | 154 | # Cython debug symbols 155 | cython_debug/ 156 | 157 | # PyCharm 158 | # JetBrains specific template is maintained in a separate JetBrains.gitignore that can 159 | # be found at https://github.com/github/gitignore/blob/main/Global/JetBrains.gitignore 160 | # and can be added to the global gitignore or merged into this file. For a more nuclear 161 | # option (not recommended) you can uncomment the following to ignore the entire idea folder. 162 | #.idea/ 163 | 164 | 165 | ## custom 166 | commands.md 167 | compose.yml 168 | config.json -------------------------------------------------------------------------------- /.vscode/launch.json: -------------------------------------------------------------------------------- 1 | { 2 | // Use IntelliSense to learn about possible attributes. 3 | // Hover to view descriptions of existing attributes. 
4 | // For more information, visit: https://go.microsoft.com/fwlink/?linkid=830387 5 | "version": "0.2.0", 6 | "configurations": [ 7 | { 8 | "name": "Python Debugger: Module", 9 | "type": "debugpy", 10 | "request": "launch", 11 | "django": true, 12 | "module": "mcp_bridge.main", 13 | } 14 | ] 15 | } -------------------------------------------------------------------------------- /Dockerfile: -------------------------------------------------------------------------------- 1 | FROM python:3.12-bullseye 2 | 3 | # install uv to run stdio clients (uvx) 4 | RUN pip install --no-cache-dir uv 5 | 6 | # install npx to run stdio clients (npx) 7 | RUN apt-get update && apt-get install -y --no-install-recommends curl 8 | RUN curl -fsSL https://deb.nodesource.com/setup_20.x | bash - 9 | RUN apt-get install -y --no-install-recommends nodejs 10 | 11 | COPY pyproject.toml . 12 | 13 | ## FOR GHCR BUILD PIPELINE 14 | COPY mcp_bridge/__init__.py mcp_bridge/__init__.py 15 | COPY README.md README.md 16 | 17 | RUN uv sync 18 | 19 | COPY mcp_bridge mcp_bridge 20 | 21 | EXPOSE 8000 22 | 23 | WORKDIR /mcp_bridge 24 | ENTRYPOINT ["uv", "run", "main.py"] 25 | -------------------------------------------------------------------------------- /LICENSE: -------------------------------------------------------------------------------- 1 | MIT License 2 | 3 | Copyright (c) 2024 TerminalMan 4 | 5 | Permission is hereby granted, free of charge, to any person obtaining a copy 6 | of this software and associated documentation files (the "Software"), to deal 7 | in the Software without restriction, including without limitation the rights 8 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell 9 | copies of the Software, and to permit persons to whom the Software is 10 | furnished to do so, subject to the following conditions: 11 | 12 | The above copyright notice and this permission notice shall be included in all 13 | copies or substantial portions of the Software. 
14 | 15 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 16 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 17 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE 18 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 19 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, 20 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE 21 | SOFTWARE. 22 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | # MCP-Bridge 2 | 3 |

4 | Discord 5 | Static Badge 6 | Static Badge 7 |

8 | 9 | 10 | MCP-Bridge acts as a bridge between the OpenAI API and Model Context Protocol (MCP) tools, allowing developers to leverage MCP tools through the OpenAI API interface. 11 | 12 | ## Overview 13 | MCP-Bridge is designed to facilitate the integration of MCP tools with the OpenAI API. It provides a set of endpoints that can be used to interact with MCP tools in a way that is compatible with the OpenAI API. This allows you to use any client with any MCP tool without explicit support for MCP. For example, see this example of using Open Web UI with the official MCP fetch tool. 14 | 15 | ![open web ui example](/assets/owui_example.png) 16 | 17 | ## Current Features 18 | 19 | working features: 20 | 21 | - non streaming chat completions with MCP 22 | - streaming chat completions with MCP 23 | 24 | - non streaming completions without MCP 25 | 26 | - MCP tools 27 | - MCP sampling 28 | 29 | - SSE Bridge for external clients 30 | 31 | planned features: 32 | 33 | - streaming completions are not implemented yet 34 | 35 | - MCP resources are planned to be supported 36 | 37 | ## Installation 38 | 39 | The recommended way to install MCP-Bridge is to use Docker. See the example compose.yml file for an example of how to set up docker. 40 | 41 | Note that this requires an inference engine with tool call support. I have tested this with vLLM with success, though ollama should also be compatible. 42 | 43 | ### Docker installation 44 | 45 | 1. **Clone the repository** 46 | 47 | 2. **Edit the compose.yml file** 48 | 49 | You will need to add a reference to the config.json file in the compose.yml file. 
Pick any of 50 | - add the config.json file to the same directory as the compose.yml file and use a volume mount (you will need to add the volume manually) 51 | - add a http url to the environment variables to download the config.json file from a url 52 | - add the config json directly as an environment variable 53 | 54 | see below for an example of each option: 55 | ```bash 56 | environment: 57 | - MCP_BRIDGE__CONFIG__FILE=config.json # mount the config file for this to work 58 | - MCP_BRIDGE__CONFIG__HTTP_URL=http://10.88.100.170:8888/config.json 59 | - MCP_BRIDGE__CONFIG__JSON={"inference_server":{"base_url":"http://example.com/v1","api_key":"None"},"mcp_servers":{"fetch":{"command":"uvx","args":["mcp-server-fetch"]}}} 60 | ``` 61 | The mount point for using the config file would look like: 62 | ```yaml 63 | volumes: 64 | - ./config.json:/mcp_bridge/config.json 65 | ``` 66 | 67 | 3. **run the service** 68 | ``` 69 | docker-compose up --build -d 70 | ``` 71 | 72 | ### Manual installation (no docker) 73 | 74 | If you want to run the application without docker, you will need to install the requirements and run the application manually. 75 | 76 | 1. **Clone the repository** 77 | 78 | 2. **Set up a dependencies:** 79 | ```bash 80 | uv sync 81 | ``` 82 | 83 | 3. **Create a config.json file in the root directory** 84 | 85 | Here is an example config.json file: 86 | ```json 87 | { 88 | "inference_server": { 89 | "base_url": "http://example.com/v1", 90 | "api_key": "None" 91 | }, 92 | "mcp_servers": { 93 | "fetch": { 94 | "command": "uvx", 95 | "args": ["mcp-server-fetch"] 96 | } 97 | } 98 | } 99 | ``` 100 | 101 | 4. **Run the application:** 102 | ```bash 103 | uv run mcp_bridge/main.py 104 | ``` 105 | 106 | ## Usage 107 | Once the application is running, you can interact with it using the OpenAI API. 108 | 109 | View the documentation at [http://yourserver:8000/docs](http://localhost:8000/docs). 
There is an endpoint to list all the MCP tools available on the server, which you can use to test the application configuration. 110 | 111 | ## Rest API endpoints 112 | 113 | MCP-Bridge exposes many rest api endpoints for interacting with all of the native MCP primitives. This lets you outsource the complexity of dealing with MCP servers to MCP-Bridge without compromising on functionality. See the openapi docs for examples of how to use this functionality. 114 | 115 | ## SSE Bridge 116 | MCP-Bridge also provides an SSE bridge for external clients. This lets external chat apps with explicit MCP support use MCP-Bridge as a MCP server. Point your client at the SSE endpoint (http://yourserver:8000/mcp-server/sse) and you should be able to see all the MCP tools available on the server. 117 | 118 | This also makes it easy to test if your configuration is working correctly. You can use [wong2/mcp-cli](https://github.com/wong2/mcp-cli?tab=readme-ov-file#connect-to-a-running-server-over-sse) to test your configuration. `npx @wong2/mcp-cli --sse http://localhost:8000/mcp-server/sse` 119 | 120 | If you want to use the tools inside of [claude desktop](https://claude.ai/download) or other `STDIO` only MCP clients, you can do this with a tool such as [lightconetech/mcp-gateway](https://github.com/lightconetech/mcp-gateway) 121 | 122 | ## Configuration 123 | 124 | To add new MCP servers, edit the config.json file. 125 | 126 | ### API Key Authentication 127 | 128 | MCP-Bridge supports API key authentication to secure your server. 
To enable this feature, add something like this to your `config.json` file: 129 | 130 | ```json 131 | { 132 | "security": { 133 | "auth": { 134 | "enabled": true, 135 | "api_keys": [ 136 | { 137 | "key": "your-secure-api-key-here" 138 | } 139 | ] 140 | } 141 | } 142 | } 143 | ``` 144 | 145 | When making requests to the MCP-Bridge server, include the API key in the Authorization header as a Bearer token: 146 | 147 | ``` 148 | Authorization: Bearer your-secure-api-key-here 149 | ``` 150 | 151 | If the `api_key` field is empty or not present in the configuration, authentication will be skipped, allowing backward compatibility. 152 | 153 | ### Full Configuration Example 154 | 155 | an example config.json file with most of the options explicitly set: 156 | 157 | ```json 158 | { 159 | "inference_server": { 160 | "base_url": "http://localhost:8000/v1", 161 | "api_key": "None" 162 | }, 163 | "sampling": { 164 | "timeout": 10, 165 | "models": [ 166 | { 167 | "model": "gpt-4o", 168 | "intelligence": 0.8, 169 | "cost": 0.9, 170 | "speed": 0.3 171 | }, 172 | { 173 | "model": "gpt-4o-mini", 174 | "intelligence": 0.4, 175 | "cost": 0.1, 176 | "speed": 0.7 177 | } 178 | ] 179 | }, 180 | "mcp_servers": { 181 | "fetch": { 182 | "command": "uvx", 183 | "args": [ 184 | "mcp-server-fetch" 185 | ] 186 | } 187 | }, 188 | "security": { 189 | "auth": { 190 | "enabled": true, 191 | "api_keys": [ 192 | { 193 | "key": "your-secure-api-key-here" 194 | } 195 | ] 196 | } 197 | }, 198 | "network": { 199 | "host": "0.0.0.0", 200 | "port": 9090 201 | }, 202 | "logging": { 203 | "log_level": "DEBUG" 204 | } 205 | } 206 | ``` 207 | 208 | | Section | Description | 209 | | ---------------- | ---------------------------------- | 210 | | inference_server | The inference server configuration | 211 | | mcp_servers | The MCP servers configuration | 212 | | network | uvicorn network configuration | 213 | | logging | The logging configuration | 214 | | api_key | API key for server authentication | 215 | 216 
| ## Support 217 | 218 | If you encounter any issues please open an issue or join the [discord](https://discord.gg/4NVQHqNxSZ). 219 | 220 | There is also documentation available [here](/docs/README.md). 221 | 222 | ## How does it work 223 | 224 | The application sits between the OpenAI API and the inference engine. An incoming request is modified to include tool definitions for all MCP tools available on the MCP servers. The request is then forwarded to the inference engine, which uses the tool definitions to create tool calls. MCP-Bridge then manages the calls to the tools. The request is then modified to include the tool call results, and is returned to the inference engine again so the LLM can create a response. Finally, the response is returned to the OpenAI API. 225 | 226 | ```mermaid 227 | sequenceDiagram 228 | participant OpenWebUI as Open Web UI 229 | participant MCPProxy as MCP Proxy 230 | participant MCPserver as MCP Server 231 | participant InferenceEngine as Inference Engine 232 | 233 | OpenWebUI ->> MCPProxy: Request 234 | MCPProxy ->> MCPserver: list tools 235 | MCPserver ->> MCPProxy: list of tools 236 | MCPProxy ->> InferenceEngine: Forward Request 237 | InferenceEngine ->> MCPProxy: Response 238 | MCPProxy ->> MCPserver: call tool 239 | MCPserver ->> MCPProxy: tool response 240 | MCPProxy ->> InferenceEngine: llm uses tool response 241 | InferenceEngine ->> MCPProxy: Response 242 | MCPProxy ->> OpenWebUI: Return Response 243 | ``` 244 | 245 | ## Contribution Guidelines 246 | Contributions to MCP-Bridge are welcome! To contribute, please follow these steps: 247 | 1. Fork the repository. 248 | 2. Create a new branch for your feature or bug fix. 249 | 3. Make your changes and commit them. 250 | 4. Push your changes to your fork. 251 | 5. Create a pull request to the main repository. 252 | 253 | ## License 254 | MCP-Bridge is licensed under the MIT License. See the [LICENSE](LICENSE) file for more information. 
255 | -------------------------------------------------------------------------------- /assets/owui_example.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/SecretiveShell/MCP-Bridge/a0a9fc02f8af3a539c88ccd48c26d1241e9684dc/assets/owui_example.png -------------------------------------------------------------------------------- /charts/mcp-bridge/.helmignore: -------------------------------------------------------------------------------- 1 | # Patterns to ignore when building packages. 2 | # This supports shell glob matching, relative path matching, and 3 | # negation (prefixed with !). Only one pattern per line. 4 | .DS_Store 5 | # Common VCS dirs 6 | .git/ 7 | .gitignore 8 | .bzr/ 9 | .bzrignore 10 | .hg/ 11 | .hgignore 12 | .svn/ 13 | # Common backup files 14 | *.swp 15 | *.bak 16 | *.tmp 17 | *.orig 18 | *~ 19 | # Various IDEs 20 | .project 21 | .idea/ 22 | *.tmproj 23 | .vscode/ 24 | -------------------------------------------------------------------------------- /charts/mcp-bridge/Chart.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v2 2 | name: mcp-bridge 3 | description: basic helm chart for MCP Bridge Service 4 | type: application 5 | version: 0.1.0 6 | appVersion: "1.0.0" 7 | -------------------------------------------------------------------------------- /charts/mcp-bridge/templates/NOTES.txt: -------------------------------------------------------------------------------- 1 | 1. Get the application URL by running these commands: 2 | {{- if contains "NodePort" .Values.service.type }} 3 | export NODE_PORT=$(kubectl get --namespace {{ .Release.Namespace }} -o jsonpath="{.spec.ports[0].nodePort}" services {{ include "mcp-bridge.fullname" . 
}}) 4 | export NODE_IP=$(kubectl get nodes --namespace {{ .Release.Namespace }} -o jsonpath="{.items[0].status.addresses[0].address}") 5 | echo http://$NODE_IP:$NODE_PORT 6 | {{- else if contains "LoadBalancer" .Values.service.type }} 7 | NOTE: It may take a few minutes for the LoadBalancer IP to be available. 8 | You can watch the status of by running 'kubectl get --namespace {{ .Release.Namespace }} svc -w {{ include "mcp-bridge.fullname" . }}' 9 | export SERVICE_IP=$(kubectl get svc --namespace {{ .Release.Namespace }} {{ include "mcp-bridge.fullname" . }} --template "{{"{{ range (index .status.loadBalancer.ingress 0) }}{{.}}{{ end }}"}}") 10 | echo http://$SERVICE_IP:{{ .Values.service.port }} 11 | {{- else if contains "ClusterIP" .Values.service.type }} 12 | export POD_NAME=$(kubectl get pods --namespace {{ .Release.Namespace }} -l "app.kubernetes.io/name={{ include "mcp-bridge.name" . }},app.kubernetes.io/instance={{ .Release.Name }}" -o jsonpath="{.items[0].metadata.name}") 13 | export CONTAINER_PORT=$(kubectl get pod --namespace {{ .Release.Namespace }} $POD_NAME -o jsonpath="{.spec.containers[0].ports[0].containerPort}") 14 | echo "Visit http://127.0.0.1:8080 to use your application" 15 | kubectl --namespace {{ .Release.Namespace }} port-forward $POD_NAME 8080:$CONTAINER_PORT 16 | {{- end }} 17 | -------------------------------------------------------------------------------- /charts/mcp-bridge/templates/_helpers.tpl: -------------------------------------------------------------------------------- 1 | {{/* 2 | Expand the name of the chart. 3 | */}} 4 | {{- define "mcp-bridge.name" -}} 5 | {{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix "-" }} 6 | {{- end }} 7 | 8 | {{/* 9 | Create a default fully qualified app name. 10 | We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec). 11 | If release name contains chart name it will be used as a full name. 
12 | */}} 13 | {{- define "mcp-bridge.fullname" -}} 14 | {{- if .Values.fullnameOverride }} 15 | {{- .Values.fullnameOverride | trunc 63 | trimSuffix "-" }} 16 | {{- else }} 17 | {{- $name := default .Chart.Name .Values.nameOverride }} 18 | {{- if contains $name .Release.Name }} 19 | {{- .Release.Name | trunc 63 | trimSuffix "-" }} 20 | {{- else }} 21 | {{- printf "%s-%s" .Release.Name $name | trunc 63 | trimSuffix "-" }} 22 | {{- end }} 23 | {{- end }} 24 | {{- end }} 25 | 26 | {{/* 27 | Create chart name and version as used by the chart label. 28 | */}} 29 | {{- define "mcp-bridge.chart" -}} 30 | {{- printf "%s-%s" .Chart.Name .Chart.Version | replace "+" "_" | trunc 63 | trimSuffix "-" }} 31 | {{- end }} 32 | 33 | {{/* 34 | Common labels 35 | */}} 36 | {{- define "mcp-bridge.labels" -}} 37 | helm.sh/chart: {{ include "mcp-bridge.chart" . }} 38 | {{ include "mcp-bridge.selectorLabels" . }} 39 | {{- if .Chart.AppVersion }} 40 | app.kubernetes.io/version: {{ .Chart.AppVersion | quote }} 41 | {{- end }} 42 | app.kubernetes.io/managed-by: {{ .Release.Service }} 43 | {{- end }} 44 | 45 | {{/* 46 | Selector labels 47 | */}} 48 | {{- define "mcp-bridge.selectorLabels" -}} 49 | app.kubernetes.io/name: {{ include "mcp-bridge.name" . }} 50 | app.kubernetes.io/instance: {{ .Release.Name }} 51 | {{- end }} 52 | 53 | {{/* 54 | Create the name of the service account to use 55 | */}} 56 | {{- define "mcp-bridge.serviceAccountName" -}} 57 | {{- if .Values.serviceAccount.create }} 58 | {{- default (include "mcp-bridge.fullname" .) .Values.serviceAccount.name }} 59 | {{- else }} 60 | {{- default "default" .Values.serviceAccount.name }} 61 | {{- end }} 62 | {{- end }} 63 | -------------------------------------------------------------------------------- /charts/mcp-bridge/templates/configmap.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: ConfigMap 3 | metadata: 4 | name: {{ include "mcp-bridge.fullname" . 
}}-config 5 | data: 6 | config.json: | 7 | { 8 | "inference_server": { 9 | "base_url": {{ .Values.config.inferenceServer.baseUrl | quote }}, 10 | "api_key": {{ .Values.config.inferenceServer.apiKey | quote }} 11 | }, 12 | "mcp_servers": {{ .Values.config.mcpServers | toJson }}, 13 | "network": { 14 | "host": {{ .Values.config.network.host | default "0.0.0.0" | quote }}, 15 | "port": {{ .Values.config.network.port | default 9090 }} 16 | }, 17 | "logging": { 18 | "log_level": {{ .Values.config.logging.logLevel | default "DEBUG" | quote }} 19 | } 20 | } -------------------------------------------------------------------------------- /charts/mcp-bridge/templates/deployment.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: apps/v1 2 | kind: Deployment 3 | metadata: 4 | name: {{ include "mcp-bridge.fullname" . }} 5 | labels: 6 | {{- include "mcp-bridge.labels" . | nindent 4 }} 7 | spec: 8 | replicas: {{ .Values.replicaCount }} 9 | selector: 10 | matchLabels: 11 | {{- include "mcp-bridge.selectorLabels" . | nindent 6 }} 12 | template: 13 | metadata: 14 | labels: 15 | {{- include "mcp-bridge.selectorLabels" . | nindent 8 }} 16 | {{- with .Values.podAnnotations }} 17 | annotations: 18 | {{- toYaml . | nindent 8 }} 19 | {{- end }} 20 | spec: 21 | containers: 22 | - name: {{ .Chart.Name }} 23 | image: "{{ .Values.image.repository }}:{{ .Values.image.tag }}" 24 | imagePullPolicy: {{ .Values.image.pullPolicy }} 25 | ports: 26 | - name: http 27 | containerPort: 9090 28 | protocol: TCP 29 | volumeMounts: 30 | - name: config-volume 31 | mountPath: /mcp_bridge/config.json 32 | subPath: config.json 33 | resources: 34 | {{- toYaml .Values.resources | nindent 12 }} 35 | volumes: 36 | - name: config-volume 37 | configMap: 38 | name: {{ include "mcp-bridge.fullname" . 
}}-config -------------------------------------------------------------------------------- /charts/mcp-bridge/templates/service.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: Service 3 | metadata: 4 | name: {{ include "mcp-bridge.fullname" . }} 5 | labels: 6 | {{- include "mcp-bridge.labels" . | nindent 4 }} 7 | spec: 8 | type: {{ .Values.service.type }} 9 | ports: 10 | - port: {{ .Values.service.port }} 11 | targetPort: http 12 | protocol: TCP 13 | name: http 14 | selector: 15 | {{- include "mcp-bridge.selectorLabels" . | nindent 4 }} 16 | -------------------------------------------------------------------------------- /charts/mcp-bridge/values.yaml: -------------------------------------------------------------------------------- 1 | replicaCount: 1 2 | 3 | image: 4 | repository: ghcr.io/secretiveshell/mcp-bridge/mcp-bridge 5 | pullPolicy: Always 6 | tag: 0.1.0 7 | 8 | service: 9 | type: ClusterIP 10 | port: 9090 11 | 12 | config: 13 | inferenceServer: 14 | baseUrl: "" 15 | apiKey: "" 16 | mcpServers: {} 17 | network: 18 | host: "0.0.0.0" 19 | port: 9090 20 | logging: 21 | logLevel: "DEBUG" 22 | 23 | # Example of how to add MCP servers 24 | # mcpServers: 25 | # mcp-k8s: 26 | # command: "mcp-k8s" 27 | # args: [] 28 | # fetch: 29 | # command: "uvx" 30 | # args: ["mcp-server-fetch"] 31 | # custom-server: 32 | # command: "custom-command" 33 | # args: ["arg1", "arg2"] 34 | 35 | resources: 36 | limits: 37 | cpu: 500m 38 | memory: 512Mi 39 | requests: 40 | cpu: 200m 41 | memory: 256Mi 42 | 43 | podAnnotations: {} 44 | 45 | podLabels: {} 46 | -------------------------------------------------------------------------------- /compose.yml: -------------------------------------------------------------------------------- 1 | services: 2 | mcp-bridge: 3 | build: 4 | context: . 
5 | develop: 6 | watch: 7 | - path: mcp_bridge 8 | action: rebuild 9 | container_name: mcp-bridge 10 | ports: 11 | - "8000:8000" 12 | environment: 13 | - MCP_BRIDGE__CONFIG__FILE=config.json # mount the config file for this to work 14 | # - MCP_BRIDGE__CONFIG__HTTP_URL=http://10.88.100.170:8888/config.json 15 | # - MCP_BRIDGE__CONFIG__JSON= 16 | # volumes: 17 | # - ./config.json:/mcp_bridge/config.json 18 | restart: unless-stopped 19 | 20 | jaeger: 21 | image: jaegertracing/jaeger:latest 22 | ports: 23 | - "16686:16686" # Web UI 24 | # - "4317:4317" # OTLP gRPC 25 | - "4318:4318" # OTLP HTTP 26 | # - "5778:5778" # Config server 27 | # - "9411:9411" # Zipkin compatible 28 | restart: unless-stopped -------------------------------------------------------------------------------- /docker-bake.hcl: -------------------------------------------------------------------------------- 1 | 2 | variable "RELEASE" { 3 | default = "v1.0.0" 4 | } 5 | 6 | variable "REGISTRY" { 7 | default = "ghcr.io/secretiveshell/mcp-bridge" 8 | } 9 | 10 | group "default" { 11 | targets = ["mcp-bridge"] 12 | } 13 | 14 | 15 | target "mcp-bridge" { 16 | dockerfile = "Dockerfile" 17 | tags = ["${REGISTRY}/${target.mcp-bridge.name}:${RELEASE}"] 18 | context = "." 
19 | labels = { 20 | "org.opencontainers.image.source" = "https://github.com/SecretiveShell/MCP-Bridge" 21 | } 22 | } 23 | -------------------------------------------------------------------------------- /docs/README.md: -------------------------------------------------------------------------------- 1 | # MCP-Bridge Documentation 2 | 3 | ## Index 4 | 5 | - [example usecases](/docs/usecases.md) 6 | - [config guide](/docs/config.md) 7 | - [terminology](/docs/terminology.md) 8 | -------------------------------------------------------------------------------- /docs/config.md: -------------------------------------------------------------------------------- 1 | # Config 2 | 3 | The config file is a json file that contains all the information needed to run the application. 4 | 5 | ## Writing a config file 6 | 7 | | Section | Description | 8 | | ---------------- | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------ | 9 | | inference_server | The inference server configuration. This should point to openai/vllm/ollama etc. Any OpenAI compatible base url should work. | 10 | | sampling | Sampling model preferences. You must have at least one sampling model configured, and you can configure the same model with different intelligence, cost, and speed many times | 11 | | mcp_servers | MCP server connection info/configuration. This is mostly the same as claude desktop but with some extra options. | 12 | | network | uvicorn network configuration. Only used outside of docker environment | 13 | | logging | The logging configuration. 
    "docker-example-server": {
      "image": "example-server:latest"
    }
Pick any of 69 | 70 | - add the `config.json` file to the same directory as the compose.yml file and use a volume mount (you will need to add the volume manually) 71 | ```bash 72 | environment: 73 | - MCP_BRIDGE__CONFIG__FILE=config.json # mount the config file for this to work 74 | ``` 75 | 76 | The mount point for using the config file would look like: 77 | ```yaml 78 | volumes: 79 | - ./config.json:/mcp_bridge/config.json 80 | ``` 81 | 82 | - add a http url to the environment variables to download the config.json file from a url 83 | ```bash 84 | environment: 85 | - MCP_BRIDGE__CONFIG__HTTP_URL=http://10.88.100.170:8888/config.json 86 | ``` 87 | 88 | - add the config json directly as an environment variable 89 | ```bash 90 | environment: 91 | - MCP_BRIDGE__CONFIG__JSON={"inference_server":{"base_url":"http://example.com/v1","api_key":"None"},"mcp_servers":{"fetch":{"command":"uvx","args":["mcp-server-fetch"]}}} 92 | ``` 93 | 94 | ### Non Docker 95 | 96 | For non docker, the system will look for a `config.json` file in the current directory. This means that there is no special configuration needed. You can still use the advanced loading mechanisms if you want to, but you will need to modify the environment variables for your system as in the docker section. 97 | 98 | -------------------------------------------------------------------------------- /docs/terminology.md: -------------------------------------------------------------------------------- 1 | # Terminology 2 | 3 | This document is a continual work in progress. Please feel free to contribute. 4 | 5 | | Term | Description | 6 | | --- | --- | 7 | | **server** | A MCP server. e.g. MCP-wolfram-alpha, MCP-searxng, MCP-timeserver | 8 | | **bridge** | The MCP-Bridge application | 9 | | **client** | A generic MCP client. e.g. claude desktop. 
You can use docker in docker with MCP-Bridge to spawn many isolated servers and then connect to them all over SSE. This allows for larger scale homelab style deployments.
# TODO: add aiocache wrapper?
async def simple_key_checker(api_key: str) -> bool:
    """
    Check if the provided API key is valid.

    Returns True when the key matches one of the configured
    ``config.security.auth.api_keys`` entries. On any failure it raises
    an HTTP 401 rather than returning False, so a return value from this
    coroutine always means authentication succeeded.
    """

    # Reject an empty key up front, reusing the same 401 message that the
    # missing-credentials path in get_api_key produces
    if not api_key:
        raise HTTPException(
            status_code=status.HTTP_401_UNAUTHORIZED,
            detail="API key is required in Authorization header (Bearer token)",
        )

    # Compare against every configured key using a constant-time digest
    # comparison so timing differences do not leak key prefixes
    for key in config.security.auth.api_keys:
        if secrets.compare_digest(key.key, api_key):
            return True

    # No configured key matched
    raise HTTPException(
        status_code=status.HTTP_401_UNAUTHORIZED,
        detail="Invalid API key",
    )
cfg in configs: 40 | always_merger.merge(result, cfg) 41 | 42 | result = substitute_env_vars(result) 43 | 44 | # build the config 45 | try: 46 | config = Settings(**result) 47 | except ValidationError as e: 48 | logger.error("unable to load a valid configuration") 49 | for error in e.errors(): 50 | logger.error(f"{error['loc'][0]}: {error['msg']}") 51 | exit(1) 52 | 53 | if config.logging.log_level != "DEBUG": 54 | logger.remove() 55 | logger.add( 56 | sys.stderr, 57 | format="{time} {level} {message}", 58 | level=config.logging.log_level, 59 | colorize=True, 60 | ) 61 | -------------------------------------------------------------------------------- /mcp_bridge/config/env_subst.py: -------------------------------------------------------------------------------- 1 | from string import Template 2 | from typing import Any 3 | import os 4 | 5 | from loguru import logger 6 | 7 | 8 | def substitute_env_vars(config: Any, env: dict[str, str] | None = None) -> Any: 9 | """Substitute environment variables in a configuration object.""" 10 | 11 | # copy the environment if it is not provided 12 | if env is None: 13 | env = os.environ.copy() 14 | 15 | assert env is not None, "env is None" # the guard should have caught this 16 | 17 | # handle strings 18 | if isinstance(config, str): 19 | return Template(config).safe_substitute(env) 20 | 21 | # handle other types 22 | elif isinstance(config, dict): 23 | return { 24 | k: substitute_env_vars(v, env) for k, v in config.items() if v is not None 25 | } 26 | 27 | # handle lists 28 | elif isinstance(config, list): 29 | return [substitute_env_vars(v, env) for v in config] 30 | 31 | return config 32 | -------------------------------------------------------------------------------- /mcp_bridge/config/file.py: -------------------------------------------------------------------------------- 1 | import json 2 | from typing import Any 3 | from loguru import logger 4 | 5 | 6 | def load_config(file: str) -> dict[str, Any]: 7 | try: 8 | with 
open(file, "r") as f: 9 | return json.load(f) 10 | 11 | except FileNotFoundError: 12 | logger.warning(f'the "{file}" file was not found') 13 | 14 | except Exception: 15 | logger.error(f'there was an error reading the "{file}" file') 16 | 17 | return {} 18 | -------------------------------------------------------------------------------- /mcp_bridge/config/final.py: -------------------------------------------------------------------------------- 1 | from typing import Annotated, Literal, Union 2 | from pydantic_settings import BaseSettings, SettingsConfigDict 3 | from pydantic import BaseModel, Field 4 | 5 | from mcp.client.stdio import StdioServerParameters 6 | from mcpx.client.transports.docker import DockerMCPServer 7 | 8 | 9 | class InferenceServer(BaseModel): 10 | base_url: str = Field( 11 | default="http://localhost:11434/v1", 12 | description="Base URL of the inference server", 13 | ) 14 | api_key: str = Field( 15 | default="unauthenticated", description="API key for the inference server" 16 | ) 17 | 18 | 19 | class Logging(BaseModel): 20 | log_level: Literal["INFO", "DEBUG"] = Field("INFO", description="default log level") 21 | log_server_pings: bool = Field(False, description="log server pings") 22 | 23 | 24 | class SamplingModel(BaseModel): 25 | model: Annotated[str, Field(description="Name of the sampling model")] 26 | 27 | intelligence: Annotated[ 28 | float, Field(description="Intelligence of the sampling model") 29 | ] = 0.5 30 | cost: Annotated[float, Field(description="Cost of the sampling model")] = 0.5 31 | speed: Annotated[float, Field(description="Speed of the sampling model")] = 0.5 32 | 33 | 34 | class Sampling(BaseModel): 35 | timeout: Annotated[int, Field(description="Timeout for sampling requests")] = 10 36 | models: Annotated[ 37 | list[SamplingModel], Field(description="List of sampling models") 38 | ] = [] 39 | 40 | 41 | class SSEMCPServer(BaseModel): 42 | # TODO: expand this once I find a good definition for this 43 | url: str = 
Field(description="URL of the MCP server") 44 | 45 | 46 | MCPServer = Annotated[ 47 | Union[StdioServerParameters, SSEMCPServer, DockerMCPServer], 48 | Field(description="MCP server configuration"), 49 | ] 50 | 51 | 52 | class Network(BaseModel): 53 | host: str = Field("0.0.0.0", description="Host of the network") 54 | port: int = Field(8000, description="Port of the network") 55 | 56 | 57 | class Cors(BaseModel): 58 | enabled: bool = Field(True, description="Enable CORS") 59 | allow_origins: list[str] = Field(["*"], description="Allowed origins") 60 | allow_credentials: bool = Field(True, description="Allow credentials") 61 | allow_methods: list[str] = Field(["*"], description="Allowed methods") 62 | allow_headers: list[str] = Field(["*"], description="Allowed headers") 63 | 64 | 65 | class ApiKey(BaseModel): 66 | key: str = Field(..., description="API key") 67 | permissions: Literal["all"] = Field( 68 | "all", description="API key permissions" 69 | ) # TODO: Add support for other permissions 70 | 71 | 72 | class Auth(BaseModel): 73 | enabled: bool = Field(False, description="Enable authentication") 74 | api_keys: list[ApiKey] = Field([], description="API keys") 75 | 76 | 77 | class Security(BaseModel): 78 | CORS: Cors = Field( 79 | default_factory=lambda: Cors.model_construct(), description="CORS configuration" 80 | ) 81 | auth: Auth = Field( 82 | default_factory=lambda: Auth.model_construct(), 83 | description="Authentication configuration", 84 | ) 85 | 86 | 87 | class Telemetry(BaseModel): 88 | """Telemetry configuration 89 | 90 | open-telemetry is entirely local to your own infrastructure and does not send any data to any external service unless you configure it to do so 91 | 92 | defaults to false since we cannot assume you are actually running an open telemetry collector on your machine. 
def load_config(url: str) -> dict[str, Any]:
    """Fetch a JSON configuration document over HTTP.

    Returns the parsed document on success, or an empty dict on any
    connection, HTTP-status, or JSON-decoding failure. Errors are logged
    rather than raised so config loading can fall back to other sources.
    """
    try:
        resp = httpx.get(str(url), timeout=10.0)
        # Reject 4xx/5xx responses instead of silently parsing an error
        # body (e.g. a JSON error page) as if it were the configuration
        resp.raise_for_status()
        return resp.json()

    # ConnectError is a subclass of HTTPError, so it must be caught first
    # to keep the more specific log message
    except httpx.ConnectError:
        logger.error(f"could not connect to {httpx.URL(url).host}")

    except httpx.HTTPError as e:
        # Covers timeouts, protocol errors and non-2xx status responses,
        # which previously escaped this function and crashed startup
        logger.error(f"failed to fetch config from {httpx.URL(url)}: {e}")

    except json.JSONDecodeError:
        logger.error(f"failed to parse json from {httpx.URL(url)}")

    return {}
class HealthManager:
    """Manages the health of the server.

    Unhealthy events are kept in a bounded deque (max 100 entries) so a
    long-running server cannot leak memory by accumulating history.
    """

    def __init__(self) -> None:
        # Instance state instead of a class attribute: the original shared
        # one deque across every HealthManager instance, so any second
        # instance (e.g. in tests) would see another instance's events.
        # The attribute name is kept for backward compatibility.
        self.UnhealthyEvents: deque = deque(maxlen=100)  # of UnhealthyEvent

    def add_unhealthy_event(self, event: "UnhealthyEvent") -> None:
        """Record an unhealthy event; the oldest entry is evicted past 100."""
        self.UnhealthyEvents.append(event)

    def get_unhealthy_events(self) -> "list[UnhealthyEvent]":
        """Return a snapshot copy of the recorded events."""
        return list(self.UnhealthyEvents)

    def is_healthy(self) -> bool:
        """Healthy unless any recorded event has severity "error"."""
        return not any(event.severity == "error" for event in self.UnhealthyEvents)


# Module-level singleton used by the rest of the application
manager: HealthManager = HealthManager()
@asynccontextmanager
async def lifespan(app):
    """Lifespan context manager for fastapi.

    Initializes the MCP client manager before the app starts serving;
    everything after ``yield`` runs at application shutdown.
    """

    # startup
    logger.debug("Entered fastapi lifespan")
    await ClientManager.initialize()
    logger.debug("Initialized MCP Client Manager")

    logger.debug("Yielding lifespan")
    yield
    # fixed log-message typo: "Returned form" -> "Returned from"
    logger.debug("Returned from lifespan yield")

    # shutdown
    logger.debug("Exiting fastapi lifespan")
16 | """ 17 | app = FastAPI( 18 | title="MCP Bridge", 19 | description="A middleware application to add MCP support to OpenAI-compatible APIs", 20 | version=version, 21 | lifespan=lifespan, 22 | openapi_tags=tags_metadata, 23 | ) 24 | 25 | # setup tracing 26 | setup_tracing(app) 27 | 28 | # show auth data 29 | if config.security.auth.enabled: 30 | logger.info("Authentication is enabled") 31 | else: 32 | logger.info("Authentication is disabled") 33 | 34 | # Add CORS middleware 35 | if config.security.CORS.enabled: 36 | if config.security.CORS.allow_origins == ["*"]: 37 | logger.warning("CORS middleware is enabled with wildcard origins") 38 | else: 39 | logger.info("CORS middleware is enabled") 40 | 41 | app.add_middleware( 42 | CORSMiddleware, 43 | allow_origins=config.security.CORS.allow_origins, 44 | allow_credentials=config.security.CORS.allow_credentials, 45 | allow_methods=config.security.CORS.allow_methods, 46 | allow_headers=config.security.CORS.allow_headers, 47 | ) 48 | else: 49 | logger.info("CORS middleware is disabled") 50 | 51 | app.include_router(secure_router) 52 | app.include_router(public_router) 53 | 54 | return app 55 | 56 | app = create_app() 57 | 58 | def run(): 59 | import uvicorn 60 | uvicorn.run(app, host=config.network.host, port=config.network.port) 61 | 62 | if __name__ == "__main__": 63 | run() -------------------------------------------------------------------------------- /mcp_bridge/mcpManagement/__init__.py: -------------------------------------------------------------------------------- 1 | from .router import router 2 | 3 | __all__ = ["router"] 4 | -------------------------------------------------------------------------------- /mcp_bridge/mcpManagement/prompts.py: -------------------------------------------------------------------------------- 1 | from typing import Any 2 | from fastapi import APIRouter, HTTPException 3 | from mcp_bridge.mcp_clients.McpClientManager import ClientManager 4 | from mcp.types import ListPromptsResult, 
# Endpoints for listing and evaluating MCP prompts across all clients
router = APIRouter(prefix="/prompts")


@router.get("")
async def get_prompts() -> dict[str, ListPromptsResult]:
    """Get all prompts from all MCP clients.

    Returns one entry per configured MCP server, keyed by server name.
    """

    prompts = {}

    for name, client in ClientManager.get_clients():
        prompts[name] = await client.list_prompts()

    return prompts


@router.post("/{prompt_name}")
async def get_prompt(prompt_name: str, args: dict[str, Any] = {}) -> GetPromptResult:
    """Evaluate a prompt.

    `args` arrives as the JSON request body; it is only forwarded, never
    mutated, so the mutable default dict is harmless here. Raises 404 both
    when no client owns the prompt and when evaluation yields no result.
    """

    client = await ClientManager.get_client_from_prompt(prompt_name)
    if not client:
        raise HTTPException(status_code=404, detail=f"Prompt '{prompt_name}' not found")

    # NOTE(review): get_prompt presumably returns None on failure — the
    # same 404 is reused for that case; confirm against the client impl
    result = await client.get_prompt(prompt_name, arguments=args)
    if not result:
        raise HTTPException(status_code=404, detail=f"Prompt '{prompt_name}' not found")

    return result
prompts_router 6 | from .resources import router as resources_router 7 | from .server import router as server_router 8 | 9 | router = APIRouter(prefix="/mcp", tags=[Tag.mcp_management]) 10 | 11 | router.include_router(tools_router) 12 | router.include_router(prompts_router) 13 | router.include_router(resources_router) 14 | router.include_router(server_router) 15 | -------------------------------------------------------------------------------- /mcp_bridge/mcpManagement/server.py: -------------------------------------------------------------------------------- 1 | from fastapi import APIRouter, HTTPException 2 | from mcp.types import ListPromptsResult, ListToolsResult, ListResourcesResult 3 | from mcp_bridge.models.mcpServerStatus import McpServerStatus 4 | from mcp_bridge.mcp_clients.McpClientManager import ClientManager 5 | 6 | router = APIRouter(prefix="/servers") 7 | 8 | 9 | @router.get("/{server_name}/prompts") 10 | async def get_server_prompts(server_name: str) -> ListPromptsResult: 11 | """Get all prompts from a specific MCP server""" 12 | 13 | client = ClientManager.get_client(server_name) 14 | if not client: 15 | raise HTTPException(status_code=404, detail=f"Server '{server_name}' not found") 16 | 17 | return await client.list_prompts() 18 | 19 | 20 | @router.get("/{server_name}/tools") 21 | async def get_server_tools(server_name: str) -> ListToolsResult: 22 | """Get all tools from a specific MCP server""" 23 | 24 | client = ClientManager.get_client(server_name) 25 | if not client: 26 | raise HTTPException(status_code=404, detail=f"Server '{server_name}' not found") 27 | 28 | return await client.list_tools() 29 | 30 | 31 | @router.get("/{server_name}/resources") 32 | async def get_server_resources(server_name: str) -> ListResourcesResult: 33 | """Get all resources from a specific MCP server""" 34 | 35 | client = ClientManager.get_client(server_name) 36 | if not client: 37 | raise HTTPException(status_code=404, detail=f"Server '{server_name}' not found") 38 
# Endpoints for listing and invoking MCP tools across all clients
router = APIRouter(prefix="/tools")


@router.get("")
async def get_tools() -> dict[str, ListToolsResult]:
    """Get all tools from all MCP clients.

    Returns one ListToolsResult per configured MCP server, keyed by
    server name.
    """

    tools = {}

    for name, client in ClientManager.get_clients():
        tools[name] = await client.list_tools()

    return tools


@router.post("/{tool_name}/call")
async def call_tool(tool_name: str, arguments: dict[str, Any] = {}) -> CallToolResult:
    """Call a tool by name on whichever client provides it.

    `arguments` arrives as the JSON request body; it is only forwarded to
    the client, never mutated, so the mutable default dict is harmless.
    Raises 404 when no client exposes a tool with this name.
    """

    client = await ClientManager.get_client_from_tool(tool_name)
    if not client:
        raise HTTPException(status_code=404, detail=f"Tool '{tool_name}' not found")

    return await client.call_tool(tool_name, arguments)
class GenericMcpClient(ABC):
    """Base class shared by the stdio / SSE / docker MCP clients.

    Provides a self-restarting session-maintainer loop plus thin,
    error-tolerant wrappers around the session's request methods. The
    concrete transport is supplied by subclasses via _maintain_session().
    """

    name: str
    config: Any
    client: Any
    # None whenever the underlying transport is down; the wrappers below
    # block in _wait_for_session() until it comes back (or time out).
    session: McpClientSession | None = None

    def __init__(self, name: str) -> None:
        super().__init__()
        self.session = None
        self.name = name
        # Strong reference to the maintainer task — see start().
        self._maintainer_task: "asyncio.Task | None" = None

        logger.debug(f"initializing client class for {name}")

    @abstractmethod
    async def _maintain_session(self):
        """Open the transport, initialise a session, and block while it is healthy."""

    async def _session_maintainer(self):
        """Run _maintain_session() forever, restarting it after any failure."""
        while True:
            try:
                await self._maintain_session()
            except FileNotFoundError as e:
                logger.error(f"failed to maintain session for {self.name}: file {e.filename} not found.")
            except Exception as e:
                logger.error(f"failed to maintain session for {self.name}: {type(e)} {e.args}")

            logger.debug(f"restarting session for {self.name}")
            # Small back-off so a permanently-broken server does not busy-loop.
            await asyncio.sleep(0.5)

    async def start(self):
        """Launch the background session maintainer.

        The task object is stored on the instance: asyncio keeps only weak
        references to tasks, so a bare create_task() result can be garbage
        collected mid-flight and silently stop maintaining the session.
        """
        self._maintainer_task = asyncio.create_task(self._session_maintainer())

    async def call_tool(
        self, name: str, arguments: dict, timeout: Optional[int] = None
    ) -> CallToolResult:
        """Call a tool on this server; timeouts and MCP errors are folded
        into an error CallToolResult rather than raised (timeout=None means
        wait indefinitely)."""
        await self._wait_for_session()
        assert self.session is not None  # guaranteed by _wait_for_session

        try:
            async with asyncio.timeout(timeout):
                return await self.session.call_tool(
                    name=name,
                    arguments=arguments,
                )

        except asyncio.TimeoutError:
            logger.error(f"timed out calling tool: {name}")
            return CallToolResult(
                content=[
                    TextContent(type="text", text=f"Timeout Error calling {name}")
                ],
                isError=True,
            )

        except McpError as e:
            logger.error(f"error calling {name}: {e}")
            return CallToolResult(
                content=[TextContent(type="text", text=f"Error calling {name}: {e}")],
                isError=True,
            )

    async def get_prompt(
        self, prompt: str, arguments: dict[str, str]
    ) -> GetPromptResult | None:
        """Evaluate a prompt on this server; returns None on failure."""
        await self._wait_for_session()
        assert self.session is not None

        try:
            return await self.session.get_prompt(prompt, arguments)
        except Exception as e:
            logger.error(f"error evaluating prompt: {e}")

        return None

    async def read_resource(
        self, uri: AnyUrl
    ) -> list[TextResourceContents | BlobResourceContents]:
        """Read a resource by URI; returns an empty list on failure."""
        await self._wait_for_session()
        assert self.session is not None
        try:
            resource = await self.session.read_resource(uri)
            return resource.contents
        except Exception as e:
            logger.error(f"error reading resource: {e}")
            return []

    async def list_tools(self) -> ListToolsResult:
        """List tools; empty result on failure.

        If session is None the client is not running — wait briefly to see
        if the maintainer restarts it before giving up.
        """
        await self._wait_for_session()
        assert self.session is not None

        try:
            return await self.session.list_tools()
        except Exception as e:
            logger.error(f"error listing tools: {e}")
            return ListToolsResult(tools=[])

    async def list_resources(self) -> ListResourcesResult:
        """List resources; empty result on failure."""
        await self._wait_for_session()
        assert self.session is not None
        try:
            return await self.session.list_resources()
        except Exception as e:
            logger.error(f"error listing resources: {e}")
            return ListResourcesResult(resources=[])

    async def list_prompts(self) -> ListPromptsResult:
        """List prompts; empty result on failure."""
        await self._wait_for_session()
        assert self.session is not None
        try:
            return await self.session.list_prompts()
        except Exception as e:
            logger.error(f"error listing prompts: {e}")
            return ListPromptsResult(prompts=[])

    async def _wait_for_session(self, timeout: int = 5, http_error: bool = True):
        """Block until the maintainer has (re)established a session.

        Raises HTTPException(500) on timeout when http_error is True (the
        callers are FastAPI endpoints), otherwise a plain TimeoutError.
        """
        try:
            async with asyncio.timeout(timeout):
                while self.session is None:
                    await asyncio.sleep(1)
                    logger.debug(f"waiting for session for {self.name}")

        except asyncio.TimeoutError:
            if http_error:
                raise HTTPException(
                    status_code=500, detail=f"Could not connect to MCP server \"{self.name}\"."
                )

            raise TimeoutError(f"Could not connect to MCP server \"{self.name}\".")

        assert self.session is not None, "Session is None"

    async def status(self) -> McpServerStatus:
        """Get the status of the MCP server"""
        return McpServerStatus(
            name=self.name, online=self.session is not None, enabled=True
        )


# --- mcp_bridge/mcp_clients/DockerClient.py ----------------------------------
import asyncio

from mcp_bridge.mcp_clients.session import McpClientSession
from mcp_bridge.config import config
from mcpx.client.transports.docker import docker_client, DockerMCPServer
from .AbstractClient import GenericMcpClient
from loguru import logger


class DockerClient(GenericMcpClient):
    """MCP client whose server runs inside a docker container."""

    config: DockerMCPServer

    def __init__(self, name: str, config: DockerMCPServer) -> None:
        super().__init__(name=name)
        self.config = config

    async def _maintain_session(self):
        """One session lifetime: connect, initialise, then ping every 10s
        until the ping fails, at which point the maintainer restarts us."""
        async with docker_client(self.config) as client:
            logger.debug(f"made instance of docker client for {self.name}")
            async with McpClientSession(*client) as session:
                await session.initialize()
                logger.debug(f"finished initialise session for {self.name}")
                self.session = session

                try:
                    while True:
                        await asyncio.sleep(10)
                        # module-level `config` here is the bridge config,
                        # not the per-server DockerMCPServer on self.config
                        if config.logging.log_server_pings:
                            logger.debug(f"pinging session for {self.name}")

                        await session.send_ping()

                except Exception as exc:
                    logger.error(f"ping failed for {self.name}: {exc}")
                    self.session = None

        logger.debug(f"exiting session for {self.name}")
# --- mcp_bridge/mcp_clients/McpClientManager.py ------------------------------
from typing import Optional, Union

from loguru import logger
from mcp import McpError, StdioServerParameters
from mcpx.client.transports.docker import DockerMCPServer

from mcp_bridge.config import config
from mcp_bridge.config.final import SSEMCPServer

from .DockerClient import DockerClient
from .SseClient import SseClient
from .StdioClient import StdioClient

client_types = Union[StdioClient, SseClient, DockerClient]


class MCPClientManager:
    """Registry of all configured MCP clients, keyed by server name."""

    clients: dict[str, client_types] = {}

    async def initialize(self):
        """Initialize the MCP Client Manager and start all clients"""
        logger.debug("Initializing MCP Client Manager")

        for server_name, server_config in config.mcp_servers.items():
            self.clients[server_name] = await self.construct_client(
                server_name, server_config
            )

    async def construct_client(self, name, server_config) -> client_types:
        """Build and start the client matching the config's transport type."""
        logger.debug(f"Constructing client for {server_config}")

        client: client_types

        if isinstance(server_config, StdioServerParameters):
            client = StdioClient(name, server_config)
        elif isinstance(server_config, SSEMCPServer):
            # TODO: implement sse client
            client = SseClient(name, server_config)  # type: ignore
        elif isinstance(server_config, DockerMCPServer):
            client = DockerClient(name, server_config)
        else:
            raise NotImplementedError("Client Type not supported")

        await client.start()
        return client

    def get_client(self, server_name: str) -> Optional[client_types]:
        """Return the client for `server_name`, or None if unknown.

        Uses .get() rather than indexing: callers (e.g. the management
        endpoints) test the result with `if not client:` to produce a 404,
        which a raw KeyError would have bypassed.
        """
        return self.clients.get(server_name)

    def get_clients(self):
        """Return all (name, client) pairs as a list."""
        return list(self.clients.items())

    async def get_client_from_tool(self, tool: str) -> Optional[client_types]:
        """Find the first connected client that exposes tool `tool`."""
        for name, client in self.get_clients():
            # client cannot have tools if it is not connected
            if not client.session:
                continue

            try:
                list_tools = await client.session.list_tools()
                for client_tool in list_tools.tools:
                    if client_tool.name == tool:
                        return client
            except McpError:
                continue

        return None

    async def get_client_from_prompt(self, prompt: str) -> Optional[client_types]:
        """Find the first connected client that exposes prompt `prompt`."""
        for name, client in self.get_clients():
            # client cannot have prompts if it is not connected
            if not client.session:
                continue

            try:
                list_prompts = await client.session.list_prompts()
                for client_prompt in list_prompts.prompts:
                    if client_prompt.name == prompt:
                        return client
            except McpError:
                continue

        return None


# Module-level singleton used throughout the app.
ClientManager = MCPClientManager()


# --- mcp_bridge/mcp_clients/SseClient.py -------------------------------------
import asyncio
from mcp.client.sse import sse_client
from mcp_bridge.config import config
from mcp_bridge.config.final import SSEMCPServer
from mcp_bridge.mcp_clients.session import McpClientSession
from .AbstractClient import GenericMcpClient
from loguru import logger


class SseClient(GenericMcpClient):
    """MCP client that talks to a server over HTTP server-sent events."""

    config: SSEMCPServer

    def __init__(self, name: str, config: SSEMCPServer) -> None:
        super().__init__(name=name)
        self.config = config

    async def _maintain_session(self):
        """One session lifetime: connect, initialise, then ping every 10s
        until the ping fails; the maintainer loop then restarts us."""
        async with sse_client(self.config.url) as client:
            async with McpClientSession(*client) as session:
                await session.initialize()
                logger.debug(f"finished initialise session for {self.name}")
                self.session = session

                try:
                    while True:
                        await asyncio.sleep(10)
                        if config.logging.log_server_pings:
                            logger.debug(f"pinging session for {self.name}")

                        await session.send_ping()

                except Exception as exc:
                    logger.error(f"ping failed for {self.name}: {exc}")
                    self.session = None

        logger.debug(f"exiting session for {self.name}")


# --- mcp_bridge/mcp_clients/StdioClient.py -----------------------------------
import asyncio
from mcp import StdioServerParameters, stdio_client

from mcp_bridge.config import config
from mcp_bridge.mcp_clients.session import McpClientSession
from .AbstractClient import GenericMcpClient
from loguru import logger
import shutil
import os


# Keywords to identify virtual environment variables that must not leak
# into the spawned server process (it may use a different interpreter).
venv_keywords = ["CONDA", "VIRTUAL", "PYTHON"]


class StdioClient(GenericMcpClient):
    """MCP client that spawns the server as a subprocess over stdio."""

    config: StdioServerParameters

    def __init__(self, name: str, config: StdioServerParameters) -> None:
        super().__init__(name=name)

        # Work on a deep copy so the shared parsed config is never mutated.
        own_config = config.model_copy(deep=True)

        # Start from our environment, minus virtualenv/conda variables.
        env = dict(os.environ.copy())
        env = {
            key: value for key, value in env.items()
            if not any(key.startswith(keyword) for keyword in venv_keywords)
        }

        # Server-specific env entries override the inherited ones.
        if config.env is not None:
            env.update(config.env)

        own_config.env = env

        # Resolve the command on PATH up front so failures are immediate.
        command = shutil.which(config.command)
        if command is None:
            logger.error(f"could not find command {config.command}")
            # NOTE(review): exit(1) kills the whole bridge process for one
            # misconfigured server — consider raising instead; confirm intent.
            exit(1)

        own_config.command = command

        # this changes the default to ignore (only when the user did not
        # set encoding_error_handler explicitly)
        if "encoding_error_handler" not in config.model_fields_set:
            own_config.encoding_error_handler = "ignore"

        self.config = own_config

    async def _maintain_session(self):
        """One session lifetime: spawn, initialise, then ping every 10s
        until the ping fails; the maintainer loop then restarts us."""
        logger.debug(f"starting maintain session for {self.name}")
        async with stdio_client(self.config) as client:
            logger.debug(f"entered stdio_client context manager for {self.name}")
            assert client[0] is not None, f"missing read stream for {self.name}"
            assert client[1] is not None, f"missing write stream for {self.name}"
            async with McpClientSession(*client) as session:
                logger.debug(f"entered client session context manager for {self.name}")
                await session.initialize()
                logger.debug(f"finished initialise session for {self.name}")
                self.session = session

                try:
                    while True:
                        await asyncio.sleep(10)
                        # module-level bridge config, not self.config
                        if config.logging.log_server_pings:
                            logger.debug(f"pinging session for {self.name}")

                        await session.send_ping()

                except Exception as exc:
                    logger.error(f"ping failed for {self.name}: {exc}")
                    self.session = None

        logger.debug(f"exiting session for {self.name}")
    def __init__(
        self,
        read_stream: MemoryObjectReceiveStream[types.JSONRPCMessage | Exception],
        write_stream: MemoryObjectSendStream[types.JSONRPCMessage],
        read_timeout_seconds: timedelta | None = None,
    ) -> None:
        # Wire the transport streams into the generic BaseSession; the
        # ServerRequest/ServerNotification types tell the session how to
        # validate messages arriving from the server side.
        super().__init__(
            read_stream,
            write_stream,
            types.ServerRequest,
            types.ServerNotification,
            read_timeout_seconds=read_timeout_seconds,
        )

    async def __aenter__(self):
        # Enter the base session, then start draining incoming messages in
        # the session's own task group so notifications are consumed even
        # when no request is in flight.
        session = await super().__aenter__()
        self._task_group.start_soon(self._consume_messages)
        return session

    async def _consume_messages(self):
        """Drain the incoming-message stream, logging everything.

        Per-message failures are caught inside the loop so one bad message
        does not kill the consumer task.
        """
        try:
            async for message in self.incoming_messages:
                try:
                    if isinstance(message, Exception):
                        logger.error(f"Received exception in message stream: {message}")
                    elif isinstance(message, RequestResponder):
                        logger.debug(f"Received request: {message.request}")
                    elif isinstance(message, types.ServerNotification):
                        if isinstance(message.root, types.LoggingMessageNotification):
                            logger.debug(f"Received notification from server: {message.root.params}")
                        else:
                            logger.debug(f"Received notification from server: {message}")
                    else:
                        logger.debug(f"Received notification: {message}")
                except Exception as e:
                    logger.exception(f"Error processing message: {e}")
        except Exception as e:
            logger.exception(f"Message consumer task failed: {e}")

    async def initialize(self) -> types.InitializeResult:
        """Perform the MCP initialize handshake.

        Advertises sampling and roots capabilities, rejects servers that
        answer with an unsupported protocol version, then sends the
        notifications/initialized notification.
        """
        result = await self.send_request(
            types.ClientRequest(
                types.InitializeRequest(
                    method="initialize",
                    params=types.InitializeRequestParams(
                        protocolVersion=types.LATEST_PROTOCOL_VERSION,
                        capabilities=types.ClientCapabilities(
                            sampling=types.SamplingCapability(),
                            experimental=None,
                            roots=types.RootsCapability(
                                listChanged=True
                            ),
                        ),
                        clientInfo=types.Implementation(name="MCP-Bridge", version=version),
                    ),
                )
            ),
            types.InitializeResult,
        )

        if result.protocolVersion not in SUPPORTED_PROTOCOL_VERSIONS:
            raise RuntimeError(
                "Unsupported protocol version from the server: "
                f"{result.protocolVersion}"
            )

        await self.send_notification(
            types.ClientNotification(
                types.InitializedNotification(method="notifications/initialized")
            )
        )

        return result

    async def send_ping(self) -> types.EmptyResult:
        """Send a ping request."""
        return await self.send_request(
            types.ClientRequest(
                types.PingRequest(
                    method="ping",
                )
            ),
            types.EmptyResult,
        )

    async def send_progress_notification(
        self, progress_token: str | int, progress: float, total: float | None = None
    ) -> None:
        """Send a progress notification."""
        await self.send_notification(
            types.ClientNotification(
                types.ProgressNotification(
                    method="notifications/progress",
                    params=types.ProgressNotificationParams(
                        progressToken=progress_token,
                        progress=progress,
                        total=total,
                    ),
                ),
            )
        )

    async def set_logging_level(self, level: types.LoggingLevel) -> types.EmptyResult:
        """Send a logging/setLevel request."""
        return await self.send_request(
            types.ClientRequest(
                types.SetLevelRequest(
                    method="logging/setLevel",
                    params=types.SetLevelRequestParams(level=level),
                )
            ),
            types.EmptyResult,
        )

    async def list_resources(self) -> types.ListResourcesResult:
        """Send a resources/list request."""
        return await self.send_request(
            types.ClientRequest(
                types.ListResourcesRequest(
                    method="resources/list",
                )
            ),
            types.ListResourcesResult,
        )

    async def read_resource(self, uri: AnyUrl) -> types.ReadResourceResult:
        """Send a resources/read request."""
        return await self.send_request(
            types.ClientRequest(
                types.ReadResourceRequest(
                    method="resources/read",
                    params=types.ReadResourceRequestParams(uri=uri),
                )
            ),
            types.ReadResourceResult,
        )

    async def subscribe_resource(self, uri: AnyUrl) -> types.EmptyResult:
        """Send a resources/subscribe request."""
        return await self.send_request(
            types.ClientRequest(
                types.SubscribeRequest(
                    method="resources/subscribe",
                    params=types.SubscribeRequestParams(uri=uri),
                )
            ),
            types.EmptyResult,
        )

    async def unsubscribe_resource(self, uri: AnyUrl) -> types.EmptyResult:
        """Send a resources/unsubscribe request."""
        return await self.send_request(
            types.ClientRequest(
                types.UnsubscribeRequest(
                    method="resources/unsubscribe",
                    params=types.UnsubscribeRequestParams(uri=uri),
                )
            ),
            types.EmptyResult,
        )

    async def call_tool(
        self, name: str, arguments: dict | None = None
    ) -> types.CallToolResult:
        """Send a tools/call request."""
        return await self.send_request(
            types.ClientRequest(
                types.CallToolRequest(
                    method="tools/call",
                    params=types.CallToolRequestParams(name=name, arguments=arguments),
                )
            ),
            types.CallToolResult,
        )

    async def list_prompts(self) -> types.ListPromptsResult:
        """Send a prompts/list request."""
        return await self.send_request(
            types.ClientRequest(
                types.ListPromptsRequest(
                    method="prompts/list",
                )
            ),
            types.ListPromptsResult,
        )

    async def get_prompt(
        self, name: str, arguments: dict[str, str] | None = None
    ) -> types.GetPromptResult:
        """Send a prompts/get request."""
        return await self.send_request(
            types.ClientRequest(
                types.GetPromptRequest(
                    method="prompts/get",
                    params=types.GetPromptRequestParams(name=name, arguments=arguments),
                )
            ),
            types.GetPromptResult,
        )

    async def complete(
        self, ref: types.ResourceReference | types.PromptReference, argument: dict
    ) -> types.CompleteResult:
        """Send a completion/complete request."""
        return await self.send_request(
            types.ClientRequest(
                types.CompleteRequest(
                    method="completion/complete",
                    params=types.CompleteRequestParams(
                        ref=ref,
                        argument=types.CompletionArgument(**argument),
                    ),
                )
            ),
            types.CompleteResult,
        )

    async def list_tools(self) -> types.ListToolsResult:
        """Send a tools/list request."""
        return await self.send_request(
            types.ClientRequest(
                types.ListToolsRequest(
                    method="tools/list",
                )
            ),
            types.ListToolsResult,
        )

    async def send_roots_list_changed(self) -> None:
        """Send a roots/list_changed notification."""
        await self.send_notification(
            types.ClientNotification(
                types.RootsListChangedNotification(
                    method="notifications/roots/list_changed",
                )
            )
        )

    async def _received_request(
        self, responder: RequestResponder["types.ServerRequest", "types.ClientResult"]
    ) -> None:
        # Only sampling (createMessage) server->client requests are handled;
        # all other request types fall through unanswered here.
        if isinstance(responder.request.root, types.CreateMessageRequest):
            # handle create message request (sampling)
            response = await self.sample(responder.request.root.params)
            client_response = types.ClientResult(**response.model_dump())
            await responder.respond(client_response)

    async def sample(self, params: types.CreateMessageRequestParams) -> types.CreateMessageResult:
        """Delegate a server-initiated sampling request to the bridge's sampler."""
        logger.info("got sampling request from mcp server")
        resp = await handle_sampling_message(params)
        logger.info("finished sampling request from mcp server")
        return resp
# --- mcp_bridge/mcp_server/server.py -----------------------------------------
from mcp import types
from mcp.server import Server, NotificationOptions
from mcp.server.models import InitializationOptions
from pydantic import AnyUrl
from mcp_bridge.mcp_clients.McpClientManager import ClientManager
from loguru import logger

__all__ = ["server", "options"]

server = Server("MCP-Bridge")

## list functions


@server.list_prompts()
async def list_prompts() -> list[types.Prompt]:
    """Aggregate prompts from every connected client.

    Per-client failures are logged and skipped so one broken server does
    not take down the whole listing (matching list_resources behaviour).
    """
    prompts = []
    for name, client in ClientManager.get_clients():
        try:
            client_prompts = await client.list_prompts()
            prompts.extend(client_prompts.prompts)
        except Exception as e:
            logger.error(f"Error listing prompts for {name}: {e}")
    return prompts


@server.list_resources()
async def list_resources() -> list[types.Resource]:
    """Aggregate resources from every connected client; failures are skipped."""
    resources = []
    for name, client in ClientManager.get_clients():
        try:
            client_resources = await client.list_resources()
            resources.extend(client_resources.resources)
        except Exception as e:
            logger.error(f"Error listing resources for {name}: {e}")
    return resources


@server.list_resource_templates()
async def list_resource_templates() -> list[types.ResourceTemplate]:
    # Resource templates are not aggregated from downstream servers (yet).
    return []


@server.list_tools()
async def list_tools() -> list[types.Tool]:
    """Aggregate tools from every connected client; failures are skipped."""
    tools = []
    for name, client in ClientManager.get_clients():
        try:
            client_tools = await client.list_tools()
            tools.extend(client_tools.tools)
        except Exception as e:
            logger.error(f"Error listing tools for {name}: {e}")
    return tools


## get functions


@server.get_prompt()
async def get_prompt(name: str, args: dict[str, str] | None) -> types.GetPromptResult:
    """Resolve and evaluate a prompt on whichever client exposes it."""
    client = await ClientManager.get_client_from_prompt(name)

    # if client is None, then we cannot get the prompt
    if client is None:
        raise Exception(f"Prompt '{name}' not found")

    # if args is None, then we should use an empty dict
    if args is None:
        args = {}

    result = await client.get_prompt(name, args)
    if result is None:
        raise Exception(f"Prompt '{name}' not found")

    return result


@server.read_resource()
async def handle_read_resource(uri: AnyUrl) -> str | bytes:
    """Find the client that owns `uri` and return the resource payload."""
    for name, client in ClientManager.get_clients():
        try:
            client_resources = await client.list_resources()
            if str(uri) in map(lambda x: str(x.uri), client_resources.resources):
                response = await client.read_resource(uri)
                for resource in response:
                    if resource.mimeType == "text/plain":
                        assert isinstance(resource, types.TextResourceContents)
                        assert type(resource.text) is str
                        return resource.text

                    elif resource.mimeType == "application/octet-stream":
                        assert isinstance(resource, types.BlobResourceContents)
                        # NOTE(review): MCP's BlobResourceContents carries
                        # base64 text, so this bytes assert looks like it
                        # would always fail for blobs — confirm against the
                        # mcp SDK types before relying on this path.
                        assert type(resource.blob) is bytes
                        return resource.blob

                    else:
                        raise Exception(
                            f"Unsupported resource type: {resource.mimeType}"
                        )

        except Exception as e:
            logger.error(f"Error listing resources for {name}: {e}")

    raise Exception(f"Resource '{uri}' not found")


@server.call_tool()
async def handle_call_tool(
    name: str, arguments: dict | None
) -> list[types.TextContent | types.ImageContent | types.EmbeddedResource]:
    """Route a tool call to whichever client exposes the tool."""
    client = await ClientManager.get_client_from_tool(name)

    # if client is None, then we cannot call the tool
    if client is None:
        raise Exception(f"Tool '{name}' not found")

    # if arguments is None, then we should use an empty dict
    if arguments is None:
        arguments = {}

    return (await client.call_tool(name, arguments)).content


# options

options = InitializationOptions(
    server_name="MCP-Bridge",
    server_version="0.2.0",  # NOTE(review): hardcoded; package version lives in mcp_bridge.__version__
    capabilities=server.get_capabilities(
        notification_options=NotificationOptions(),
        experimental_capabilities={},
    ),
)


# --- mcp_bridge/mcp_server/sse.py --------------------------------------------
import asyncio
from anyio import BrokenResourceError
from fastapi.responses import StreamingResponse
from .sse_transport import SseServerTransport
from fastapi import APIRouter, Request
from pydantic import ValidationError
from loguru import logger

from .server import server, options

router = APIRouter(prefix="/sse")

sse = SseServerTransport("/mcp-server/sse/messages")


@router.get("/", response_class=StreamingResponse)
async def handle_sse(request: Request):
    """Accept an SSE connection and run the bridge MCP server over it.

    Disconnect-shaped errors (broken pipe, cancellation, bad frames) are
    expected at teardown and deliberately swallowed.
    """
    logger.info("new incoming SSE connection established")
    async with sse.connect_sse(request) as streams:
        try:
            await server.run(streams[0], streams[1], options)
        except BrokenResourceError:
            pass
        except asyncio.CancelledError:
            pass
        except ValidationError:
            pass
        except Exception:
            raise
    await request.close()


@router.post("/messages")
async def handle_messages(request: Request):
    """Receive a client->server message POSTed for an existing SSE session."""
    logger.info("incoming SSE message received")
    await sse.handle_post_message(request.scope, request.receive, request._send)
    await request.close()


# --- mcp_bridge/mcp_server/sse_transport.py ----------------------------------
"""

This is a modification of the original code from the mcp sdk
They try to directly control the ASGI lifespan which causes issues with fastapi

also switched the logger to loguru since we are vendoring it anyway

"""

from contextlib import asynccontextmanager
from typing import Any
from urllib.parse import quote
from uuid import UUID, uuid4

import anyio
from anyio.streams.memory import MemoryObjectReceiveStream, MemoryObjectSendStream
from pydantic import ValidationError
from sse_starlette import EventSourceResponse
from fastapi.requests import Request
from fastapi.responses import Response
from starlette.types import Receive, Scope, Send

import mcp.types as types

from loguru import logger

logger.disable("mcp_server.sse_transport")


class SseServerTransport:
    """
    SSE server transport for MCP. This class provides _two_ ASGI applications,
    suitable to be used with a framework like Starlette and a server like Hypercorn:

    1. connect_sse() is an ASGI application which receives incoming GET requests,
    and sets up a new SSE stream to send server messages to the client.
    2. handle_post_message() is an ASGI application which receives incoming POST
    requests, which should contain client messages that link to a
    previously-established SSE session.
    """
40 | """ 41 | 42 | _endpoint: str 43 | _read_stream_writers: dict[ 44 | UUID, MemoryObjectSendStream[types.JSONRPCMessage | Exception] 45 | ] 46 | 47 | def __init__(self, endpoint: str) -> None: 48 | """ 49 | Creates a new SSE server transport, which will direct the client to POST 50 | messages to the relative or absolute URL given. 51 | """ 52 | 53 | super().__init__() 54 | self._endpoint = endpoint 55 | self._read_stream_writers = {} 56 | logger.debug(f"SseServerTransport initialized with endpoint: {endpoint}") 57 | 58 | @asynccontextmanager 59 | async def connect_sse(self, request: Request): 60 | if request.scope["type"] != "http": 61 | logger.error("connect_sse received non-HTTP request") 62 | raise ValueError("connect_sse can only handle HTTP requests") 63 | 64 | logger.debug("Setting up SSE connection") 65 | read_stream: MemoryObjectReceiveStream[types.JSONRPCMessage | Exception] 66 | read_stream_writer: MemoryObjectSendStream[types.JSONRPCMessage | Exception] 67 | 68 | write_stream: MemoryObjectSendStream[types.JSONRPCMessage] 69 | write_stream_reader: MemoryObjectReceiveStream[types.JSONRPCMessage] 70 | 71 | read_stream_writer, read_stream = anyio.create_memory_object_stream(0) 72 | write_stream, write_stream_reader = anyio.create_memory_object_stream(0) 73 | 74 | session_id = uuid4() 75 | session_uri = f"{quote(self._endpoint)}?session_id={session_id.hex}" 76 | self._read_stream_writers[session_id] = read_stream_writer 77 | logger.debug(f"Created new session with ID: {session_id}") 78 | 79 | sse_stream_writer, sse_stream_reader = anyio.create_memory_object_stream( 80 | 0, dict[str, Any] 81 | ) 82 | 83 | async def sse_writer(): 84 | logger.debug("Starting SSE writer") 85 | async with sse_stream_writer, write_stream_reader: 86 | await sse_stream_writer.send({"event": "endpoint", "data": session_uri}) 87 | logger.debug(f"Sent endpoint event: {session_uri}") 88 | 89 | async for message in write_stream_reader: 90 | logger.debug(f"Sending message via SSE: 
{message}") 91 | await sse_stream_writer.send( 92 | { 93 | "event": "message", 94 | "data": message.model_dump_json( 95 | by_alias=True, exclude_none=True 96 | ), 97 | } 98 | ) 99 | 100 | async with anyio.create_task_group() as tg: 101 | response = EventSourceResponse( 102 | content=sse_stream_reader, data_sender_callable=sse_writer 103 | ) 104 | logger.debug("Starting SSE response task") 105 | tg.start_soon(response, request.scope, request.receive, request._send) 106 | 107 | logger.debug("Yielding read and write streams") 108 | yield (read_stream, write_stream) 109 | 110 | async def handle_post_message( 111 | self, scope: Scope, receive: Receive, send: Send 112 | ) -> Response: 113 | logger.debug("Handling POST message") 114 | request = Request(scope, receive) 115 | 116 | session_id_param = request.query_params.get("session_id") 117 | if session_id_param is None: 118 | logger.warning("Received request without session_id") 119 | response = Response("session_id is required", status_code=400) 120 | return response 121 | 122 | try: 123 | session_id = UUID(hex=session_id_param) 124 | logger.debug(f"Parsed session ID: {session_id}") 125 | except ValueError: 126 | logger.warning(f"Received invalid session ID: {session_id_param}") 127 | response = Response("Invalid session ID", status_code=400) 128 | return response 129 | 130 | writer = self._read_stream_writers.get(session_id) 131 | if not writer: 132 | logger.warning(f"Could not find session for ID: {session_id}") 133 | response = Response("Could not find session", status_code=404) 134 | return response 135 | 136 | json = await request.json() 137 | logger.debug(f"Received JSON: {json}") 138 | 139 | try: 140 | message = types.JSONRPCMessage.model_validate(json) 141 | logger.debug(f"Validated client message: {message}") 142 | except ValidationError as err: 143 | logger.error(f"Failed to parse message: {err}") 144 | response = Response("Could not parse message", status_code=400) 145 | await writer.send(err) 146 | return 
response 147 | 148 | logger.debug(f"Sending message to writer: {message}") 149 | response = Response("Accepted", status_code=202) 150 | await writer.send(message) 151 | return response 152 | -------------------------------------------------------------------------------- /mcp_bridge/models/__init__.py: -------------------------------------------------------------------------------- 1 | from .chatCompletionStreamResponse import SSEData 2 | -------------------------------------------------------------------------------- /mcp_bridge/models/chatCompletionStreamResponse.py: -------------------------------------------------------------------------------- 1 | from typing import Optional, List 2 | from pydantic import BaseModel 3 | 4 | 5 | class Delta(BaseModel): 6 | role: Optional[str] = None 7 | content: Optional[str] = None 8 | 9 | 10 | class Choice(BaseModel): 11 | index: int 12 | delta: Delta 13 | logprobs: Optional[dict] = None 14 | finish_reason: Optional[str] = None 15 | 16 | 17 | class SSEData(BaseModel): 18 | id: str 19 | object: str 20 | created: int 21 | model: str 22 | choices: List[Choice] 23 | -------------------------------------------------------------------------------- /mcp_bridge/models/mcpServerStatus.py: -------------------------------------------------------------------------------- 1 | from pydantic import BaseModel, Field 2 | 3 | 4 | class McpServerStatus(BaseModel): 5 | name: str = Field(..., description="Name of the MCP server") 6 | online: bool = Field(..., description="Whether the server is online") 7 | enabled: bool = Field(True, description="Whether the server is enabled") 8 | -------------------------------------------------------------------------------- /mcp_bridge/openai_clients/__init__.py: -------------------------------------------------------------------------------- 1 | from .genericHttpxClient import get_client 2 | from .completion import completions 3 | from .chatCompletion import chat_completions 4 | from .streamChatCompletion 
import streaming_chat_completions 5 | 6 | __all__ = ["get_client", "completions", "chat_completions", "streaming_chat_completions"] 7 | -------------------------------------------------------------------------------- /mcp_bridge/openai_clients/chatCompletion.py: -------------------------------------------------------------------------------- 1 | from fastapi import Request 2 | from lmos_openai_types import ( 3 | CreateChatCompletionRequest, 4 | CreateChatCompletionResponse, 5 | ChatCompletionRequestMessage, 6 | ) 7 | 8 | from .utils import call_tool, chat_completion_add_tools 9 | from .genericHttpxClient import get_client 10 | from mcp_bridge.mcp_clients.McpClientManager import ClientManager 11 | from mcp_bridge.tool_mappers import mcp2openai 12 | from loguru import logger 13 | import json 14 | 15 | 16 | async def chat_completions( 17 | request: CreateChatCompletionRequest, 18 | http_request: Request, 19 | ) -> CreateChatCompletionResponse: 20 | """performs a chat completion using the inference server""" 21 | 22 | request = await chat_completion_add_tools(request) 23 | 24 | while True: 25 | # logger.debug(request.model_dump_json()) 26 | async with get_client(http_request) as client: 27 | text = ( 28 | await client.post( 29 | "/chat/completions", 30 | #content=request.model_dump_json( 31 | # exclude_defaults=True, exclude_none=True, exclude_unset=True 32 | #), 33 | json=request.model_dump(exclude_defaults=True, exclude_none=True, exclude_unset=True), 34 | ) 35 | ).text 36 | logger.debug(text) 37 | try: 38 | response = CreateChatCompletionResponse.model_validate_json(text) 39 | except Exception as e: 40 | logger.error(f"Error parsing response: {text}") 41 | logger.error(e) 42 | return 43 | 44 | msg = response.choices[0].message 45 | msg = ChatCompletionRequestMessage( 46 | role="assistant", 47 | content=msg.content, 48 | tool_calls=msg.tool_calls, 49 | ) # type: ignore 50 | request.messages.append(msg) 51 | 52 | logger.debug(f"finish reason: 
{response.choices[0].finish_reason}") 53 | if response.choices[0].finish_reason.value in ["stop", "length"]: 54 | logger.debug("no tool calls found") 55 | return response 56 | 57 | logger.debug("tool calls found") 58 | for tool_call in response.choices[0].message.tool_calls.root: 59 | logger.debug( 60 | f"tool call: {tool_call.function.name} arguments: {json.loads(tool_call.function.arguments)}" 61 | ) 62 | 63 | # FIXME: this can probably be done in parallel using asyncio gather 64 | tool_call_result = await call_tool( 65 | tool_call.function.name, tool_call.function.arguments 66 | ) 67 | if tool_call_result is None: 68 | continue 69 | 70 | logger.debug( 71 | f"tool call result for {tool_call.function.name}: {tool_call_result.model_dump()}" 72 | ) 73 | 74 | logger.debug(f"tool call result content: {tool_call_result.content}") 75 | 76 | tools_content = [ 77 | {"type": "text", "text": part.text} 78 | for part in filter(lambda x: x.type == "text", tool_call_result.content) 79 | ] 80 | if len(tools_content) == 0: 81 | tools_content = [ 82 | {"type": "text", "text": "the tool call result is empty"} 83 | ] 84 | request.messages.append( 85 | ChatCompletionRequestMessage.model_validate( 86 | { 87 | "role": "tool", 88 | "content": tools_content, 89 | "tool_call_id": tool_call.id, 90 | } 91 | ) 92 | ) 93 | 94 | logger.debug("sending next iteration of chat completion request") 95 | -------------------------------------------------------------------------------- /mcp_bridge/openai_clients/completion.py: -------------------------------------------------------------------------------- 1 | from fastapi import Request 2 | from lmos_openai_types import CreateCompletionRequest 3 | from .genericHttpxClient import get_client 4 | 5 | 6 | async def completions(request: CreateCompletionRequest, http_request: Request) -> dict: 7 | """performs a completion using the inference server""" 8 | 9 | async with get_client(http_request) as client: 10 | response = await client.post( 11 | 
"/completions", 12 | json=request.model_dump( 13 | exclude_defaults=True, exclude_none=True, exclude_unset=True 14 | ), 15 | ) 16 | return response.json() 17 | -------------------------------------------------------------------------------- /mcp_bridge/openai_clients/genericHttpxClient.py: -------------------------------------------------------------------------------- 1 | from httpx import AsyncClient 2 | from mcp_bridge.config import config 3 | from fastapi import Request 4 | from contextlib import asynccontextmanager 5 | 6 | async def create_client(request: Request = None): 7 | """Creates a new client instance with the appropriate headers""" 8 | client = AsyncClient( 9 | base_url=config.inference_server.base_url, 10 | headers={ 11 | "Authorization": f"Bearer {config.inference_server.api_key}", 12 | "Content-Type": "application/json" 13 | }, 14 | timeout=10000, 15 | ) 16 | 17 | if request: 18 | # Dodaj nagłówki z żądania 19 | headers = {k.lower(): v for k, v in request.headers.items()} 20 | 21 | openwebui_headers = [ 22 | "x-openwebui-user-name", 23 | "x-openwebui-user-id", 24 | "x-openwebui-user-email", 25 | "x-openwebui-user-role" 26 | ] 27 | 28 | for header in openwebui_headers: 29 | if header in headers: 30 | client.headers[header] = headers[header] 31 | 32 | return client 33 | 34 | @asynccontextmanager 35 | async def get_client(request: Request = None): 36 | """Context manager for HTTP client""" 37 | client = await create_client(request) 38 | try: 39 | yield client 40 | finally: 41 | await client.aclose() 42 | -------------------------------------------------------------------------------- /mcp_bridge/openai_clients/streamChatCompletion.py: -------------------------------------------------------------------------------- 1 | import json 2 | from typing import Optional 3 | from fastapi import HTTPException, Request 4 | from lmos_openai_types import ( 5 | ChatCompletionMessageToolCall, 6 | ChatCompletionRequestMessage, 7 | CreateChatCompletionRequest, 8 | 
CreateChatCompletionStreamResponse, 9 | Function1, 10 | ) 11 | from .utils import call_tool, chat_completion_add_tools 12 | from mcp_bridge.models import SSEData 13 | from .genericHttpxClient import get_client 14 | from mcp_bridge.mcp_clients.McpClientManager import ClientManager 15 | from mcp_bridge.tool_mappers import mcp2openai 16 | from loguru import logger 17 | from httpx_sse import aconnect_sse 18 | 19 | from sse_starlette.sse import EventSourceResponse, ServerSentEvent 20 | 21 | 22 | async def streaming_chat_completions(request: CreateChatCompletionRequest, http_request: Request): 23 | # raise NotImplementedError("Streaming Chat Completion is not supported") 24 | 25 | try: 26 | return EventSourceResponse( 27 | content=chat_completions(request, http_request), 28 | media_type="text/event-stream", 29 | headers={"Cache-Control": "no-cache"}, 30 | ) 31 | 32 | except Exception as e: 33 | logger.error(e) 34 | 35 | 36 | async def chat_completions(request: CreateChatCompletionRequest, http_request: Request): 37 | """performs a chat completion using the inference server""" 38 | 39 | request.stream = True 40 | 41 | request = await chat_completion_add_tools(request) 42 | 43 | fully_done = False 44 | while not fully_done: 45 | # json_data = request.model_dump_json( 46 | # exclude_defaults=True, exclude_none=True, exclude_unset=True 47 | # ) 48 | 49 | json_data = json.dumps(request.model_dump( 50 | exclude_defaults=True, exclude_none=True, exclude_unset=True 51 | )) 52 | 53 | # logger.debug(json_data) 54 | 55 | last: Optional[CreateChatCompletionStreamResponse] = None # last message 56 | 57 | tool_call_name: str = "" 58 | tool_call_json: str = "" 59 | should_forward: bool = True 60 | response_content: str = "" 61 | tool_call_id: str = "" 62 | 63 | async with get_client(http_request) as client: 64 | async with aconnect_sse( 65 | client, "post", "/chat/completions", content=json_data 66 | ) as event_source: 67 | 68 | # check if the content type is correct because the 
aiter_sse method 69 | # will raise an exception if the content type is not correct 70 | if "Content-Type" in event_source.response.headers: 71 | content_type = event_source.response.headers["Content-Type"] 72 | if "text/event-stream" not in content_type: 73 | logger.error(f"Unexpected Content-Type: {content_type}") 74 | error_data = await event_source.response.aread() 75 | logger.error(f"Request URL: {event_source.response.url}") 76 | logger.error(f"Request Data: {json_data}") 77 | logger.error(f"Response Status: {event_source.response.status_code}") 78 | logger.error(f"Response Data: {error_data.decode(event_source.response.encoding or 'utf-8')}") 79 | raise HTTPException(status_code=500, detail="Unexpected Content-Type") 80 | 81 | # iterate over the SSE stream 82 | async for sse in event_source.aiter_sse(): 83 | event = sse.event 84 | data = sse.data 85 | id = sse.id 86 | retry = sse.retry 87 | 88 | logger.debug( 89 | f"event: {event},\ndata: {data},\nid: {id},\nretry: {retry}" 90 | ) 91 | 92 | # handle if the SSE stream is done 93 | if data == "[DONE]": 94 | logger.debug("inference serverstream done") 95 | break 96 | 97 | # for some reason openrouter uses uppercase for finish_reason 98 | try: 99 | data['choices'][0]['finish_reason'] = data['choices'][0]['finish_reason'].lower() # type: ignore 100 | except Exception as e: 101 | logger.debug(f"failed to lowercase finish_reason: {e}") 102 | 103 | try: 104 | parsed_data = CreateChatCompletionStreamResponse.model_validate_json( 105 | data 106 | ) 107 | except Exception as e: 108 | logger.debug(data) 109 | raise e 110 | 111 | # add the delta to the response content 112 | content = parsed_data.choices[0].delta.content if len(parsed_data.choices) > 0 else "" 113 | content = content if content is not None else "" 114 | response_content += content 115 | 116 | # handle stop reasons 117 | if len(parsed_data.choices) > 0 and parsed_data.choices[0].finish_reason is not None: 118 | if parsed_data.choices[0].finish_reason.value 
in [ 119 | "stop", 120 | "length", 121 | ]: 122 | fully_done = True 123 | else: 124 | should_forward = False 125 | 126 | # this manages the incoming tool call schema 127 | # most of this is assertions to please mypy 128 | if len(parsed_data.choices) > 0 and parsed_data.choices[0].delta.tool_calls is not None: 129 | should_forward = False 130 | assert ( 131 | parsed_data.choices[0].delta.tool_calls[0].function is not None 132 | ) 133 | 134 | name = parsed_data.choices[0].delta.tool_calls[0].function.name 135 | name = name if name is not None else "" 136 | tool_call_name = name if tool_call_name == "" else tool_call_name 137 | 138 | call_id = parsed_data.choices[0].delta.tool_calls[0].id 139 | call_id = call_id if call_id is not None else "" 140 | tool_call_id = id if tool_call_id == "" else tool_call_id 141 | 142 | arg = parsed_data.choices[0].delta.tool_calls[0].function.arguments 143 | tool_call_json += arg if arg is not None else "" 144 | 145 | # forward SSE messages to the client 146 | logger.debug(f"{should_forward=}") 147 | if should_forward: 148 | # we do not want to forward tool call json to the client 149 | logger.debug("forwarding message") 150 | yield SSEData.model_validate_json(sse.data).model_dump_json() 151 | 152 | # save the last message 153 | last = parsed_data 154 | 155 | # ideally we should check this properly 156 | assert last is not None 157 | if len(last.choices) > 0: 158 | assert last.choices[0].finish_reason is not None 159 | 160 | if len(last.choices) > 0 and last.choices[0].finish_reason.value in ["stop", "length"]: 161 | logger.debug("no tool calls found") 162 | fully_done = True 163 | continue 164 | 165 | logger.debug("tool calls found") 166 | logger.debug( 167 | f"{tool_call_name=} {tool_call_json=}" 168 | ) # this should not be error but its easier to debug 169 | 170 | # add received message to the history 171 | msg = ChatCompletionRequestMessage( 172 | role="assistant", 173 | content=response_content, 174 | tool_calls=[ 175 | 
ChatCompletionMessageToolCall( 176 | id=tool_call_id, 177 | type="function", 178 | function=Function1(name=tool_call_name, arguments=tool_call_json), 179 | ) 180 | ], 181 | ) # type: ignore 182 | request.messages.append(msg) 183 | 184 | #### MOST OF THIS IS COPY PASTED FROM CHAT_COMPLETIONS 185 | # FIXME: this can probably be done in parallel using asyncio gather 186 | tool_call_result = await call_tool(tool_call_name, tool_call_json) 187 | if tool_call_result is None: 188 | continue 189 | 190 | logger.debug( 191 | f"tool call result for {tool_call_name}: {tool_call_result.model_dump()}" 192 | ) 193 | 194 | logger.debug(f"tool call result content: {tool_call_result.content}") 195 | 196 | tools_content = [ 197 | {"type": "text", "text": part.text} 198 | for part in filter(lambda x: x.type == "text", tool_call_result.content) 199 | ] 200 | if len(tools_content) == 0: 201 | tools_content = [{"type": "text", "text": "the tool call result is empty"}] 202 | request.messages.append( 203 | ChatCompletionRequestMessage.model_validate( 204 | { 205 | "role": "tool", 206 | "content": tools_content, 207 | "tool_call_id": tool_call_id, 208 | } 209 | ) 210 | ) 211 | 212 | logger.debug("sending next iteration of chat completion request") 213 | 214 | # when done, send the final event 215 | logger.debug("sending final event") 216 | yield ServerSentEvent(event="message", data="[DONE]", id=None, retry=None) 217 | -------------------------------------------------------------------------------- /mcp_bridge/openai_clients/streamCompletion.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/SecretiveShell/MCP-Bridge/a0a9fc02f8af3a539c88ccd48c26d1241e9684dc/mcp_bridge/openai_clients/streamCompletion.py -------------------------------------------------------------------------------- /mcp_bridge/openai_clients/utils.py: -------------------------------------------------------------------------------- 1 | from typing import 
Optional 2 | from loguru import logger 3 | from lmos_openai_types import CreateChatCompletionRequest 4 | import mcp.types 5 | import json 6 | 7 | from mcp_bridge.mcp_clients.McpClientManager import ClientManager 8 | from mcp_bridge.tool_mappers import mcp2openai 9 | 10 | 11 | async def chat_completion_add_tools(request: CreateChatCompletionRequest): 12 | request.tools = [] 13 | 14 | for _, session in ClientManager.get_clients(): 15 | # if session is None, then the client is not running 16 | if session.session is None: 17 | logger.error(f"session is `None` for {session.name}") 18 | continue 19 | 20 | tools = await session.session.list_tools() 21 | for tool in tools.tools: 22 | request.tools.append(mcp2openai(tool)) 23 | 24 | return request 25 | 26 | 27 | async def call_tool( 28 | tool_call_name: str, tool_call_json: str, timeout: Optional[int] = None 29 | ) -> Optional[mcp.types.CallToolResult]: 30 | if tool_call_name == "" or tool_call_name is None: 31 | logger.error("tool call name is empty") 32 | return None 33 | 34 | if tool_call_json is None: 35 | logger.error("tool call json is empty") 36 | return None 37 | 38 | session = await ClientManager.get_client_from_tool(tool_call_name) 39 | 40 | if session is None: 41 | logger.error(f"session is `None` for {tool_call_name}") 42 | return None 43 | 44 | try: 45 | tool_call_args = json.loads(tool_call_json) 46 | except json.JSONDecodeError: 47 | logger.error(f"failed to decode json for {tool_call_name}") 48 | return None 49 | 50 | return await session.call_tool(tool_call_name, tool_call_args, timeout) 51 | -------------------------------------------------------------------------------- /mcp_bridge/openapi_tags.py: -------------------------------------------------------------------------------- 1 | from enum import Enum 2 | 3 | 4 | class Tag(str, Enum): 5 | """Tag for OpenAPI""" 6 | 7 | mcp_management = "MCP Management API" 8 | mcp_server = "MCP Server APIs" 9 | openai = "OpenAI API Compatible APIs" 10 | health = "System 
Health API" 11 | 12 | 13 | tags_metadata = [ 14 | { 15 | "name": Tag.openai, 16 | "description": "OpenAI compatible endpoints for use with openai clients", 17 | }, 18 | { 19 | "name": Tag.mcp_management, 20 | "description": "Interact with and manage the MCP servers", 21 | }, 22 | { 23 | "name": Tag.mcp_server, 24 | "description": "Clients can use MCP-Bridge as a MCP server", 25 | }, 26 | { 27 | "name": Tag.health, 28 | "description": "System health endpoints", 29 | }, 30 | ] 31 | -------------------------------------------------------------------------------- /mcp_bridge/py.typed: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/SecretiveShell/MCP-Bridge/a0a9fc02f8af3a539c88ccd48c26d1241e9684dc/mcp_bridge/py.typed -------------------------------------------------------------------------------- /mcp_bridge/routers.py: -------------------------------------------------------------------------------- 1 | from fastapi import APIRouter, Depends 2 | from mcp_bridge.auth import get_api_key 3 | 4 | from mcp_bridge.endpoints import router as endpointRouter 5 | from mcp_bridge.mcpManagement import router as mcpRouter 6 | from mcp_bridge.health import router as healthRouter 7 | from mcp_bridge.mcp_server import router as mcp_server_router 8 | 9 | secure_router = APIRouter(dependencies=[Depends(get_api_key)]) 10 | 11 | secure_router.include_router(endpointRouter) 12 | secure_router.include_router(mcpRouter) 13 | secure_router.include_router(mcp_server_router) 14 | 15 | public_router = APIRouter() 16 | 17 | public_router.include_router(healthRouter) 18 | -------------------------------------------------------------------------------- /mcp_bridge/sampling/modelSelector.py: -------------------------------------------------------------------------------- 1 | import math 2 | 3 | from mcp.types import ModelPreferences 4 | 5 | from mcp_bridge.config import config 6 | 7 | def euclidean_distance(point1, point2): 8 | """ 9 
| Calculates the Euclidean distance between two points, ignoring None values. 10 | """ 11 | valid_dimensions = [(p1, p2) for p1, p2 in zip(point1, point2) if p1 is not None and p2 is not None] 12 | 13 | if not valid_dimensions: # No valid dimensions to compare 14 | return float('inf') 15 | 16 | return math.sqrt(sum((p1 - p2) ** 2 for p1, p2 in valid_dimensions)) 17 | 18 | def find_best_model(preferences: ModelPreferences): 19 | distance = math.inf 20 | preffered_model = None 21 | preference_points = (preferences.intelligencePriority, preferences.speedPriority, preferences.costPriority) 22 | 23 | if preference_points == (None, None, None): 24 | return config.sampling.models[0] 25 | 26 | for model in config.sampling.models: 27 | model_points = (model.intelligence, model.speed, model.cost) 28 | model_distance = euclidean_distance(model_points, preference_points) 29 | if model_distance < distance: 30 | distance = model_distance 31 | preffered_model = model 32 | 33 | if preffered_model is None: 34 | preffered_model = config.sampling.models[0] 35 | 36 | return preffered_model -------------------------------------------------------------------------------- /mcp_bridge/sampling/sampler.py: -------------------------------------------------------------------------------- 1 | from loguru import logger 2 | from mcp import SamplingMessage 3 | import mcp.types as types 4 | from lmos_openai_types import CreateChatCompletionResponse 5 | from mcp.types import CreateMessageRequestParams, CreateMessageResult 6 | 7 | from mcp_bridge.config import config 8 | from mcp_bridge.openai_clients.genericHttpxClient import get_client 9 | from mcp_bridge.sampling.modelSelector import find_best_model 10 | 11 | def make_message(x: SamplingMessage): 12 | if x.content.type == "text": 13 | return { 14 | "role": x.role, 15 | "content": [{ 16 | "type": "text", 17 | "text": x.content.text, 18 | }] 19 | } 20 | if x.content.type == "image": 21 | return { 22 | "role": x.role, 23 | "content": [{ 24 | 
"type": "image", 25 | "image_url": x.content.data, 26 | }] 27 | } 28 | 29 | async def handle_sampling_message( 30 | message: CreateMessageRequestParams, 31 | ) -> CreateMessageResult: 32 | """perform sampling""" 33 | 34 | logger.debug(f"sampling message: {message.modelPreferences}") 35 | 36 | # select model 37 | model = config.sampling.models[0] 38 | if message.modelPreferences is not None: 39 | model = find_best_model(message.modelPreferences) 40 | 41 | logger.debug(f"selected model: {model.model}") 42 | 43 | logger.debug("sending sampling request to endpoint") 44 | # request = CreateChatCompletionRequest(model=model.model, messages=message.messages, stream=False) # type: ignore 45 | request = { 46 | "model": model.model, 47 | "messages": [make_message(x) for x in message.messages], 48 | "stream": False, 49 | } 50 | 51 | logger.debug(f"request: {request}") 52 | 53 | logger.debug(request) 54 | 55 | async with get_client() as client: 56 | resp = await client.post( 57 | "/chat/completions", 58 | json=request, 59 | timeout=config.sampling.timeout, 60 | ) 61 | 62 | logger.debug("parsing json") 63 | text = resp.text 64 | logger.debug(text) 65 | 66 | response = CreateChatCompletionResponse.model_validate_json(text) 67 | 68 | logger.debug("sampling request received from endpoint") 69 | 70 | assert response.choices is not None 71 | assert len(response.choices) > 0 72 | assert response.choices[0].message is not None 73 | assert response.choices[0].message.content is not None 74 | 75 | return types.CreateMessageResult( 76 | role="assistant", 77 | content=types.TextContent( 78 | type="text", 79 | text=response.choices[0].message.content, 80 | ), 81 | model=model.model, 82 | stopReason=response.choices[0].finish_reason, 83 | ) 84 | -------------------------------------------------------------------------------- /mcp_bridge/telemetry.py: -------------------------------------------------------------------------------- 1 | from opentelemetry import trace 2 | from 
opentelemetry.sdk.resources import Resource 3 | from opentelemetry.sdk.trace import TracerProvider 4 | from opentelemetry.sdk.trace.export import BatchSpanProcessor 5 | from opentelemetry.exporter.otlp.proto.http.trace_exporter import OTLPSpanExporter 6 | 7 | from opentelemetry.instrumentation.fastapi import FastAPIInstrumentor 8 | from opentelemetry.instrumentation.httpx import HTTPXClientInstrumentor 9 | 10 | from mcp_bridge.config import config 11 | 12 | def setup_tracing(app) -> None: 13 | resource = Resource(attributes={"service.name": config.telemetry.service_name}) 14 | 15 | provider = TracerProvider(resource=resource) 16 | trace.set_tracer_provider(provider) 17 | 18 | otlp_exporter = OTLPSpanExporter(endpoint=config.telemetry.otel_endpoint) 19 | span_processor = BatchSpanProcessor(otlp_exporter) 20 | 21 | if config.telemetry.enabled: 22 | # if not enabled do not add the span processor 23 | provider.add_span_processor(span_processor) 24 | 25 | FastAPIInstrumentor().instrument_app(app) 26 | HTTPXClientInstrumentor().instrument() -------------------------------------------------------------------------------- /mcp_bridge/tool_mappers/__init__.py: -------------------------------------------------------------------------------- 1 | from .mcp2openaiConverters import mcp2openai 2 | 3 | __all__ = ["mcp2openai"] 4 | -------------------------------------------------------------------------------- /mcp_bridge/tool_mappers/mcp2openaiConverters.py: -------------------------------------------------------------------------------- 1 | from mcp import Tool 2 | from lmos_openai_types import ChatCompletionTool 3 | 4 | 5 | def mcp2openai(mcp_tool: Tool) -> ChatCompletionTool: 6 | """Convert a MCP Tool to an OpenAI ChatCompletionTool.""" 7 | 8 | return ChatCompletionTool( 9 | type="function", 10 | function={ 11 | "name": mcp_tool.name, 12 | "description": mcp_tool.description, 13 | "parameters": mcp_tool.inputSchema, 14 | "strict": False, 15 | }, 16 | ) 17 | 
-------------------------------------------------------------------------------- /mcp_bridge/tool_mappers/openai2mcpConverters.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/SecretiveShell/MCP-Bridge/a0a9fc02f8af3a539c88ccd48c26d1241e9684dc/mcp_bridge/tool_mappers/openai2mcpConverters.py -------------------------------------------------------------------------------- /mypy.ini: -------------------------------------------------------------------------------- 1 | [mypy] 2 | plugins = pydantic.mypy -------------------------------------------------------------------------------- /pyproject.toml: -------------------------------------------------------------------------------- 1 | [project] 2 | name = "mcp-bridge" 3 | dynamic = ["version"] 4 | description = "A middleware to provide an openAI compatible endpoint that can call MCP tools." 5 | readme = "README.md" 6 | requires-python = ">=3.11" 7 | dependencies = [ 8 | "deepmerge>=2.0", 9 | "fastapi>=0.115.6", 10 | "httpx>=0.28.1", 11 | "httpx-sse>=0.4.0", 12 | "lmos-openai-types", 13 | "loguru>=0.7.3", 14 | "mcp>=1.2.0", 15 | "mcpx[docker]>=0.1.1", 16 | "opentelemetry-api>=1.33.1", 17 | "opentelemetry-exporter-otlp>=1.33.1", 18 | "opentelemetry-instrumentation-fastapi>=0.54b1", 19 | "opentelemetry-instrumentation-httpx>=0.54b1", 20 | "opentelemetry-sdk>=1.33.1", 21 | "pydantic>=2.10.4", 22 | "pydantic-settings>=2.7.0", 23 | "sse-starlette>=2.2.0", 24 | "tortoise-orm[asyncmy,asyncpg]>=0.23.0", 25 | "uvicorn>=0.34.0", 26 | ] 27 | 28 | [tool.uv.sources] 29 | lmos-openai-types = { git = "https://github.com/LMOS-IO/LMOS-openai-types", rev = "pydantic-gen" } 30 | 31 | [dependency-groups] 32 | dev = [ 33 | "mypy>=1.14.0", 34 | "ruff>=0.8.4", 35 | "uv>=0.5.20", 36 | ] 37 | [build-system] 38 | requires = [ "hatchling",] 39 | build-backend = "hatchling.build" 40 | 41 | [tool.hatch.version] 42 | path = "mcp_bridge/__init__.py" 43 | 44 | [project.scripts] 45 | 
mcp-bridge = "mcp_bridge.main:run" 46 | --------------------------------------------------------------------------------