├── src
└── deepmcpagent
│ ├── py.typed
│ ├── __main__.py
│ ├── __init__.py
│ ├── prompt.py
│ ├── clients.py
│ ├── config.py
│ ├── agent.py
│ ├── tools.py
│ ├── cli.py
│ └── cross_agent.py
├── docs
├── images
│ ├── icon.png
│ └── screenshot_output.png
├── api.md
├── faq.md
├── examples.md
├── troubleshooting.md
├── model-setup.md
├── index.md
├── server-specs.md
├── getting-started.md
├── installation.md
├── cli.md
├── changelog.md
└── cross-agent.md
├── tests
├── test_agent.py
├── test_cli_parse.py
├── test_tools_schema.py
└── test_config.py
├── examples
├── servers
│ └── math_server.py
├── use_agent.py
└── use_cross_agent.py
├── mkdocs.yml
├── .github
└── workflows
│ ├── docs.yml
│ └── publish.yml
├── .gitignore
├── CODE_OF_CONDUCT.md
├── pyproject.toml
├── CHANGELOG.md
├── CONTRIBUTING.md
├── LICENSE
└── README.md
/src/deepmcpagent/py.typed:
--------------------------------------------------------------------------------
1 |
--------------------------------------------------------------------------------
/docs/images/icon.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/cryxnet/DeepMCPAgent/HEAD/docs/images/icon.png
--------------------------------------------------------------------------------
/docs/images/screenshot_output.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/cryxnet/DeepMCPAgent/HEAD/docs/images/screenshot_output.png
--------------------------------------------------------------------------------
/docs/api.md:
--------------------------------------------------------------------------------
1 | # API Reference
2 |
3 | This page documents the public Python API exposed by `deepmcpagent`.
4 |
5 | ::: deepmcpagent
6 |
--------------------------------------------------------------------------------
/src/deepmcpagent/__main__.py:
--------------------------------------------------------------------------------
1 | """Support executing the CLI by doing `python -m deepmcpagent`."""
2 |
3 | from .cli import app
4 |
5 | app(prog_name="deepmcpagent")
6 |
--------------------------------------------------------------------------------
/tests/test_agent.py:
--------------------------------------------------------------------------------
1 | import pytest # noqa: I001
2 |
3 |
4 | pytest.skip(
5 | "Integration test requires a live MCP server and model credentials.",
6 | allow_module_level=True,
7 | )
8 |
--------------------------------------------------------------------------------
/examples/servers/math_server.py:
--------------------------------------------------------------------------------
1 | from __future__ import annotations
2 |
3 | from fastmcp import FastMCP
4 |
5 | mcp = FastMCP("Math")
6 |
7 |
8 | @mcp.tool()
9 | def add(a: int, b: int) -> int:
10 | """Add two integers."""
11 | return a + b
12 |
13 |
14 | @mcp.tool()
15 | def multiply(a: int, b: int) -> int:
16 | """Multiply two integers."""
17 | return a * b
18 |
19 |
20 | if __name__ == "__main__":
21 | # Serve over HTTP at /mcp
22 | mcp.run(transport="http", host="127.0.0.1", port=8000, path="/mcp")
23 |
--------------------------------------------------------------------------------
/src/deepmcpagent/__init__.py:
--------------------------------------------------------------------------------
1 | """Public API for deepmcpagent."""
2 |
3 | from .agent import build_deep_agent
4 | from .clients import FastMCPMulti
5 | from .config import (
6 | HTTPServerSpec,
7 | ServerSpec,
8 | StdioServerSpec,
9 | servers_to_mcp_config,
10 | )
11 | from .tools import MCPToolLoader, ToolInfo
12 |
13 | __all__ = [
14 | "HTTPServerSpec",
15 | "ServerSpec",
16 | "StdioServerSpec",
17 | "servers_to_mcp_config",
18 | "FastMCPMulti",
19 | "MCPToolLoader",
20 | "ToolInfo",
21 | "build_deep_agent",
22 | ]
23 |
--------------------------------------------------------------------------------
/src/deepmcpagent/prompt.py:
--------------------------------------------------------------------------------
1 | """System prompt definition for deepmcpagent.
2 |
3 | Edit this file to change the default system behavior of the agent
4 | without modifying code in the builder.
5 | """
6 |
7 | DEFAULT_SYSTEM_PROMPT: str = (
8 | "You are a capable deep agent. Use available tools from connected MCP servers "
9 | "to plan and execute tasks. Always inspect tool descriptions and input schemas "
10 | "before calling them. Be precise and avoid hallucinating tool arguments. "
11 | "Prefer calling tools rather than guessing, and cite results from tools clearly."
12 | )
13 |
--------------------------------------------------------------------------------
/mkdocs.yml:
--------------------------------------------------------------------------------
1 | site_name: DeepMCPAgent
2 | theme:
3 | name: material
4 | features:
5 | - navigation.instant
6 | - navigation.tracking
7 | - content.code.copy
8 |
9 | plugins:
10 | - search
11 | - mkdocstrings:
12 | handlers:
13 | python:
14 | options:
15 | docstring_style: google
16 | show_source: true
17 |
18 | nav:
19 | - Home: index.md
20 | - Installation: installation.md
21 | - Getting Started: getting-started.md
22 | - Server Specs & Auth: server-specs.md
23 | - Model Setup: model-setup.md
24 | - CLI: cli.md
25 | - Examples: examples.md
26 | - API: api.md
27 | - Troubleshooting: troubleshooting.md
28 | - FAQ: faq.md
29 |   - Cross-Agent: cross-agent.md
30 |   - Changelog: changelog.md
31 |
--------------------------------------------------------------------------------
/docs/faq.md:
--------------------------------------------------------------------------------
1 | # FAQ
2 |
3 | ## Why MCP-only tools?
4 | MCP provides a clean, standard way to expose tools. Agents should discover tools at runtime — not hardcode them.
5 |
6 | ## Can I connect to multiple servers?
7 | Yes. Pass a dict of names → `HTTPServerSpec(...)`. All tools get merged for the agent.
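
For example, a minimal sketch (server names and URLs are illustrative):

```python
from deepmcpagent import HTTPServerSpec, build_deep_agent

servers = {
    "math": HTTPServerSpec(url="http://127.0.0.1:8000/mcp", transport="http"),
    "search": HTTPServerSpec(url="https://search.example.com/mcp", transport="sse"),
}
# Tools from every server are merged into one toolset for the agent.
graph, loader = await build_deep_agent(servers=servers, model="openai:gpt-4.1")  # inside an async function
```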
8 |
9 | ## How do I authenticate to external MCP APIs?
10 | Use the `headers` field in `HTTPServerSpec` (e.g., `Authorization: Bearer ...`).
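
For instance (the token and org header are placeholders):

```python
from deepmcpagent import HTTPServerSpec

srv = HTTPServerSpec(
    url="https://api.example.com/mcp",
    transport="http",
    headers={"Authorization": "Bearer <token>", "X-Org": "acme"},  # placeholder values
)
```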
11 |
12 | ## Do I need OpenAI?
13 | No. You must pass a model, but it can be any LangChain chat model (Anthropic, Ollama, Groq, local LLMs, …).
14 |
15 | ## Can I run stdio servers?
16 | FastMCP’s Python client is oriented around HTTP/SSE. If you have a stdio server, put an HTTP shim in front or use another adapter.
17 |
--------------------------------------------------------------------------------
/docs/examples.md:
--------------------------------------------------------------------------------
1 | # Examples
2 |
3 | ## Sample MCP server (HTTP)
4 | `examples/servers/math_server.py`:
5 | ```python
6 | from fastmcp import FastMCP
7 |
8 | mcp = FastMCP("Math")
9 |
10 | @mcp.tool()
11 | def add(a: int, b: int) -> int:
12 | return a + b
13 |
14 | @mcp.tool()
15 | def multiply(a: int, b: int) -> int:
16 | return a * b
17 |
18 | if __name__ == "__main__":
19 | mcp.run(transport="http", host="127.0.0.1", port=8000, path="/mcp")
20 | ```
21 | Run:
22 | ```bash
23 | python examples/servers/math_server.py
24 | ```
25 |
26 | ## Fancy console trace
27 | `examples/use_agent.py` prints:
28 | - Discovered tools (table)
29 | - Each tool call (name + args)
30 | - Each tool result
31 | - Final LLM answer (panel)
32 |
33 | Run:
34 | ```bash
35 | python examples/use_agent.py
36 | ```
37 |
--------------------------------------------------------------------------------
/docs/troubleshooting.md:
--------------------------------------------------------------------------------
1 | # Troubleshooting
2 |
3 | ## zsh: no matches found: .[dev,deep]
4 | Quote extras:
5 | ```bash
6 | pip install -e ".[dev,deep]"
7 | ```
8 |
9 | ## PEP 668: externally managed environment (macOS)
10 | Create/activate a venv:
11 | ```bash
12 | python3 -m venv .venv && source .venv/bin/activate
13 | ```
14 |
15 | ## 404 when connecting (Client failed to connect: Session terminated)
16 | Ensure your server exposes a **path** (e.g., `/mcp`) and your client uses it:
17 | ```python
18 | HTTPServerSpec(url="http://127.0.0.1:8000/mcp", transport="http")
19 | ```
20 |
21 | ## AttributeError: `_FastMCPTool` has no attribute `_client`
22 | Use the version where `_client` and `_tool_name` are `PrivateAttr` and set in `__init__`. Update your package.
23 |
24 | ## High token usage
25 | Tool-calling models add overhead. Use smaller models while developing.
26 |
27 | ## Deprecation warnings (LangGraph)
28 | `config_schema` warnings are safe. We’ll migrate to `context_schema` when stable.
29 |
--------------------------------------------------------------------------------
/tests/test_cli_parse.py:
--------------------------------------------------------------------------------
1 | from __future__ import annotations
2 |
3 | from deepmcpagent.cli import _merge_servers, _parse_kv
4 |
5 |
6 | def test_parse_kv_simple() -> None:
7 | assert _parse_kv(["a=1", "b = two"]) == {"a": "1", "b": "two"}
8 |
9 |
10 | def test_merge_servers_http_and_stdio() -> None:
11 | stdios = [
12 | "name=echo command=python args='-m mypkg.server --port 3333' env.API_KEY=xyz keep_alive=false",
13 | ]
14 | https = [
15 | "name=remote url=https://example.com/mcp transport=http header.Authorization='Bearer abc'",
16 | ]
17 | servers = _merge_servers(stdios, https)
18 | assert set(servers.keys()) == {"echo", "remote"}
19 | # HTTP server spec
20 | http = servers["remote"]
21 | assert http.url == "https://example.com/mcp"
22 | assert http.headers["Authorization"] == "Bearer abc"
23 | # stdio server spec
24 | stdio = servers["echo"]
25 | assert stdio.command == "python"
26 | assert stdio.args[0] == "-m"
27 | assert stdio.keep_alive is False
28 |
--------------------------------------------------------------------------------
/docs/model-setup.md:
--------------------------------------------------------------------------------
1 | # Model Setup (BYOM)
2 |
3 | DeepMCPAgent **requires** a model — there is no fallback.
4 |
5 | You may pass:
6 | - a **LangChain chat model instance**, or
7 | - a **provider id string** (forwarded to `langchain.chat_models.init_chat_model()`)
8 |
9 | ## Passing a model instance
10 | ```python
11 | from langchain_openai import ChatOpenAI
12 | model = ChatOpenAI(model="gpt-4.1")
13 | graph, loader = await build_deep_agent(servers=servers, model=model)
14 | ```
15 |
16 | ## Passing a provider id string
17 | ```python
18 | graph, loader = await build_deep_agent(
19 | servers=servers,
20 | model="openai:gpt-4.1" # handled by LangChain init_chat_model
21 | )
22 | ```
23 |
24 | ## Environment variables
25 | Use provider-specific env vars (e.g., `OPENAI_API_KEY`, `ANTHROPIC_API_KEY`).
26 | You can load a local `.env` via `python-dotenv`:
27 |
28 | ```python
29 | from dotenv import load_dotenv
30 | load_dotenv()
31 | ```
32 |
33 | ## Tips
34 | - Prefer instances for fine-grained control (temperature, timeouts); see the sketch below.
35 | - Use smaller models for dev / testing to save latency & cost.
36 |
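A minimal sketch of a tuned instance (the parameter values are illustrative, not recommendations):

```python
from langchain_openai import ChatOpenAI

model = ChatOpenAI(
    model="gpt-4.1",
    temperature=0,   # deterministic tool calls
    timeout=60,      # seconds before a request is abandoned
    max_retries=2,   # retry transient failures
)
```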
--------------------------------------------------------------------------------
/src/deepmcpagent/clients.py:
--------------------------------------------------------------------------------
1 | """FastMCP client wrapper that supports multiple servers via a single configuration."""
2 |
3 | from __future__ import annotations
4 |
5 | from collections.abc import Mapping
6 | from typing import Any
7 |
8 | from fastmcp import Client as FastMCPClient
9 |
10 | from .config import ServerSpec, servers_to_mcp_config
11 |
12 |
13 | class FastMCPMulti:
14 | """Create a single FastMCP client wired to multiple servers.
15 |
16 | The client is configured using the `mcpServers` dictionary generated from
17 | the typed server specifications.
18 |
19 | Args:
20 | servers: Mapping of server name to server spec.
21 | """
22 |
23 | def __init__(self, servers: Mapping[str, ServerSpec]) -> None:
24 | mcp_cfg = {"mcpServers": servers_to_mcp_config(servers)}
25 |         self._client: Any = FastMCPClient(mcp_cfg)  # typed as Any: fastmcp's Client generic is left unparameterized
26 |
27 | @property
28 |     def client(self) -> Any:  # Any avoids exposing an unparameterized generic in the public API
29 | """Return the underlying FastMCP client instance."""
30 | return self._client
31 |
--------------------------------------------------------------------------------
/docs/index.md:
--------------------------------------------------------------------------------
1 | ---
2 | title: DeepMCPAgent
3 | ---
4 |
5 |
6 | ![DeepMCPAgent icon](images/icon.png)
7 |
8 | # DeepMCPAgent
9 |
10 | Model-agnostic LangChain/LangGraph agents powered entirely by MCP tools over HTTP/SSE.
11 | ---
12 |
13 | ## Why DeepMCPAgent?
14 |
15 | - 🔌 **Zero manual wiring** — discover tools dynamically from MCP servers
16 | - 🌐 **External APIs welcome** — HTTP / SSE servers with headers & auth
17 | - 🧠 **Bring your own model** — any LangChain chat model (OpenAI, Anthropic, Ollama, Groq, local, …)
18 | - ⚡ **DeepAgents loop (optional)** — falls back to **LangGraph ReAct** if `deepagents` isn't installed
19 | - 🛠️ **Typed tools** — JSON Schema → Pydantic → LangChain `BaseTool`
20 | - 🧪 **Quality** — mypy (strict), ruff, pytest, GitHub Actions
21 |
22 | ---
23 |
24 | ## TL;DR (Quickstart)
25 |
26 | ```bash
27 | python3 -m venv .venv && source .venv/bin/activate
28 | pip install "deepmcpagent[deep]"
29 | python examples/servers/math_server.py   # terminal 1: serves http://127.0.0.1:8000/mcp
30 | python examples/use_agent.py              # terminal 2: run the demo agent
31 | ```
32 |
--------------------------------------------------------------------------------
/.github/workflows/docs.yml:
--------------------------------------------------------------------------------
1 | name: Docs
2 |
3 | on:
4 | push:
5 | branches: [main]
6 |
7 | permissions:
8 | contents: read
9 | pages: write
10 | id-token: write
11 |
12 | jobs:
13 | build:
14 | runs-on: ubuntu-latest
15 | steps:
16 | - uses: actions/checkout@v4
17 |
18 | - name: Set up Python
19 | uses: actions/setup-python@v5
20 | with:
21 | python-version: "3.11"
22 |
23 | - name: Install dependencies
24 | run: |
25 | pip install -e ".[docs]"
26 |
27 | - name: Build docs
28 | run: mkdocs build --strict
29 |
30 | - name: Upload artifact
31 | uses: actions/upload-pages-artifact@v3
32 | with:
33 | path: site
34 |
35 | deploy:
36 | needs: build
37 | runs-on: ubuntu-latest
38 | permissions:
39 | pages: write
40 | id-token: write
41 | environment:
42 | name: github-pages
43 | url: ${{ steps.deployment.outputs.page_url }}
44 | steps:
45 | - name: Deploy to GitHub Pages
46 | id: deployment
47 | uses: actions/deploy-pages@v4
48 |
--------------------------------------------------------------------------------
/.gitignore:
--------------------------------------------------------------------------------
1 | # Byte-compiled / optimized / DLL files
2 | __pycache__/
3 | *.py[cod]
4 | *$py.class
5 |
6 | # C extensions
7 | *.so
8 |
9 | # Distribution / packaging
10 | .Python
11 | build/
12 | develop-eggs/
13 | dist/
14 | downloads/
15 | eggs/
16 | .eggs/
17 | lib/
18 | lib64/
19 | parts/
20 | sdist/
21 | var/
22 | wheels/
23 | *.egg-info/
24 | .installed.cfg
25 | *.egg
26 | MANIFEST
27 |
28 | # Virtual environments
29 | .env
30 | .venv
31 | env/
32 | venv/
33 | ENV/
34 | env.bak/
35 | venv.bak/
36 |
37 | # PyInstaller
38 | *.manifest
39 | *.spec
40 |
41 | # Unit test / coverage reports
42 | .coverage
43 | .coverage.*
44 | .cache
45 | nosetests.xml
46 | coverage.xml
47 | *.cover
48 | *.py,cover
49 | .hypothesis/
50 | .pytest_cache/
51 |
52 | # Typing / linting
53 | .mypy_cache/
54 | .dmypy.json
55 | dmypy.json
56 | .pyre/
57 | .ruff_cache/
58 |
59 | # Jupyter Notebook
60 | .ipynb_checkpoints
61 |
62 | # IDEs and editors
63 | .vscode/
64 | .idea/
65 | *.sublime-project
66 | *.sublime-workspace
67 |
68 | # OS files
69 | .DS_Store
70 | Thumbs.db
71 |
72 | # MkDocs build
73 | /site
74 |
75 | # Logs
76 | *.log
77 | logs/
78 |
79 | # Local configs
80 | *.env.local
81 |
--------------------------------------------------------------------------------
/tests/test_tools_schema.py:
--------------------------------------------------------------------------------
1 | from __future__ import annotations
2 |
3 | import pytest
4 |
5 | pytest.importorskip("fastmcp")
6 |
7 | from deepmcpagent.tools import _jsonschema_to_pydantic # type: ignore
8 |
9 |
10 | def test_jsonschema_to_pydantic_basic_types() -> None:
11 | schema = {
12 | "type": "object",
13 | "properties": {
14 | "s": {"type": "string", "description": "a"},
15 | "i": {"type": "integer"},
16 | "n": {"type": "number"},
17 | "b": {"type": "boolean"},
18 | },
19 | "required": ["s", "i"],
20 | }
21 | model = _jsonschema_to_pydantic(schema, model_name="Args_test")
22 | # required fields have Ellipsis default (pydantic Required)
23 | fields = model.model_fields
24 | assert fields["s"].is_required()
25 | assert fields["i"].is_required()
26 | assert fields["n"].is_required() is False
27 | assert fields["b"].is_required() is False
28 |
29 |
30 | def test_jsonschema_to_pydantic_empty_schema() -> None:
31 | model = _jsonschema_to_pydantic({}, model_name="Args_empty")
32 | # Fallback field exists
33 | assert "payload" in model.model_fields
34 |
--------------------------------------------------------------------------------
/CODE_OF_CONDUCT.md:
--------------------------------------------------------------------------------
1 | # Code of Conduct
2 |
3 | We are committed to a respectful, inclusive, and harassment-free community.
4 |
5 | This Code applies to all spaces managed by the project (GitHub issues, PRs, discussions, docs, chats, events) and to anyone participating as a community member, maintainer, or contributor.
6 |
7 | ---
8 |
9 | ## Our Standards
10 |
11 | **Examples of behavior that contributes to a positive environment:**
12 |
13 | - Being respectful, considerate, and patient
14 | - Giving and gracefully accepting constructive feedback
15 | - Focusing on what is best for the community
16 | - Showing empathy toward other community members
17 | - Using inclusive language and assuming good intent
18 |
19 | **Examples of unacceptable behavior:**
20 |
21 | - Harassment, intimidation, or discrimination of any kind
22 | - Personal attacks or insulting/derogatory comments
23 | - Trolling, excessive disruption, or off-topic content
24 | - Publishing others’ private information without consent
25 | - Unwelcome sexual attention or advances
26 | - Any other conduct that is inappropriate in a professional setting
27 |
28 | ---
29 |
30 | Thank you for helping us make this a welcoming and productive community. 💙
31 |
--------------------------------------------------------------------------------
/docs/server-specs.md:
--------------------------------------------------------------------------------
1 | # Server Specs & Auth
2 |
3 | DeepMCPAgent describes servers programmatically with typed specs.
4 |
5 | ## HTTPServerSpec (recommended)
6 | ```python
7 | from deepmcpagent import HTTPServerSpec
8 |
9 | srv = HTTPServerSpec(
10 | url="https://api.example.com/mcp", # include the path, e.g., /mcp
11 | transport="http", # "http", "streamable-http", or "sse"
12 | headers={"Authorization": "Bearer X"}, # optional
13 | auth=None # optional hint for FastMCP deployments
14 | )
15 | ```
16 |
17 | !!! note
18 | FastMCP’s Python client is designed for remote servers (HTTP/SSE).
19 | If you need a local stdio server, run it behind an HTTP shim or use a different adapter.
20 |
21 | ## Multiple servers
22 | ```python
23 | servers = {
24 | "math": HTTPServerSpec(url="http://127.0.0.1:8000/mcp", transport="http"),
25 | "search": HTTPServerSpec(url="https://search.example.com/mcp", transport="sse"),
26 | }
27 | ```
28 |
29 | ## Headers & authentication
30 | Attach custom headers (e.g., `Authorization`, `X-Org`) using `headers={...}`.
31 | If your deployment supports special `auth` keys, set `auth="..."`.
32 |
33 | ## Streamable HTTP vs SSE
34 | - `http`: regular HTTP requests
35 | - `streamable-http`: same endpoint, but optimized for streaming payloads
36 | - `sse`: Server-Sent Events (event-stream)
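
Under the hood, specs are converted into a FastMCP `mcpServers` config by `servers_to_mcp_config` (exported by the package). A quick sketch:

```python
from deepmcpagent import HTTPServerSpec, servers_to_mcp_config

cfg = servers_to_mcp_config(
    {"math": HTTPServerSpec(url="http://127.0.0.1:8000/mcp", transport="http")}
)
# cfg == {"math": {"transport": "http", "url": "http://127.0.0.1:8000/mcp"}}
# (empty headers and a None auth are omitted from the entry)
```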
37 |
--------------------------------------------------------------------------------
/tests/test_config.py:
--------------------------------------------------------------------------------
1 | from __future__ import annotations
2 |
3 | from deepmcpagent.config import HTTPServerSpec, StdioServerSpec, servers_to_mcp_config
4 |
5 |
6 | def test_servers_to_mcp_config_http_only() -> None:
7 | servers = {
8 | "math": HTTPServerSpec(
9 | url="http://127.0.0.1:8000/mcp",
10 | transport="http",
11 | headers={"Authorization": "Bearer X"},
12 | auth=None,
13 | )
14 | }
15 | cfg = servers_to_mcp_config(servers)
16 | assert "math" in cfg
17 | entry = cfg["math"]
18 | assert entry["transport"] == "http"
19 | assert entry["url"] == "http://127.0.0.1:8000/mcp"
20 | assert entry["headers"] == {"Authorization": "Bearer X"}
21 | assert "auth" not in entry # None should be omitted
22 |
23 |
24 | def test_servers_to_mcp_config_stdio() -> None:
25 | servers = {
26 | "local": StdioServerSpec(
27 | command="python",
28 | args=["-m", "cool.server"],
29 | env={"API_KEY": "abc"},
30 | cwd=None,
31 | keep_alive=False,
32 | )
33 | }
34 | cfg = servers_to_mcp_config(servers)
35 | entry = cfg["local"]
36 | assert entry["transport"] == "stdio"
37 | assert entry["command"] == "python"
38 | assert entry["args"] == ["-m", "cool.server"]
39 |     # Non-empty env passes through as-is; the config helper converts an empty env to None
40 | assert entry["env"] == {"API_KEY": "abc"}
41 | assert entry["cwd"] is None
42 | assert entry["keep_alive"] is False
43 |
--------------------------------------------------------------------------------
/docs/getting-started.md:
--------------------------------------------------------------------------------
1 | # Getting Started
2 |
3 | This guide takes you from zero to a working **MCP-only agent**.
4 |
5 | ## 1) Start a sample MCP server (HTTP)
6 |
7 | ```bash
8 | python examples/servers/math_server.py
9 | ```
10 | This exposes an MCP endpoint at **http://127.0.0.1:8000/mcp**.
11 |
12 | ## 2) Bring your own model (BYOM)
13 |
14 | DeepMCPAgent requires a model — either a **LangChain model instance** or a **provider id string** that `init_chat_model()` understands.
15 |
16 | ### Example (OpenAI via LangChain)
17 | ```python
18 | from langchain_openai import ChatOpenAI
19 | model = ChatOpenAI(model="gpt-4.1")
20 | ```
21 |
22 | ### Example (Anthropic via LangChain)
23 | ```python
24 | from langchain_anthropic import ChatAnthropic
25 | model = ChatAnthropic(model="claude-3-5-sonnet-latest")
26 | ```
27 |
28 | ### Example (Ollama local)
29 | ```python
30 | from langchain_community.chat_models import ChatOllama
31 | model = ChatOllama(model="llama3.1")
32 | ```
33 |
34 | ## 3) Build the agent
35 |
36 | ```python
37 | import asyncio
38 | from deepmcpagent import HTTPServerSpec, build_deep_agent
39 |
40 | async def main():
41 | servers = {
42 | "math": HTTPServerSpec(
43 | url="http://127.0.0.1:8000/mcp",
44 | transport="http", # or "sse"
45 | ),
46 | }
47 |
48 | graph, loader = await build_deep_agent(
49 | servers=servers,
50 |         model=model,  # your model from step 2 (required)
51 | )
52 |
53 | result = await graph.ainvoke({"messages":[{"role":"user","content":"add 21 and 21 using tools"}]})
54 | print(result)
55 |
56 | asyncio.run(main())
57 | ```
58 |
--------------------------------------------------------------------------------
/docs/installation.md:
--------------------------------------------------------------------------------
1 | # Installation
2 |
3 | ## Requirements
4 |
5 | - Python **3.10+**
6 | - A virtual environment (recommended)
7 |
8 | ## Install from PyPI (recommended)
9 |
10 | The easiest way is to install from [PyPI](https://pypi.org/project/deepmcpagent/):
11 |
12 | ```bash
13 | pip install "deepmcpagent[deep]"
14 | ```
15 |
16 | ✅ This gives you the **best experience** by including
17 | [`deepagents`](https://pypi.org/project/deepagents/) for the deep agent loop.
18 | If you skip `[deep]`, the agent will fall back to a standard LangGraph ReAct loop.
19 |
20 | ### Other optional extras
21 |
22 | - **Dev tooling** (ruff, mypy, pytest):
23 |
24 | ```bash
25 | pip install "deepmcpagent[dev]"
26 | ```
27 |
28 | - **Docs tooling** (MkDocs + Material + mkdocstrings):
29 |
30 | ```bash
31 | pip install "deepmcpagent[docs]"
32 | ```
33 |
34 | - **Examples** (dotenv + extra model integrations):
35 |
36 | ```bash
37 | pip install "deepmcpagent[examples]"
38 | ```
39 |
40 | !!! tip "zsh users"
41 | Quote extras: `pip install "deepmcpagent[deep,dev]"` (or escape brackets).
42 |
43 | ---
44 |
45 | ## Editable install (contributors)
46 |
47 | If you’re working on the project itself:
48 |
49 | ```bash
50 | git clone https://github.com/cryxnet/deepmcpagent.git
51 | cd deepmcpagent
52 | python3 -m venv .venv
53 | source .venv/bin/activate
54 | pip install -e ".[dev,deep,docs,examples]"
55 | ```
56 |
57 | ---
58 |
59 | ## macOS / Homebrew note (PEP 668)
60 |
61 | If you see **“externally managed environment”** errors, you’re installing into Homebrew’s Python.
62 | Always use a virtual environment as shown above.
63 |
--------------------------------------------------------------------------------
/.github/workflows/publish.yml:
--------------------------------------------------------------------------------
1 | name: CI & Publish
2 |
3 | on:
4 | push:
5 | branches: [main]
6 | tags: ["v*"] # tag like v0.3.1 to publish to PyPI
7 | workflow_dispatch:
8 |
9 | jobs:
10 | quality:
11 | runs-on: ubuntu-latest
12 | steps:
13 | - uses: actions/checkout@v4
14 | - uses: actions/setup-python@v5
15 | with:
16 | python-version: "3.10"
17 |
18 | - name: Install deps (dev)
19 | run: |
20 | python -m pip install --upgrade pip
21 | pip install -e ".[dev]"
22 |
23 | - name: Lint
24 | run: |
25 | ruff check .
26 | ruff format --check .
27 |
28 | - name: Type-check
29 | run: mypy src
30 |
31 | - name: Tests (skipped if none)
32 | run: |
33 | if [ -d tests ] || ls -1 **/*_test.py **/test_*.py >/dev/null 2>&1; then
34 | pytest -q
35 | else
36 | echo "No tests found, skipping pytest."
37 | fi
38 |
39 | build:
40 | needs: quality
41 | runs-on: ubuntu-latest
42 | steps:
43 | - uses: actions/checkout@v4
44 | - uses: actions/setup-python@v5
45 | with:
46 | python-version: "3.10"
47 | - name: Build sdist+wheel
48 | run: |
49 | python -m pip install --upgrade pip build
50 | python -m build
51 | - uses: actions/upload-artifact@v4
52 | with:
53 | name: dist
54 | path: dist/*
55 |
56 | publish-pypi:
57 | # Publish ONLY when a tag v* is pushed
58 | if: startsWith(github.ref, 'refs/tags/v')
59 | needs: build
60 | runs-on: ubuntu-latest
61 | permissions:
62 | id-token: write # REQUIRED for PyPI Trusted Publishing (no API token)
63 | contents: read
64 | environment: pypi
65 | steps:
66 | - uses: actions/download-artifact@v4
67 | with:
68 | name: dist
69 | path: dist
70 | - name: Publish to PyPI
71 | uses: pypa/gh-action-pypi-publish@release/v1
72 | with:
73 | packages-dir: dist
74 |
--------------------------------------------------------------------------------
/pyproject.toml:
--------------------------------------------------------------------------------
1 | [build-system]
2 | requires = ["setuptools>=68", "wheel", "setuptools-scm[toml]>=6.2"]
3 | build-backend = "setuptools.build_meta"
4 |
5 | [project]
6 | name = "deepmcpagent"
7 | dynamic = ["version"]
8 | description = "DeepMCPAgent: LangChain/LangGraph agents powered by MCP tools over FastMCP."
9 | readme = "README.md"
10 | requires-python = ">=3.10"
11 | license = { text = "Apache-2.0" }
12 | authors = [{ name = "cryxnet" }]
13 | keywords = ["mcp", "fastmcp", "langchain", "langgraph", "agents", "tools"]
14 | classifiers = [
15 | "Programming Language :: Python :: 3",
16 | "Programming Language :: Python :: 3 :: Only",
17 | "License :: OSI Approved :: Apache Software License",
18 | "Development Status :: 4 - Beta",
19 | "Typing :: Typed",
20 | "Topic :: Software Development :: Libraries",
21 | ]
22 |
23 | dependencies = [
24 | "fastmcp>=2.12.2",
25 | "langchain>=0.3.27",
26 | "pydantic>=2.8",
27 | "typer>=0.15.2",
28 | "rich>=14",
29 | "anyio>=4.5",
30 | "python-dotenv>=1.1",
31 | "langgraph>=0.6,<0.7",
32 | "langgraph-prebuilt>=0.6,<0.7",
33 | ]
34 | [project.optional-dependencies]
35 | deep = [
36 | "deepagents>=0.0.5,<1.0"
37 | ]
38 |
39 | docs = [
40 | "mkdocs>=1.6",
41 | "mkdocs-material>=9.5",
42 | "mkdocstrings[python]>=0.29",
43 | ]
44 |
45 |
46 | examples = [
47 | "langchain-openai>=0.3.30",
48 | ]
49 |
50 | dev = [
51 | "pytest>=8.4",
52 | "pytest-asyncio>=1.1",
53 | "mypy>=1.17",
54 | "ruff>=0.12",
55 | ]
56 |
57 | [project.scripts]
58 | deepmcpagent = "deepmcpagent.cli:app"
59 |
60 | [project.urls]
61 | Homepage = "https://github.com/cryxnet/deepmcpagent"
62 | Issues = "https://github.com/cryxnet/deepmcpagent/issues"
63 |
64 | [tool.setuptools.packages.find]
65 | where = ["src"]
66 | include = ["deepmcpagent*"]
67 |
68 | [tool.setuptools.package-dir]
69 | "" = "src"
70 |
71 | [tool.setuptools.package-data]
72 | deepmcpagent = ["py.typed"]
73 |
74 | [tool.mypy]
75 | python_version = "3.10"
76 | strict = true
77 | warn_unused_ignores = true
78 | warn_redundant_casts = true
79 | warn_unreachable = true
80 | disallow_untyped_defs = true
81 | disallow_incomplete_defs = true
82 | check_untyped_defs = true
83 | no_implicit_optional = true
84 | show_error_codes = true
85 |
86 | [tool.ruff]
87 | line-length = 100
88 | target-version = "py310"
89 |
90 | [tool.ruff.lint]
91 | select = ["E","F","I","UP","B","C4","SIM","ARG"]
92 | ignore = ["E501"]
93 |
94 | [tool.pytest.ini_options]
95 | asyncio_mode = "auto"
96 |
97 | [tool.setuptools_scm]
98 | version_scheme = "post-release"
99 | local_scheme = "no-local-version"
100 |
--------------------------------------------------------------------------------
/examples/use_agent.py:
--------------------------------------------------------------------------------
1 | """
2 | Example: Using DeepMCPAgent with a custom model over HTTP.
3 |
4 | Console output:
5 | - Discovered tools (from your MCP servers)
6 | - Each tool invocation + result (via deepmcpagent trace hooks)
7 | - Final LLM answer
8 | """
9 |
10 | import asyncio
11 | from typing import Any
12 |
13 | from dotenv import load_dotenv
14 | from langchain_openai import ChatOpenAI
15 | from rich.console import Console
16 | from rich.panel import Panel
17 | from rich.table import Table
18 |
19 | from deepmcpagent import HTTPServerSpec, build_deep_agent
20 |
21 |
22 | def _extract_final_answer(result: Any) -> str:
23 | """Best-effort extraction of the final text from different executors."""
24 | try:
25 | # LangGraph prebuilt typically returns {"messages": [...]}
26 | if isinstance(result, dict) and "messages" in result and result["messages"]:
27 | last = result["messages"][-1]
28 | content = getattr(last, "content", None)
29 | if isinstance(content, str) and content:
30 | return content
31 | if isinstance(content, list) and content and isinstance(content[0], dict):
32 | return content[0].get("text") or str(content)
33 | return str(last)
34 | return str(result)
35 | except Exception:
36 | return str(result)
37 |
38 |
39 | async def main() -> None:
40 | console = Console()
41 | load_dotenv()
42 |
43 | # Ensure your MCP server (e.g., math_server.py) is running in another terminal:
44 | #   python examples/servers/math_server.py
45 | servers = {
46 | "math": HTTPServerSpec(
47 | url="http://127.0.0.1:8000/mcp",
48 | transport="http",
49 | ),
50 | }
51 |
52 | # Any LangChain-compatible chat model (or init string) works here.
53 | model = ChatOpenAI(model="gpt-4.1")
54 |
55 | # Build the agent using your package. `trace_tools=True` prints tool calls/results.
56 | graph, loader = await build_deep_agent(
57 | servers=servers,
58 | model=model,
59 | instructions="You are a helpful agent. Use MCP math tools to solve problems.",
60 | trace_tools=True,
61 | )
62 |
63 | # Show discovered tools
64 | infos = await loader.list_tool_info()
65 | infos = list(infos) if infos else []
66 |
67 | table = Table(title="Discovered MCP Tools", show_lines=True)
68 | table.add_column("Name", style="cyan", no_wrap=True)
69 | table.add_column("Description", style="green")
70 | if infos:
71 | for t in infos:
72 | table.add_row(t.name, t.description or "-")
73 | else:
74 | table.add_row("— none —", "No tools discovered (is your MCP server running?)")
75 | console.print(table)
76 |
77 | # Run a single-turn query. Tool traces will be printed automatically.
78 | query = "What is (3 + 5) * 7 using math tools?"
79 | console.print(Panel.fit(query, title="User Query", style="bold magenta"))
80 |
81 | result = await graph.ainvoke({"messages": [{"role": "user", "content": query}]})
82 | final_text = _extract_final_answer(result)
83 |
84 | console.print(Panel(final_text or "(no content)", title="Final LLM Answer", style="bold green"))
85 |
86 |
87 | if __name__ == "__main__":
88 | asyncio.run(main())
89 |
--------------------------------------------------------------------------------
/src/deepmcpagent/config.py:
--------------------------------------------------------------------------------
1 | """ "Typed server specifications and conversion helpers for FastMCP configuration."""
2 |
3 | from __future__ import annotations
4 |
5 | from collections.abc import Mapping
6 | from typing import Literal
7 |
8 | from pydantic import BaseModel, ConfigDict, Field
9 |
10 |
11 | class _BaseServer(BaseModel):
12 | """Base model for server specs."""
13 |
14 | # Pydantic v2 style configuration (replaces class Config)
15 | model_config = ConfigDict(extra="forbid")
16 |
17 |
18 | class StdioServerSpec(_BaseServer):
19 | """Specification for a local MCP server launched via stdio.
20 |
21 | NOTE:
22 | The FastMCP Python client typically expects HTTP/SSE transports. Using
23 | `StdioServerSpec` requires a different adapter or an HTTP shim in front
24 | of the stdio server. Keep this for future expansion or custom runners.
25 |
26 | Attributes:
27 | command: Executable to launch (e.g., "python").
28 | args: Positional arguments for the process.
29 | env: Environment variables to set for the process.
30 | cwd: Optional working directory.
31 | keep_alive: Whether the client should try to keep a persistent session.
32 | """
33 |
34 | command: str
35 | args: list[str] = Field(default_factory=list)
36 | env: dict[str, str] = Field(default_factory=dict)
37 | cwd: str | None = None
38 | keep_alive: bool = True
39 |
40 |
41 | class HTTPServerSpec(_BaseServer):
42 | """Specification for a remote MCP server reachable via HTTP/SSE.
43 |
44 | Attributes:
45 | url: Full endpoint URL for the MCP server (e.g., http://127.0.0.1:8000/mcp).
46 | transport: The transport mechanism ("http", "streamable-http", or "sse").
47 | headers: Optional request headers (e.g., Authorization tokens).
48 | auth: Optional auth hint if your FastMCP deployment consumes it.
49 | """
50 |
51 | url: str
52 | transport: Literal["http", "streamable-http", "sse"] = "http"
53 | headers: dict[str, str] = Field(default_factory=dict)
54 | auth: str | None = None
55 |
56 |
57 | ServerSpec = StdioServerSpec | HTTPServerSpec
58 | """Union of supported server specifications."""
59 |
60 |
61 | def servers_to_mcp_config(servers: Mapping[str, ServerSpec]) -> dict[str, dict[str, object]]:
62 | """Convert programmatic server specs to the FastMCP configuration dict.
63 |
64 | Args:
65 | servers: Mapping of server name to specification.
66 |
67 | Returns:
68 | Dict suitable for initializing `fastmcp.Client({"mcpServers": ...})`.
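
    Example:
        >>> cfg = servers_to_mcp_config(
        ...     {"math": HTTPServerSpec(url="http://127.0.0.1:8000/mcp")}
        ... )
        >>> cfg["math"]["transport"]
        'http'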
69 | """
70 | cfg: dict[str, dict[str, object]] = {}
71 | for name, s in servers.items():
72 | if isinstance(s, StdioServerSpec):
73 | cfg[name] = {
74 | "transport": "stdio",
75 | "command": s.command,
76 | "args": s.args,
77 | "env": s.env or None,
78 | "cwd": s.cwd or None,
79 | "keep_alive": s.keep_alive,
80 | }
81 | else:
82 | entry: dict[str, object] = {
83 | "transport": s.transport,
84 | "url": s.url,
85 | }
86 | if s.headers:
87 | entry["headers"] = s.headers
88 | if s.auth is not None:
89 | entry["auth"] = s.auth
90 | cfg[name] = entry
91 | return cfg
92 |
--------------------------------------------------------------------------------
/docs/cli.md:
--------------------------------------------------------------------------------
1 | # CLI
2 |
3 | DeepMCPAgent includes a command-line interface (`deepmcpagent`) for exploring and running agents with MCP tools.
4 | This is useful for quick testing, debugging, or building automation pipelines.
5 |
6 | ---
7 |
8 | ## Usage
9 |
10 | ```bash
11 | deepmcpagent [OPTIONS] COMMAND [ARGS]...
12 | ```
13 |
14 | Run `--help` on any command for details.
15 |
16 | ---
17 |
18 | ## Global Options
19 |
20 | - `--version`
21 | Print the current version of DeepMCPAgent and exit.
22 |
23 | - `--help`
24 | Show the help message.
25 |
26 | ---
27 |
28 | ## Commands
29 |
30 | ### `list-tools`
31 |
32 | Discover available MCP tools from one or more servers.
33 |
34 | **Usage:**
35 |
36 | ```bash
37 | deepmcpagent list-tools --model-id MODEL --http "name=math url=http://127.0.0.1:8000/mcp transport=http"
38 | ```
39 |
40 | **Options:**
41 |
42 | - `--model-id <id>` (required)
43 | The model provider id, e.g. `"openai:gpt-4.1"`.
44 |
45 | - `--stdio <spec>` (repeatable)
46 | Start an MCP server over stdio.
47 | Example:
48 |
49 | ```bash
50 | --stdio "name=echo command=python args='-m mypkg.server --port 3333' env.API_KEY=xyz keep_alive=false"
51 | ```
52 |
53 | - `--http <spec>` (repeatable)
54 | Connect to an MCP server over HTTP/SSE.
55 | Example:
56 |
57 | ```bash
58 | --http "name=math url=http://127.0.0.1:8000/mcp transport=http"
59 | ```
60 |
61 | - `--instructions <text>`
62 | Optional override for the system prompt.
63 |
64 | **Example:**
65 |
66 | ```bash
67 | deepmcpagent list-tools --model-id "openai:gpt-4.1" \
68 | --http "name=math url=http://127.0.0.1:8000/mcp transport=http"
69 | ```
70 |
71 | ---
72 |
73 | ### `run`
74 |
75 | Start an interactive agent session that uses only MCP tools.
76 |
77 | **Usage:**
78 |
79 | ```bash
80 | deepmcpagent run --model-id MODEL --http "name=math url=http://127.0.0.1:8000/mcp transport=http"
81 | ```
82 |
83 | **Options:**
84 |
85 | - `--model-id <id>` (required)
86 | The model provider id, e.g. `"openai:gpt-4.1"`.
87 |
88 | - `--stdio <spec>` (repeatable)
89 | Start an MCP server over stdio.
90 |
91 | - `--http <spec>` (repeatable)
92 | Connect to an MCP server over HTTP/SSE.
93 |
94 | - `--instructions <text>`
95 | Optional override for the system prompt.
96 |
97 | **Example:**
98 |
99 | ```bash
100 | deepmcpagent run --model-id "openai:gpt-4.1" \
101 | --http "name=math url=http://127.0.0.1:8000/mcp transport=http"
102 | ```
103 |
104 | **Interactive session:**
105 |
106 | ```text
107 | DeepMCPAgent is ready. Type 'exit' to quit.
108 | > What is 2 + 2?
109 | 2 + 2 = 4.
110 | ```
111 |
112 | ---
113 |
114 | ## Example Server Setup
115 |
116 | To test locally, run the sample math server:
117 |
118 | ```bash
119 | python examples/servers/math_server.py
120 | ```
121 |
122 | Then connect:
123 |
124 | ```bash
125 | deepmcpagent run --model-id "openai:gpt-4.1" \
126 | --http "name=math url=http://127.0.0.1:8000/mcp transport=http"
127 | ```
128 |
129 | ---
130 |
131 | ## Notes
132 |
133 | - MCP servers can be mixed: use both `--stdio` and `--http` (see the example below).
134 | - Multiple servers can be provided by repeating the flags.
135 | - The agent falls back gracefully if no servers are available, but won’t have tools.
136 | - For automation, `list-tools` is useful in CI/CD pipelines to validate server contracts.
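
For example, mixing both transports in one invocation (a sketch; the server names and the stdio command are illustrative):

```bash
deepmcpagent list-tools --model-id "openai:gpt-4.1" \
  --http "name=math url=http://127.0.0.1:8000/mcp transport=http" \
  --stdio "name=echo command=python args='-m mypkg.server' keep_alive=false"
```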
137 |
--------------------------------------------------------------------------------
/CHANGELOG.md:
--------------------------------------------------------------------------------
1 | # Changelog
2 |
3 | ## 0.5.0 — 2025-10-18
4 |
5 | ### Added
6 |
7 | - Cross-Agent Communication (in-process) with `cross_agent.py`.
8 |   - `CrossAgent`, `make_cross_agent_tools`, `ask_agent_<name>`, and `broadcast_to_agents`.
9 | - `build_deep_agent(..., cross_agents=...)` to attach peers as tools.
10 | - Example `examples/use_cross_agent.py`.
11 | - Cross-Agent documentation section.
12 |
13 | ### Changed
14 |
15 | - Pydantic v2 config via `model_config = ConfigDict(extra="forbid")`.
16 |
17 | ### Fixed
18 |
19 | - Clearer trace output for cross-agent tool calls.
20 | - Better errors for unknown peers in `broadcast_to_agents`.
21 | - Implemented sync `_run` on tools to satisfy `BaseTool`.
22 |
23 | ---
24 |
25 | ## 0.4.1 — 2025-10-17
26 |
27 | ### Added
28 |
29 | - No new features.
30 |
31 | ### Changed
32 |
33 | - Updated runtime compatibility for environments without `deepagents` installed (graceful fallback to ReAct agent).
34 | - Minor code cleanup and improved defensive checks in the agent builder.
35 |
36 | ### Fixed
37 |
38 | - Fixed `TypeError` when falling back to `create_react_agent()` with `langgraph>=0.6`.
39 | - Agent builder now dynamically detects supported parameters and omits deprecated ones for smooth operation across LangGraph versions.
40 | - Improved `_after()` trace hook to skip `None` values when `trace_tools=True`, correctly displaying tool results.
41 |
42 | ---
43 |
44 | ## 0.4.0
45 |
46 | ### Added
47 |
48 | - CLI now supports pretty console output, `--trace/--no-trace`, and `--raw` modes.
49 | - HTTP server specs fully supported with block string syntax (`--http "name=... url=..."`).
50 | - Tool tracing hooks (`on_before`, `on_after`, `on_error`) integrated into the agent layer.
51 | - Richer agent streaming output: shows invoked tools, arguments, and results.
52 | - Added `__version__` export via package metadata.
53 | - Basic PyTests
54 |
55 | ### Changed
56 |
57 | - Updated runtime dependencies:
58 | - `langgraph` and `langgraph-prebuilt` pinned to `>=0.6,<0.7`.
59 | - `langchain` bumped to `>=0.3.27`.
60 | - `fastmcp` bumped to `>=2.12.2`.
61 | - CLI and agent examples polished for consistency and usability.
62 | - Development extras modernized (latest `ruff`, `mypy`, `pytest`, etc.).
63 |
64 | ### Fixed
65 |
66 | - Multiple Ruff issues (imports, `Optional` → `X | None`, try/except cleanups).
67 | - Validation errors in CLI argument parsing.
68 | - Tool discovery now handles `None` or empty results gracefully.
69 | - Safer error handling in `_FastMCPTool` when tool callbacks raise.
70 | - CI workflow stabilized for PyPI publishing with setuptools-scm dynamic versioning.
71 |
72 | ---
73 |
74 | ## 0.3.0
75 |
76 | ### Added
77 |
78 | - Improved JSON Schema → Pydantic mapping:
79 | - Carries through defaults and descriptions via `Field`.
80 |   - Generates per-tool arg models (`Args_<tool>`).
81 | - Sanitizes model names for Pydantic compatibility.
82 | - CLI improvements:
83 | - Added `--version` flag.
84 | - Simplified option parsing.
85 | - Updated documentation.
86 | - PyPI Trusted Publishing workflow (publish on tag).
87 | - CI improvements: Ruff formatting, mypy fixes, skip deep extra on Python 3.10.
88 |
89 | ### Fixed
90 |
91 | - Type errors in CLI, agent, tools, and clients.
92 | - CLI annotation options adjusted to satisfy Ruff rules.
93 |
94 | ### Changed
95 |
96 | - Project license clarified to Apache-2.0.
97 | - Project metadata aligned with license notice.
98 |
99 | ---
100 |
101 | ## 0.1.0
102 |
103 | - Initial FastMCP client edition.
104 |
--------------------------------------------------------------------------------
/docs/changelog.md:
--------------------------------------------------------------------------------
1 | # Changelog
2 |
3 | ## 0.5.0 — 2025-10-18
4 |
5 | ### Added
6 |
7 | - Cross-Agent Communication (in-process) with `cross_agent.py`.
8 |   - `CrossAgent`, `make_cross_agent_tools`, `ask_agent_<name>`, and `broadcast_to_agents`.
9 | - `build_deep_agent(..., cross_agents=...)` to attach peers as tools.
10 | - Example `examples/use_cross_agent.py`.
11 | - Cross-Agent documentation section.
12 |
13 | ### Changed
14 |
15 | - Pydantic v2 config via `model_config = ConfigDict(extra="forbid")`.
16 |
17 | ### Fixed
18 |
19 | - Clearer trace output for cross-agent tool calls.
20 | - Better errors for unknown peers in `broadcast_to_agents`.
21 | - Implemented sync `_run` on tools to satisfy `BaseTool`.
22 |
23 | ---
24 |
25 | ## 0.4.1 — 2025-10-17
26 |
27 | ### Added
28 |
29 | - No new features.
30 |
31 | ### Changed
32 |
33 | - Updated runtime compatibility for environments without `deepagents` installed (graceful fallback to ReAct agent).
34 | - Minor code cleanup and improved defensive checks in the agent builder.
35 |
36 | ### Fixed
37 |
38 | - Fixed `TypeError` when falling back to `create_react_agent()` with `langgraph>=0.6`.
39 | - Agent builder now dynamically detects supported parameters and omits deprecated ones for smooth operation across LangGraph versions.
40 | - Improved `_after()` trace hook to skip `None` values when `trace_tools=True`, correctly displaying tool results.
41 |
42 | ---
43 |
44 | ## 0.4.0
45 |
46 | ### Added
47 |
48 | - CLI now supports pretty console output, `--trace/--no-trace`, and `--raw` modes.
49 | - HTTP server specs fully supported with block string syntax (`--http "name=... url=..."`).
50 | - Tool tracing hooks (`on_before`, `on_after`, `on_error`) integrated into the agent layer.
51 | - Richer agent streaming output: shows invoked tools, arguments, and results.
52 | - Added `__version__` export via package metadata.
53 | - Basic PyTests
54 |
55 | ### Changed
56 |
57 | - Updated runtime dependencies:
58 | - `langgraph` and `langgraph-prebuilt` pinned to `>=0.6,<0.7`.
59 | - `langchain` bumped to `>=0.3.27`.
60 | - `fastmcp` bumped to `>=2.12.2`.
61 | - CLI and agent examples polished for consistency and usability.
62 | - Development extras modernized (latest `ruff`, `mypy`, `pytest`, etc.).
63 |
64 | ### Fixed
65 |
66 | - Multiple Ruff issues (imports, `Optional` → `X | None`, try/except cleanups).
67 | - Validation errors in CLI argument parsing.
68 | - Tool discovery now handles `None` or empty results gracefully.
69 | - Safer error handling in `_FastMCPTool` when tool callbacks raise.
70 | - CI workflow stabilized for PyPI publishing with setuptools-scm dynamic versioning.
71 |
72 | ---
73 |
74 | ## 0.3.0
75 |
76 | ### Added
77 |
78 | - Improved JSON Schema → Pydantic mapping:
79 | - Carries through defaults and descriptions via `Field`.
80 |   - Generates per-tool arg models (`Args_<tool>`).
81 | - Sanitizes model names for Pydantic compatibility.
82 | - CLI improvements:
83 | - Added `--version` flag.
84 | - Simplified option parsing.
85 | - Updated documentation.
86 | - PyPI Trusted Publishing workflow (publish on tag).
87 | - CI improvements: Ruff formatting, mypy fixes, skip deep extra on Python 3.10.
88 |
89 | ### Fixed
90 |
91 | - Type errors in CLI, agent, tools, and clients.
92 | - CLI annotation options adjusted to satisfy Ruff rules.
93 |
94 | ### Changed
95 |
96 | - Project license clarified to Apache-2.0.
97 | - Project metadata aligned with license notice.
98 |
99 | ---
100 |
101 | ## 0.1.0
102 |
103 | - Initial FastMCP client edition.
104 |
--------------------------------------------------------------------------------
/CONTRIBUTING.md:
--------------------------------------------------------------------------------
1 | # Contributing to DeepMCPAgent
2 |
3 | Thanks for taking the time to contribute — you’re awesome! 🎉
4 | This document explains how to set up your environment, follow our coding standards, and submit great pull requests.
5 |
6 | ---
7 |
8 | ## 👋 Ways to contribute
9 |
10 | - 🐛 Report bugs and propose fixes
11 | - 🧩 Improve docs, examples, and tutorials
12 | - 🧠 Suggest features or design improvements
13 | - ✅ Add tests and refactor for quality
14 | - 🔌 Create example MCP servers or integration recipes
15 |
16 | Before starting larger work, please open an issue to discuss the idea.
17 |
18 | ---
19 |
20 | ## 🧰 Development setup
21 |
22 | DeepMCPAgent targets **Python 3.10+**.
23 |
24 | ```bash
25 | # 1) Clone and enter the repo
26 | git clone https://github.com/cryxnet/deepmcpagent.git
27 | cd deepmcpagent
28 |
29 | # 2) Create and activate a virtual environment
30 | python3 -m venv .venv
31 | source .venv/bin/activate
32 |
33 | # 3) Install in editable mode with dev extras
34 | pip install -e ".[dev]"
35 |
36 | # (optional) If you plan to use the DeepAgents backend:
37 | pip install -e ".[deep]"
38 | ```
39 |
40 | > **macOS / Homebrew note:** if you see a PEP 668 “externally-managed-environment” error, make sure you’re inside a virtualenv as shown above.
41 |
42 | ---
43 |
44 | ## 🧪 Quality checks (run before committing)
45 |
46 | We keep a high bar for code quality. Please run:
47 |
48 | ```bash
49 | # Format & lint
50 | ruff check .
51 | ruff format .
52 |
53 | # Type-check (mypy strict)
54 | mypy
55 |
56 | # Tests
57 | pytest -q
58 |
59 | # Docs (optional but appreciated)
60 | mkdocs build
61 | ```
62 |
63 | You can run an example end-to-end:
64 |
65 | ```bash
66 | # Terminal 1 — start the sample HTTP MCP server
67 | python examples/servers/math_server.py
68 | # serves http://127.0.0.1:8000/mcp
69 |
70 | # Terminal 2 — run the demo agent with fancy console output
71 | python examples/use_agent.py
72 | ```
73 |
74 | ---
75 |
76 | ## 🧑💻 Code style & guidelines
77 |
78 | - **Typing:** 100% typed. If you must use `Any`, confine it and explain why.
79 | - **Docs:** Public APIs must include clear docstrings (Google style preferred).
80 | - **Structure:** Keep modules focused. Avoid giant files and deeply nested logic.
81 | - **Errors:** Raise precise exceptions with actionable messages.
82 | - **Logging:** Prefer clear return values and exceptions over ad-hoc prints.
83 | - **Dependencies:** Keep runtime deps minimal. Add dev deps under `[project.optional-dependencies].dev`.
84 | - **Compatibility:** Don’t use features newer than the minimum supported Python version.
85 |
86 | ---
87 |
88 | ## 🔧 Project-specific conventions
89 |
90 | - **MCP connectivity:** Use the FastMCP client for HTTP/SSE servers. If you add server specs or transports, keep them fully typed and documented.
91 | - **Tools:** Convert MCP `inputSchema` → Pydantic → LangChain `BaseTool`. If you extend the schema mapping, add tests (see the sketch below).
92 | - **Agent loop:** Prefer **DeepAgents** when installed; otherwise the **LangGraph ReAct** fallback is used. Keep both paths healthy.
93 | - **Prompts:** The default system prompt lives in `src/deepmcpagent/prompt.py`. If changing behavior, document rationale in the PR.
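
A minimal sketch of such a test (assuming the private helper `_jsonschema_to_pydantic` keeps its current signature):

```python
from deepmcpagent.tools import _jsonschema_to_pydantic


def test_optional_string_field() -> None:
    schema = {"type": "object", "properties": {"q": {"type": "string"}}}
    model = _jsonschema_to_pydantic(schema, model_name="Args_demo")
    # A property absent from "required" maps to an optional field.
    assert model.model_fields["q"].is_required() is False
```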
94 |
95 | ---
96 |
97 | ## 🌿 Git workflow
98 |
99 | 1. **Create a branch:**
100 |
101 | ```
102 | git checkout -b feat/short-description
103 | ```
104 |
105 | 2. **Commit using Conventional Commits:**
106 |
107 | - `feat: add HTTP auth headers to server spec`
108 | - `fix: prevent missing _client in tool wrapper`
109 | - `docs: expand README Quickstart`
110 | - `refactor: split agent builder`
111 | - `test: add coverage for schema mapper`
112 |
113 | 3. **Keep PRs focused and reasonably small.** Link related issues in the description.
114 | 4. **Checklist before opening a PR:**
115 |
116 | - [ ] `ruff check .` and `ruff format .` pass
117 | - [ ] `mypy` passes (strict)
118 | - [ ] `pytest` passes locally
119 | - [ ] Docs / examples updated if behavior changed
120 | - [ ] Added tests for new logic
121 | - [ ] No extraneous files (lockfiles, IDE configs, etc.)
122 |
123 | ---
124 |
125 | ## 📝 Documentation
126 |
127 | We use **MkDocs + mkdocstrings**.
128 |
129 | ```bash
130 | # Preview docs locally
131 | mkdocs serve
132 | ```
133 |
134 | - API references live under `docs/` and are generated from docstrings.
135 | - Keep README examples runnable and consistent with `examples/`.
136 |
137 | ---
138 |
139 | ## 🧾 License & DCO
140 |
141 | By contributing, you agree that your contributions are licensed under the project’s **Apache-2.0 License** (see `LICENSE`).
142 |
143 | We follow a **Developer Certificate of Origin (DCO)** model — include a `Signed-off-by` line in your commits or enable GitHub’s sign-off option:
144 |
145 | ```
146 | Signed-off-by: Your Name <your.email@example.com>
147 | ```
148 |
149 | ---
150 |
151 | ## 🙌 Thank you
152 |
153 | Your time and effort make this project better for everyone. We’re excited to collaborate!
154 |
--------------------------------------------------------------------------------
/src/deepmcpagent/agent.py:
--------------------------------------------------------------------------------
1 | """Agent builders that use the FastMCP client and MCP-only tools."""
2 |
3 | from __future__ import annotations
4 |
5 | from collections.abc import Mapping
6 | from typing import Any, cast
7 |
8 | from langchain.chat_models import init_chat_model
9 | from langchain_core.language_models.chat_models import BaseChatModel
10 | from langchain_core.runnables import Runnable
11 | from langchain_core.tools import BaseTool
12 | from langgraph.prebuilt import create_react_agent
13 |
14 | from .clients import FastMCPMulti
15 | from .config import ServerSpec
16 | from .cross_agent import CrossAgent, make_cross_agent_tools
17 | from .prompt import DEFAULT_SYSTEM_PROMPT
18 | from .tools import MCPClientError, MCPToolLoader
19 |
20 | # Model can be a provider string (handled by LangChain), a chat model instance, or a Runnable.
21 | ModelLike = str | BaseChatModel | Runnable[Any, Any]
22 |
23 |
24 | def _normalize_model(model: ModelLike) -> Runnable[Any, Any]:
25 | """Normalize the supplied model into a Runnable."""
26 | if isinstance(model, str):
27 | # This supports many providers via lc init strings, not just OpenAI.
28 | return cast(Runnable[Any, Any], init_chat_model(model))
29 | # Already BaseChatModel or Runnable
30 | return cast(Runnable[Any, Any], model)
31 |
32 |
33 | async def build_deep_agent(
34 | *,
35 | servers: Mapping[str, ServerSpec],
36 | model: ModelLike,
37 | instructions: str | None = None,
38 | trace_tools: bool = False,
39 |     cross_agents: Mapping[str, CrossAgent] | None = None,
40 | ) -> tuple[Runnable[Any, Any], MCPToolLoader]:
41 | """Build an MCP-first agent graph.
42 |
43 | This function discovers tools from the configured MCP servers, converts them into
44 | LangChain tools, and then builds an agent. If the optional `deepagents` package is
45 | installed, a Deep Agent loop is created. Otherwise, a LangGraph ReAct agent is used.
46 |
47 | Args:
48 | servers: Mapping of server name to spec (HTTP/SSE recommended for FastMCP).
49 | model: REQUIRED. Either a LangChain chat model instance, a provider id string
50 | accepted by `init_chat_model`, or a Runnable.
51 | instructions: Optional system prompt. If not provided, uses DEFAULT_SYSTEM_PROMPT.
52 | trace_tools: If True, print each tool invocation and result from inside the tool
53 | wrapper (works for both DeepAgents and LangGraph prebuilt).
54 | cross_agents: Optional mapping of peer name -> CrossAgent. When provided, each
55 |             peer is exposed as a tool (e.g., `ask_agent_<name>`) and an optional
56 | `broadcast_to_agents` tool is added to consult multiple peers.
57 |
58 | Returns:
59 | Tuple of `(graph, loader)` where:
60 | - `graph` is a LangGraph or DeepAgents runnable with `.ainvoke`.
61 | - `loader` can be used to introspect tools.
62 | """
63 | if model is None: # Defensive check; CLI/code must always pass a model now.
64 | raise ValueError("A model is required. Provide a model instance or a provider id string.")
65 |
66 | # Simple printing callbacks for tracing (kept dependency-free)
67 | def _before(name: str, kwargs: dict[str, Any]) -> None:
68 | if trace_tools:
69 | print(f"→ Invoking tool: {name} with {kwargs}")
70 |
71 | def _after(name: str, res: Any) -> None:
72 | if not trace_tools:
73 | return
74 | pretty = res
75 | for attr in ("data", "text", "content", "result"):
76 | try:
77 | val = getattr(res, attr, None)
78 | if val not in (None, ""):
79 | pretty = val
80 | break
81 | except Exception:
82 | continue
83 | print(f"✔ Tool result from {name}: {pretty}")
84 |
85 | def _error(name: str, exc: Exception) -> None:
86 | if trace_tools:
87 | print(f"✖ {name} error: {exc}")
88 |
89 | multi = FastMCPMulti(servers)
90 | loader = MCPToolLoader(
91 | multi,
92 | on_before=_before if trace_tools else None,
93 | on_after=_after if trace_tools else None,
94 | on_error=_error if trace_tools else None,
95 | )
96 |
97 | try:
98 | discovered = await loader.get_all_tools()
99 | tools: list[BaseTool] = list(discovered) if discovered else []
100 | except MCPClientError as exc:
101 | raise RuntimeError(
102 | f"Failed to initialize agent because tool discovery failed. Details: {exc}"
103 | ) from exc
104 |
105 | # Attach cross-agent tools if provided
106 | if cross_agents:
107 | tools.extend(make_cross_agent_tools(cross_agents))
108 |
109 | if not tools:
110 | print("[deepmcpagent] No tools discovered from MCP servers; agent will run without tools.")
111 |
112 | chat: Runnable[Any, Any] = _normalize_model(model)
113 | sys_prompt = instructions or DEFAULT_SYSTEM_PROMPT
114 |
115 | # ----------------------------------------------------------------------
116 | # Attempt DeepAgents first, then gracefully fall back to LangGraph.
117 | # ----------------------------------------------------------------------
118 | try:
119 | # Optional deep agent loop if installed.
120 | from deepagents import create_deep_agent # type: ignore
121 |
122 | graph = cast(
123 | Runnable[Any, Any],
124 | create_deep_agent(tools=tools, instructions=sys_prompt, model=chat),
125 | )
126 |
127 | except ImportError:
128 | # Fallback to LangGraph’s ReAct agent, compatible with all versions.
129 | import inspect
130 |
131 | try:
132 | sig = inspect.signature(create_react_agent)
133 | params = set(sig.parameters.keys())
134 |
135 | # base kwargs always valid
136 | kwargs: dict[str, Any] = {"model": chat, "tools": tools}
137 |
138 | # Only pass prompt args if supported by this version
139 | if "system_prompt" in params:
140 | kwargs["system_prompt"] = sys_prompt
141 | elif "state_modifier" in params:
142 | kwargs["state_modifier"] = sys_prompt
143 | # Newer versions (>=0.6) have no prompt args → skip
144 |
145 | graph = cast(Runnable[Any, Any], create_react_agent(**kwargs))
146 |
147 | except TypeError:
148 | # Absolute fallback for latest versions: no prompt args allowed
149 | graph = cast(Runnable[Any, Any], create_react_agent(model=chat, tools=tools))
150 |
151 | return graph, loader
152 |
--------------------------------------------------------------------------------
/examples/use_cross_agent.py:
--------------------------------------------------------------------------------
1 | # use_cross_agent.py
2 | """
3 | Example: Cross-Agent Communication with DeepMCPAgent.
4 |
5 | This demonstrates wiring a specialist peer agent into a primary agent so the
6 | primary can delegate work via cross-agent tools:
7 |
8 | - ask_agent_<peer_name>(message, context?, timeout_s?)
9 | - broadcast_to_agents(message, peers?, timeout_s?)
10 |
11 | Console output:
12 | - Discovered MCP tools (from your servers)
13 | - Advertised cross-agent tools (derived from peers)
14 | - Each tool invocation + result (via deepmcpagent trace hooks)
15 | - Final LLM answer
16 | """
17 |
18 | import asyncio
19 | from typing import Any
20 |
21 | from dotenv import load_dotenv
22 | from langchain_openai import ChatOpenAI
23 | from rich.console import Console
24 | from rich.panel import Panel
25 | from rich.table import Table
26 |
27 | from deepmcpagent import HTTPServerSpec, build_deep_agent
28 | from deepmcpagent.cross_agent import CrossAgent
29 |
30 |
31 | def _extract_final_answer(result: Any) -> str:
32 | """Best-effort extraction of the final text from different executors."""
33 | try:
34 | # LangGraph prebuilt typically returns {"messages": [...]}
35 | if isinstance(result, dict) and "messages" in result and result["messages"]:
36 | last = result["messages"][-1]
37 | content = getattr(last, "content", None)
38 | if isinstance(content, str) and content:
39 | return content
40 | if isinstance(content, list) and content and isinstance(content[0], dict):
41 | return content[0].get("text") or str(content)
42 | return str(last)
43 | return str(result)
44 | except Exception:
45 | return str(result)
46 |
47 |
48 | async def main() -> None:
49 | console = Console()
50 | load_dotenv()
51 |
52 | # Ensure your MCP server (e.g., math_server.py) is running in another terminal:
53 | # python math_server.py
54 | servers = {
55 | "math": HTTPServerSpec(
56 | url="http://127.0.0.1:8000/mcp",
57 | transport="http",
58 | ),
59 | }
60 |
61 | # Any LangChain-compatible chat model (or init string) works here.
62 | # Use the same or different models for main/peer agents as you prefer.
63 | main_model = ChatOpenAI(model="gpt-4.1")
64 | peer_model = ChatOpenAI(model="gpt-4o-mini")
65 |
66 | # ---------------------------------------------------------------------
67 | # 1) Build a specialist peer agent (math-focused)
68 | # ---------------------------------------------------------------------
69 | math_peer_graph, _ = await build_deep_agent(
70 | servers=servers,
71 | model=peer_model,
72 | instructions=(
73 | "You are a focused Math Specialist. ALWAYS use available MCP math tools "
74 | "to compute precisely. Return concise numeric results with brief steps."
75 | ),
76 | trace_tools=True, # See the math peer's own tool usage when directly invoked
77 | )
78 |
79 | # Wrap the peer as a CrossAgent so it can be exposed as a tool.
80 | peers = {
81 | "mathpeer": CrossAgent(
82 | agent=math_peer_graph,
83 | description="Specialist math agent that uses MCP math tools for accurate computation.",
84 | )
85 | }
86 |
87 | # ---------------------------------------------------------------------
88 | # 2) Build the main agent and attach cross-agent tools
89 | # ---------------------------------------------------------------------
90 | main_graph, loader = await build_deep_agent(
91 | servers=servers,
92 | model=main_model,
93 | instructions=(
94 | "You are a helpful orchestrator. Prefer calling tools rather than guessing. "
95 | "If the task is mathematical, DELEGATE to the math peer via the tool "
96 | "'ask_agent_mathpeer'. If multiple peers exist, you may also use "
97 | "'broadcast_to_agents' to compare answers."
98 | ),
99 | trace_tools=True, # See tool invocations from the main agent (including cross-agent tools)
100 | cross_agents=peers, # <-- Attach the peer(s)
101 | )
102 |
103 | # ---------------------------------------------------------------------
104 | # 3) Show discovered tools (MCP) + cross-agent tools
105 | # ---------------------------------------------------------------------
106 | infos = await loader.list_tool_info()
107 | infos = list(infos) if infos else []
108 |
109 | mcp_table = Table(title="Discovered MCP Tools", show_lines=True)
110 | mcp_table.add_column("Name", style="cyan", no_wrap=True)
111 | mcp_table.add_column("Description", style="green")
112 | if infos:
113 | for t in infos:
114 | mcp_table.add_row(t.name, t.description or "-")
115 | else:
116 | mcp_table.add_row("— none —", "No tools discovered (is your MCP server running?)")
117 | console.print(mcp_table)
118 |
119 | cross_table = Table(title="Cross-Agent Tools (exposed on MAIN agent)", show_lines=True)
120 | cross_table.add_column("Tool", style="cyan", no_wrap=True)
121 | cross_table.add_column("What it does", style="green")
122 | # One per peer:
123 | for name in peers:
124 | cross_table.add_row(f"ask_agent_{name}", f"Ask the '{name}' peer for help.")
125 | # Broadcast tool is always added by make_cross_agent_tools in our integration
126 | cross_table.add_row(
127 | "broadcast_to_agents", "Ask multiple peers in parallel and collect answers."
128 | )
129 | console.print(cross_table)
130 |
131 | # ---------------------------------------------------------------------
132 | # 4) Run a single-turn query that should trigger delegation
133 | # ---------------------------------------------------------------------
134 | query = (
135 | "A rectangle is width 3.5 and length 6.2. "
136 | "Compute area and perimeter, then add 17^2 to the sum of (area + perimeter). "
137 | "Please DELEGATE to the math peer via the 'ask_agent_mathpeer' tool and show brief steps."
138 | )
139 | console.print(
140 | Panel.fit(query, title="User Query (expects cross-agent delegation)", style="bold magenta")
141 | )
142 |
143 | result = await main_graph.ainvoke({"messages": [{"role": "user", "content": query}]})
144 | final_text = _extract_final_answer(result)
145 | console.print(Panel(final_text or "(no content)", title="Final LLM Answer", style="bold green"))
146 |
147 | # ---------------------------------------------------------------------
148 | # 5) (Optional) Demonstrate broadcast to peers — with one peer it's trivial,
149 | # but we show how you'd instruct the main agent to use it.
150 | # ---------------------------------------------------------------------
151 | query2 = (
152 | "As a quick check, consult all peers via 'broadcast_to_agents' with the message "
153 | "'Compute (3 + 5) * 7 using MCP math tools.' Then summarize the responses."
154 | )
155 | console.print(Panel.fit(query2, title="User Query (broadcast demo)", style="bold magenta"))
156 | result2 = await main_graph.ainvoke({"messages": [{"role": "user", "content": query2}]})
157 | final_text2 = _extract_final_answer(result2)
158 | console.print(
159 | Panel(
160 | final_text2 or "(no content)", title="Final LLM Answer (Broadcast)", style="bold green"
161 | )
162 | )
163 |
164 |
165 | if __name__ == "__main__":
166 | asyncio.run(main())
167 |
--------------------------------------------------------------------------------
/src/deepmcpagent/tools.py:
--------------------------------------------------------------------------------
1 | """MCP tool discovery and conversion to LangChain tools."""
2 |
3 | from __future__ import annotations
4 |
5 | import contextlib
6 | import re
7 | from collections.abc import Callable
8 | from dataclasses import dataclass
9 | from typing import Any, cast
10 |
11 | from langchain_core.tools import BaseTool
12 | from pydantic import BaseModel, Field, PrivateAttr, create_model
13 |
14 | from .clients import FastMCPMulti
15 |
16 | # Callback types for tracing tool calls
17 | OnBefore = Callable[[str, dict[str, Any]], None]
18 | OnAfter = Callable[[str, Any], None]
19 | OnError = Callable[[str, Exception], None]
20 |
21 |
22 | @dataclass(frozen=True)
23 | class ToolInfo:
24 | """Human-friendly metadata for a discovered MCP tool."""
25 |
26 | server_guess: str
27 | name: str
28 | description: str
29 | input_schema: dict[str, Any]
30 |
31 |
32 | class MCPClientError(RuntimeError):
33 | """Raised when communicating with the MCP client fails."""
34 |
35 |
36 | def _jsonschema_to_pydantic(schema: dict[str, Any], *, model_name: str = "Args") -> type[BaseModel]:
37 | props = (schema or {}).get("properties", {}) or {}
38 | required = set((schema or {}).get("required", []) or [])
39 |
40 | # Each value is (annotation, default)
41 | def f(n: str, p: dict[str, Any]) -> tuple[type[Any], Any]:
42 | t = p.get("type")
43 | desc = p.get("description")
44 | default = p.get("default")
45 | req = n in required
46 |
47 | def default_val() -> Any:
48 | return ... if req else default
49 |
50 | if t == "string":
51 | return (str, Field(default_val(), description=desc))
52 | if t == "integer":
53 | return (int, Field(default_val(), description=desc))
54 | if t == "number":
55 | return (float, Field(default_val(), description=desc))
56 | if t == "boolean":
57 | return (bool, Field(default_val(), description=desc))
58 | if t == "array":
59 | return (list, Field(default_val(), description=desc))
60 | if t == "object":
61 | return (dict, Field(default_val(), description=desc))
62 | return (Any, Field(default_val(), description=desc))
63 |
64 | fields: dict[str, tuple[type[Any], Any]] = {
65 | n: f(n, spec or {}) for n, spec in props.items()
66 | } or {"payload": (dict, Field(None, description="Raw payload"))}
67 |
68 | safe_name = re.sub(r"[^0-9a-zA-Z_]", "_", model_name) or "Args"
69 |
70 | # Hand the kwargs to pydantic as Any to satisfy the stubbed overloads
71 | model = create_model(safe_name, **cast(dict[str, Any], fields))
72 | return cast(type[BaseModel], model)
73 |
74 |
75 | class _FastMCPTool(BaseTool):
76 | """LangChain `BaseTool` wrapper that invokes a FastMCP tool by name."""
77 |
78 | name: str
79 | description: str
80 | args_schema: type[BaseModel]
81 |
82 | _tool_name: str = PrivateAttr()
83 | _client: Any = PrivateAttr()
84 | _on_before: OnBefore | None = PrivateAttr(default=None)
85 | _on_after: OnAfter | None = PrivateAttr(default=None)
86 | _on_error: OnError | None = PrivateAttr(default=None)
87 |
88 | def __init__(
89 | self,
90 | *,
91 | name: str,
92 | description: str,
93 | args_schema: type[BaseModel],
94 | tool_name: str,
95 | client: Any,
96 | on_before: OnBefore | None = None,
97 | on_after: OnAfter | None = None,
98 | on_error: OnError | None = None,
99 | ) -> None:
100 | super().__init__(name=name, description=description, args_schema=args_schema)
101 | self._tool_name = tool_name
102 | self._client = client
103 | self._on_before = on_before
104 | self._on_after = on_after
105 | self._on_error = on_error
106 |
107 | async def _arun(self, **kwargs: Any) -> Any:
108 | """Asynchronously execute the MCP tool via the FastMCP client."""
109 | if self._on_before:
110 | with contextlib.suppress(Exception):
111 | self._on_before(self.name, kwargs)
112 |
113 | try:
114 | async with self._client:
115 | res = await self._client.call_tool(self._tool_name, kwargs)
116 | except Exception as exc: # surface transport/protocol issues
117 | if self._on_error:
118 | with contextlib.suppress(Exception):
119 | self._on_error(self.name, exc)
120 | raise MCPClientError(f"Failed to call MCP tool '{self._tool_name}': {exc}") from exc
121 |
122 | if self._on_after:
123 | with contextlib.suppress(Exception):
124 | self._on_after(self.name, res)
125 |
126 | return res
127 |
128 | def _run(self, **kwargs: Any) -> Any: # pragma: no cover
129 | """Synchronous execution path (rarely used)."""
130 | import anyio
131 |
132 | return anyio.run(lambda: self._arun(**kwargs))
133 |
134 |
135 | class MCPToolLoader:
136 | """Discover MCP tools via FastMCP and convert them to LangChain tools."""
137 |
138 | def __init__(
139 | self,
140 | multi: FastMCPMulti,
141 | *,
142 | on_before: OnBefore | None = None,
143 | on_after: OnAfter | None = None,
144 | on_error: OnError | None = None,
145 | ) -> None:
146 | self._multi = multi
147 | self._on_before = on_before
148 | self._on_after = on_after
149 | self._on_error = on_error
150 |
151 | async def _list_tools_raw(self) -> tuple[Any, list[Any]]:
152 | """Fetch raw tool descriptors from all configured MCP servers."""
153 | c = self._multi.client
154 | try:
155 | async with c:
156 | tools = await c.list_tools()
157 | except Exception as exc:
158 | raise MCPClientError(
159 | f"Failed to list tools from MCP servers: {exc}. "
160 | "Check server URLs, network connectivity, and authentication headers."
161 | ) from exc
162 | return c, list(tools or [])
163 |
164 | async def get_all_tools(self) -> list[BaseTool]:
165 | """Return all available tools as LangChain `BaseTool` instances."""
166 | client, tools = await self._list_tools_raw()
167 |
168 | out: list[BaseTool] = []
169 | for t in tools:
170 | name = t.name
171 | desc = getattr(t, "description", "") or ""
172 | schema = getattr(t, "inputSchema", None) or {}
173 | model = _jsonschema_to_pydantic(schema, model_name=f"Args_{name}")
174 | out.append(
175 | _FastMCPTool(
176 | name=name,
177 | description=desc,
178 | args_schema=model,
179 | tool_name=name,
180 | client=client,
181 | on_before=self._on_before,
182 | on_after=self._on_after,
183 | on_error=self._on_error,
184 | )
185 | )
186 | return out
187 |
188 | async def list_tool_info(self) -> list[ToolInfo]:
189 | """Return human-readable tool metadata for introspection or debugging."""
190 | _, tools = await self._list_tools_raw()
191 | return [
192 | ToolInfo(
193 | server_guess=(getattr(t, "server", None) or getattr(t, "serverName", None) or ""),
194 | name=t.name,
195 | description=getattr(t, "description", "") or "",
196 | input_schema=getattr(t, "inputSchema", None) or {},
197 | )
198 | for t in tools
199 | ]
200 |
--------------------------------------------------------------------------------
/docs/cross-agent.md:
--------------------------------------------------------------------------------
1 | ## What are "Cross Agents"?
2 |
3 | **Cross agents** are other agent graphs (LangGraph ReAct or DeepAgents loops) that you expose to a calling agent as tools. Each peer is surfaced as:
4 |
5 | - `ask_agent_<peer_name>` — forward a single question to a specific peer and return its final answer.
6 | - `broadcast_to_agents` — ask multiple peers the same question in parallel and return a mapping of answers.
7 |
8 | This makes multi-agent patterns feel like calling any other MCP tool: the caller reasons about _when_ to delegate; the peer focuses on _how_ to solve its slice.
9 |
10 | ---
11 |
12 | ## How it works (high level)
13 |
14 | 1. You build (or already have) one or more agent runnables: `Runnable[{messages}] -> Result`.
15 | 2. Wrap those peers with `CrossAgent(agent=..., description=...)`.
16 | 3. Pass `cross_agents={...}` into `build_deep_agent(...)` for your main agent.
17 | 4. The builder automatically attaches:
18 |
19 |    - one `ask_agent_<peer_name>` tool per peer
20 |    - a `broadcast_to_agents` fan-out tool
21 |
22 | 5. During planning, the main agent can call these tools like any other MCP tool.
23 |
24 | Internally, the tools:
25 |
26 | - Package your prompt as `{ "messages": [...] }` and call `peer.ainvoke(...)`.
27 | - Extract a “best final text” from the peer’s result (compatible with LangGraph/DeepAgents common shapes).
28 | - Return that text (or a dict of texts for broadcast) to the caller.
29 |
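For concreteness, those steps can be sketched like this (illustrative only — `ask_peer` and `extract_final_text` are stand-in names, not the actual `cross_agent.py` internals):

```python
import anyio
from typing import Any


def extract_final_text(result: Any) -> str:
    """Stand-in for the best-effort extraction described above."""
    if isinstance(result, dict) and result.get("messages"):
        last = result["messages"][-1]
        content = getattr(last, "content", None)
        if content is None and isinstance(last, dict):
            content = last.get("content")
        return content if isinstance(content, str) else str(last)
    return str(result)


async def ask_peer(
    peer_graph: Any,
    message: str,
    context: str | None = None,
    timeout_s: float | None = None,
) -> str:
    """Approximate behavior of an `ask_agent_<peer_name>` tool body."""
    messages: list[dict[str, str]] = []
    if context:
        # caller context goes first, as a system message
        messages.append({"role": "system", "content": context})
    messages.append({"role": "user", "content": message})
    try:
        with anyio.fail_after(timeout_s):  # timeout_s=None disables the deadline
            result = await peer_graph.ainvoke({"messages": messages})
    except TimeoutError:
        return "Timed out"
    return extract_final_text(result)
```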
30 | ---
31 |
32 | ## Installation & prerequisites
33 |
34 | You already have DeepMCPAgent installed. Make sure your environment has credentials for your chosen chat model(s), reachable via LangChain’s `init_chat_model` strings (e.g., `openai:gpt-4.1`, `anthropic:claude-3-5-sonnet-latest`), and that any MCP servers you want to use are reachable.
35 |
36 | > Works with both **DeepAgents** (if installed) and the **LangGraph prebuilt ReAct agent** fallback.
37 |
38 | ---
39 |
40 | ## Quick start
41 |
42 | ### 1) Build a specialist peer agent
43 |
44 | ```python
45 | from deepmcpagent.agent import build_deep_agent
46 | from deepmcpagent.config import HTTPServerSpec
47 |
48 | research_graph, _ = await build_deep_agent(
49 | servers={"web": HTTPServerSpec(url="http://127.0.0.1:8000/mcp")},
50 | model="openai:gpt-4o-mini",
51 | )
52 | ```
53 |
54 | ### 2) Build your main agent and attach the peer
55 |
56 | ```python
57 | from deepmcpagent.agent import build_deep_agent
58 | from deepmcpagent.cross_agent import CrossAgent
59 |
60 | main_graph, _ = await build_deep_agent(
61 | servers={"files": HTTPServerSpec(url="http://127.0.0.1:9000/mcp")},
62 | model="openai:gpt-4.1",
63 | cross_agents={
64 | "researcher": CrossAgent(
65 | agent=research_graph,
66 | description="Focused web researcher that gathers and summarizes sources."
67 | )
68 | },
69 | )
70 | ```
71 |
72 | ### 3) Use it in a chat loop
73 |
74 | ```python
75 | result = await main_graph.ainvoke({
76 | "messages": [{"role": "user", "content": "Draft a brief on Topic X"}]
77 | })
78 | # During planning the main agent may call:
79 | # - ask_agent_researcher(message=..., context?=..., timeout_s?=...)
80 | # - broadcast_to_agents(message=..., peers?=[...], timeout_s?=...)
81 | ```
82 |
83 | ---
84 |
85 | ## Tool API
86 |
87 | ### `ask_agent_<peer_name>`
88 |
89 | **Purpose:** Ask a specific peer a question and get the peer’s final answer.
90 |
91 | **Input schema**
92 |
93 | ```json
94 | {
95 | "message": "string (required)",
96 | "context": "string (optional)",
97 | "timeout_s": "number (optional, seconds)"
98 | }
99 | ```
100 |
101 | - `message`: The user-level query to forward.
102 | - `context`: Extra caller context (constraints, partial results, style guide). It is sent first as a `system` message, which most executors honor as steering guidance.
103 | - `timeout_s`: Per-call timeout; returns a “Timed out” message if exceeded.
104 |
105 | **Return:** `string` — best-effort final text from the peer.
106 |
107 | ---
108 |
109 | ### `broadcast_to_agents`
110 |
111 | **Purpose:** Ask multiple peers the same question in parallel.
112 |
113 | **Input schema**
114 |
115 | ```json
116 | {
117 | "message": "string (required)",
118 | "peers": ["string", "... (optional)"],
119 | "timeout_s": "number (optional, seconds)"
120 | }
121 | ```
122 |
123 | - `peers`: Subset of peer names; omit to use all.
124 | - `timeout_s`: Per-peer timeout.
125 |
126 | **Return:** `object` mapping peer name → final text answer, e.g.
127 |
128 | ```json
129 | {
130 | "researcher": "Summary ...",
131 | "editor": "Refined draft ...",
132 | "critic": "Risks list ..."
133 | }
134 | ```
135 |
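The fan-out itself can be sketched with an anyio task group (again illustrative, reusing the hypothetical `ask_peer` helper from the sketch earlier on this page):

```python
import anyio
from typing import Any


async def broadcast(
    peers: dict[str, Any],
    message: str,
    names: list[str] | None = None,
    timeout_s: float | None = None,
) -> dict[str, str]:
    """Illustrative fan-out; a slow peer yields 'Timed out' instead of failing the call."""
    selected = names or list(peers)
    unknown = [n for n in selected if n not in peers]
    if unknown:
        raise ValueError(f"Unknown peers: {unknown}")

    answers: dict[str, str] = {}

    async def ask_one(name: str) -> None:
        # `ask_peer` handles its own timeout and returns "Timed out" on expiry
        answers[name] = await ask_peer(peers[name], message, timeout_s=timeout_s)

    async with anyio.create_task_group() as tg:
        for name in selected:
            tg.start_soon(ask_one, name)
    return answers
```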
136 | ---
137 |
138 | ## Examples & patterns
139 |
140 | ### Specialist delegation
141 |
142 | - **Researcher → Writer → Editor**: The main agent requests sources via `ask_agent_researcher`, drafts with local tools, then sends the draft to an `editor` peer for tone/clarity.
143 |
144 | ### Ensemble consensus
145 |
146 | - Broadcast to `{ "math", "python", "reasoner" }`, then summarize or vote on the best answer.
147 |
148 | ### Safety gatekeeping
149 |
150 | - Route candidate content to a `safety` peer for policy checks before finalizing.
151 |
152 | ### RAG aggregator
153 |
154 | - A peer dedicated to retrieval; main agent calls it when tool descriptions mention “search”, “vector”, or “db” tasks.
155 |
156 | ---
157 |
158 | ## Tracing & debugging
159 |
160 | Enable tool tracing to see cross-agent calls and outputs:
161 |
162 | ```bash
163 | deepmcpagent run \
164 | --model-id openai:gpt-4.1 \
165 | --http "name=files url=http://127.0.0.1:9000/mcp" \
166 | --trace
167 | ```
168 |
169 | Programmatically, pass `trace_tools=True` into `build_deep_agent(...)` to get console prints like:
170 |
171 | ```
172 | → Invoking tool: ask_agent_researcher with {'message': '...'}
173 | ✔ Tool result from ask_agent_researcher: ...
174 | ```
175 |
176 | ---
177 |
178 | ## Error handling & timeouts
179 |
180 | - Transport/peer errors surface as `MCPClientError` or `ValueError` (e.g., unknown peer in broadcast).
181 | - Use `timeout_s` to keep the caller responsive.
182 | - The broadcast tool returns `"Timed out"` for slow peers without failing the whole call.
183 |
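On the caller side, a minimal guard might look like this (`MCPClientError` comes from `deepmcpagent.tools`; note that some executor versions convert tool exceptions into tool messages instead of raising them):

```python
from deepmcpagent.tools import MCPClientError


async def safe_invoke(graph, prompt: str) -> str:
    try:
        result = await graph.ainvoke({"messages": [{"role": "user", "content": prompt}]})
    except MCPClientError as exc:
        # transport/protocol failures from MCP tool calls
        return f"MCP failure: {exc}"
    except ValueError as exc:
        # e.g., an unknown peer name passed to broadcast_to_agents
        return f"Bad request: {exc}"
    return str(result)
```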
184 | ---
185 |
186 | ## Design notes
187 |
188 | - **Zero new infra:** Peers are plain in-process runnables; no extra MCP servers needed to talk agent-to-agent.
189 | - **LLM-native delegation:** Uses standard tool calls, so planning remains transparent and controllable.
190 | - **Composable & optional:** `cross_agents` is a single optional arg; if omitted, nothing changes.
191 | - **Parallel fan-out:** Broadcast runs peer calls concurrently via anyio.
192 |
193 | ---
194 |
195 | ## Compatibility
196 |
197 | - **DeepAgents available:** Uses the DeepAgents loop under the hood when present.
198 | - **Otherwise:** Falls back to LangGraph’s `create_react_agent`. Prompt injection via `system_prompt` / `state_modifier` is handled across versions.
199 |
200 | ---
201 |
202 | ## Security & privacy boundaries
203 |
204 | - Only the `message` (and optional `context`) are sent to peers.
205 | - Avoid passing secrets in `context`. Prefer secret storage and tool-level auth for sensitive operations.
206 |
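For example, carry credentials on the MCP transport rather than in peer `context` (URL and env var below are illustrative):

```python
import os

from deepmcpagent import HTTPServerSpec

servers = {
    "files": HTTPServerSpec(
        url="https://api.example.com/mcp",  # illustrative endpoint
        transport="http",
        # token rides on the transport and never enters prompts or peer context
        headers={"Authorization": f"Bearer {os.environ['MCP_TOKEN']}"},
    )
}
```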
207 | ---
208 |
209 | ## Performance tips
210 |
211 | - Keep peers focused and lightweight; the caller can decide _when_ to delegate.
212 | - Use `timeout_s` for high-latency peers or external retrieval.
213 | - Consider smaller/cheaper models for “filter” or “triage” peers; reserve larger models for synthesis.
214 |
215 | ---
216 |
217 | ## Testing
218 |
219 | ### Unit test a single ask
220 |
221 | ```python
222 | import asyncio
223 | from langchain_core.runnables import RunnableLambda
224 | from deepmcpagent.cross_agent import CrossAgent, make_cross_agent_tools
225 |
226 | async def fake_peer(inputs):
227 | return {"messages": [{"role": "assistant", "content": "ok"}]}
228 |
229 | def test_ask_agent_tool():
230 | peer = CrossAgent(agent=RunnableLambda(fake_peer))
231 | tools = make_cross_agent_tools({"peer": peer})
232 | ask = next(t for t in tools if t.name == "ask_agent_peer")
233 |     out = asyncio.run(ask._arun(message="ping"))  # or use anyio.run
234 | assert out == "ok"
235 | ```
236 |
237 | ### Integration test with your builder
238 |
239 | - Build two agents with trivial models or stubs.
240 | - Attach via `cross_agents`.
241 | - Invoke a prompt that forces a tool call (e.g., “Ask the researcher for sources and summarize”).
242 |
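A minimal sketch of such a test with two stub peers (assuming `broadcast_to_agents` accepts `message` and returns a dict keyed by peer name, as documented above):

```python
import asyncio

from langchain_core.runnables import RunnableLambda

from deepmcpagent.cross_agent import CrossAgent, make_cross_agent_tools


async def research_peer(_inputs):
    return {"messages": [{"role": "assistant", "content": "three sources found"}]}


async def editor_peer(_inputs):
    return {"messages": [{"role": "assistant", "content": "draft tightened"}]}


def test_broadcast_tool():
    peers = {
        "researcher": CrossAgent(agent=RunnableLambda(research_peer)),
        "editor": CrossAgent(agent=RunnableLambda(editor_peer)),
    }
    tools = make_cross_agent_tools(peers)
    broadcast = next(t for t in tools if t.name == "broadcast_to_agents")
    out = asyncio.run(broadcast._arun(message="status?"))
    assert set(out) == {"researcher", "editor"}
```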
243 | ---
244 |
245 | ## Migration from single-agent setups
246 |
247 | You don’t have to change your MCP servers or model configs. Introduce peers gradually by:
248 |
249 | 1. Building a small specialist peer (`research`, `editor`, `critic`).
250 | 2. Attaching it via `cross_agents`.
251 | 3. Nudging your system prompt to allow delegation: _“If a peer tool is available and more capable, delegate.”_
252 |
253 | ---
254 |
255 | ## Reference (Public API)
256 |
257 | ### `CrossAgent`
258 |
259 | ```python
260 | CrossAgent(
261 | agent: Runnable[Any, Any],
262 | description: str = ""
263 | )
264 | ```
265 |
266 | ### `build_deep_agent(..., cross_agents=...)`
267 |
268 | ```python
269 | main_graph, loader = await build_deep_agent(
270 | servers=..., # Mapping[str, ServerSpec]
271 | model="openai:gpt-4.1", # or BaseChatModel or Runnable
272 | instructions=None, # optional
273 | trace_tools=True, # optional
274 | cross_agents={
275 | "researcher": CrossAgent(agent=peer_graph, description="..."),
276 | # more peers...
277 | }
278 | )
279 | ```
280 |
281 | **Auto-added tools**
282 |
283 | - `ask_agent_<peer_name>(message: str, context?: str, timeout_s?: float) -> str`
284 | - `broadcast_to_agents(message: str, peers?: list[str], timeout_s?: float) -> dict[str, str]`
285 |
286 | ---
287 |
288 | ## Roadmap
289 |
290 | - Remote peers (HTTP/SSE) with the same API.
291 | - Streaming replies & “live debate” orchestration.
292 | - Capability tagging & auto-routing (“use the best peer for X”).
293 | - Observability hooks (spans/metrics per peer call).
294 |
295 | ---
296 |
297 | _That’s it—plug in a peer, flip on tracing, and you’ve got cooperative agents without extra plumbing._
298 |
--------------------------------------------------------------------------------
/src/deepmcpagent/cli.py:
--------------------------------------------------------------------------------
1 | """
2 | CLI for deepmcpagent: list tools and run an interactive agent session.
3 |
4 | Notes:
5 | - The CLI path uses provider id strings for models (e.g., "openai:gpt-4.1"),
6 | which `init_chat_model` handles. In code, you can pass a model instance.
7 | - Model is REQUIRED (no fallback).
8 | - Usage for repeated server specs:
9 | --stdio "name=echo command=python args='-m mypkg.server --port 3333' env.API_KEY=xyz keep_alive=false"
10 | --stdio "name=tool2 command=/usr/local/bin/tool2"
11 | --http "name=remote url=http://127.0.0.1:8000/mcp transport=http"
12 |
13 | (Repeat --stdio/--http for multiple servers.)
14 | """
15 |
16 | from __future__ import annotations
17 |
18 | import asyncio
19 | import json
20 | import shlex
21 | from importlib.metadata import version as get_version
22 | from typing import Annotated, Any, Literal, cast
23 |
24 | import typer
25 | from dotenv import load_dotenv
26 | from rich.console import Console
27 | from rich.panel import Panel
28 | from rich.table import Table
29 |
30 | from .agent import build_deep_agent
31 | from .config import HTTPServerSpec, ServerSpec, StdioServerSpec
32 |
33 | load_dotenv()
34 |
35 | app = typer.Typer(no_args_is_help=True, add_completion=False)
36 | console = Console()
37 |
38 |
39 | @app.callback(invoke_without_command=True)
40 | def _version_callback(
41 | version: Annotated[
42 | bool | None,
43 | typer.Option("--version", help="Show version and exit", is_eager=True),
44 | ] = None,
45 | ) -> None:
46 | """Global callback to support --version printing."""
47 | if version:
48 | console.print(get_version("deepmcpagent"))
49 | raise typer.Exit()
50 |
51 |
52 | def _parse_kv(opts: list[str]) -> dict[str, str]:
53 | """Parse ['k=v', 'x=y', ...] into a dict. Values may contain spaces."""
54 | out: dict[str, str] = {}
55 | for it in opts:
56 | if "=" not in it:
57 | raise typer.BadParameter(f"Expected key=value, got: {it}")
58 | k, v = it.split("=", 1)
59 | out[k.strip()] = v.strip()
60 | return out
61 |
62 |
63 | def _merge_servers(stdios: list[str], https: list[str]) -> dict[str, ServerSpec]:
64 | """
65 | Convert flat lists of block strings into server specs.
66 |
67 | Each entry in `stdios` / `https` is a single quoted string like:
68 | "name=echo command=python args='-m mymod --port 3333' env.API_KEY=xyz cwd=/tmp keep_alive=false"
69 | "name=remote url=http://127.0.0.1:8000/mcp transport=http"
70 |
71 | We first shlex-split the string into key=value tokens, then parse.
72 | """
73 | servers: dict[str, ServerSpec] = {}
74 |
75 | # stdio (kept for completeness)
76 | for block_str in stdios:
77 | tokens = shlex.split(block_str)
78 | kv = _parse_kv(tokens)
79 |
80 | name = kv.pop("name", None)
81 | if not name:
82 | raise typer.BadParameter("Missing required key: name (in --stdio block)")
83 |
84 | command = kv.pop("command", None)
85 | if not command:
86 | raise typer.BadParameter("Missing required key: command (in --stdio block)")
87 |
88 | args_value = kv.pop("args", "")
89 | args_list = shlex.split(args_value) if args_value else []
90 |
91 | env = {k.split(".", 1)[1]: v for k, v in list(kv.items()) if k.startswith("env.")}
92 | cwd = kv.get("cwd")
93 | keep_alive = kv.get("keep_alive", "true").lower() != "false"
94 |
95 | stdio_spec: ServerSpec = StdioServerSpec(
96 | command=command,
97 | args=args_list,
98 | env=env,
99 | cwd=cwd,
100 | keep_alive=keep_alive,
101 | )
102 | servers[name] = stdio_spec
103 |
104 | # http
105 | for block_str in https:
106 | tokens = shlex.split(block_str)
107 | kv = _parse_kv(tokens)
108 |
109 | name = kv.pop("name", None)
110 | if not name:
111 | raise typer.BadParameter("Missing required key: name (in --http block)")
112 |
113 | url = kv.pop("url", None)
114 | if not url:
115 | raise typer.BadParameter("Missing required key: url (in --http block)")
116 |
117 | transport_str = kv.pop("transport", "http") # "http", "streamable-http", or "sse"
118 | transport = cast(Literal["http", "streamable-http", "sse"], transport_str)
119 |
120 | headers = {k.split(".", 1)[1]: v for k, v in list(kv.items()) if k.startswith("header.")}
121 | auth = kv.get("auth")
122 |
123 | http_spec: ServerSpec = HTTPServerSpec(
124 | url=url,
125 | transport=transport,
126 | headers=headers,
127 | auth=auth,
128 | )
129 | servers[name] = http_spec
130 |
131 | return servers
132 |
133 |
134 | def _extract_final_answer(result: Any) -> str:
135 | """Best-effort extraction of the final text from various executors."""
136 | try:
137 | # LangGraph prebuilt returns {"messages": [ ... ]}
138 | if isinstance(result, dict) and "messages" in result and result["messages"]:
139 | last = result["messages"][-1]
140 | content = getattr(last, "content", None)
141 | if isinstance(content, str) and content:
142 | return content
143 | if isinstance(content, list) and content and isinstance(content[0], dict):
144 | return content[0].get("text") or str(content)
145 | return str(last)
146 | return str(result)
147 | except Exception:
148 | return str(result)
149 |
150 |
151 | @app.command(name="list-tools")
152 | def list_tools(
153 | model_id: Annotated[
154 | str,
155 | typer.Option("--model-id", help="REQUIRED model provider id (e.g., 'openai:gpt-4.1')."),
156 | ],
157 | stdio: Annotated[
158 | list[str] | None,
159 | typer.Option(
160 | "--stdio",
161 | help=(
162 | "Block string: \"name=... command=... args='...' "
163 | '[env.X=Y] [cwd=...] [keep_alive=true|false]". Repeatable.'
164 | ),
165 | ),
166 | ] = None,
167 | http: Annotated[
168 | list[str] | None,
169 | typer.Option(
170 | "--http",
171 | help=(
172 | 'Block string: "name=... url=... [transport=http|streamable-http|sse] '
173 | '[header.X=Y] [auth=...]". Repeatable.'
174 | ),
175 | ),
176 | ] = None,
177 | instructions: Annotated[
178 | str,
179 | typer.Option("--instructions", help="Optional system prompt override."),
180 | ] = "",
181 | ) -> None:
182 | """List all MCP tools discovered using the provided server specs."""
183 | servers = _merge_servers(stdio or [], http or [])
184 |
185 | async def _run() -> None:
186 | _, loader = await build_deep_agent(
187 | servers=servers,
188 | model=model_id,
189 | instructions=instructions or None,
190 | )
191 | infos = await loader.list_tool_info()
192 | infos = list(infos or [])
193 |
194 | table = Table(title="MCP Tools", show_lines=True)
195 | table.add_column("Tool", style="cyan", no_wrap=True)
196 | table.add_column("Description", style="green")
197 | table.add_column("Input Schema", style="white")
198 | for i in infos:
199 | schema_str = json.dumps(i.input_schema, ensure_ascii=False)
200 | if len(schema_str) > 120:
201 | schema_str = schema_str[:117] + "..."
202 | table.add_row(i.name, i.description or "-", schema_str)
203 | console.print(table)
204 |
205 | asyncio.run(_run())
206 |
207 |
208 | @app.command()
209 | def run(
210 | model_id: Annotated[
211 | str,
212 | typer.Option(..., help="REQUIRED model provider id (e.g., 'openai:gpt-4.1')."),
213 | ],
214 | stdio: Annotated[
215 | list[str] | None,
216 | typer.Option(
217 | "--stdio",
218 | help=(
219 | "Block string: \"name=... command=... args='...' "
220 | '[env.X=Y] [cwd=...] [keep_alive=true|false]". Repeatable.'
221 | ),
222 | ),
223 | ] = None,
224 | http: Annotated[
225 | list[str] | None,
226 | typer.Option(
227 | "--http",
228 | help=(
229 | 'Block string: "name=... url=... [transport=http|streamable-http|sse] '
230 | '[header.X=Y] [auth=...]". Repeatable.'
231 | ),
232 | ),
233 | ] = None,
234 | instructions: Annotated[
235 | str,
236 | typer.Option("--instructions", help="Optional system prompt override."),
237 | ] = "",
238 | # IMPORTANT: don't duplicate defaults in Option() and the parameter!
239 | trace: Annotated[
240 | bool,
241 | typer.Option("--trace/--no-trace", help="Print tool invocations & results."),
242 | ] = True,
243 | raw: Annotated[
244 | bool,
245 | typer.Option("--raw/--no-raw", help="Also print raw result object."),
246 | ] = False,
247 | ) -> None:
248 | """Start an interactive agent that uses only MCP tools."""
249 | servers = _merge_servers(stdio or [], http or [])
250 |
251 | async def _chat() -> None:
252 | graph, _ = await build_deep_agent(
253 | servers=servers,
254 | model=model_id,
255 | instructions=instructions or None,
256 | trace_tools=trace, # <- enable deepmcpagent tool tracing
257 | )
258 | console.print("[bold]DeepMCPAgent is ready. Type 'exit' to quit.[/bold]")
259 | while True:
260 | try:
261 | user = input("> ").strip()
262 | except (EOFError, KeyboardInterrupt):
263 | console.print("\nExiting.")
264 | break
265 | if user.lower() in {"exit", "quit"}:
266 | break
267 | if not user:
268 | continue
269 | try:
270 | result = await graph.ainvoke({"messages": [{"role": "user", "content": user}]})
271 | except Exception as exc:
272 | console.print(f"[red]Error during run:[/red] {exc}")
273 | continue
274 |
275 | final_text = _extract_final_answer(result)
276 | console.print(
277 | Panel(final_text or "(no content)", title="Final LLM Answer", style="bold green")
278 | )
279 | if raw:
280 | console.print(result)
281 |
282 | asyncio.run(_chat())
283 |
--------------------------------------------------------------------------------
/LICENSE:
--------------------------------------------------------------------------------
1 | Apache License
2 | Version 2.0, January 2004
3 | http://www.apache.org/licenses/
4 |
5 | TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
6 |
7 | 1. Definitions.
8 |
9 | "License" shall mean the terms and conditions for use, reproduction,
10 | and distribution as defined by Sections 1 through 9 of this document.
11 |
12 | "Licensor" shall mean the copyright owner or entity authorized by
13 | the copyright owner that is granting the License.
14 |
15 | "Legal Entity" shall mean the union of the acting entity and all
16 | other entities that control, are controlled by, or are under common
17 | control with that entity. For the purposes of this definition,
18 | "control" means (i) the power, direct or indirect, to cause the
19 | direction or management of such entity, whether by contract or
20 | otherwise, or (ii) ownership of fifty percent (50%) or more of the
21 | outstanding shares, or (iii) beneficial ownership of such entity.
22 |
23 | "You" (or "Your") shall mean an individual or Legal Entity
24 | exercising permissions granted by this License.
25 |
26 | "Source" form shall mean the preferred form for making modifications,
27 | including but not limited to software source code, documentation
28 | source, and configuration files.
29 |
30 | "Object" form shall mean any form resulting from mechanical
31 | transformation or translation of a Source form, including but
32 | not limited to compiled object code, generated documentation,
33 | and conversions to other media types.
34 |
35 | "Work" shall mean the work of authorship, whether in Source or
36 | Object form, made available under the License, as indicated by a
37 | copyright notice that is included in or attached to the work
38 | (an example is provided in the Appendix below).
39 |
40 | "Derivative Works" shall mean any work, whether in Source or Object
41 | form, that is based on (or derived from) the Work and for which the
42 | editorial revisions, annotations, elaborations, or other modifications
43 | represent, as a whole, an original work of authorship. For the purposes
44 | of this License, Derivative Works shall not include works that remain
45 | separable from, or merely link (or bind by name) to the interfaces of,
46 | the Work and Derivative Works thereof.
47 |
48 | "Contribution" shall mean any work of authorship, including
49 | the original version of the Work and any modifications or additions
50 | to that Work or Derivative Works thereof, that is intentionally
51 | submitted to Licensor for inclusion in the Work by the copyright owner
52 | or by an individual or Legal Entity authorized to submit on behalf of
53 | the copyright owner. For the purposes of this definition, "submitted"
54 | means any form of electronic, verbal, or written communication sent
55 | to the Licensor or its representatives, including but not limited to
56 | communication on electronic mailing lists, source code control systems,
57 | and issue tracking systems that are managed by, or on behalf of, the
58 | Licensor for the purpose of discussing and improving the Work, but
59 | excluding communication that is conspicuously marked or otherwise
60 | designated in writing by the copyright owner as "Not a Contribution."
61 |
62 | "Contributor" shall mean Licensor and any individual or Legal Entity
63 | on behalf of whom a Contribution has been received by Licensor and
64 | subsequently incorporated within the Work.
65 |
66 | 2. Grant of Copyright License. Subject to the terms and conditions of
67 | this License, each Contributor hereby grants to You a perpetual,
68 | worldwide, non-exclusive, no-charge, royalty-free, irrevocable
69 | copyright license to reproduce, prepare Derivative Works of,
70 | publicly display, publicly perform, sublicense, and distribute the
71 | Work and such Derivative Works in Source or Object form.
72 |
73 | 3. Grant of Patent License. Subject to the terms and conditions of
74 | this License, each Contributor hereby grants to You a perpetual,
75 | worldwide, non-exclusive, no-charge, royalty-free, irrevocable
76 | (except as stated in this section) patent license to make, have made,
77 | use, offer to sell, sell, import, and otherwise transfer the Work,
78 | where such license applies only to those patent claims licensable
79 | by such Contributor that are necessarily infringed by their
80 | Contribution(s) alone or by combination of their Contribution(s)
81 | with the Work to which such Contribution(s) was submitted. If You
82 | institute patent litigation against any entity (including a
83 | cross-claim or counterclaim in a lawsuit) alleging that the Work
84 | or a Contribution incorporated within the Work constitutes direct
85 | or contributory patent infringement, then any patent licenses
86 | granted to You under this License for that Work shall terminate
87 | as of the date such litigation is filed.
88 |
89 | 4. Redistribution. You may reproduce and distribute copies of the
90 | Work or Derivative Works thereof in any medium, with or without
91 | modifications, and in Source or Object form, provided that You
92 | meet the following conditions:
93 |
94 | (a) You must give any other recipients of the Work or
95 | Derivative Works a copy of this License; and
96 |
97 | (b) You must cause any modified files to carry prominent notices
98 | stating that You changed the files; and
99 |
100 | (c) You must retain, in the Source form of any Derivative Works
101 | that You distribute, all copyright, patent, trademark, and
102 | attribution notices from the Source form of the Work,
103 | excluding those notices that do not pertain to any part of
104 | the Derivative Works; and
105 |
106 | (d) If the Work includes a "NOTICE" text file as part of its
107 | distribution, then any Derivative Works that You distribute must
108 | include a readable copy of the attribution notices contained
109 | within such NOTICE file, excluding those notices that do not
110 | pertain to any part of the Derivative Works, in at least one
111 | of the following places: within a NOTICE text file distributed
112 | as part of the Derivative Works; within the Source form or
113 | documentation, if provided along with the Derivative Works; or,
114 | within a display generated by the Derivative Works, if and
115 | wherever such third-party notices normally appear. The contents
116 | of the NOTICE file are for informational purposes only and
117 | do not modify the License. You may add Your own attribution
118 | notices within Derivative Works that You distribute, alongside
119 | or as an addendum to the NOTICE text from the Work, provided
120 | that such additional attribution notices cannot be construed
121 | as modifying the License.
122 |
123 | You may add Your own copyright statement to Your modifications and
124 | may provide additional or different license terms and conditions
125 | for use, reproduction, or distribution of Your modifications, or
126 | for any such Derivative Works as a whole, provided Your use,
127 | reproduction, and distribution of the Work otherwise complies with
128 | the conditions stated in this License.
129 |
130 | 5. Submission of Contributions. Unless You explicitly state otherwise,
131 | any Contribution intentionally submitted for inclusion in the Work
132 | by You to the Licensor shall be under the terms and conditions of
133 | this License, without any additional terms or conditions.
134 | Notwithstanding the above, nothing herein shall supersede or modify
135 | the terms of any separate license agreement you may have executed
136 | with Licensor regarding such Contributions.
137 |
138 | 6. Trademarks. This License does not grant permission to use the trade
139 | names, trademarks, service marks, or product names of the Licensor,
140 | except as required for reasonable and customary use in describing the
141 | origin of the Work and reproducing the content of the NOTICE file.
142 |
143 | 7. Disclaimer of Warranty. Unless required by applicable law or
144 | agreed to in writing, Licensor provides the Work (and each
145 | Contributor provides its Contributions) on an "AS IS" BASIS,
146 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
147 | implied, including, without limitation, any warranties or conditions
148 | of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
149 | PARTICULAR PURPOSE. You are solely responsible for determining the
150 | appropriateness of using or redistributing the Work and assume any
151 | risks associated with Your exercise of permissions under this License.
152 |
153 | 8. Limitation of Liability. In no event and under no legal theory,
154 | whether in tort (including negligence), contract, or otherwise,
155 | unless required by applicable law (such as deliberate and grossly
156 | negligent acts) or agreed to in writing, shall any Contributor be
157 | liable to You for damages, including any direct, indirect, special,
158 | incidental, or consequential damages of any character arising as a
159 | result of this License or out of the use or inability to use the
160 | Work (including but not limited to damages for loss of goodwill,
161 | work stoppage, computer failure or malfunction, or any and all
162 | other commercial damages or losses), even if such Contributor
163 | has been advised of the possibility of such damages.
164 |
165 | 9. Accepting Warranty or Additional Liability. While redistributing
166 | the Work or Derivative Works thereof, You may choose to offer,
167 | and charge a fee for, acceptance of support, warranty, indemnity,
168 | or other liability obligations and/or rights consistent with this
169 | License. However, in accepting such obligations, You may act only
170 | on Your own behalf and on Your sole responsibility, not on behalf
171 | of any other Contributor, and only if You agree to indemnify,
172 | defend, and hold each Contributor harmless for any liability
173 | incurred by, or claims asserted against, such Contributor by reason
174 | of your accepting any such warranty or additional liability.
175 |
176 | END OF TERMS AND CONDITIONS
177 |
178 | APPENDIX: How to apply the Apache License to your work.
179 |
180 | To apply the Apache License to your work, attach the following
181 | boilerplate notice, with the fields enclosed by brackets "[]"
182 | replaced with your own identifying information. (Don't include
183 | the brackets!) The text should be enclosed in the appropriate
184 | comment syntax for the file format. We also recommend that a
185 | file or class name and description of purpose be included on the
186 | same "printed page" as the copyright notice for easier
187 | identification within third-party archives.
188 |
189 | Copyright 2025 cryxnet
190 |
191 | Licensed under the Apache License, Version 2.0 (the "License");
192 | you may not use this file except in compliance with the License.
193 | You may obtain a copy of the License at
194 |
195 | http://www.apache.org/licenses/LICENSE-2.0
196 |
197 | Unless required by applicable law or agreed to in writing, software
198 | distributed under the License is distributed on an "AS IS" BASIS,
199 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
200 | See the License for the specific language governing permissions and
201 | limitations under the License.
202 |
--------------------------------------------------------------------------------
/README.md:
--------------------------------------------------------------------------------
1 | ![DeepMCPAgent icon](docs/images/icon.png)
2 |
3 | # 🤖 DeepMCPAgent
4 |
5 | > Model-agnostic LangChain/LangGraph agents powered entirely by MCP tools over HTTP/SSE.
6 |
7 | Discover MCP tools dynamically. Bring your own LangChain model. Build production-ready agents—fast.
8 |
9 | 📚 Documentation • 🛠 Issues
10 |
35 | ## ✨ Why DeepMCPAgent?
36 |
37 | - 🔌 **Zero manual tool wiring** — tools are discovered dynamically from MCP servers (HTTP/SSE)
38 | - 🌐 **External APIs welcome** — connect to remote MCP servers (with headers/auth)
39 | - 🧠 **Model-agnostic** — pass any LangChain chat model instance (OpenAI, Anthropic, Ollama, Groq, local, …)
40 | - ⚡ **DeepAgents (optional)** — if installed, you get a deep agent loop; otherwise robust LangGraph ReAct fallback
41 | - 🛠️ **Typed tool args** — JSON-Schema → Pydantic → LangChain `BaseTool` (typed, validated calls)
42 | - 🧪 **Quality bar** — mypy (strict), ruff, pytest, GitHub Actions, docs
43 |
44 | > **MCP first.** Agents shouldn’t hardcode tools — they should **discover** and **call** them. DeepMCPAgent builds that bridge.
45 |
46 | ---
47 |
48 | ## 🚀 Installation
49 |
50 | Install from [PyPI](https://pypi.org/project/deepmcpagent/):
51 |
52 | ```bash
53 | pip install "deepmcpagent[deep]"
54 | ```
55 |
56 | This installs DeepMCPAgent with **DeepAgents support (recommended)** for the best agent loop.
57 | Other optional extras:
58 |
59 | - `dev` → linting, typing, tests
60 | - `docs` → MkDocs + Material + mkdocstrings
61 | - `examples` → dependencies used by bundled examples
62 |
63 | ```bash
64 | # install with deepagents + dev tooling
65 | pip install "deepmcpagent[deep,dev]"
66 | ```
67 |
68 | ⚠️ If you’re using **zsh**, remember to quote extras:
69 |
70 | ```bash
71 | pip install "deepmcpagent[deep,dev]"
72 | ```
73 |
74 | ---
75 |
76 | ## 🚀 Quickstart
77 |
78 | ### 1) Start a sample MCP server (HTTP)
79 |
80 | ```bash
81 | python examples/servers/math_server.py
82 | ```
83 |
84 | This serves an MCP endpoint at: **[http://127.0.0.1:8000/mcp](http://127.0.0.1:8000/mcp)**
85 |
86 | ### 2) Run the example agent (with fancy console output)
87 |
88 | ```bash
89 | python examples/use_agent.py
90 | ```
91 |
92 | **What you’ll see:**
93 |
94 | 
95 |
96 | ---
97 |
98 | ## 🧑💻 Bring-Your-Own Model (BYOM)
99 |
100 | DeepMCPAgent lets you pass **any LangChain chat model instance** (or a provider id string if you prefer `init_chat_model`):
101 |
102 | ```python
103 | import asyncio
104 | from deepmcpagent import HTTPServerSpec, build_deep_agent
105 |
106 | # choose your model:
107 | # from langchain_openai import ChatOpenAI
108 | # model = ChatOpenAI(model="gpt-4.1")
109 |
110 | # from langchain_anthropic import ChatAnthropic
111 | # model = ChatAnthropic(model="claude-3-5-sonnet-latest")
112 |
113 | # from langchain_community.chat_models import ChatOllama
114 | # model = ChatOllama(model="llama3.1")
115 |
116 | async def main():
117 | servers = {
118 | "math": HTTPServerSpec(
119 | url="http://127.0.0.1:8000/mcp",
120 | transport="http", # or "sse"
121 | # headers={"Authorization": "Bearer "},
122 | ),
123 | }
124 |
125 | graph, _ = await build_deep_agent(
126 | servers=servers,
127 | model=model,
128 | instructions="Use MCP tools precisely."
129 | )
130 |
131 | out = await graph.ainvoke({"messages":[{"role":"user","content":"add 21 and 21 with tools"}]})
132 | print(out)
133 |
134 | asyncio.run(main())
135 | ```
136 |
137 | > Tip: If you pass a **string** like `"openai:gpt-4.1"`, we’ll call LangChain’s `init_chat_model()` for you (and it will read env vars like `OPENAI_API_KEY`). Passing a **model instance** gives you full control.
138 |
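For instance, inside an async function, the string form needs no model imports at all (endpoint shown is the sample math server):

```python
from deepmcpagent import HTTPServerSpec, build_deep_agent

graph, _ = await build_deep_agent(
    servers={"math": HTTPServerSpec(url="http://127.0.0.1:8000/mcp", transport="http")},
    model="openai:gpt-4.1",  # resolved via init_chat_model; reads OPENAI_API_KEY
)
```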
139 | ---
140 |
141 | ## 🤝 Cross-Agent Communication
142 |
143 | DeepMCPAgent v0.5 introduces **Cross-Agent Communication** — agents that can _talk to each other_ without extra servers, message queues, or orchestration layers.
144 |
145 | You can now attach one agent as a **peer** inside another, turning it into a callable tool.
146 | Each peer appears automatically as `ask_agent_<peer_name>` or can be reached via `broadcast_to_agents` for parallel reasoning across multiple agents.
147 |
148 | This means your agents can **delegate**, **collaborate**, and **critique** each other — all through the same MCP tool interface.
149 | It’s lightweight, model-agnostic, and fully transparent: every peer call is traced like any other tool invocation.
150 |
151 | ---
152 |
153 | ### 💻 Example
154 |
155 | ```python
156 | import asyncio
157 | from deepmcpagent import HTTPServerSpec, build_deep_agent
158 | from deepmcpagent.cross_agent import CrossAgent
159 |
160 | async def main():
161 | # 1️⃣ Build a "research" peer agent
162 | research_graph, _ = await build_deep_agent(
163 | servers={"web": HTTPServerSpec(url="http://127.0.0.1:8000/mcp")},
164 | model="openai:gpt-4o-mini",
165 | instructions="You are a focused research assistant that finds and summarizes sources.",
166 | )
167 |
168 | # 2️⃣ Build the main agent and attach the peer as a tool
169 | main_graph, _ = await build_deep_agent(
170 | servers={"math": HTTPServerSpec(url="http://127.0.0.1:9000/mcp")},
171 | model="openai:gpt-4.1",
172 | instructions="You are a lead analyst. Use peers when you need research or summarization.",
173 | cross_agents={
174 | "researcher": CrossAgent(agent=research_graph, description="A web research peer.")
175 | },
176 | trace_tools=True, # see all tool calls + peer responses in console
177 | )
178 |
179 | # 3️⃣ Ask a question — the main agent can now call the researcher
180 | result = await main_graph.ainvoke({
181 | "messages": [{"role": "user", "content": "Find recent research on AI ethics and summarize it."}]
182 | })
183 |
184 | print(result)
185 |
186 | asyncio.run(main())
187 | ```
188 |
189 | 🧩 **Result:**
190 | Your main agent automatically calls `ask_agent_researcher(...)` when it decides delegation makes sense, and the peer agent returns its best final answer — all transparently handled by the MCP layer.
191 |
192 | ---
193 |
194 | ### 💡 Use Cases
195 |
196 | - Researcher → Writer → Editor pipelines
197 | - Safety or reviewer peers that audit outputs
198 | - Retrieval or reasoning specialists
199 | - Multi-model ensembles combining small and large LLMs
200 |
201 | No new infrastructure. No complex orchestration.
202 | Just **agents helping agents**, powered entirely by MCP over HTTP/SSE.
203 |
204 | > 🧠 One framework, many minds — **DeepMCPAgent** turns individual LLMs into a cooperative system.
205 |
206 | ---
207 |
208 | ## 🖥️ CLI (no Python required)
209 |
210 | ```bash
211 | # list tools from one or more HTTP servers
212 | deepmcpagent list-tools \
213 | --http name=math url=http://127.0.0.1:8000/mcp transport=http \
214 | --model-id "openai:gpt-4.1"
215 |
216 | # interactive agent chat (HTTP/SSE servers only)
217 | deepmcpagent run \
218 | --http name=math url=http://127.0.0.1:8000/mcp transport=http \
219 | --model-id "openai:gpt-4.1"
220 | ```
221 |
222 | > The CLI accepts **repeated** `--http` blocks; add `header.X=Y` pairs for auth:
223 | >
224 | > ```
225 | > --http name=ext url=https://api.example.com/mcp transport=http header.Authorization="Bearer TOKEN"
226 | > ```
227 |
228 | ---
229 |
230 | ## Full Architecture & Agent Flow
231 |
232 | ### 1) High-level Architecture (modules & data flow)
233 |
234 | ```mermaid
235 | flowchart LR
236 | %% Groupings
237 | subgraph User["👤 User / App"]
238 | Q["Prompt / Task"]
239 | CLI["CLI (Typer)"]
240 | PY["Python API"]
241 | end
242 |
243 | subgraph Agent["🤖 Agent Runtime"]
244 | DIR["build_deep_agent()"]
245 | PROMPT["prompt.py\n(DEFAULT_SYSTEM_PROMPT)"]
246 | subgraph AGRT["Agent Graph"]
247 | DA["DeepAgents loop\n(if installed)"]
248 | REACT["LangGraph ReAct\n(fallback)"]
249 | end
250 | LLM["LangChain Model\n(instance or init_chat_model(provider-id))"]
251 | TOOLS["LangChain Tools\n(BaseTool[])"]
252 | end
253 |
254 | subgraph MCP["🧰 Tooling Layer (MCP)"]
255 | LOADER["MCPToolLoader\n(JSON-Schema ➜ Pydantic ➜ BaseTool)"]
256 | TOOLWRAP["_FastMCPTool\n(async _arun → client.call_tool)"]
257 | end
258 |
259 | subgraph FMCP["🌐 FastMCP Client"]
260 | CFG["servers_to_mcp_config()\n(mcpServers dict)"]
261 | MULTI["FastMCPMulti\n(fastmcp.Client)"]
262 | end
263 |
264 | subgraph SRV["🛠 MCP Servers (HTTP/SSE)"]
265 | S1["Server A\n(e.g., math)"]
266 | S2["Server B\n(e.g., search)"]
267 | S3["Server C\n(e.g., github)"]
268 | end
269 |
270 | %% Edges
271 | Q -->|query| CLI
272 | Q -->|query| PY
273 | CLI --> DIR
274 | PY --> DIR
275 |
276 | DIR --> PROMPT
277 | DIR --> LLM
278 | DIR --> LOADER
279 | DIR --> AGRT
280 |
281 | LOADER --> MULTI
282 | CFG --> MULTI
283 | MULTI -->|list_tools| SRV
284 | LOADER --> TOOLS
285 | TOOLS --> AGRT
286 |
287 | AGRT <-->|messages| LLM
288 | AGRT -->|tool calls| TOOLWRAP
289 | TOOLWRAP --> MULTI
290 | MULTI -->|call_tool| SRV
291 |
292 | SRV -->|tool result| MULTI --> TOOLWRAP --> AGRT -->|final answer| CLI
293 | AGRT -->|final answer| PY
294 | ```
295 |
296 | ---
297 |
298 | ### 2) Runtime Sequence (end-to-end tool call)
299 |
300 | ```mermaid
301 | sequenceDiagram
302 | autonumber
303 | participant U as User
304 | participant CLI as CLI/Python
305 | participant Builder as build_deep_agent()
306 | participant Loader as MCPToolLoader
307 | participant Graph as Agent Graph (DeepAgents or ReAct)
308 | participant LLM as LangChain Model
309 | participant Tool as _FastMCPTool
310 | participant FMCP as FastMCP Client
311 | participant S as MCP Server (HTTP/SSE)
312 |
313 | U->>CLI: Enter prompt
314 | CLI->>Builder: build_deep_agent(servers, model, instructions?)
315 | Builder->>Loader: get_all_tools()
316 | Loader->>FMCP: list_tools()
317 | FMCP->>S: HTTP(S)/SSE list_tools
318 | S-->>FMCP: tools + JSON-Schema
319 | FMCP-->>Loader: tool specs
320 | Loader-->>Builder: BaseTool[]
321 | Builder-->>CLI: (Graph, Loader)
322 |
323 | U->>Graph: ainvoke({messages:[user prompt]})
324 | Graph->>LLM: Reason over system + messages + tool descriptions
325 | LLM-->>Graph: Tool call (e.g., add(a=3,b=5))
326 | Graph->>Tool: _arun(a=3,b=5)
327 | Tool->>FMCP: call_tool("add", {a:3,b:5})
328 | FMCP->>S: POST /mcp tools.call("add", {...})
329 | S-->>FMCP: result { data: 8 }
330 | FMCP-->>Tool: result
331 | Tool-->>Graph: ToolMessage(content=8)
332 |
333 | Graph->>LLM: Continue with observations
334 | LLM-->>Graph: Final response "(3 + 5) * 7 = 56"
335 | Graph-->>CLI: messages (incl. final LLM answer)
336 | ```
337 |
338 | ---
339 |
340 | ### 3) Agent Control Loop (planning & acting)
341 |
342 | ```mermaid
343 | stateDiagram-v2
344 | [*] --> AcquireTools
345 | AcquireTools: Discover MCP tools via FastMCP\n(JSON-Schema ➜ Pydantic ➜ BaseTool)
346 | AcquireTools --> Plan
347 |
348 | Plan: LLM plans next step\n(uses system prompt + tool descriptions)
349 | Plan --> CallTool: if tool needed
350 | Plan --> Respond: if direct answer sufficient
351 |
352 | CallTool: _FastMCPTool._arun\n→ client.call_tool(name, args)
353 | CallTool --> Observe: receive tool result
354 | Observe: Parse result payload (data/text/content)
355 | Observe --> Decide
356 |
357 | Decide: More tools needed?
358 | Decide --> Plan: yes
359 | Decide --> Respond: no
360 |
361 | Respond: LLM crafts final message
362 | Respond --> [*]
363 | ```
364 |
365 | ---
366 |
367 | ### 4) Code Structure (types & relationships)
368 |
369 | ```mermaid
370 | classDiagram
371 | class StdioServerSpec {
372 | +command: str
373 | +args: List[str]
374 | +env: Dict[str,str]
375 | +cwd: Optional[str]
376 | +keep_alive: bool
377 | }
378 |
379 | class HTTPServerSpec {
380 | +url: str
381 | +transport: Literal["http","streamable-http","sse"]
382 | +headers: Dict[str,str]
383 | +auth: Optional[str]
384 | }
385 |
386 | class FastMCPMulti {
387 | -_client: fastmcp.Client
388 | +client(): Client
389 | }
390 |
391 | class MCPToolLoader {
392 | -_multi: FastMCPMulti
393 | +get_all_tools(): List[BaseTool]
394 | +list_tool_info(): List[ToolInfo]
395 | }
396 |
397 | class _FastMCPTool {
398 | +name: str
399 | +description: str
400 | +args_schema: Type[BaseModel]
401 | -_tool_name: str
402 | -_client: Any
403 | +_arun(**kwargs) async
404 | }
405 |
406 | class ToolInfo {
407 | +server_guess: str
408 | +name: str
409 | +description: str
410 | +input_schema: Dict[str,Any]
411 | }
412 |
413 | class build_deep_agent {
414 | +servers: Mapping[str,ServerSpec]
415 | +model: ModelLike
416 | +instructions?: str
417 | +returns: (graph, loader)
418 | }
419 |
420 | ServerSpec <|-- StdioServerSpec
421 | ServerSpec <|-- HTTPServerSpec
422 | FastMCPMulti o--> ServerSpec : uses servers_to_mcp_config()
423 | MCPToolLoader o--> FastMCPMulti
424 | MCPToolLoader --> _FastMCPTool : creates
425 | _FastMCPTool ..> BaseTool
426 | build_deep_agent --> MCPToolLoader : discovery
427 | build_deep_agent --> _FastMCPTool : tools for agent
428 | ```
429 |
430 | ---
431 |
432 | > These diagrams reflect the current implementation (a minimal usage sketch follows the list):
433 | >
434 | > - **Model is required** (string provider-id or LangChain model instance).
435 | > - **MCP tools only**, discovered at runtime via **FastMCP** (HTTP/SSE).
436 | > - Agent loop prefers **DeepAgents** if installed; otherwise **LangGraph ReAct**.
437 | > - Tools are typed via **JSON-Schema ➜ Pydantic ➜ LangChain BaseTool**.
438 | > - Fancy console output shows **discovered tools**, **calls**, **results**, and **final answer**.
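
A minimal sketch of the flow above (the server URL, model id, and prompt are illustrative placeholders):

```python
import asyncio

from deepmcpagent import HTTPServerSpec, build_deep_agent


async def main() -> None:
    # One HTTP MCP server; its tools are discovered at runtime.
    servers = {"math": HTTPServerSpec(url="http://127.0.0.1:8000/mcp", transport="http")}

    # A model is required: a provider-id string or a LangChain model instance.
    graph, _ = await build_deep_agent(servers=servers, model="openai:gpt-4.1")

    # The agent plans, calls MCP tools as needed, and returns the full message history.
    result = await graph.ainvoke(
        {"messages": [{"role": "user", "content": "What is (3 + 5) * 7?"}]}
    )
    print(result["messages"][-1].content)


asyncio.run(main())
```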
439 |
440 | ---
441 |
442 | ## 🧪 Development
443 |
444 | ```bash
445 | # install dev tooling
446 | pip install -e ".[dev]"
447 |
448 | # lint & type-check
449 | ruff check .
450 | mypy
451 |
452 | # run tests
453 | pytest -q
454 | ```
455 |
456 | ---
457 |
458 | ## 🛡️ Security & Privacy
459 |
460 | - **Your keys, your model** — we don’t enforce a provider; pass any LangChain model.
461 | - Use **HTTP headers** in `HTTPServerSpec` to deliver bearer/OAuth tokens to servers (sketch below).
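
For example, a minimal sketch of delivering a bearer token (URL and token are placeholders):

```python
from deepmcpagent import HTTPServerSpec

servers = {
    "ext": HTTPServerSpec(
        url="https://api.example.com/mcp",          # placeholder endpoint
        transport="http",
        headers={"Authorization": "Bearer TOKEN"},  # placeholder token
    )
}
```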
462 |
463 | ---
464 |
465 | ## 🧯 Troubleshooting
466 |
467 | - **PEP 668: externally managed environment (macOS + Homebrew)**
468 | Use a virtualenv:
469 |
470 | ```bash
471 | python3 -m venv .venv
472 | source .venv/bin/activate
473 | ```
474 |
475 | - **404 Not Found when connecting**
476 |   Ensure your server uses a path (e.g., `/mcp`) and your client URL includes it; see the sketch after this list.
477 | - **Tool calls failing / attribute errors**
478 | Ensure you’re on the latest version; our tool wrapper uses `PrivateAttr` for client state.
479 | - **High token counts**
480 | That’s normal with tool-calling models. Use smaller models for dev.
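
For the 404 case, a quick client-side check (host and port are illustrative):

```python
from deepmcpagent import HTTPServerSpec

# 404s usually mean the mount path is missing from the client URL.
# Wrong: HTTPServerSpec(url="http://127.0.0.1:8000", transport="http")
# Right: include the path the server is mounted at (e.g., /mcp).
spec = HTTPServerSpec(url="http://127.0.0.1:8000/mcp", transport="http")
```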
481 |
482 | ---
483 |
484 | ## 📄 License
485 |
486 | Apache-2.0 — see [`LICENSE`](/LICENSE).
487 |
488 | ---
489 |
490 | ## ⭐ Stars
491 |
492 |
493 |
499 |
505 |
509 |
510 |
511 | ## 🙏 Acknowledgments
512 |
513 | - The [**MCP** community](https://modelcontextprotocol.io/) for a clean protocol.
514 | - [**LangChain**](https://www.langchain.com/) and [**LangGraph**](https://www.langchain.com/langgraph) for powerful agent runtimes.
515 | - [**FastMCP**](https://gofastmcp.com/getting-started/welcome) for solid client & server implementations.
516 |
520 |
--------------------------------------------------------------------------------
/src/deepmcpagent/cross_agent.py:
--------------------------------------------------------------------------------
1 | # deepmcpagent/cross_agent.py
2 | """
3 | Cross-agent communication utilities for DeepMCPAgent.
4 |
5 | Expose other in-process agents (“peers”) as standard LangChain tools so a
6 | primary (caller) agent can *delegate* to them during planning/execution.
7 |
8 | Tools provided
9 | - Per-peer ask tool → ``ask_agent_<peer_name>``
10 | Forward one message (plus optional caller context) to a single peer and
11 | return the peer’s final text.
12 |
13 | - Broadcast tool → ``broadcast_to_agents``
14 | Send the same message to multiple peers in parallel and return a mapping
15 | of peer → final text. Timeouts/errors are captured per peer so one slow
16 | or failing peer does not fail the whole call.
17 |
18 | Notes:
19 | - No new infrastructure is required. Peers are just in-process LangChain
20 | ``Runnable`` graphs (e.g., a DeepAgents loop or a LangGraph prebuilt
21 | executor returned by :func:`deepmcpagent.agent.build_deep_agent`).
22 | - Both tool classes implement async and sync execution paths (``_arun`` and
23 | ``_run``) to satisfy ``BaseTool``’s interface.
24 | - Optional per-call timeouts use ``anyio.move_on_after``.
25 | - The “final text” is extracted from common agent result shapes. If your
26 | peer returns a custom structure, adapt upstream or post-process the
27 | returned string.
28 |
29 | Examples:
30 | Build a peer agent and attach it to a main agent as a tool:
31 |
32 | >>> from deepmcpagent.agent import build_deep_agent
33 | >>> from deepmcpagent.cross_agent import CrossAgent
34 | >>>
35 | >>> peer_graph, _ = await build_deep_agent(servers=..., model="openai:gpt-4o-mini")
36 | >>> main_graph, _ = await build_deep_agent(
37 | ... servers=...,
38 | ... model="openai:gpt-4.1",
39 | ... cross_agents={"researcher": CrossAgent(agent=peer_graph, description="Web research")}
40 | ... )
41 | >>> # Now the main agent can call:
42 | >>> # - ask_agent_researcher(message=..., context?=..., timeout_s?=...)
43 | >>> # - broadcast_to_agents(message=..., peers?=[...], timeout_s?=...)
44 | """
45 |
46 | from __future__ import annotations
47 |
48 | from collections.abc import Callable, Iterable, Mapping, Sequence
49 | from dataclasses import dataclass
50 | from typing import Any, cast
51 |
52 | from langchain_core.runnables import Runnable
53 | from langchain_core.tools import BaseTool
54 | from pydantic import BaseModel, Field, PrivateAttr
55 |
56 | # -----------------------------
57 | # Public API surface
58 | # -----------------------------
59 |
60 |
61 | @dataclass(frozen=True)
62 | class CrossAgent:
63 | """Metadata wrapper for a peer agent to be exposed as a tool.
64 |
65 | The wrapper is descriptive only. Behavior is implemented by tools produced
66 | via :func:`make_cross_agent_tools`.
67 |
68 | Attributes:
69 | agent: A runnable agent (e.g., LangGraph or DeepAgents) that accepts
70 | ``{"messages": [...]}`` and returns a result consumable by the
71 | built-in “best final text” extractor.
72 | description: One-line human description used in tool docs to help the
73 | calling agent decide when to delegate.
74 |
75 | Examples:
76 | >>> cross = CrossAgent(agent=peer_graph, description="Accurate math")
77 | """
78 |
79 | agent: Runnable[Any, Any]
80 | description: str = ""
81 |
82 |
83 | def make_cross_agent_tools(
84 | peers: Mapping[str, CrossAgent],
85 | *,
86 | tool_name_prefix: str = "ask_agent_",
87 | include_broadcast: bool = True,
88 | ) -> list[BaseTool]:
89 | """Create LangChain tools for cross-agent communication.
90 |
91 | For each peer, a tool named ``f"{tool_name_prefix}{peer_name}"`` is created.
92 | Optionally, a ``broadcast_to_agents`` tool is added to fan-out questions to
93 | multiple peers concurrently.
94 |
95 | Args:
96 | peers: Mapping of peer name → :class:`CrossAgent`. The name becomes part
97 | of the tool id (e.g., ``ask_agent_mathpeer``).
98 | tool_name_prefix: Prefix used for each per-peer ask tool. Defaults to
99 | ``"ask_agent_"``.
100 | include_broadcast: When ``True`` (default), also include the group
101 | fan-out tool ``broadcast_to_agents``.
102 |
103 | Returns:
104 | list[BaseTool]: A list of fully constructed tools ready to be appended
105 | to the caller agent’s toolset.
106 |
107 | Notes:
108 | Construction does not contact peers; errors (e.g., network) surface at
109 | call time during execution of the generated tools.
110 |
111 | Examples:
112 | >>> tools = make_cross_agent_tools({
113 | ... "researcher": CrossAgent(agent=peer_graph, description="Web research")
114 | ... })
115 | >>> # Attach `tools` alongside your MCP-discovered tools when building the agent.
116 | """
117 | if not peers:
118 | return []
119 |
120 | def _best_text(result: Any) -> str:
121 | """Extract a final text answer from common agent result shapes.
122 |
123 | Looks for a LangGraph-like ``{"messages": [...]}`` structure; otherwise
124 | falls back to ``str(result)``.
125 |
126 | Args:
127 | result: The raw result returned by a peer agent.
128 |
129 | Returns:
130 | str: Best-effort final text response.
131 | """
132 | try:
133 | if isinstance(result, dict) and "messages" in result and result["messages"]:
134 | last = result["messages"][-1]
135 | content = getattr(last, "content", None)
136 | if isinstance(content, str) and content:
137 | return content
138 | if isinstance(content, list) and content and isinstance(content[0], dict):
139 | return cast(str, content[0].get("text") or str(content))
140 | return str(last)
141 | return str(result)
142 | except Exception:
143 | return str(result)
144 |
145 | out: list[BaseTool] = []
146 |
147 | # Per-agent ask tools
148 | for name, spec in peers.items():
149 | out.append(
150 | _AskAgentTool(
151 | name=f"{tool_name_prefix}{name}",
152 | description=(
153 | f"Ask peer agent '{name}' for help. " + (spec.description or "")
154 | ).strip(),
155 | target=spec.agent,
156 | extract=_best_text,
157 | )
158 | )
159 |
160 | # Optional broadcast tool
161 | if include_broadcast:
162 | out.append(
163 | _BroadcastTool(
164 | name="broadcast_to_agents",
165 | description=(
166 | "Ask multiple peer agents the same question in parallel and "
167 | "return each peer's final answer."
168 | ),
169 | peers=peers,
170 | extract=_best_text,
171 | )
172 | )
173 |
174 | return out
175 |
176 |
177 | # -----------------------------
178 | # Tool implementations
179 | # -----------------------------
180 |
181 |
182 | class _AskArgs(BaseModel):
183 |     """Arguments for per-peer ask tools (``ask_agent_<peer_name>``).
184 |
185 | Attributes:
186 | message: The user-level message to forward to the peer agent.
187 | context: Optional caller context (constraints, partial results, style
188 | guide). If provided, it is inserted first as a *system* message to
189 | bias many executors.
190 | timeout_s: Optional timeout (seconds). If exceeded, the tool returns
191 | ``"Timed out waiting for peer agent reply."`` instead of raising.
192 | """
193 |
194 | message: str = Field(..., description="Message to send to the peer agent.")
195 | context: str | None = Field(
196 | None,
197 | description=(
198 | "Optional additional context from the caller (e.g., hints, partial "
199 | "results, or constraints)."
200 | ),
201 | )
202 | timeout_s: float | None = Field(
203 | None,
204 | ge=0,
205 | description="Optional timeout in seconds for the peer agent call.",
206 | )
207 |
208 |
209 | class _AskAgentTool(BaseTool):
210 | """Tool that forwards a question to a specific peer agent.
211 |
212 | This tool wraps a peer :class:`~langchain_core.runnables.Runnable` and
213 | returns the peer’s *final text* using a best-effort extractor.
214 |
215 | Attributes:
216 | name: Tool identifier (e.g., ``ask_agent_researcher``).
217 | description: Human description to guide the caller agent’s planning.
218 | args_schema: Pydantic model describing accepted keyword args.
219 |
220 | Notes:
221 | - Async-first: prefer ``_arun``; a sync shim (``_run``) is provided to
222 | satisfy the abstract base class and support sync-only executors.
223 | - The peer is invoked with a ChatML-like payload:
224 | ``{"messages": [{"role": "...", "content": "..."}]}``.
225 |
226 | """
227 |
228 | name: str
229 | description: str
230 | # Pydantic v2 requires a type annotation for field overrides.
231 | args_schema: type[BaseModel] = _AskArgs
232 |
233 | _target: Runnable[Any, Any] = PrivateAttr()
234 | _extract: Callable[[Any], str] = PrivateAttr()
235 |
236 | def __init__(
237 | self,
238 | *,
239 | name: str,
240 | description: str,
241 | target: Runnable[Any, Any],
242 | extract: Callable[[Any], str],
243 | ) -> None:
244 | """Initialize the ask tool.
245 |
246 | Args:
247 | name: Tool identifier.
248 | description: Human description for planning.
249 | target: The peer agent runnable to call.
250 | extract: Function that extracts the final text from peer results.
251 | """
252 | super().__init__(name=name, description=description)
253 | self._target = target
254 | self._extract = extract
255 |
256 | async def _arun(
257 | self,
258 | *,
259 | message: str,
260 | context: str | None = None,
261 | timeout_s: float | None = None,
262 | ) -> str:
263 | """Asynchronously forward a message to the peer agent.
264 |
265 | Args:
266 | message: The message to forward (becomes a user message).
267 | context: Optional caller context, sent first as a system message.
268 | timeout_s: Optional timeout in seconds for the peer call.
269 |
270 | Returns:
271 | str: The peer agent’s best-effort final text answer, or a timeout
272 | message if the deadline is exceeded.
273 |
274 | Raises:
275 | Exception: Propagates exceptions raised by the peer call (network or
276 | executor failures). On timeout, returns a string instead of raising.
277 | """
278 | payload: list[dict[str, Any]] = []
279 | # Put context first to bias some executors that read system first
280 | if context:
281 | payload.append({"role": "system", "content": f"Caller context: {context}"})
282 | payload.append({"role": "user", "content": message})
283 |
284 | async def _call() -> Any:
285 | return await self._target.ainvoke({"messages": payload})
286 |
287 | if timeout_s and timeout_s > 0:
288 | import anyio
289 |
290 | with anyio.move_on_after(timeout_s) as scope:
291 | res = await _call()
292 |             if scope.cancel_called:  # timed out
293 | return "Timed out waiting for peer agent reply."
294 | else:
295 | res = await _call()
296 |
297 | return self._extract(res)
298 |
299 | def _run(
300 | self,
301 | *,
302 | message: str,
303 | context: str | None = None,
304 | timeout_s: float | None = None,
305 | ) -> str: # pragma: no cover (usually unused in async apps)
306 | """Synchronous shim that delegates to :meth:`_arun`.
307 |
308 | Args:
309 | message: The message to forward (becomes a user message).
310 | context: Optional caller context, sent first as a system message.
311 | timeout_s: Optional timeout in seconds for the peer call.
312 |
313 | Returns:
314 | str: The peer agent’s best-effort final text answer (or timeout text).
315 | """
316 | import anyio
317 |
318 | return anyio.run(lambda: self._arun(message=message, context=context, timeout_s=timeout_s))
319 |
320 |
321 | class _BroadcastArgs(BaseModel):
322 | """Arguments for the broadcast tool (``broadcast_to_agents``).
323 |
324 | Attributes:
325 | message: The shared message sent to all (or a subset of) peers.
326 | peers: Optional subset of peer names to consult. If omitted, all
327 | registered peers are consulted.
328 | timeout_s: Optional per-peer timeout in seconds. Affected peers return
329 | ``"Timed out"`` in the result mapping.
330 | """
331 |
332 | message: str = Field(..., description="Message to send to all/selected peers.")
333 | peers: Sequence[str] | None = Field(
334 | None, description="Optional subset of peer names. If omitted, use all peers."
335 | )
336 | timeout_s: float | None = Field(
337 | None, ge=0, description="Optional timeout per peer call in seconds."
338 | )
339 |
340 |
341 | class _BroadcastTool(BaseTool):
342 | """Ask multiple peer agents in parallel and return a mapping of answers.
343 |
344 | Each selected peer is invoked concurrently. Timeouts and exceptions are
345 | captured **per peer** so the overall call remains resilient.
346 |
347 | Attributes:
348 | name: Tool identifier (``broadcast_to_agents``).
349 | description: Human description for planning.
350 | args_schema: Pydantic model describing accepted keyword args.
351 |
352 | Notes:
353 | Uses ``anyio.create_task_group`` for compatibility across anyio versions.
354 | """
355 |
356 | name: str
357 | description: str
358 | # Pydantic v2 requires a type annotation for field overrides.
359 | args_schema: type[BaseModel] = _BroadcastArgs
360 |
361 | _peers: Mapping[str, CrossAgent] = PrivateAttr()
362 | _extract: Callable[[Any], str] = PrivateAttr()
363 |
364 | def __init__(
365 | self,
366 | *,
367 | name: str,
368 | description: str,
369 | peers: Mapping[str, CrossAgent],
370 | extract: Callable[[Any], str],
371 | ) -> None:
372 | """Initialize the broadcast tool.
373 |
374 | Args:
375 | name: Tool identifier.
376 | description: Human description for planning.
377 | peers: Mapping of peer name → :class:`CrossAgent`.
378 | extract: Function that extracts the final text from peer results.
379 | """
380 | super().__init__(name=name, description=description)
381 | self._peers = peers
382 | self._extract = extract
383 |
384 | async def _arun(
385 | self,
386 | *,
387 | message: str,
388 | peers: Sequence[str] | None = None,
389 | timeout_s: float | None = None,
390 | ) -> dict[str, str]:
391 | """Asynchronously consult multiple peers in parallel.
392 |
393 | Args:
394 | message: The message forwarded to each selected peer.
395 | peers: Optional subset of peer names to target. If ``None``, uses all.
396 | timeout_s: Optional timeout in seconds applied per peer call.
397 |
398 | Returns:
399 | dict[str, str]: Mapping of ``peer_name`` → final text. Peers that
400 | exceed the timeout return ``"Timed out"``. Peers that raise an
401 |                 exception return ``"Error: <exc>"``.
402 |
403 | Raises:
404 | ValueError: If any requested peer name is unknown.
405 | """
406 | selected: Iterable[tuple[str, CrossAgent]]
407 | if peers:
408 | missing = [p for p in peers if p not in self._peers]
409 | if missing:
410 | raise ValueError(f"Unknown peer(s): {', '.join(missing)}")
411 | selected = [(p, self._peers[p]) for p in peers]
412 | else:
413 | selected = list(self._peers.items())
414 |
415 | import anyio
416 |
417 | results: dict[str, str] = {}
418 |
419 | async def _one(name: str, target: Runnable[Any, Any]) -> None:
420 | async def _call() -> Any:
421 | return await target.ainvoke({"messages": [{"role": "user", "content": message}]})
422 |
423 | if timeout_s and timeout_s > 0:
424 |                 try:
425 |                     with anyio.move_on_after(timeout_s) as scope:
426 |                         res = await _call()
427 |                     if scope.cancel_called:  # timeout fired; check after the scope exits
428 |                         results[name] = "Timed out"
429 |                         return
430 |                     results[name] = self._extract(res)
431 |                 except Exception as exc:  # keep broadcast resilient
432 |                     results[name] = f"Error: {exc}"
433 | else:
434 | try:
435 | res = await _call()
436 | results[name] = self._extract(res)
437 | except Exception as exc:
438 | results[name] = f"Error: {exc}"
439 |
440 | # Using TaskGroup for compatibility across anyio versions
441 | async with anyio.create_task_group() as tg:
442 | for n, s in selected:
443 | tg.start_soon(_one, n, s.agent)
444 |
445 | return results
446 |
447 | def _run(
448 | self,
449 | *,
450 | message: str,
451 | peers: Sequence[str] | None = None,
452 | timeout_s: float | None = None,
453 | ) -> dict[str, str]: # pragma: no cover (usually unused in async apps)
454 | """Synchronous shim that delegates to :meth:`_arun`.
455 |
456 | Args:
457 | message: The message forwarded to each selected peer.
458 | peers: Optional subset of peer names to target. If ``None``, uses all.
459 | timeout_s: Optional timeout in seconds applied per peer call.
460 |
461 | Returns:
462 | dict[str, str]: Mapping of ``peer_name`` → final text (or timeout/error text).
463 | """
464 | import anyio
465 |
466 | return anyio.run(lambda: self._arun(message=message, peers=peers, timeout_s=timeout_s))
467 |
--------------------------------------------------------------------------------