├── .difyignore ├── .gitignore ├── .vscode └── launch.json ├── LICENSE ├── PRIVACY.md ├── README.md ├── _assets ├── agent.png └── icon.svg ├── main.py ├── manifest.yaml ├── provider ├── mcp_agent.py └── mcp_agent.yaml ├── requirements.txt └── strategies ├── mcp_client.py ├── mcp_function_calling.py └── mcp_function_calling.yaml /.difyignore: -------------------------------------------------------------------------------- 1 | # Byte-compiled / optimized / DLL files 2 | __pycache__/ 3 | *.py[cod] 4 | *$py.class 5 | 6 | # Distribution / packaging 7 | .Python 8 | build/ 9 | develop-eggs/ 10 | dist/ 11 | downloads/ 12 | eggs/ 13 | .eggs/ 14 | lib/ 15 | lib64/ 16 | parts/ 17 | sdist/ 18 | var/ 19 | wheels/ 20 | share/python-wheels/ 21 | *.egg-info/ 22 | .installed.cfg 23 | *.egg 24 | MANIFEST 25 | 26 | # PyInstaller 27 | # Usually these files are written by a python script from a template 28 | # before PyInstaller builds the exe, so as to inject date/other infos into it. 29 | *.manifest 30 | *.spec 31 | 32 | # Installer logs 33 | pip-log.txt 34 | pip-delete-this-directory.txt 35 | 36 | # Unit test / coverage reports 37 | htmlcov/ 38 | .tox/ 39 | .nox/ 40 | .coverage 41 | .coverage.* 42 | .cache 43 | nosetests.xml 44 | coverage.xml 45 | *.cover 46 | *.py,cover 47 | .hypothesis/ 48 | .pytest_cache/ 49 | cover/ 50 | 51 | # Translations 52 | *.mo 53 | *.pot 54 | 55 | # Django stuff: 56 | *.log 57 | local_settings.py 58 | db.sqlite3 59 | db.sqlite3-journal 60 | 61 | # Flask stuff: 62 | instance/ 63 | .webassets-cache 64 | 65 | # Scrapy stuff: 66 | .scrapy 67 | 68 | # Sphinx documentation 69 | docs/_build/ 70 | 71 | # PyBuilder 72 | .pybuilder/ 73 | target/ 74 | 75 | # Jupyter Notebook 76 | .ipynb_checkpoints 77 | 78 | # IPython 79 | profile_default/ 80 | ipython_config.py 81 | 82 | # pyenv 83 | # For a library or package, you might want to ignore these files since the code is 84 | # intended to run in multiple environments; otherwise, check them in: 85 | .python-version 86 | 
87 | # pipenv 88 | # According to pypa/pipenv#598, it is recommended to include Pipfile.lock in version control. 89 | # However, in case of collaboration, if having platform-specific dependencies or dependencies 90 | # having no cross-platform support, pipenv may install dependencies that don't work, or not 91 | # install all needed dependencies. 92 | Pipfile.lock 93 | 94 | # UV 95 | # Similar to Pipfile.lock, it is generally recommended to include uv.lock in version control. 96 | # This is especially recommended for binary packages to ensure reproducibility, and is more 97 | # commonly ignored for libraries. 98 | uv.lock 99 | 100 | # poetry 101 | # Similar to Pipfile.lock, it is generally recommended to include poetry.lock in version control. 102 | # This is especially recommended for binary packages to ensure reproducibility, and is more 103 | # commonly ignored for libraries. 104 | # https://python-poetry.org/docs/basic-usage/#commit-your-poetrylock-file-to-version-control 105 | poetry.lock 106 | 107 | # pdm 108 | # Similar to Pipfile.lock, it is generally recommended to include pdm.lock in version control. 109 | #pdm.lock 110 | # pdm stores project-wide configurations in .pdm.toml, but it is recommended to not include it 111 | # in version control. 112 | # https://pdm.fming.dev/latest/usage/project/#working-with-version-control 113 | .pdm.toml 114 | .pdm-python 115 | .pdm-build/ 116 | 117 | # PEP 582; used by e.g. 
github.com/David-OConnor/pyflow and github.com/pdm-project/pdm 118 | __pypackages__/ 119 | 120 | # Celery stuff 121 | celerybeat-schedule 122 | celerybeat.pid 123 | 124 | # SageMath parsed files 125 | *.sage.py 126 | 127 | # Environments 128 | .env 129 | .venv 130 | env/ 131 | venv/ 132 | ENV/ 133 | env.bak/ 134 | venv.bak/ 135 | 136 | # Spyder project settings 137 | .spyderproject 138 | .spyproject 139 | 140 | # Rope project settings 141 | .ropeproject 142 | 143 | # mkdocs documentation 144 | /site 145 | 146 | # mypy 147 | .mypy_cache/ 148 | .dmypy.json 149 | dmypy.json 150 | 151 | # Pyre type checker 152 | .pyre/ 153 | 154 | # pytype static type analyzer 155 | .pytype/ 156 | 157 | # Cython debug symbols 158 | cython_debug/ 159 | 160 | # PyCharm 161 | # JetBrains specific template is maintained in a separate JetBrains.gitignore that can 162 | # be found at https://github.com/github/gitignore/blob/main/Global/JetBrains.gitignore 163 | # and can be added to the global gitignore or merged into this file. For a more nuclear 164 | # option (not recommended) you can uncomment the following to ignore the entire idea folder. 165 | .idea/ 166 | .vscode/ 167 | .git/ 168 | test.db 169 | .DS_Store -------------------------------------------------------------------------------- /.gitignore: -------------------------------------------------------------------------------- 1 | # Byte-compiled / optimized / DLL files 2 | __pycache__/ 3 | *.py[cod] 4 | *$py.class 5 | 6 | # C extensions 7 | *.so 8 | 9 | # Distribution / packaging 10 | .Python 11 | build/ 12 | develop-eggs/ 13 | dist/ 14 | downloads/ 15 | eggs/ 16 | .eggs/ 17 | lib/ 18 | lib64/ 19 | parts/ 20 | sdist/ 21 | var/ 22 | wheels/ 23 | share/python-wheels/ 24 | *.egg-info/ 25 | .installed.cfg 26 | *.egg 27 | MANIFEST 28 | 29 | # PyInstaller 30 | # Usually these files are written by a python script from a template 31 | # before PyInstaller builds the exe, so as to inject date/other infos into it. 
32 | *.manifest 33 | *.spec 34 | 35 | # Installer logs 36 | pip-log.txt 37 | pip-delete-this-directory.txt 38 | 39 | # Unit test / coverage reports 40 | htmlcov/ 41 | .tox/ 42 | .nox/ 43 | .coverage 44 | .coverage.* 45 | .cache 46 | nosetests.xml 47 | coverage.xml 48 | *.cover 49 | *.py,cover 50 | .hypothesis/ 51 | .pytest_cache/ 52 | cover/ 53 | 54 | # Translations 55 | *.mo 56 | *.pot 57 | 58 | # Django stuff: 59 | *.log 60 | local_settings.py 61 | db.sqlite3 62 | db.sqlite3-journal 63 | 64 | # Flask stuff: 65 | instance/ 66 | .webassets-cache 67 | 68 | # Scrapy stuff: 69 | .scrapy 70 | 71 | # Sphinx documentation 72 | docs/_build/ 73 | 74 | # PyBuilder 75 | .pybuilder/ 76 | target/ 77 | 78 | # Jupyter Notebook 79 | .ipynb_checkpoints 80 | 81 | # IPython 82 | profile_default/ 83 | ipython_config.py 84 | 85 | # pyenv 86 | # For a library or package, you might want to ignore these files since the code is 87 | # intended to run in multiple environments; otherwise, check them in: 88 | # .python-version 89 | 90 | # pipenv 91 | # According to pypa/pipenv#598, it is recommended to include Pipfile.lock in version control. 92 | # However, in case of collaboration, if having platform-specific dependencies or dependencies 93 | # having no cross-platform support, pipenv may install dependencies that don't work, or not 94 | # install all needed dependencies. 95 | #Pipfile.lock 96 | 97 | # UV 98 | # Similar to Pipfile.lock, it is generally recommended to include uv.lock in version control. 99 | # This is especially recommended for binary packages to ensure reproducibility, and is more 100 | # commonly ignored for libraries. 101 | #uv.lock 102 | 103 | # poetry 104 | # Similar to Pipfile.lock, it is generally recommended to include poetry.lock in version control. 105 | # This is especially recommended for binary packages to ensure reproducibility, and is more 106 | # commonly ignored for libraries. 
107 | # https://python-poetry.org/docs/basic-usage/#commit-your-poetrylock-file-to-version-control 108 | #poetry.lock 109 | 110 | # pdm 111 | # Similar to Pipfile.lock, it is generally recommended to include pdm.lock in version control. 112 | #pdm.lock 113 | # pdm stores project-wide configurations in .pdm.toml, but it is recommended to not include it 114 | # in version control. 115 | # https://pdm.fming.dev/latest/usage/project/#working-with-version-control 116 | .pdm.toml 117 | .pdm-python 118 | .pdm-build/ 119 | 120 | # PEP 582; used by e.g. github.com/David-OConnor/pyflow and github.com/pdm-project/pdm 121 | __pypackages__/ 122 | 123 | # Celery stuff 124 | celerybeat-schedule 125 | celerybeat.pid 126 | 127 | # SageMath parsed files 128 | *.sage.py 129 | 130 | # Environments 131 | .env 132 | .venv 133 | env/ 134 | venv/ 135 | ENV/ 136 | env.bak/ 137 | venv.bak/ 138 | 139 | # Spyder project settings 140 | .spyderproject 141 | .spyproject 142 | 143 | # Rope project settings 144 | .ropeproject 145 | 146 | # mkdocs documentation 147 | /site 148 | 149 | # mypy 150 | .mypy_cache/ 151 | .dmypy.json 152 | dmypy.json 153 | 154 | # Pyre type checker 155 | .pyre/ 156 | 157 | # pytype static type analyzer 158 | .pytype/ 159 | 160 | # Cython debug symbols 161 | cython_debug/ 162 | 163 | # PyCharm 164 | # JetBrains specific template is maintained in a separate JetBrains.gitignore that can 165 | # be found at https://github.com/github/gitignore/blob/main/Global/JetBrains.gitignore 166 | # and can be added to the global gitignore or merged into this file. For a more nuclear 167 | # option (not recommended) you can uncomment the following to ignore the entire idea folder. 
168 | #.idea/ 169 | 170 | # Ruff stuff: 171 | .ruff_cache/ 172 | 173 | # PyPI configuration file 174 | .pypirc 175 | .DS_Store -------------------------------------------------------------------------------- /.vscode/launch.json: -------------------------------------------------------------------------------- 1 | { 2 | "version": "0.2.0", 3 | "configurations": [ 4 | { 5 | "name": "Python: Module", 6 | "type": "debugpy", 7 | "request": "launch", 8 | "module": "main", 9 | "python": "${workspaceFolder}/.venv/bin/python", 10 | "cwd": "${workspaceFolder}", 11 | "console": "integratedTerminal", 12 | "justMyCode": false, 13 | "env": { 14 | "GEVENT_SUPPORT": "True" 15 | } 16 | } 17 | ] 18 | } -------------------------------------------------------------------------------- /LICENSE: -------------------------------------------------------------------------------- 1 | MIT License 2 | 3 | Copyright (c) 2025 非法操作 4 | 5 | Permission is hereby granted, free of charge, to any person obtaining a copy 6 | of this software and associated documentation files (the "Software"), to deal 7 | in the Software without restriction, including without limitation the rights 8 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell 9 | copies of the Software, and to permit persons to whom the Software is 10 | furnished to do so, subject to the following conditions: 11 | 12 | The above copyright notice and this permission notice shall be included in all 13 | copies or substantial portions of the Software. 14 | 15 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 16 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 17 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE 18 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 19 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, 20 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE 21 | SOFTWARE. 22 | -------------------------------------------------------------------------------- /PRIVACY.md: -------------------------------------------------------------------------------- 1 | # Privacy Policy 2 | 3 | This tool is designed with privacy in mind and does not collect any user data. We are committed to maintaining your privacy and ensuring your data remains secure. 4 | 5 | ## Data Collection 6 | 7 | - **No Personal Information**: We do not collect, store, or process any personal information. 8 | - **No Usage Data**: We do not track or monitor how you use the tool. 9 | - **No Analytics**: We do not implement any analytics or tracking mechanisms. 10 | 11 | ## Third-Party Services 12 | 13 | This tool does not integrate with or utilize any third-party services that might collect user data. 14 | 15 | ## Changes to Privacy Policy 16 | 17 | If there are any changes to our privacy practices, we will update this document accordingly. 18 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | # MCP Agent Strategy 2 | 3 | **Author:** hjlarry 4 | **Version:** 0.0.2 5 | **Type:** agent strategy 6 | **Repo&Issue:** [https://github.com/hjlarry/dify-plugin-mcp_agent](https://github.com/hjlarry/dify-plugin-mcp_agent) 7 | 8 | A Dify Agent strategy plugin that integrates MCP tool calls and common function calls capabilities. 
9 | 10 | ## Features 11 | 12 | - Full compatibility with official function call agent strategy 13 | - Seamless integration with MCP tool calls 14 | - Flexible MCP server connection options 15 | 16 | ## Getting Started 17 | 18 | ![agent](./_assets/agent.png) 19 | 20 | ### Basic Configuration 21 | 22 | For single MCP server setup, simply provide the server URL: 23 | 24 | ```shell 25 | http://localhost:8000/sse 26 | ``` 27 | 28 | ### Advanced Configuration 29 | 30 | For multiple MCP servers with detailed connection parameters: 31 | ```json 32 | { 33 | "server_name1": { 34 | "url": "http://127.0.0.1:8000/sse", 35 | "headers": {}, 36 | "timeout": 5, 37 | "sse_read_timeout": 300 38 | }, 39 | "server_name2": { 40 | "url": "http://127.0.0.1:8001/sse" 41 | } 42 | } 43 | ``` 44 | 45 | 46 | ## How to change MCP server from `stdio` to `sse` ? 47 | 48 | ### Option 1: source code modification 49 | ```python 50 | if __name__ == "__main__": 51 | mcp.run(transport='sse') 52 | ``` 53 | 54 | ### Option 2: using the [mcp-proxy](https://github.com/sparfenyuk/mcp-proxy) 55 | ```shell 56 | uv tool install mcp-proxy 57 | mcp-proxy --sse-host=0.0.0.0 --sse-port=8080 uvx your-server 58 | ``` -------------------------------------------------------------------------------- /_assets/agent.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/hjlarry/dify-plugin-mcp_agent/76e12dca8bccfad1d2d1cbd34cb25ce237fe7f40/_assets/agent.png -------------------------------------------------------------------------------- /_assets/icon.svg: -------------------------------------------------------------------------------- 1 | 2 | 3 | 7 | 11 | 12 | 13 | 14 | 18 | 22 | 23 | 24 | 25 | 29 | 33 | 34 | 38 | 42 | 43 | 44 | 45 | 48 | 52 | 53 | 54 | 55 | 56 | 60 | 61 | 62 | 67 | 68 | 69 | 74 | 75 | 76 | 81 | 82 | 83 | 84 | 85 | 89 | 90 | -------------------------------------------------------------------------------- /main.py: 
-------------------------------------------------------------------------------- 1 | import sys 2 | 3 | sys.path.append("../..") 4 | 5 | from dify_plugin import DifyPluginEnv, Plugin 6 | 7 | plugin = Plugin(DifyPluginEnv(MAX_REQUEST_TIMEOUT=240)) 8 | 9 | if __name__ == "__main__": 10 | plugin.run() 11 | -------------------------------------------------------------------------------- /manifest.yaml: -------------------------------------------------------------------------------- 1 | version: 0.0.1 2 | type: plugin 3 | author: "hjlarry" 4 | name: "agent" 5 | label: 6 | en_US: "MCP Agent Strategy" 7 | zh_Hans: "MCP Agent 策略" 8 | created_at: "2024-07-12T08:03:44.658609186Z" 9 | icon: icon.svg 10 | description: 11 | en_US: An agent strategy with MCP tool calls and common function calls. 12 | zh_Hans: 支持 MCP 工具调用和 function call 的 Agent 策略。 13 | tags: 14 | - "agent" 15 | resource: 16 | memory: 1048576 17 | permission: 18 | tool: 19 | enabled: true 20 | model: 21 | enabled: true 22 | llm: true 23 | plugins: 24 | agent_strategies: 25 | - "provider/mcp_agent.yaml" 26 | meta: 27 | version: 0.0.1 28 | arch: 29 | - "amd64" 30 | - "arm64" 31 | runner: 32 | language: "python" 33 | version: "3.12" 34 | entrypoint: "main" 35 | -------------------------------------------------------------------------------- /provider/mcp_agent.py: -------------------------------------------------------------------------------- 1 | from dify_plugin.interfaces.agent import AgentProvider 2 | 3 | 4 | class MCPAgentProvider(AgentProvider): 5 | pass 6 | -------------------------------------------------------------------------------- /provider/mcp_agent.yaml: -------------------------------------------------------------------------------- 1 | identity: 2 | author: hjlarry 3 | name: mcp_agent 4 | label: 5 | en_US: MCP Agent 6 | zh_Hans: MCP Agent 7 | pt_BR: MCP Agent 8 | description: 9 | en_US: The agent with MCP tool calls and common function calls. 
import json
from queue import Empty, Queue
from threading import Event, Thread
from urllib.parse import urljoin, urlparse

import httpx
from httpx_sse import connect_sse


class MCPClient:
    """Minimal MCP (Model Context Protocol) client over an SSE transport.

    A background thread holds a long-lived SSE stream open. The server's
    first ``endpoint`` event announces the URL to POST JSON-RPC messages
    to; subsequent ``message`` events carry JSON-RPC responses, which are
    matched to requests by their ``id``.
    """

    def __init__(self, sse_url, headers=None, timeout=5, sse_read_timeout=300):
        """Connect to an MCP server.

        :param sse_url: full URL of the server's SSE endpoint
        :param headers: optional extra HTTP headers sent with every request
        :param timeout: connect/write timeout in seconds; also the handshake
            timeout used by :meth:`connect`
        :param sse_read_timeout: read timeout for the SSE stream
        :raises TimeoutError: if the server does not announce its message
            endpoint within ``timeout`` seconds
        """
        self.sse_url = sse_url
        self.timeout = timeout
        parsed_url = urlparse(sse_url)
        self.base_url = f"{parsed_url.scheme}://{parsed_url.netloc}"
        self.message_endpoint = None
        # FIX: `headers={}` was a mutable default argument; use None + `or {}`.
        self.session = httpx.Client(
            headers=headers or {},
            timeout=httpx.Timeout(timeout, read=sse_read_timeout),
        )
        self._request_id = 0

        self.message_queue = Queue()
        self.response_ready = Event()
        self.should_stop = Event()
        self._listen_thread = None
        self._connected = Event()
        self.connect()

    def _listen_messages(self) -> None:
        """Background loop: read SSE events and enqueue JSON-RPC messages."""
        with connect_sse(self.session, "GET", self.sse_url) as event_source:
            event_source.response.raise_for_status()
            for event in event_source.iter_sse():
                if self.should_stop.is_set():
                    break
                if event.event == "endpoint":
                    # The server tells us where to POST messages.
                    self.message_endpoint = event.data
                    self._connected.set()
                elif event.event == "message":
                    self.message_queue.put(json.loads(event.data))
                    self.response_ready.set()

    def send_message(self, data: dict):
        """POST a JSON-RPC message; if it carries an ``id``, block until the
        matching response arrives on the SSE stream and return it.

        Notifications (messages without an ``id``) return ``{}`` immediately.

        :raises RuntimeError: if the endpoint handshake has not completed
        """
        if not self.message_endpoint:
            raise RuntimeError("please call connect() first")

        response = self.session.post(
            urljoin(self.base_url, self.message_endpoint),
            json=data,
            headers={"Content-Type": "application/json"},
        )
        response.raise_for_status()

        if "id" not in data:
            return {}

        message_id = data["id"]
        while True:
            self.response_ready.wait()
            self.response_ready.clear()

            # FIX: the original put unmatched messages straight back into the
            # queue inside the drain loop, then immediately read the same
            # message again -- a busy infinite loop whenever a message for
            # another request was queued. Drain into a local list first, then
            # re-queue the leftovers once.
            pending = []
            matched = None
            try:
                while True:
                    message = self.message_queue.get_nowait()
                    if matched is None and message.get("id") == message_id:
                        matched = message
                    else:
                        pending.append(message)
            except Empty:
                pass
            for message in pending:
                self.message_queue.put(message)
            if matched is not None:
                self._request_id += 1
                return matched

    def connect(self) -> None:
        """Start the SSE listener thread and wait for the ``endpoint`` event."""
        self._listen_thread = Thread(target=self._listen_messages, daemon=True)
        self._listen_thread.start()

        if not self._connected.wait(timeout=self.timeout):
            # FIX: spelling of the error message ("conection timeout!!").
            raise TimeoutError("connection timeout!")

    def close(self) -> None:
        """Stop the listener thread and release the HTTP session."""
        # FIX: the original called should_stop.is_set(), which only READS the
        # flag -- the listener thread was never actually asked to stop.
        self.should_stop.set()
        self.session.close()
        if self._listen_thread and self._listen_thread.is_alive():
            self._listen_thread.join(timeout=1)

    def initialize(self):
        """Perform the MCP handshake and return the server's initialize result."""
        init_data = {
            "jsonrpc": "2.0",
            "id": self._request_id,
            "method": "initialize",
            "params": {
                "protocolVersion": "2024-11-05",
                "capabilities": {},
                "clientInfo": {
                    "name": "mcp",
                    "version": "0.1.0",
                },
            },
        }

        init_result = self.send_message(init_data)

        notify_data = {
            "jsonrpc": "2.0",
            "method": "notifications/initialized",
            "params": {},
        }
        self.send_message(notify_data)

        # FIX: the original returned init_data (our own request) instead of
        # the server's response.
        return init_result

    def list_tools(self):
        """Return the server's tool list (``[]`` if none or on empty result)."""
        tools_data = {
            "jsonrpc": "2.0",
            "id": self._request_id,
            "method": "tools/list",
            "params": {},
        }
        return self.send_message(tools_data).get("result", {}).get("tools", [])

    def call_tool(self, tool_name: str, tool_args: dict):
        """Invoke a named tool with the given arguments; return its content list."""
        call_data = {
            "jsonrpc": "2.0",
            "id": self._request_id,
            "method": "tools/call",
            "params": {
                "name": tool_name,
                "arguments": tool_args,
            },
        }
        return self.send_message(call_data).get("result", {}).get("content", [])


def main():
    """Smoke test against a locally running MCP server."""
    client = None
    try:
        # FIX: the original called MCPClient() with no arguments, but sse_url
        # is required (TypeError before any connection was attempted).
        client = MCPClient("http://localhost:8000/sse")
        print(f"connected to {client.sse_url}")

        init_result = client.initialize()
        print("init:", init_result)

        tools = client.list_tools()
        print("list tools:", tools)

        result = client.call_tool("get_alerts", {"state": "CA"})
        print("call tool:", result)

    finally:
        # FIX: guard against `client` being unbound/None when the constructor
        # itself raised (the original hit UnboundLocalError in that case).
        if client is not None:
            client.close()


if __name__ == "__main__":
    main()
class ToolInvokeMeta(BaseModel):
    """Metadata captured for a single tool invocation."""

    # Wall-clock seconds spent inside the tool call.
    time_cost: float = Field(..., description="The time cost of the tool invoke")
    # Human-readable error text when the invocation failed, otherwise None.
    error: Optional[str] = None
    # Snapshot of the tool configuration used for the call, if any.
    tool_config: Optional[dict] = None

    @classmethod
    def empty(cls) -> "ToolInvokeMeta":
        """Return a zeroed-out record for a call that never ran."""
        return cls(time_cost=0.0, error=None, tool_config={})

    @classmethod
    def error_instance(cls, error: str) -> "ToolInvokeMeta":
        """Return a record carrying only the given error message."""
        return cls(time_cost=0.0, error=error, tool_config={})

    def to_dict(self) -> dict:
        """Serialize this record into a plain dictionary."""
        return dict(
            time_cost=self.time_cost,
            error=self.error,
            tool_config=self.tool_config,
        )
MCPClient(mcp_server_url) 108 | m_client.initialize() 109 | mcp_tools = m_client.list_tools() 110 | mcp_tool_instances = {tool.get("name"): tool for tool in mcp_tools} 111 | mcp_tool_client = {tool.get("name"): m_client for tool in mcp_tools} 112 | tools = fc_params.tools or [] 113 | tool_instances = {tool.identity.name: tool for tool in tools} if tools else {} 114 | # init prompt messages 115 | model = fc_params.model 116 | stop = ( 117 | fc_params.model.completion_params.get("stop", []) 118 | if fc_params.model.completion_params 119 | else [] 120 | ) 121 | # convert tools into ModelRuntime Tool format 122 | prompt_messages_tools = self._init_prompt_tools(tools) 123 | prompt_messages_tools.extend(self._init_prompt_mcp_tools(mcp_tools)) 124 | 125 | iteration_step = 1 126 | max_iteration_steps = fc_params.maximum_iterations 127 | current_thoughts: list[PromptMessage] = [] 128 | # continue to run until there is not any tool call 129 | function_call_state = True 130 | llm_usage: dict[str, Optional[LLMUsage]] = {"usage": None} 131 | final_answer = "" 132 | 133 | while function_call_state and iteration_step <= max_iteration_steps: 134 | function_call_state = False 135 | round_started_at = time.perf_counter() 136 | round_log = self.create_log_message( 137 | label=f"ROUND {iteration_step}", 138 | data={}, 139 | metadata={ 140 | LogMetadata.STARTED_AT: round_started_at, 141 | }, 142 | status=ToolInvokeMessage.LogMessage.LogStatus.START, 143 | ) 144 | yield round_log 145 | if iteration_step == max_iteration_steps: 146 | # the last iteration, remove all tools 147 | prompt_messages_tools = [] 148 | 149 | # recalc llm max tokens 150 | prompt_messages = self._organize_prompt_messages( 151 | history_prompt_messages=init_prompt_messages, 152 | current_thoughts=current_thoughts, 153 | ) 154 | if model.completion_params: 155 | self.recalc_llm_max_tokens( 156 | model.entity, prompt_messages, model.completion_params 157 | ) 158 | # invoke model 159 | model_started_at = 
time.perf_counter() 160 | model_log = self.create_log_message( 161 | label=f"{model.model} Thought", 162 | data={}, 163 | metadata={ 164 | LogMetadata.STARTED_AT: model_started_at, 165 | LogMetadata.PROVIDER: model.provider, 166 | }, 167 | parent=round_log, 168 | status=ToolInvokeMessage.LogMessage.LogStatus.START, 169 | ) 170 | yield model_log 171 | chunks: Generator[LLMResultChunk, None, None] | LLMResult = ( 172 | self.session.model.llm.invoke( 173 | model_config=LLMModelConfig(**model.model_dump(mode="json")), 174 | prompt_messages=prompt_messages, 175 | stream=True, 176 | stop=stop, 177 | tools=prompt_messages_tools, 178 | ) 179 | ) 180 | 181 | tool_calls: list[tuple[str, str, dict[str, Any]]] = [] 182 | 183 | # save full response 184 | response = "" 185 | 186 | # save tool call names and inputs 187 | tool_call_names = "" 188 | 189 | current_llm_usage = None 190 | 191 | if isinstance(chunks, Generator): 192 | for chunk in chunks: 193 | # check if there is any tool call 194 | if self.check_tool_calls(chunk): 195 | function_call_state = True 196 | tool_calls.extend(self.extract_tool_calls(chunk) or []) 197 | tool_call_names = ";".join( 198 | [tool_call[1] for tool_call in tool_calls] 199 | ) 200 | 201 | if chunk.delta.message and chunk.delta.message.content: 202 | if isinstance(chunk.delta.message.content, list): 203 | for content in chunk.delta.message.content: 204 | response += content.data 205 | if ( 206 | not function_call_state 207 | or iteration_step == max_iteration_steps 208 | ): 209 | yield self.create_text_message(content.data) 210 | else: 211 | response += str(chunk.delta.message.content) 212 | if ( 213 | not function_call_state 214 | or iteration_step == max_iteration_steps 215 | ): 216 | yield self.create_text_message( 217 | str(chunk.delta.message.content) 218 | ) 219 | 220 | if chunk.delta.usage: 221 | self.increase_usage(llm_usage, chunk.delta.usage) 222 | current_llm_usage = chunk.delta.usage 223 | 224 | else: 225 | result = chunks 226 | # check 
if there is any tool call 227 | if self.check_blocking_tool_calls(result): 228 | function_call_state = True 229 | tool_calls.extend(self.extract_blocking_tool_calls(result) or []) 230 | tool_call_names = ";".join( 231 | [tool_call[1] for tool_call in tool_calls] 232 | ) 233 | 234 | if result.usage: 235 | self.increase_usage(llm_usage, result.usage) 236 | current_llm_usage = result.usage 237 | 238 | if result.message and result.message.content: 239 | if isinstance(result.message.content, list): 240 | for content in result.message.content: 241 | response += content.data 242 | else: 243 | response += str(result.message.content) 244 | 245 | if not result.message.content: 246 | result.message.content = "" 247 | yield self.finish_log_message( 248 | log=model_log, 249 | data={ 250 | "output": response, 251 | "tool_name": tool_call_names, 252 | "tool_input": { 253 | tool_call[1]: tool_call[2] for tool_call in tool_calls 254 | }, 255 | }, 256 | metadata={ 257 | LogMetadata.STARTED_AT: model_started_at, 258 | LogMetadata.FINISHED_AT: time.perf_counter(), 259 | LogMetadata.ELAPSED_TIME: time.perf_counter() - model_started_at, 260 | LogMetadata.PROVIDER: model.provider, 261 | LogMetadata.TOTAL_PRICE: current_llm_usage.total_price 262 | if current_llm_usage 263 | else 0, 264 | LogMetadata.CURRENCY: current_llm_usage.currency 265 | if current_llm_usage 266 | else "", 267 | LogMetadata.TOTAL_TOKENS: current_llm_usage.total_tokens 268 | if current_llm_usage 269 | else 0, 270 | }, 271 | ) 272 | assistant_message = AssistantPromptMessage(content="", tool_calls=[]) 273 | if tool_calls: 274 | assistant_message.tool_calls = [ 275 | AssistantPromptMessage.ToolCall( 276 | id=tool_call[0], 277 | type="function", 278 | function=AssistantPromptMessage.ToolCall.ToolCallFunction( 279 | name=tool_call[1], 280 | arguments=json.dumps(tool_call[2], ensure_ascii=False), 281 | ), 282 | ) 283 | for tool_call in tool_calls 284 | ] 285 | else: 286 | assistant_message.content = response 287 | 288 | 
current_thoughts.append(assistant_message) 289 | 290 | final_answer += response + "\n" 291 | 292 | # call tools 293 | tool_responses = [] 294 | for tool_call_id, tool_call_name, tool_call_args in tool_calls: 295 | tool_instance = tool_instances.get(tool_call_name) 296 | mcp_tool_instance = mcp_tool_instances.get(tool_call_name) 297 | tool_call_started_at = time.perf_counter() 298 | tool_call_log = self.create_log_message( 299 | label=f"CALL {tool_call_name}", 300 | data={}, 301 | metadata={ 302 | LogMetadata.STARTED_AT: time.perf_counter(), 303 | LogMetadata.PROVIDER: tool_instance.identity.provider if tool_instance else "MCP Client", 304 | }, 305 | parent=round_log, 306 | status=ToolInvokeMessage.LogMessage.LogStatus.START, 307 | ) 308 | yield tool_call_log 309 | 310 | if tool_instance: 311 | # invoke tool 312 | try: 313 | tool_invoke_responses = self.session.tool.invoke( 314 | provider_type=ToolProviderType(tool_instance.provider_type), 315 | provider=tool_instance.identity.provider, 316 | tool_name=tool_instance.identity.name, 317 | parameters={ 318 | **tool_instance.runtime_parameters, 319 | **tool_call_args, 320 | }, 321 | ) 322 | result = "" 323 | for response in tool_invoke_responses: 324 | if response.type == ToolInvokeMessage.MessageType.TEXT: 325 | result += cast( 326 | ToolInvokeMessage.TextMessage, response.message 327 | ).text 328 | elif response.type == ToolInvokeMessage.MessageType.LINK: 329 | result += ( 330 | f"result link: {cast(ToolInvokeMessage.TextMessage, response.message).text}." 331 | + " please tell user to check it." 332 | ) 333 | elif response.type in { 334 | ToolInvokeMessage.MessageType.IMAGE_LINK, 335 | ToolInvokeMessage.MessageType.IMAGE, 336 | }: 337 | result += ( 338 | "image has been created and sent to user already, " 339 | + "you do not need to create it, just tell the user to check it now." 
340 | ) 341 | elif response.type == ToolInvokeMessage.MessageType.JSON: 342 | text = json.dumps( 343 | cast( 344 | ToolInvokeMessage.JsonMessage, response.message 345 | ).json_object, 346 | ensure_ascii=False, 347 | ) 348 | result += f"tool response: {text}." 349 | else: 350 | result += f"tool response: {response.message!r}." 351 | except Exception as e: 352 | result = f"tool invoke error: {str(e)}" 353 | tool_response = { 354 | "tool_call_id": tool_call_id, 355 | "tool_call_name": tool_call_name, 356 | "tool_call_input": { 357 | **tool_instance.runtime_parameters, 358 | **tool_call_args, 359 | }, 360 | "tool_response": result, 361 | } 362 | elif mcp_tool_instance: 363 | try: 364 | m_client = mcp_tool_client.get(tool_call_name) 365 | result = m_client.call_tool( 366 | tool_name=tool_call_name, 367 | tool_args=tool_call_args, 368 | ) 369 | except Exception as e: 370 | result = f"tool invoke error: {str(e)}" 371 | tool_response = { 372 | "tool_call_id": tool_call_id, 373 | "tool_call_name": tool_call_name, 374 | "tool_call_input": tool_call_args, 375 | "tool_response": str(result), 376 | } 377 | else: 378 | tool_response = { 379 | "tool_call_id": tool_call_id, 380 | "tool_call_name": tool_call_name, 381 | "tool_response": f"there is not a tool named {tool_call_name}", 382 | "meta": ToolInvokeMeta.error_instance( 383 | f"there is not a tool named {tool_call_name}" 384 | ).to_dict(), 385 | } 386 | 387 | yield self.finish_log_message( 388 | log=tool_call_log, 389 | data={ 390 | "output": tool_response, 391 | }, 392 | metadata={ 393 | LogMetadata.STARTED_AT: tool_call_started_at, 394 | LogMetadata.PROVIDER: tool_instance.identity.provider if tool_instance else "MCP Client", 395 | LogMetadata.FINISHED_AT: time.perf_counter(), 396 | LogMetadata.ELAPSED_TIME: time.perf_counter() 397 | - tool_call_started_at, 398 | }, 399 | ) 400 | tool_responses.append(tool_response) 401 | if tool_response["tool_response"] is not None: 402 | current_thoughts.append( 403 | ToolPromptMessage( 
404 | content=str(tool_response["tool_response"]), 405 | tool_call_id=tool_call_id, 406 | name=tool_call_name, 407 | ) 408 | ) 409 | 410 | # update prompt tool 411 | for prompt_tool in prompt_messages_tools: 412 | if prompt_tool.name in tool_instances: 413 | self.update_prompt_message_tool( 414 | tool_instances[prompt_tool.name], prompt_tool 415 | ) 416 | yield self.finish_log_message( 417 | log=round_log, 418 | data={ 419 | "output": { 420 | "llm_response": response, 421 | "tool_responses": tool_responses, 422 | }, 423 | }, 424 | metadata={ 425 | LogMetadata.STARTED_AT: round_started_at, 426 | LogMetadata.FINISHED_AT: time.perf_counter(), 427 | LogMetadata.ELAPSED_TIME: time.perf_counter() - round_started_at, 428 | LogMetadata.TOTAL_PRICE: current_llm_usage.total_price 429 | if current_llm_usage 430 | else 0, 431 | LogMetadata.CURRENCY: current_llm_usage.currency 432 | if current_llm_usage 433 | else "", 434 | LogMetadata.TOTAL_TOKENS: current_llm_usage.total_tokens 435 | if current_llm_usage 436 | else 0, 437 | }, 438 | ) 439 | iteration_step += 1 440 | 441 | for m_client in mcp_tool_client.values(): 442 | m_client.close() 443 | 444 | yield self.create_json_message( 445 | { 446 | "execution_metadata": { 447 | LogMetadata.TOTAL_PRICE: llm_usage["usage"].total_price 448 | if llm_usage["usage"] is not None 449 | else 0, 450 | LogMetadata.CURRENCY: llm_usage["usage"].currency 451 | if llm_usage["usage"] is not None 452 | else "", 453 | LogMetadata.TOTAL_TOKENS: llm_usage["usage"].total_tokens 454 | if llm_usage["usage"] is not None 455 | else 0, 456 | } 457 | } 458 | ) 459 | 460 | def check_tool_calls(self, llm_result_chunk: LLMResultChunk) -> bool: 461 | """ 462 | Check if there is any tool call in llm result chunk 463 | """ 464 | return bool(llm_result_chunk.delta.message.tool_calls) 465 | 466 | def check_blocking_tool_calls(self, llm_result: LLMResult) -> bool: 467 | """ 468 | Check if there is any blocking tool call in llm result 469 | """ 470 | return 
bool(llm_result.message.tool_calls) 471 | 472 | def extract_tool_calls( 473 | self, llm_result_chunk: LLMResultChunk 474 | ) -> list[tuple[str, str, dict[str, Any]]]: 475 | """ 476 | Extract tool calls from llm result chunk 477 | 478 | Returns: 479 | List[Tuple[str, str, Dict[str, Any]]]: [(tool_call_id, tool_call_name, tool_call_args)] 480 | """ 481 | tool_calls = [] 482 | for prompt_message in llm_result_chunk.delta.message.tool_calls: 483 | args = {} 484 | if prompt_message.function.arguments != "": 485 | args = json.loads(prompt_message.function.arguments) 486 | 487 | tool_calls.append( 488 | ( 489 | prompt_message.id, 490 | prompt_message.function.name, 491 | args, 492 | ) 493 | ) 494 | 495 | return tool_calls 496 | 497 | def extract_blocking_tool_calls( 498 | self, llm_result: LLMResult 499 | ) -> list[tuple[str, str, dict[str, Any]]]: 500 | """ 501 | Extract blocking tool calls from llm result 502 | 503 | Returns: 504 | List[Tuple[str, str, Dict[str, Any]]]: [(tool_call_id, tool_call_name, tool_call_args)] 505 | """ 506 | tool_calls = [] 507 | for prompt_message in llm_result.message.tool_calls: 508 | args = {} 509 | if prompt_message.function.arguments != "": 510 | args = json.loads(prompt_message.function.arguments) 511 | 512 | tool_calls.append( 513 | ( 514 | prompt_message.id, 515 | prompt_message.function.name, 516 | args, 517 | ) 518 | ) 519 | 520 | return tool_calls 521 | 522 | def _init_system_message( 523 | self, prompt_template: str, prompt_messages: list[PromptMessage] 524 | ) -> list[PromptMessage]: 525 | """ 526 | Initialize system message 527 | """ 528 | if not prompt_messages and prompt_template: 529 | return [ 530 | SystemPromptMessage(content=prompt_template), 531 | ] 532 | 533 | if ( 534 | prompt_messages 535 | and not isinstance(prompt_messages[0], SystemPromptMessage) 536 | and prompt_template 537 | ): 538 | prompt_messages.insert(0, SystemPromptMessage(content=prompt_template)) 539 | 540 | return prompt_messages or [] 541 | 542 | def 
_organize_user_query( 543 | self, query: str, prompt_messages: list[PromptMessage] 544 | ) -> list[PromptMessage]: 545 | """ 546 | Organize user query 547 | """ 548 | 549 | prompt_messages.append(UserPromptMessage(content=query)) 550 | 551 | return prompt_messages 552 | 553 | def _clear_user_prompt_image_messages( 554 | self, prompt_messages: list[PromptMessage] 555 | ) -> list[PromptMessage]: 556 | """ 557 | As for now, gpt supports both fc and vision at the first iteration. 558 | We need to remove the image messages from the prompt messages at the first iteration. 559 | """ 560 | prompt_messages = deepcopy(prompt_messages) 561 | 562 | for prompt_message in prompt_messages: 563 | if isinstance(prompt_message, UserPromptMessage) and isinstance( 564 | prompt_message.content, list 565 | ): 566 | prompt_message.content = "\n".join( 567 | [ 568 | content.data 569 | if content.type == PromptMessageContentType.TEXT 570 | else "[image]" 571 | if content.type == PromptMessageContentType.IMAGE 572 | else "[file]" 573 | for content in prompt_message.content 574 | ] 575 | ) 576 | 577 | return prompt_messages 578 | 579 | def _organize_prompt_messages( 580 | self, 581 | current_thoughts: list[PromptMessage], 582 | history_prompt_messages: list[PromptMessage], 583 | ) -> list[PromptMessage]: 584 | prompt_template = "" 585 | history_prompt_messages = self._init_system_message( 586 | prompt_template, history_prompt_messages 587 | ) 588 | query_prompt_messages = self._organize_user_query(self.query or "", []) 589 | 590 | prompt_messages = [ 591 | *history_prompt_messages, 592 | *query_prompt_messages, 593 | *current_thoughts, 594 | ] 595 | if len(current_thoughts) != 0: 596 | # clear messages after the first iteration 597 | prompt_messages = self._clear_user_prompt_image_messages(prompt_messages) 598 | return prompt_messages 599 | 600 | def _init_prompt_mcp_tools(self, mcp_tools:list): 601 | """ 602 | Initialize prompt message tools 603 | """ 604 | prompt_messages_tools = [] 605 | 
for tool in mcp_tools: 606 | prompt_message = PromptMessageTool( 607 | name=tool.get("name"), 608 | description=tool.get("description"), 609 | parameters=tool.get("inputSchema"), 610 | ) 611 | prompt_messages_tools.append(prompt_message) 612 | return prompt_messages_tools 613 | 614 | -------------------------------------------------------------------------------- /strategies/mcp_function_calling.yaml: -------------------------------------------------------------------------------- 1 | identity: 2 | name: function_calling 3 | author: Dify 4 | label: 5 | en_US: MCP FunctionCalling 6 | description: 7 | en_US: Function Calling is a basic strategy for agents; the model will use the provided tools to perform the task. 8 | zh_Hans: Function Calling 是一个基本的 Agent 策略,模型将使用提供的工具来执行任务。 9 | pt_BR: Function Calling is a basic strategy for agents; the model will use the provided tools to perform the task. 10 | parameters: 11 | - name: model 12 | type: model-selector 13 | scope: tool-call&llm 14 | required: true 15 | label: 16 | en_US: Model 17 | zh_Hans: 模型 18 | pt_BR: Model 19 | - name: tools 20 | type: array[tools] 21 | required: false 22 | label: 23 | en_US: Tools list 24 | zh_Hans: 工具列表 25 | pt_BR: Tools list 26 | - name: mcp_server 27 | type: string 28 | required: false 29 | label: 30 | en_US: MCP Server URL 31 | zh_Hans: MCP 服务器地址 32 | human_description: 33 | en_US: Supports a single URL like `http://localhost:8000/sse`, or a JSON array config for multiple servers 34 | zh_Hans: 支持单个地址,如 `http://localhost:8000/sse` 或多地址的 json 数组配置 35 | - name: instruction 36 | type: string 37 | required: true 38 | label: 39 | en_US: Instruction 40 | zh_Hans: 指令 41 | pt_BR: Instruction 42 | auto_generate: 43 | type: prompt_instruction 44 | template: 45 | enabled: true 46 | - name: query 47 | type: string 48 | required: true 49 | label: 50 | en_US: Query 51 | zh_Hans: 查询 52 | pt_BR: Query 53 | - name: maximum_iterations 54 | type: number 55 | required: true 56 | label: 57 | en_US: Maximum Iterations 58 | zh_Hans:
最大迭代次数 59 | pt_BR: Maximum Iterations 60 | default: 3 61 | max: 30 62 | min: 1 63 | extra: 64 | python: 65 | source: strategies/mcp_function_calling.py 66 | --------------------------------------------------------------------------------