├── .env.example
├── .gitignore
├── .python-version
├── MCP-HandsOn-ENG.ipynb
├── MCP-HandsOn-KOR.ipynb
├── README.md
├── README_KOR.md
├── app.py
├── app_KOR.py
├── assets
│   ├── add-tools.png
│   ├── apply-tool-configuration.png
│   ├── architecture.png
│   ├── check-status.png
│   ├── project-demo.png
│   ├── smithery-copy-json.png
│   └── smithery-json.png
├── config.json
├── dockers
│   ├── .env.example
│   ├── config.json
│   ├── docker-compose-KOR-mac.yaml
│   ├── docker-compose-KOR.yaml
│   ├── docker-compose-mac.yaml
│   └── docker-compose.yaml
├── example_config.json
├── mcp_server_local.py
├── mcp_server_rag.py
├── mcp_server_remote.py
├── mcp_server_time.py
├── packages.txt
├── pyproject.toml
├── requirements.txt
├── utils.py
└── uv.lock
/.env.example:
--------------------------------------------------------------------------------
1 | ANTHROPIC_API_KEY=sk-ant-api03...
2 | OPENAI_API_KEY=sk-proj-o0gulL2J2a...
3 | LANGSMITH_API_KEY=lsv2_sk_ed22...
4 | LANGSMITH_TRACING=true
5 | LANGSMITH_ENDPOINT=https://api.smith.langchain.com
6 | LANGSMITH_PROJECT=LangGraph-MCP-Agents
7 |
8 | USE_LOGIN=true
9 | USER_ID=admin
10 | USER_PASSWORD=admin1234
--------------------------------------------------------------------------------
/.gitignore:
--------------------------------------------------------------------------------
1 | # Python-generated files
2 | __pycache__/
3 | *.py[oc]
4 | build/
5 | dist/
6 | wheels/
7 | *.egg-info
8 |
9 | # Virtual environments
10 | .venv
11 |
12 | # Environment variables
13 | .env
14 |
15 | # macOS
16 | .DS_Store
17 |
18 | # Data
19 | data/
--------------------------------------------------------------------------------
/.python-version:
--------------------------------------------------------------------------------
1 | 3.12
2 |
--------------------------------------------------------------------------------
/MCP-HandsOn-ENG.ipynb:
--------------------------------------------------------------------------------
1 | {
2 | "cells": [
3 | {
4 | "cell_type": "markdown",
5 | "metadata": {},
6 | "source": [
7 | "# MCP + LangGraph Hands-On Tutorial\n",
8 | "\n",
9 | "- Author: [Teddy Notes](https://youtube.com/c/teddynote)\n",
10 | "- Lecture: [Fastcampus RAG trick notes](https://fastcampus.co.kr/data_online_teddy)\n",
11 | "\n",
12 | "**References**\n",
13 | "- https://modelcontextprotocol.io/introduction\n",
14 | "- https://github.com/langchain-ai/langchain-mcp-adapters"
15 | ]
16 | },
17 | {
18 | "cell_type": "markdown",
19 | "metadata": {},
20 | "source": [
21 | "## configure\n",
22 | "\n",
23 | "Refer to the installation instructions below to install `uv`.\n",
24 | "\n",
25 | "**How to install `uv`**\n",
26 | "\n",
27 | "```bash\n",
28 | "# macOS/Linux\n",
29 | "curl -LsSf https://astral.sh/uv/install.sh | sh\n",
30 | "\n",
31 | "# Windows (PowerShell)\n",
32 | "irm https://astral.sh/uv/install.ps1 | iex\n",
33 | "```\n",
34 | "\n",
35 | "Install **dependencies**\n",
36 | "\n",
37 | "```bash\n",
38 | "uv pip install -r requirements.txt\n",
39 | "```"
40 | ]
41 | },
42 | {
43 | "cell_type": "markdown",
44 | "metadata": {},
45 | "source": [
46 | "Gets the environment variables."
47 | ]
48 | },
49 | {
50 | "cell_type": "code",
51 | "execution_count": null,
52 | "metadata": {},
53 | "outputs": [],
54 | "source": [
55 | "from dotenv import load_dotenv\n",
56 | "\n",
57 | "load_dotenv(override=True)"
58 | ]
59 | },
60 | {
61 | "cell_type": "markdown",
62 | "metadata": {},
63 | "source": [
64 | "## MultiServerMCPClient"
65 | ]
66 | },
67 | {
68 | "cell_type": "markdown",
69 | "metadata": {},
70 | "source": [
71 | "Run `mcp_server_remote.py` in advance. Open a terminal with the virtual environment activated and run the server.\n",
72 | "\n",
73 | "> Command\n",
74 | "```bash\n",
75 | "source .venv/bin/activate\n",
76 | "python mcp_server_remote.py\n",
77 | "```\n",
78 | "\n",
79 | "Create and terminate a temporary Session connection using `async with`"
80 | ]
81 | },
82 | {
83 | "cell_type": "code",
84 | "execution_count": null,
85 | "metadata": {},
86 | "outputs": [],
87 | "source": [
88 | "from langchain_mcp_adapters.client import MultiServerMCPClient\n",
89 | "from langgraph.prebuilt import create_react_agent\n",
90 | "from utils import ainvoke_graph, astream_graph\n",
91 | "from langchain_anthropic import ChatAnthropic\n",
92 | "\n",
93 | "model = ChatAnthropic(\n",
94 | " model_name=\"claude-3-7-sonnet-latest\", temperature=0, max_tokens=20000\n",
95 | ")\n",
96 | "\n",
97 | "async with MultiServerMCPClient(\n",
98 | " {\n",
99 | " \"weather\": {\n",
100 | " # Must match the server's port (port 8005)\n",
101 | " \"url\": \"http://localhost:8005/sse\",\n",
102 | " \"transport\": \"sse\",\n",
103 | " }\n",
104 | " }\n",
105 | ") as client:\n",
106 | " print(client.get_tools())\n",
107 | " agent = create_react_agent(model, client.get_tools())\n",
108 | " answer = await astream_graph(\n",
109 | " agent, {\"messages\": \"What's the weather like in Seoul?\"}\n",
110 | " )"
111 | ]
112 | },
113 | {
114 | "cell_type": "markdown",
115 | "metadata": {},
116 | "source": [
117 | "You might notice that you can't access the tool because the session is closed."
118 | ]
119 | },
120 | {
121 | "cell_type": "code",
122 | "execution_count": null,
123 | "metadata": {},
124 | "outputs": [],
125 | "source": [
126 | "await astream_graph(agent, {\"messages\": \"What's the weather like in Seoul?\"})"
127 | ]
128 | },
129 | {
130 | "cell_type": "markdown",
131 | "metadata": {},
132 | "source": [
133 | "Now let's change that to accessing the tool while maintaining an Async Session."
134 | ]
135 | },
136 | {
137 | "cell_type": "code",
138 | "execution_count": null,
139 | "metadata": {},
140 | "outputs": [],
141 | "source": [
142 | "# 1. Create client\n",
143 | "client = MultiServerMCPClient(\n",
144 | " {\n",
145 | " \"weather\": {\n",
146 | " \"url\": \"http://localhost:8005/sse\",\n",
147 | " \"transport\": \"sse\",\n",
148 | " }\n",
149 | " }\n",
150 | ")\n",
151 | "\n",
152 | "\n",
153 | "# 2. Explicitly initialize connection (this part is necessary)\n",
154 | "# Initialize\n",
155 | "await client.__aenter__()\n",
156 | "\n",
157 | "# Now tools are loaded\n",
158 | "print(client.get_tools()) # Tools are displayed"
159 | ]
160 | },
161 | {
162 | "cell_type": "markdown",
163 | "metadata": {},
164 | "source": [
165 | "Create an agent with langgraph(`create_react_agent`)."
166 | ]
167 | },
168 | {
169 | "cell_type": "code",
170 | "execution_count": 5,
171 | "metadata": {},
172 | "outputs": [],
173 | "source": [
174 | "# Create agent\n",
175 | "agent = create_react_agent(model, client.get_tools())"
176 | ]
177 | },
178 | {
179 | "cell_type": "markdown",
180 | "metadata": {},
181 | "source": [
182 | "Run the graph to see the results."
183 | ]
184 | },
185 | {
186 | "cell_type": "code",
187 | "execution_count": null,
188 | "metadata": {},
189 | "outputs": [],
190 | "source": [
191 | "await astream_graph(agent, {\"messages\": \"What's the weather like in Seoul?\"})"
192 | ]
193 | },
194 | {
195 | "cell_type": "markdown",
196 | "metadata": {},
197 | "source": [
198 | "## Stdio method\n",
199 | "\n",
200 | "The Stdio method is intended for use in a local environment.\n",
201 | "\n",
202 | "- Use standard input/output for communication"
203 | ]
204 | },
205 | {
206 | "cell_type": "code",
207 | "execution_count": null,
208 | "metadata": {},
209 | "outputs": [],
210 | "source": [
211 | "from mcp import ClientSession, StdioServerParameters\n",
212 | "from mcp.client.stdio import stdio_client\n",
213 | "from langgraph.prebuilt import create_react_agent\n",
214 | "from langchain_mcp_adapters.tools import load_mcp_tools\n",
215 | "from langchain_anthropic import ChatAnthropic\n",
216 | "\n",
217 | "# Initialize Anthropic's Claude model\n",
218 | "model = ChatAnthropic(\n",
219 | " model_name=\"claude-3-7-sonnet-latest\", temperature=0, max_tokens=20000\n",
220 | ")\n",
221 | "\n",
222 | "# Set up StdIO server parameters\n",
223 | "# - command: Path to Python interpreter\n",
224 | "# - args: MCP server script to execute\n",
225 | "server_params = StdioServerParameters(\n",
226 | " command=\"./.venv/bin/python\",\n",
227 | " args=[\"mcp_server_local.py\"],\n",
228 | ")\n",
229 | "\n",
230 | "# Use StdIO client to communicate with the server\n",
231 | "async with stdio_client(server_params) as (read, write):\n",
232 | " # Create client session\n",
233 | " async with ClientSession(read, write) as session:\n",
234 | " # Initialize connection\n",
235 | " await session.initialize()\n",
236 | "\n",
237 | " # Load MCP tools\n",
238 | " tools = await load_mcp_tools(session)\n",
239 | " print(tools)\n",
240 | "\n",
241 | " # Create agent\n",
242 | " agent = create_react_agent(model, tools)\n",
243 | "\n",
244 | " # Stream agent responses\n",
245 | " await astream_graph(agent, {\"messages\": \"What's the weather like in Seoul?\"})"
246 | ]
247 | },
248 | {
249 | "cell_type": "markdown",
250 | "metadata": {},
251 | "source": [
252 | "## Use MCP server with RAG deployed\n",
253 | "\n",
254 | "- File: `mcp_server_rag.py`\n",
255 | "\n",
256 | "Use the `mcp_server_rag.py` file that we built with langchain in advance.\n",
257 | "\n",
258 | "It uses stdio communication to get information about the tools, where it gets the `retriever` tool, which is the tool defined in `mcp_server_rag.py`. This file **doesn't** need to be running on the server beforehand."
259 | ]
260 | },
261 | {
262 | "cell_type": "code",
263 | "execution_count": null,
264 | "metadata": {},
265 | "outputs": [],
266 | "source": [
267 | "from mcp import ClientSession, StdioServerParameters\n",
268 | "from mcp.client.stdio import stdio_client\n",
269 | "from langchain_mcp_adapters.tools import load_mcp_tools\n",
270 | "from langgraph.prebuilt import create_react_agent\n",
271 | "from langchain_anthropic import ChatAnthropic\n",
272 | "from utils import astream_graph\n",
273 | "\n",
274 | "# Initialize Anthropic's Claude model\n",
275 | "model = ChatAnthropic(\n",
276 | " model_name=\"claude-3-7-sonnet-latest\", temperature=0, max_tokens=20000\n",
277 | ")\n",
278 | "\n",
279 | "# Set up StdIO server parameters for the RAG server\n",
280 | "server_params = StdioServerParameters(\n",
281 | " command=\"./.venv/bin/python\",\n",
282 | " args=[\"./mcp_server_rag.py\"],\n",
283 | ")\n",
284 | "\n",
285 | "# Use StdIO client to communicate with the RAG server\n",
286 | "async with stdio_client(server_params) as (read, write):\n",
287 | " # Create client session\n",
288 | " async with ClientSession(read, write) as session:\n",
289 | " # Initialize connection\n",
290 | " await session.initialize()\n",
291 | "\n",
292 | " # Load MCP tools (in this case, the retriever tool)\n",
293 | " tools = await load_mcp_tools(session)\n",
294 | "\n",
295 | " # Create and run the agent\n",
296 | " agent = create_react_agent(model, tools)\n",
297 | "\n",
298 | " # Stream agent responses\n",
299 | " await astream_graph(\n",
300 | " agent,\n",
301 | " {\n",
302 | " \"messages\": \"Search for the name of the generative AI developed by Samsung Electronics\"\n",
303 | " },\n",
304 | " )"
305 | ]
306 | },
307 | {
308 | "cell_type": "markdown",
309 | "metadata": {},
310 | "source": [
311 | "## Use a mix of SSE and Stdio methods\n",
312 | "\n",
313 | "- File: `mcp_server_rag.py` communicates over Stdio\n",
314 | "- `langchain-dev-docs` communicates via SSE\n",
315 | "\n",
316 | "Use a mix of SSE and Stdio methods."
317 | ]
318 | },
319 | {
320 | "cell_type": "code",
321 | "execution_count": null,
322 | "metadata": {},
323 | "outputs": [],
324 | "source": [
325 | "from langchain_mcp_adapters.client import MultiServerMCPClient\n",
326 | "from langgraph.prebuilt import create_react_agent\n",
327 | "from langchain_anthropic import ChatAnthropic\n",
328 | "\n",
329 | "# Initialize Anthropic's Claude model\n",
330 | "model = ChatAnthropic(\n",
331 | " model_name=\"claude-3-7-sonnet-latest\", temperature=0, max_tokens=20000\n",
332 | ")\n",
333 | "\n",
334 | "# 1. Create multi-server MCP client\n",
335 | "client = MultiServerMCPClient(\n",
336 | " {\n",
337 | " \"document-retriever\": {\n",
338 | " \"command\": \"./.venv/bin/python\",\n",
339 | " # Update with the absolute path to mcp_server_rag.py file\n",
340 | " \"args\": [\"./mcp_server_rag.py\"],\n",
341 | " # Communicate via stdio (using standard input/output)\n",
342 | " \"transport\": \"stdio\",\n",
343 | " },\n",
344 | " \"langchain-dev-docs\": {\n",
345 | " # Make sure the SSE server is running\n",
346 | " \"url\": \"https://teddynote.io/mcp/langchain/sse\",\n",
347 | " # Communicate via SSE (Server-Sent Events)\n",
348 | " \"transport\": \"sse\",\n",
349 | " },\n",
350 | " }\n",
351 | ")\n",
352 | "\n",
353 | "\n",
354 | "# 2. Initialize connection explicitly through async context manager\n",
355 | "await client.__aenter__()"
356 | ]
357 | },
358 | {
359 | "cell_type": "markdown",
360 | "metadata": {},
361 | "source": [
362 | "Create an agent using `create_react_agent` in langgraph."
363 | ]
364 | },
365 | {
366 | "cell_type": "code",
367 | "execution_count": 10,
368 | "metadata": {},
369 | "outputs": [],
370 | "source": [
371 | "from langgraph.checkpoint.memory import MemorySaver\n",
372 | "from langchain_core.runnables import RunnableConfig\n",
373 | "\n",
374 | "prompt = (\n",
375 | " \"You are a smart agent. \"\n",
376 | " \"Use `retriever` tool to search on AI related documents and answer questions.\"\n",
377 | " \"Use `langchain-dev-docs` tool to search on langchain / langgraph related documents and answer questions.\"\n",
378 | " \"Answer in English.\"\n",
379 | ")\n",
380 | "agent = create_react_agent(\n",
381 | " model, client.get_tools(), prompt=prompt, checkpointer=MemorySaver()\n",
382 | ")"
383 | ]
384 | },
385 | {
386 | "cell_type": "markdown",
387 | "metadata": {},
388 | "source": [
389 | "Use the `retriever` tool defined in `mcp_server_rag.py` that you built to perform the search."
390 | ]
391 | },
392 | {
393 | "cell_type": "code",
394 | "execution_count": null,
395 | "metadata": {},
396 | "outputs": [],
397 | "source": [
398 | "config = RunnableConfig(recursion_limit=30, thread_id=1)\n",
399 | "await astream_graph(\n",
400 | " agent,\n",
401 | " {\n",
402 | " \"messages\": \"Use the `retriever` tool to search for the name of the generative AI developed by Samsung Electronics\"\n",
403 | " },\n",
404 | " config=config,\n",
405 | ")"
406 | ]
407 | },
408 | {
409 | "cell_type": "markdown",
410 | "metadata": {},
411 | "source": [
412 | "This time, we'll use the `langchain-dev-docs` tool to perform the search."
413 | ]
414 | },
415 | {
416 | "cell_type": "code",
417 | "execution_count": null,
418 | "metadata": {},
419 | "outputs": [],
420 | "source": [
421 | "config = RunnableConfig(recursion_limit=30, thread_id=1)\n",
422 | "await astream_graph(\n",
423 | " agent,\n",
424 | " {\n",
425 | " \"messages\": \"Please tell me about the definition of self-rag by referring to the langchain-dev-docs\"\n",
426 | " },\n",
427 | " config=config,\n",
428 | ")"
429 | ]
430 | },
431 | {
432 | "cell_type": "markdown",
433 | "metadata": {},
434 | "source": [
435 | "Use `MemorySaver` to maintain short-term memory, so multi-turn conversations are possible."
436 | ]
437 | },
438 | {
439 | "cell_type": "code",
440 | "execution_count": null,
441 | "metadata": {},
442 | "outputs": [],
443 | "source": [
444 | "await astream_graph(\n",
445 | " agent,\n",
446 | " {\"messages\": \"Summarize the previous content in bullet points\"},\n",
447 | " config=config,\n",
448 | ")"
449 | ]
450 | },
451 | {
452 | "cell_type": "markdown",
453 | "metadata": {},
454 | "source": [
455 | "## LangChain-integrated tools + MCP tools\n",
456 | "\n",
457 | "Here we confirm that tools integrated into LangChain can be used in conjunction with existing MCP-only tools."
458 | ]
459 | },
460 | {
461 | "cell_type": "code",
462 | "execution_count": 15,
463 | "metadata": {},
464 | "outputs": [],
465 | "source": [
466 | "from langchain_community.tools.tavily_search import TavilySearchResults\n",
467 | "\n",
468 | "# Initialize the Tavily search tool (news type, news from the last 3 days)\n",
469 | "tavily = TavilySearchResults(max_results=3, topic=\"news\", days=3)\n",
470 | "\n",
471 | "# Use it together with existing MCP tools\n",
472 | "tools = client.get_tools() + [tavily]"
473 | ]
474 | },
475 | {
476 | "cell_type": "markdown",
477 | "metadata": {},
478 | "source": [
479 | "Create an agent using `create_react_agent` in langgraph."
480 | ]
481 | },
482 | {
483 | "cell_type": "code",
484 | "execution_count": 16,
485 | "metadata": {},
486 | "outputs": [],
487 | "source": [
488 | "from langgraph.checkpoint.memory import MemorySaver\n",
489 | "from langchain_core.runnables import RunnableConfig\n",
490 | "\n",
491 | "prompt = \"You are a smart agent with various tools. Answer questions in English.\"\n",
492 | "agent = create_react_agent(model, tools, prompt=prompt, checkpointer=MemorySaver())"
493 | ]
494 | },
495 | {
496 | "cell_type": "markdown",
497 | "metadata": {},
498 | "source": [
499 | "Perform a search using the newly added `tavily` tool."
500 | ]
501 | },
502 | {
503 | "cell_type": "code",
504 | "execution_count": null,
505 | "metadata": {},
506 | "outputs": [],
507 | "source": [
508 | "await astream_graph(\n",
509 | " agent, {\"messages\": \"Tell me about today's news for me\"}, config=config\n",
510 | ")"
511 | ]
512 | },
513 | {
514 | "cell_type": "markdown",
515 | "metadata": {},
516 | "source": [
517 | "You can see that the `retriever` tool is working smoothly."
518 | ]
519 | },
520 | {
521 | "cell_type": "code",
522 | "execution_count": null,
523 | "metadata": {},
524 | "outputs": [],
525 | "source": [
526 | "await astream_graph(\n",
527 | " agent,\n",
528 | " {\n",
529 | " \"messages\": \"Use the `retriever` tool to search for the name of the generative AI developed by Samsung Electronics\"\n",
530 | " },\n",
531 | " config=config,\n",
532 | ")"
533 | ]
534 | },
535 | {
536 | "cell_type": "markdown",
537 | "metadata": {},
538 | "source": [
539 | "## Smithery MCP Server\n",
540 | "\n",
541 | "- Link: https://smithery.ai/\n",
542 | "\n",
543 | "List of tools used:\n",
544 | "\n",
545 | "- Sequential Thinking: https://smithery.ai/server/@smithery-ai/server-sequential-thinking\n",
546 | " - MCP server providing tools for dynamic and reflective problem-solving through structured thinking processes\n",
547 | "- Desktop Commander: https://smithery.ai/server/@wonderwhy-er/desktop-commander\n",
548 | " - Run terminal commands and manage files with various editing capabilities. Coding, shell and terminal, task automation\n",
549 | "\n",
550 | "**Note**\n",
551 | "\n",
552 | "- When importing tools provided by smithery in JSON format, you must set `\"transport\": \"stdio\"` as shown in the example below."
553 | ]
554 | },
555 | {
556 | "cell_type": "code",
557 | "execution_count": null,
558 | "metadata": {},
559 | "outputs": [],
560 | "source": [
561 | "from langchain_mcp_adapters.client import MultiServerMCPClient\n",
562 | "from langgraph.prebuilt import create_react_agent\n",
563 | "from langchain_anthropic import ChatAnthropic\n",
564 | "\n",
565 | "# Initialize LLM model\n",
566 | "model = ChatAnthropic(model=\"claude-3-7-sonnet-latest\", temperature=0, max_tokens=20000)\n",
567 | "\n",
568 | "# 1. Create client\n",
569 | "client = MultiServerMCPClient(\n",
570 | " {\n",
571 | " \"server-sequential-thinking\": {\n",
572 | " \"command\": \"npx\",\n",
573 | " \"args\": [\n",
574 | " \"-y\",\n",
575 | " \"@smithery/cli@latest\",\n",
576 | " \"run\",\n",
577 | " \"@smithery-ai/server-sequential-thinking\",\n",
578 | " \"--key\",\n",
579 | " \"your_smithery_api_key\",\n",
580 | " ],\n",
581 | " \"transport\": \"stdio\", # Add communication using stdio method\n",
582 | " },\n",
583 | " \"desktop-commander\": {\n",
584 | " \"command\": \"npx\",\n",
585 | " \"args\": [\n",
586 | " \"-y\",\n",
587 | " \"@smithery/cli@latest\",\n",
588 | " \"run\",\n",
589 | " \"@wonderwhy-er/desktop-commander\",\n",
590 | " \"--key\",\n",
591 | " \"your_smithery_api_key\",\n",
592 | " ],\n",
593 | " \"transport\": \"stdio\", # Add communication using stdio method\n",
594 | " },\n",
595 | " \"document-retriever\": {\n",
596 | " \"command\": \"./.venv/bin/python\",\n",
597 | " # Update with the absolute path to the mcp_server_rag.py file\n",
598 | " \"args\": [\"./mcp_server_rag.py\"],\n",
599 | " # Communication using stdio (standard input/output)\n",
600 | " \"transport\": \"stdio\",\n",
601 | " },\n",
602 | " }\n",
603 | ")\n",
604 | "\n",
605 | "\n",
606 | "# 2. Explicitly initialize connection\n",
607 | "await client.__aenter__()"
608 | ]
609 | },
610 | {
611 | "cell_type": "markdown",
612 | "metadata": {},
613 | "source": [
614 | "Create an agent using `create_react_agent` in langgraph."
615 | ]
616 | },
617 | {
618 | "cell_type": "code",
619 | "execution_count": 23,
620 | "metadata": {},
621 | "outputs": [],
622 | "source": [
623 | "from langgraph.checkpoint.memory import MemorySaver\n",
624 | "from langchain_core.runnables import RunnableConfig\n",
625 | "\n",
626 | "# Set up configuration\n",
627 | "config = RunnableConfig(recursion_limit=30, thread_id=3)\n",
628 | "\n",
629 | "# Create agent\n",
630 | "agent = create_react_agent(model, client.get_tools(), checkpointer=MemorySaver())"
631 | ]
632 | },
633 | {
634 | "cell_type": "markdown",
635 | "metadata": {},
636 | "source": [
637 | "`Desktop Commander` 도구를 사용하여 터미널 명령을 실행합니다."
638 | ]
639 | },
640 | {
641 | "cell_type": "code",
642 | "execution_count": null,
643 | "metadata": {},
644 | "outputs": [],
645 | "source": [
646 | "await astream_graph(\n",
647 | " agent,\n",
648 | " {\n",
649 | " \"messages\": \"Draw the folder structure including the current path as a tree. However, exclude the .venv folder from the output.\"\n",
650 | " },\n",
651 | " config=config,\n",
652 | ")"
653 | ]
654 | },
655 | {
656 | "cell_type": "markdown",
657 | "metadata": {},
658 | "source": [
659 | "We'll use the `Sequential Thinking` tool to see if we can accomplish a relatively complex task."
660 | ]
661 | },
662 | {
663 | "cell_type": "code",
664 | "execution_count": null,
665 | "metadata": {},
666 | "outputs": [],
667 | "source": [
668 | "await astream_graph(\n",
669 | " agent,\n",
670 | " {\n",
671 | " \"messages\": (\n",
672 | " \"Use the `retriever` tool to search for information about generative AI developed by Samsung Electronics, \"\n",
673 | " \"and then use the `Sequential Thinking` tool to write a report.\"\n",
674 | " )\n",
675 | " },\n",
676 | " config=config,\n",
677 | ")"
678 | ]
679 | }
680 | ],
681 | "metadata": {
682 | "kernelspec": {
683 | "display_name": ".venv",
684 | "language": "python",
685 | "name": "python3"
686 | },
687 | "language_info": {
688 | "codemirror_mode": {
689 | "name": "ipython",
690 | "version": 3
691 | },
692 | "file_extension": ".py",
693 | "mimetype": "text/x-python",
694 | "name": "python",
695 | "nbconvert_exporter": "python",
696 | "pygments_lexer": "ipython3",
697 | "version": "3.12.8"
698 | }
699 | },
700 | "nbformat": 4,
701 | "nbformat_minor": 2
702 | }
703 |
--------------------------------------------------------------------------------
/MCP-HandsOn-KOR.ipynb:
--------------------------------------------------------------------------------
1 | {
2 | "cells": [
3 | {
4 | "cell_type": "markdown",
5 | "metadata": {},
6 | "source": [
7 | "# MCP + LangGraph 핸즈온 튜토리얼\n",
8 | "\n",
9 | "- 작성자: [테디노트](https://youtube.com/c/teddynote)\n",
10 | "- 강의: [패스트캠퍼스 RAG 비법노트](https://fastcampus.co.kr/data_online_teddy)\n",
11 | "\n",
12 | "**참고자료**\n",
13 | "- https://modelcontextprotocol.io/introduction\n",
14 | "- https://github.com/langchain-ai/langchain-mcp-adapters"
15 | ]
16 | },
17 | {
18 | "cell_type": "markdown",
19 | "metadata": {},
20 | "source": [
21 | "## 환경설정\n",
22 | "\n",
23 | "아래 설치 방법을 참고하여 `uv` 를 설치합니다.\n",
24 | "\n",
25 | "**uv 설치 방법**\n",
26 | "\n",
27 | "```bash\n",
28 | "# macOS/Linux\n",
29 | "curl -LsSf https://astral.sh/uv/install.sh | sh\n",
30 | "\n",
31 | "# Windows (PowerShell)\n",
32 | "irm https://astral.sh/uv/install.ps1 | iex\n",
33 | "```\n",
34 | "\n",
35 | "**의존성 설치**\n",
36 | "\n",
37 | "```bash\n",
38 | "uv pip install -r requirements.txt\n",
39 | "```"
40 | ]
41 | },
42 | {
43 | "cell_type": "markdown",
44 | "metadata": {},
45 | "source": [
46 | "환경변수를 가져옵니다."
47 | ]
48 | },
49 | {
50 | "cell_type": "code",
51 | "execution_count": null,
52 | "metadata": {},
53 | "outputs": [],
54 | "source": [
55 | "from dotenv import load_dotenv\n",
56 | "\n",
57 | "load_dotenv(override=True)"
58 | ]
59 | },
60 | {
61 | "cell_type": "markdown",
62 | "metadata": {},
63 | "source": [
64 | "## MultiServerMCPClient"
65 | ]
66 | },
67 | {
68 | "cell_type": "markdown",
69 | "metadata": {},
70 | "source": [
71 | "사전에 `mcp_server_remote.py` 를 실행해둡니다. 터미널을 열고 가상환경이 활성화 되어 있는 상태에서 서버를 실행해 주세요.\n",
72 | "\n",
73 | "> 명령어\n",
74 | "```bash\n",
75 | "source .venv/bin/activate\n",
76 | "python mcp_server_remote.py\n",
77 | "```\n",
78 | "\n",
79 | "`async with` 로 일시적인 Session 연결을 생성 후 해제"
80 | ]
81 | },
82 | {
83 | "cell_type": "code",
84 | "execution_count": null,
85 | "metadata": {},
86 | "outputs": [],
87 | "source": [
88 | "from langchain_mcp_adapters.client import MultiServerMCPClient\n",
89 | "from langgraph.prebuilt import create_react_agent\n",
90 | "from utils import ainvoke_graph, astream_graph\n",
91 | "from langchain_anthropic import ChatAnthropic\n",
92 | "\n",
93 | "model = ChatAnthropic(\n",
94 | " model_name=\"claude-3-7-sonnet-latest\", temperature=0, max_tokens=20000\n",
95 | ")\n",
96 | "\n",
97 | "async with MultiServerMCPClient(\n",
98 | " {\n",
99 | " \"weather\": {\n",
100 | " # 서버의 포트와 일치해야 합니다.(8005번 포트)\n",
101 | " \"url\": \"http://localhost:8005/sse\",\n",
102 | " \"transport\": \"sse\",\n",
103 | " }\n",
104 | " }\n",
105 | ") as client:\n",
106 | " print(client.get_tools())\n",
107 | " agent = create_react_agent(model, client.get_tools())\n",
108 | " answer = await astream_graph(agent, {\"messages\": \"서울의 날씨는 어떠니?\"})"
109 | ]
110 | },
111 | {
112 | "cell_type": "markdown",
113 | "metadata": {},
114 | "source": [
115 | "다음의 경우에는 session 이 닫혔기 때문에 도구에 접근할 수 없는 것을 확인할 수 있습니다."
116 | ]
117 | },
118 | {
119 | "cell_type": "code",
120 | "execution_count": null,
121 | "metadata": {},
122 | "outputs": [],
123 | "source": [
124 | "await astream_graph(agent, {\"messages\": \"서울의 날씨는 어떠니?\"})"
125 | ]
126 | },
127 | {
128 | "cell_type": "markdown",
129 | "metadata": {},
130 | "source": [
131 | "이제 그럼 Async Session 을 유지하며 도구에 접근하는 방식으로 변경해 보겠습니다."
132 | ]
133 | },
134 | {
135 | "cell_type": "code",
136 | "execution_count": null,
137 | "metadata": {},
138 | "outputs": [],
139 | "source": [
140 | "# 1. 클라이언트 생성\n",
141 | "client = MultiServerMCPClient(\n",
142 | " {\n",
143 | " \"weather\": {\n",
144 | " \"url\": \"http://localhost:8005/sse\",\n",
145 | " \"transport\": \"sse\",\n",
146 | " }\n",
147 | " }\n",
148 | ")\n",
149 | "\n",
150 | "\n",
151 | "# 2. 명시적으로 연결 초기화 (이 부분이 필요함)\n",
152 | "# 초기화\n",
153 | "await client.__aenter__()\n",
154 | "\n",
155 | "# 이제 도구가 로드됨\n",
156 | "print(client.get_tools()) # 도구가 표시됨"
157 | ]
158 | },
159 | {
160 | "cell_type": "markdown",
161 | "metadata": {},
162 | "source": [
163 | "langgraph 의 에이전트를 생성합니다."
164 | ]
165 | },
166 | {
167 | "cell_type": "code",
168 | "execution_count": 5,
169 | "metadata": {},
170 | "outputs": [],
171 | "source": [
172 | "# 에이전트 생성\n",
173 | "agent = create_react_agent(model, client.get_tools())"
174 | ]
175 | },
176 | {
177 | "cell_type": "markdown",
178 | "metadata": {},
179 | "source": [
180 | "그래프를 실행하여 결과를 확인합니다."
181 | ]
182 | },
183 | {
184 | "cell_type": "code",
185 | "execution_count": null,
186 | "metadata": {},
187 | "outputs": [],
188 | "source": [
189 | "await astream_graph(agent, {\"messages\": \"서울의 날씨는 어떠니?\"})"
190 | ]
191 | },
192 | {
193 | "cell_type": "markdown",
194 | "metadata": {},
195 | "source": [
196 | "## Stdio 통신 방식\n",
197 | "\n",
198 | "Stdio 통신 방식은 로컬 환경에서 사용하기 위해 사용합니다.\n",
199 | "\n",
200 | "- 통신을 위해 표준 입력/출력 사용\n",
201 | "\n",
202 | "참고: 아래의 python 경로는 수정하세요!"
203 | ]
204 | },
205 | {
206 | "cell_type": "code",
207 | "execution_count": null,
208 | "metadata": {},
209 | "outputs": [],
210 | "source": [
211 | "from mcp import ClientSession, StdioServerParameters\n",
212 | "from mcp.client.stdio import stdio_client\n",
213 | "from langgraph.prebuilt import create_react_agent\n",
214 | "from langchain_mcp_adapters.tools import load_mcp_tools\n",
215 | "from langchain_anthropic import ChatAnthropic\n",
216 | "\n",
217 | "# Anthropic의 Claude 모델 초기화\n",
218 | "model = ChatAnthropic(\n",
219 | " model_name=\"claude-3-7-sonnet-latest\", temperature=0, max_tokens=20000\n",
220 | ")\n",
221 | "\n",
222 | "# StdIO 서버 파라미터 설정\n",
223 | "# - command: Python 인터프리터 경로\n",
224 | "# - args: 실행할 MCP 서버 스크립트\n",
225 | "server_params = StdioServerParameters(\n",
226 | " command=\"./.venv/bin/python\",\n",
227 | " args=[\"mcp_server_local.py\"],\n",
228 | ")\n",
229 | "\n",
230 | "# StdIO 클라이언트를 사용하여 서버와 통신\n",
231 | "async with stdio_client(server_params) as (read, write):\n",
232 | " # 클라이언트 세션 생성\n",
233 | " async with ClientSession(read, write) as session:\n",
234 | " # 연결 초기화\n",
235 | " await session.initialize()\n",
236 | "\n",
237 | " # MCP 도구 로드\n",
238 | " tools = await load_mcp_tools(session)\n",
239 | " print(tools)\n",
240 | "\n",
241 | " # 에이전트 생성\n",
242 | " agent = create_react_agent(model, tools)\n",
243 | "\n",
244 | " # 에이전트 응답 스트리밍\n",
245 | " await astream_graph(agent, {\"messages\": \"서울의 날씨는 어떠니?\"})"
246 | ]
247 | },
248 | {
249 | "cell_type": "markdown",
250 | "metadata": {},
251 | "source": [
252 | "## RAG 를 구축한 MCP 서버 사용\n",
253 | "\n",
254 | "- 파일: `mcp_server_rag.py`\n",
255 | "\n",
256 | "사전에 langchain 으로 구축한 `mcp_server_rag.py` 파일을 사용합니다.\n",
257 | "\n",
258 | "stdio 통신 방식으로 도구에 대한 정보를 가져옵니다. 여기서 도구는 `retriever` 도구를 가져오게 되며, 이 도구는 `mcp_server_rag.py` 에서 정의된 도구입니다. 이 파일은 사전에 서버에서 실행되지 **않아도** 됩니다."
259 | ]
260 | },
261 | {
262 | "cell_type": "code",
263 | "execution_count": null,
264 | "metadata": {},
265 | "outputs": [],
266 | "source": [
267 | "from mcp import ClientSession, StdioServerParameters\n",
268 | "from mcp.client.stdio import stdio_client\n",
269 | "from langchain_mcp_adapters.tools import load_mcp_tools\n",
270 | "from langgraph.prebuilt import create_react_agent\n",
271 | "from langchain_anthropic import ChatAnthropic\n",
272 | "from utils import astream_graph\n",
273 | "\n",
274 | "# Anthropic의 Claude 모델 초기화\n",
275 | "model = ChatAnthropic(\n",
276 | " model_name=\"claude-3-7-sonnet-latest\", temperature=0, max_tokens=20000\n",
277 | ")\n",
278 | "\n",
279 | "# RAG 서버를 위한 StdIO 서버 파라미터 설정\n",
280 | "server_params = StdioServerParameters(\n",
281 | " command=\"./.venv/bin/python\",\n",
282 | " args=[\"./mcp_server_rag.py\"],\n",
283 | ")\n",
284 | "\n",
285 | "# StdIO 클라이언트를 사용하여 RAG 서버와 통신\n",
286 | "async with stdio_client(server_params) as (read, write):\n",
287 | " # 클라이언트 세션 생성\n",
288 | " async with ClientSession(read, write) as session:\n",
289 | " # 연결 초기화\n",
290 | " await session.initialize()\n",
291 | "\n",
292 | " # MCP 도구 로드 (여기서는 retriever 도구)\n",
293 | " tools = await load_mcp_tools(session)\n",
294 | "\n",
295 | " # 에이전트 생성 및 실행\n",
296 | " agent = create_react_agent(model, tools)\n",
297 | "\n",
298 | " # 에이전트 응답 스트리밍\n",
299 | " await astream_graph(\n",
300 | " agent, {\"messages\": \"삼성전자가 개발한 생성형 AI의 이름을 검색해줘\"}\n",
301 | " )"
302 | ]
303 | },
304 | {
305 | "cell_type": "markdown",
306 | "metadata": {},
307 | "source": [
308 | "## SSE 방식과 StdIO 방식 혼합 사용\n",
309 | "\n",
310 | "- 파일: `mcp_server_rag.py` 는 StdIO 방식으로 통신\n",
311 | "- `langchain-dev-docs` 는 SSE 방식으로 통신\n",
312 | "\n",
313 | "SSE 방식과 StdIO 방식을 혼합하여 사용합니다."
314 | ]
315 | },
316 | {
317 | "cell_type": "code",
318 | "execution_count": null,
319 | "metadata": {},
320 | "outputs": [],
321 | "source": [
322 | "from langchain_mcp_adapters.client import MultiServerMCPClient\n",
323 | "from langgraph.prebuilt import create_react_agent\n",
324 | "from langchain_anthropic import ChatAnthropic\n",
325 | "\n",
326 | "# Anthropic의 Claude 모델 초기화\n",
327 | "model = ChatAnthropic(\n",
328 | " model_name=\"claude-3-7-sonnet-latest\", temperature=0, max_tokens=20000\n",
329 | ")\n",
330 | "\n",
331 | "# 1. 다중 서버 MCP 클라이언트 생성\n",
332 | "client = MultiServerMCPClient(\n",
333 | " {\n",
334 | " \"document-retriever\": {\n",
335 | " \"command\": \"./.venv/bin/python\",\n",
336 | " # mcp_server_rag.py 파일의 절대 경로로 업데이트해야 합니다\n",
337 | " \"args\": [\"./mcp_server_rag.py\"],\n",
338 | " # stdio 방식으로 통신 (표준 입출력 사용)\n",
339 | " \"transport\": \"stdio\",\n",
340 | " },\n",
341 | " \"langchain-dev-docs\": {\n",
342 | " # SSE 서버가 실행 중인지 확인하세요\n",
343 | " \"url\": \"https://teddynote.io/mcp/langchain/sse\",\n",
344 | " # SSE(Server-Sent Events) 방식으로 통신\n",
345 | " \"transport\": \"sse\",\n",
346 | " },\n",
347 | " }\n",
348 | ")\n",
349 | "\n",
350 | "\n",
351 | "# 2. 비동기 컨텍스트 매니저를 통한 명시적 연결 초기화\n",
352 | "await client.__aenter__()"
353 | ]
354 | },
355 | {
356 | "cell_type": "markdown",
357 | "metadata": {},
358 | "source": [
359 | "langgraph 의 `create_react_agent` 를 사용하여 에이전트를 생성합니다."
360 | ]
361 | },
362 | {
363 | "cell_type": "code",
364 | "execution_count": 10,
365 | "metadata": {},
366 | "outputs": [],
367 | "source": [
368 | "from langgraph.checkpoint.memory import MemorySaver\n",
369 | "from langchain_core.runnables import RunnableConfig\n",
370 | "\n",
371 | "prompt = (\n",
372 | " \"You are a smart agent. \"\n",
373 | " \"Use `retriever` tool to search on AI related documents and answer questions.\"\n",
374 | " \"Use `langchain-dev-docs` tool to search on langchain / langgraph related documents and answer questions.\"\n",
375 | " \"Answer in Korean.\"\n",
376 | ")\n",
377 | "agent = create_react_agent(\n",
378 | " model, client.get_tools(), prompt=prompt, checkpointer=MemorySaver()\n",
379 | ")"
380 | ]
381 | },
382 | {
383 | "cell_type": "markdown",
384 | "metadata": {},
385 | "source": [
386 | "구축해 놓은 `mcp_server_rag.py` 에서 정의한 `retriever` 도구를 사용하여 검색을 수행합니다."
387 | ]
388 | },
389 | {
390 | "cell_type": "code",
391 | "execution_count": null,
392 | "metadata": {},
393 | "outputs": [],
394 | "source": [
395 | "config = RunnableConfig(recursion_limit=30, thread_id=1)\n",
396 | "await astream_graph(\n",
397 | " agent,\n",
398 | " {\n",
399 | " \"messages\": \"`retriever` 도구를 사용해서 삼성전자가 개발한 생성형 AI 이름을 검색해줘\"\n",
400 | " },\n",
401 | " config=config,\n",
402 | ")"
403 | ]
404 | },
405 | {
406 | "cell_type": "markdown",
407 | "metadata": {},
408 | "source": [
409 | "이번에는 `langchain-dev-docs` 도구를 사용하여 검색을 수행합니다."
410 | ]
411 | },
412 | {
413 | "cell_type": "code",
414 | "execution_count": null,
415 | "metadata": {},
416 | "outputs": [],
417 | "source": [
418 | "config = RunnableConfig(recursion_limit=30, thread_id=1)\n",
419 | "await astream_graph(\n",
420 | " agent,\n",
421 | " {\"messages\": \"langgraph-dev-docs 참고해서 self-rag 의 정의에 대해서 알려줘\"},\n",
422 | " config=config,\n",
423 | ")"
424 | ]
425 | },
426 | {
427 | "cell_type": "markdown",
428 | "metadata": {},
429 | "source": [
430 | "`MemorySaver` 를 사용하여 단기 기억을 유지합니다. 따라서, multi-turn 대화도 가능합니다."
431 | ]
432 | },
433 | {
434 | "cell_type": "code",
435 | "execution_count": null,
436 | "metadata": {},
437 | "outputs": [],
438 | "source": [
439 | "await astream_graph(\n",
440 | " agent, {\"messages\": \"이전의 내용을 bullet point 로 요약해줘\"}, config=config\n",
441 | ")"
442 | ]
443 | },
444 | {
445 | "cell_type": "markdown",
446 | "metadata": {},
447 | "source": [
448 | "## LangChain 에 통합된 도구 + MCP 도구\n",
449 | "\n",
450 | "여기서는 LangChain 에 통합된 도구를 기존의 MCP 로만 이루어진 도구와 함께 사용이 가능한지 테스트 합니다."
451 | ]
452 | },
453 | {
454 | "cell_type": "code",
455 | "execution_count": 14,
456 | "metadata": {},
457 | "outputs": [],
458 | "source": [
459 | "from langchain_community.tools.tavily_search import TavilySearchResults\n",
460 | "\n",
461 | "# Tavily 검색 도구를 초기화 합니다. (news 타입, 최근 3일 내 뉴스)\n",
462 | "tavily = TavilySearchResults(max_results=3, topic=\"news\", days=3)\n",
463 | "\n",
464 | "# 기존의 MCP 도구와 함께 사용합니다.\n",
465 | "tools = client.get_tools() + [tavily]"
466 | ]
467 | },
468 | {
469 | "cell_type": "markdown",
470 | "metadata": {},
471 | "source": [
472 | "langgraph 의 `create_react_agent` 를 사용하여 에이전트를 생성합니다."
473 | ]
474 | },
475 | {
476 | "cell_type": "code",
477 | "execution_count": 15,
478 | "metadata": {},
479 | "outputs": [],
480 | "source": [
481 | "from langgraph.checkpoint.memory import MemorySaver\n",
482 | "from langchain_core.runnables import RunnableConfig\n",
483 | "\n",
484 | "# 재귀 제한 및 스레드 아이디 설정\n",
485 | "config = RunnableConfig(recursion_limit=30, thread_id=2)\n",
486 | "\n",
487 | "# 프롬프트 설정\n",
488 | "prompt = \"You are a smart agent with various tools. Answer questions in Korean.\"\n",
489 | "\n",
490 | "# 에이전트 생성\n",
491 | "agent = create_react_agent(model, tools, prompt=prompt, checkpointer=MemorySaver())"
492 | ]
493 | },
494 | {
495 | "cell_type": "markdown",
496 | "metadata": {},
497 | "source": [
498 | "새롭게 추가한 `tavily` 도구를 사용하여 검색을 수행합니다."
499 | ]
500 | },
501 | {
502 | "cell_type": "code",
503 | "execution_count": null,
504 | "metadata": {},
505 | "outputs": [],
506 | "source": [
507 | "await astream_graph(agent, {\"messages\": \"오늘 뉴스 찾아줘\"}, config=config)"
508 | ]
509 | },
510 | {
511 | "cell_type": "markdown",
512 | "metadata": {},
513 | "source": [
514 | "`retriever` 도구가 원활하게 작동하는 것을 확인할 수 있습니다."
515 | ]
516 | },
517 | {
518 | "cell_type": "code",
519 | "execution_count": null,
520 | "metadata": {},
521 | "outputs": [],
522 | "source": [
523 | "await astream_graph(\n",
524 | " agent,\n",
525 | " {\n",
526 | " \"messages\": \"`retriever` 도구를 사용해서 삼성전자가 개발한 생성형 AI 이름을 검색해줘\"\n",
527 | " },\n",
528 | " config=config,\n",
529 | ")"
530 | ]
531 | },
532 | {
533 | "cell_type": "markdown",
534 | "metadata": {},
535 | "source": [
536 | "## Smithery 에서 제공하는 MCP 서버\n",
537 | "\n",
538 | "- 링크: https://smithery.ai/"
539 | ]
540 | },
541 | {
542 | "cell_type": "markdown",
543 | "metadata": {},
544 | "source": [
545 | "사용한 도구 목록은 아래와 같습니다.\n",
546 | "\n",
547 | "- Sequential Thinking: https://smithery.ai/server/@smithery-ai/server-sequential-thinking\n",
548 | " - 구조화된 사고 프로세스를 통해 역동적이고 성찰적인 문제 해결을 위한 도구를 제공하는 MCP 서버\n",
549 | "- Desktop Commander: https://smithery.ai/server/@wonderwhy-er/desktop-commander\n",
550 | " - 다양한 편집 기능으로 터미널 명령을 실행하고 파일을 관리하세요. 코딩, 셸 및 터미널, 작업 자동화\n",
551 | "\n",
552 | "**참고**\n",
553 | "\n",
554 | "- smithery 에서 제공하는 도구를 JSON 형식으로 가져올때, 아래의 예시처럼 `\"transport\": \"stdio\"` 로 꼭 설정해야 합니다."
555 | ]
556 | },
557 | {
558 | "cell_type": "code",
559 | "execution_count": null,
560 | "metadata": {},
561 | "outputs": [],
562 | "source": [
563 | "from langchain_mcp_adapters.client import MultiServerMCPClient\n",
564 | "from langgraph.prebuilt import create_react_agent\n",
565 | "from langchain_anthropic import ChatAnthropic\n",
566 | "\n",
567 | "# LLM 모델 초기화\n",
568 | "model = ChatAnthropic(model=\"claude-3-7-sonnet-latest\", temperature=0, max_tokens=20000)\n",
569 | "\n",
570 | "# 1. 클라이언트 생성\n",
571 | "client = MultiServerMCPClient(\n",
572 | " {\n",
573 | " \"server-sequential-thinking\": {\n",
574 | " \"command\": \"npx\",\n",
575 | " \"args\": [\n",
576 | " \"-y\",\n",
577 | " \"@smithery/cli@latest\",\n",
578 | " \"run\",\n",
579 | " \"@smithery-ai/server-sequential-thinking\",\n",
580 | " \"--key\",\n",
581 | " \"89a4780a-53b7-4b7b-92e9-a29815f2669b\",\n",
582 | " ],\n",
583 | " \"transport\": \"stdio\", # stdio 방식으로 통신을 추가합니다.\n",
584 | " },\n",
585 | " \"desktop-commander\": {\n",
586 | " \"command\": \"npx\",\n",
587 | " \"args\": [\n",
588 | " \"-y\",\n",
589 | " \"@smithery/cli@latest\",\n",
590 | " \"run\",\n",
591 | " \"@wonderwhy-er/desktop-commander\",\n",
592 | " \"--key\",\n",
593 | " \"89a4780a-53b7-4b7b-92e9-a29815f2669b\",\n",
594 | " ],\n",
595 | " \"transport\": \"stdio\", # stdio 방식으로 통신을 추가합니다.\n",
596 | " },\n",
597 | " \"document-retriever\": {\n",
598 | " \"command\": \"./.venv/bin/python\",\n",
599 | " # mcp_server_rag.py 파일의 절대 경로로 업데이트해야 합니다\n",
600 | " \"args\": [\"./mcp_server_rag.py\"],\n",
601 | " # stdio 방식으로 통신 (표준 입출력 사용)\n",
602 | " \"transport\": \"stdio\",\n",
603 | " },\n",
604 | " }\n",
605 | ")\n",
606 | "\n",
607 | "\n",
608 | "# 2. 명시적으로 연결 초기화\n",
609 | "await client.__aenter__()"
610 | ]
611 | },
612 | {
613 | "cell_type": "markdown",
614 | "metadata": {},
615 | "source": [
616 | "langgraph 의 `create_react_agent` 를 사용하여 에이전트를 생성합니다."
617 | ]
618 | },
619 | {
620 | "cell_type": "code",
621 | "execution_count": 19,
622 | "metadata": {},
623 | "outputs": [],
624 | "source": [
625 | "from langgraph.checkpoint.memory import MemorySaver\n",
626 | "from langchain_core.runnables import RunnableConfig\n",
627 | "\n",
628 | "config = RunnableConfig(recursion_limit=30, thread_id=3)\n",
629 | "agent = create_react_agent(model, client.get_tools(), checkpointer=MemorySaver())"
630 | ]
631 | },
632 | {
633 | "cell_type": "markdown",
634 | "metadata": {},
635 | "source": [
636 | "`Desktop Commander` 도구를 사용하여 터미널 명령을 실행합니다."
637 | ]
638 | },
639 | {
640 | "cell_type": "code",
641 | "execution_count": null,
642 | "metadata": {},
643 | "outputs": [],
644 | "source": [
645 | "await astream_graph(\n",
646 | " agent,\n",
647 | " {\n",
648 | " \"messages\": \"현재 경로를 포함한 하위 폴더 구조를 tree 로 그려줘. 단, .venv 폴더는 제외하고 출력해줘.\"\n",
649 | " },\n",
650 | " config=config,\n",
651 | ")"
652 | ]
653 | },
654 | {
655 | "cell_type": "markdown",
656 | "metadata": {},
657 | "source": [
658 | "이번에는 `Sequential Thinking` 도구를 사용하여 비교적 복잡한 작업을 수행할 수 있는지 확인합니다."
659 | ]
660 | },
661 | {
662 | "cell_type": "code",
663 | "execution_count": null,
664 | "metadata": {},
665 | "outputs": [],
666 | "source": [
667 | "await astream_graph(\n",
668 | " agent,\n",
669 | " {\n",
670 | " \"messages\": (\n",
671 | " \"`retriever` 도구를 사용해서 삼성전자가 개발한 생성형 AI 관련 내용을 검색하고 \"\n",
672 | " \"`Sequential Thinking` 도구를 사용해서 보고서를 작성해줘.\"\n",
673 | " )\n",
674 | " },\n",
675 | " config=config,\n",
676 | ")"
677 | ]
678 | }
679 | ],
680 | "metadata": {
681 | "kernelspec": {
682 | "display_name": ".venv",
683 | "language": "python",
684 | "name": "python3"
685 | },
686 | "language_info": {
687 | "codemirror_mode": {
688 | "name": "ipython",
689 | "version": 3
690 | },
691 | "file_extension": ".py",
692 | "mimetype": "text/x-python",
693 | "name": "python",
694 | "nbconvert_exporter": "python",
695 | "pygments_lexer": "ipython3",
696 | "version": "3.12.8"
697 | }
698 | },
699 | "nbformat": 4,
700 | "nbformat_minor": 2
701 | }
702 |
--------------------------------------------------------------------------------
/README.md:
--------------------------------------------------------------------------------
1 | # LangGraph Agents + MCP
2 |
3 | [English](README.md) | [한국어](README_KOR.md)
4 |
5 | [Version](https://github.com/teddylee777/langgraph-mcp-agents)
6 | [License: MIT](https://opensource.org/licenses/MIT)
7 | [Python](https://www.python.org/)
8 | [Stars](https://github.com/teddylee777/langgraph-mcp-agents)
9 |
10 | ![Project Demo](./assets/project-demo.png)
11 |
12 | ## Project Overview
13 |
14 | ![Architecture](./assets/architecture.png)
15 |
16 | `LangChain-MCP-Adapters` is a toolkit provided by **LangChain AI** that enables AI agents to interact with external tools and data sources through the Model Context Protocol (MCP). This project provides a user-friendly interface for deploying ReAct agents that can access various data sources and APIs through MCP tools.
17 |
18 | ### Features
19 |
20 | - **Streamlit Interface**: A user-friendly web interface for interacting with LangGraph `ReAct Agent` with MCP tools
21 | - **Tool Management**: Add, remove, and configure MCP tools through the UI (Smithery JSON format supported). This is done dynamically without restarting the application
22 | - **Streaming Responses**: View agent responses and tool calls in real-time
23 | - **Conversation History**: Track and manage conversations with the agent
24 |
25 | ## MCP Architecture
26 |
27 | The Model Context Protocol (MCP) consists of three main components:
28 |
29 | 1. **MCP Host**: Programs seeking to access data through MCP, such as Claude Desktop, IDEs, or LangChain/LangGraph.
30 |
31 | 2. **MCP Client**: A protocol client that maintains a 1:1 connection with the server, acting as an intermediary between the host and server.
32 |
33 | 3. **MCP Server**: A lightweight program that exposes specific functionalities through a standardized model context protocol, serving as the primary data source. (A sample server entry from this project's `config.json` is shown below.)
34 |
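In this project, each MCP server the agent can use is declared as an entry in `config.json`. For reference, this is the default configuration that `app.py` writes on first run, registering the bundled time server over the stdio transport:

```json
{
  "get_current_time": {
    "command": "python",
    "args": ["./mcp_server_time.py"],
    "transport": "stdio"
  }
}
```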
35 | ## Quick Start with Docker
36 |
37 | You can easily run this project using Docker without setting up a local Python environment.
38 |
39 | ### Requirements (Docker Desktop)
40 |
41 | Install Docker Desktop from the link below:
42 |
43 | - [Install Docker Desktop](https://www.docker.com/products/docker-desktop/)
44 |
45 | ### Run with Docker Compose
46 |
47 | 1. Navigate to the `dockers` directory
48 |
49 | ```bash
50 | cd dockers
51 | ```
52 |
53 | 2. Create a `.env` file with your API keys in the project root directory.
54 |
55 | ```bash
56 | cp .env.example .env
57 | ```
58 |
59 | Enter your obtained API keys in the `.env` file.
60 |
61 | (Note) Not all API keys are required. Only enter the ones you need.
62 | - `ANTHROPIC_API_KEY`: If you enter an Anthropic API key, you can use "claude-3-7-sonnet-latest", "claude-3-5-sonnet-latest", "claude-3-haiku-latest" models.
63 | - `OPENAI_API_KEY`: If you enter an OpenAI API key, you can use "gpt-4o", "gpt-4o-mini" models.
64 | - `LANGSMITH_API_KEY`: If you enter a LangSmith API key, you can use LangSmith tracing.
65 |
66 | ```bash
67 | ANTHROPIC_API_KEY=your_anthropic_api_key
68 | OPENAI_API_KEY=your_openai_api_key
69 | LANGSMITH_API_KEY=your_langsmith_api_key
70 | LANGSMITH_TRACING=true
71 | LANGSMITH_ENDPOINT=https://api.smith.langchain.com
72 | LANGSMITH_PROJECT=LangGraph-MCP-Agents
73 | ```
74 |
75 | When using the login feature, set `USE_LOGIN` to `true` and enter `USER_ID` and `USER_PASSWORD`.
76 |
77 | ```bash
78 | USE_LOGIN=true
79 | USER_ID=admin
80 | USER_PASSWORD=admin123
81 | ```
82 |
83 | If you don't want to use the login feature, set `USE_LOGIN` to `false`.
84 |
85 | ```bash
86 | USE_LOGIN=false
87 | ```
88 |
89 | 3. Select the Docker Compose file that matches your system architecture.
90 |
91 | **AMD64/x86_64 Architecture (Intel/AMD Processors)**
92 |
93 | ```bash
94 | # Run container
95 | docker compose -f docker-compose.yaml up -d
96 | ```
97 |
98 | **ARM64 Architecture (Apple Silicon M1/M2/M3/M4)**
99 |
100 | ```bash
101 | # Run container
102 | docker compose -f docker-compose-mac.yaml up -d
103 | ```
104 |
105 | 4. Access the application in your browser at http://localhost:8585
106 |
107 | (Note)
108 | - If you need to modify ports or other settings, edit the docker-compose.yaml file before building.
109 |
110 | ## Install Directly from Source Code
111 |
112 | 1. Clone this repository
113 |
114 | ```bash
115 | git clone https://github.com/teddynote-lab/langgraph-mcp-agents.git
116 | cd langgraph-mcp-agents
117 | ```
118 |
119 | 2. Create a virtual environment and install dependencies using uv
120 |
121 | ```bash
122 | uv venv
123 | uv pip install -r requirements.txt
124 | source .venv/bin/activate # For Windows: .venv\Scripts\activate
125 | ```
126 |
127 | 3. Create a `.env` file with your API keys (copy from `.env.example`)
128 |
129 | ```bash
130 | cp .env.example .env
131 | ```
132 |
133 | Enter your obtained API keys in the `.env` file.
134 |
135 | (Note) Not all API keys are required. Only enter the ones you need.
136 | - `ANTHROPIC_API_KEY`: If you enter an Anthropic API key, you can use "claude-3-7-sonnet-latest", "claude-3-5-sonnet-latest", "claude-3-haiku-latest" models.
137 | - `OPENAI_API_KEY`: If you enter an OpenAI API key, you can use "gpt-4o", "gpt-4o-mini" models.
138 | - `LANGSMITH_API_KEY`: If you enter a LangSmith API key, you can use LangSmith tracing.
139 | ```bash
140 | ANTHROPIC_API_KEY=your_anthropic_api_key
141 | OPENAI_API_KEY=your_openai_api_key
142 | LANGSMITH_API_KEY=your_langsmith_api_key
143 | LANGSMITH_TRACING=true
144 | LANGSMITH_ENDPOINT=https://api.smith.langchain.com
145 | LANGSMITH_PROJECT=LangGraph-MCP-Agents
146 | ```
147 |
148 | 4. (New) Use the login/logout feature
149 |
150 | When using the login feature, set `USE_LOGIN` to `true` and enter `USER_ID` and `USER_PASSWORD`.
151 |
152 | ```bash
153 | USE_LOGIN=true
154 | USER_ID=admin
155 | USER_PASSWORD=admin123
156 | ```
157 |
158 | If you don't want to use the login feature, set `USE_LOGIN` to `false`.
159 |
160 | ```bash
161 | USE_LOGIN=false
162 | ```
163 |
164 | ## Usage
165 |
166 | 1. Start the Streamlit application.
167 |
168 | ```bash
169 | streamlit run app.py
170 | ```
171 |
172 | 2. The application will run in the browser and display the main interface.
173 |
174 | 3. Use the sidebar to add and configure MCP tools
175 |
176 | Visit [Smithery](https://smithery.ai/) to find useful MCP servers.
177 |
178 | First, select the tool you want to use.
179 |
180 | Click the COPY button in the JSON configuration on the right.
181 |
182 | ![Copy JSON](./assets/smithery-copy-json.png)
183 |
184 | Paste the copied JSON string in the `Tool JSON` section.
185 |
186 | ![Tool JSON](./assets/smithery-json.png)
187 |
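For reference, a pasted entry typically looks like the following (a sketch based on the Sequential Thinking server used in the hands-on notebook; `your_smithery_api_key` is a placeholder for your own key, and note the added `"transport": "stdio"` field that this project requires):

```json
{
  "server-sequential-thinking": {
    "command": "npx",
    "args": [
      "-y",
      "@smithery/cli@latest",
      "run",
      "@smithery-ai/server-sequential-thinking",
      "--key",
      "your_smithery_api_key"
    ],
    "transport": "stdio"
  }
}
```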
188 | Click the `Add Tool` button to add it to the "Registered Tools List" section.
189 |
190 | Finally, click the "Apply" button to apply the changes to initialize the agent with the new tools.
191 |
192 | ![Apply Tool Configuration](./assets/apply-tool-configuration.png)
193 |
194 | 4. Check the agent's status.
195 |
196 | ![Check Status](./assets/check-status.png)
197 |
198 | 5. Interact with the ReAct agent that utilizes the configured MCP tools by asking questions in the chat interface.
199 |
200 | ![Project Demo](./assets/project-demo.png)
201 |
202 | ## Hands-on Tutorial
203 |
204 | For developers who want to learn more deeply about how MCP and LangGraph integration works, we provide a comprehensive Jupyter notebook tutorial:
205 |
206 | - Link: [MCP-HandsOn-ENG.ipynb](./MCP-HandsOn-ENG.ipynb)
207 |
208 | This hands-on tutorial covers:
209 |
210 | 1. **MCP Client Setup** - Learn how to configure and initialize the MultiServerMCPClient to connect to MCP servers
211 | 2. **Local MCP Server Integration** - Connect to locally running MCP servers via SSE and Stdio methods
212 | 3. **RAG Integration** - Access retriever tools using MCP for document retrieval capabilities
213 | 4. **Mixed Transport Methods** - Combine different transport protocols (SSE and Stdio) in a single agent (see the sketch below)
214 | 5. **LangChain Tools + MCP** - Integrate native LangChain tools alongside MCP tools
215 |
216 | This tutorial provides practical examples with step-by-step explanations that help you understand how to build and integrate MCP tools into LangGraph agents.
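As a condensed taste of the mixed-transport setup covered in the notebook (the script path and SSE URL are the notebook's own examples):

```python
from langchain_mcp_adapters.client import MultiServerMCPClient

# One client, two transports: a local stdio server and a remote SSE server
client = MultiServerMCPClient(
    {
        "document-retriever": {
            "command": "./.venv/bin/python",
            "args": ["./mcp_server_rag.py"],  # launched as a subprocess over stdio
            "transport": "stdio",
        },
        "langchain-dev-docs": {
            "url": "https://teddynote.io/mcp/langchain/sse",  # must be reachable
            "transport": "sse",
        },
    }
)
```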
217 |
218 | ## License
219 |
220 | MIT License
221 |
222 | ## References
223 |
224 | - https://github.com/langchain-ai/langchain-mcp-adapters
--------------------------------------------------------------------------------
/README_KOR.md:
--------------------------------------------------------------------------------
1 | # LangGraph 에이전트 + MCP
2 |
3 | [English](README.md) | [한국어](README_KOR.md)
4 |
5 | [Version](https://github.com/teddylee777/langgraph-mcp-agents)
6 | [License: MIT](https://opensource.org/licenses/MIT)
7 | [Python](https://www.python.org/)
8 | [Stars](https://github.com/teddylee777/langgraph-mcp-agents)
9 |
10 | ![프로젝트 데모](./assets/project-demo.png)
11 |
12 | ## 프로젝트 개요
13 |
14 | ![아키텍처](./assets/architecture.png)
15 |
16 | `LangChain-MCP-Adapters`는 **LangChain AI**에서 제공하는 툴킷으로, AI 에이전트가 Model Context Protocol(MCP)을 통해 외부 도구 및 데이터 소스와 상호작용할 수 있게 해줍니다. 이 프로젝트는 MCP 도구를 통해 다양한 데이터 소스와 API에 접근할 수 있는 ReAct 에이전트를 배포하기 위한 사용자 친화적인 인터페이스를 제공합니다.
17 |
18 | ### 특징
19 |
20 | - **Streamlit 인터페이스**: MCP 도구가 포함된 LangGraph `ReAct Agent`와 상호작용하기 위한 사용자 친화적인 웹 인터페이스
21 | - **도구 관리**: UI를 통해 MCP 도구를 추가, 제거 및 구성(Smithery JSON 형식 지원). 애플리케이션을 재시작하지 않고도 동적으로 이루어집니다.
22 | - **스트리밍 응답**: 에이전트 응답과 도구 호출을 실시간으로 확인
23 | - **대화 기록**: 에이전트와의 대화 추적 및 관리
24 |
25 | ## MCP 아키텍처
26 |
27 | MCP(Model Context Protocol)는 세 가지 주요 구성 요소로 이루어져 있습니다.
28 |
29 | 1. **MCP 호스트**: Claude Desktop, IDE 또는 LangChain/LangGraph와 같이 MCP를 통해 데이터에 접근하고자 하는 프로그램.
30 |
31 | 2. **MCP 클라이언트**: 서버와 1:1 연결을 유지하는 프로토콜 클라이언트로, 호스트와 서버 사이의 중개자 역할을 합니다.
32 |
33 | 3. **MCP 서버**: 표준화된 모델 컨텍스트 프로토콜을 통해 특정 기능을 노출하는 경량 프로그램으로, 주요 데이터 소스 역할을 합니다. (이 프로젝트의 `config.json` 서버 항목 예시는 아래를 참고하세요.)
34 |
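이 프로젝트에서 에이전트가 사용할 각 MCP 서버는 `config.json` 의 항목으로 선언됩니다. 참고로, `app.py` 가 최초 실행 시 생성하는 기본 설정은 다음과 같이 내장된 time 서버를 stdio 방식으로 등록합니다.

```json
{
  "get_current_time": {
    "command": "python",
    "args": ["./mcp_server_time.py"],
    "transport": "stdio"
  }
}
```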
35 | ## Docker 로 빠른 실행
36 |
37 | 로컬 Python 환경을 설정하지 않고도 Docker를 사용하여 이 프로젝트를 쉽게 실행할 수 있습니다.
38 |
39 | ### 필수 요구사항(Docker Desktop)
40 |
41 | 아래의 링크에서 Docker Desktop을 설치합니다.
42 |
43 | - [Docker Desktop 설치](https://www.docker.com/products/docker-desktop/)
44 |
45 | ### Docker Compose로 실행하기
46 |
47 | 1. `dockers` 디렉토리로 이동
48 |
49 | ```bash
50 | cd dockers
51 | ```
52 |
53 | 2. 프로젝트 루트 디렉토리에 API 키가 포함된 `.env` 파일 생성.
54 |
55 | ```bash
56 | cp .env.example .env
57 | ```
58 |
59 | 발급 받은 API 키를 `.env` 파일에 입력합니다.
60 |
61 | (참고) 모든 API 키가 필요하지 않습니다. 필요한 경우에만 입력하세요.
62 | - `ANTHROPIC_API_KEY`: Anthropic API 키를 입력할 경우 "claude-3-7-sonnet-latest", "claude-3-5-sonnet-latest", "claude-3-haiku-latest" 모델을 사용합니다.
63 | - `OPENAI_API_KEY`: OpenAI API 키를 입력할 경우 "gpt-4o", "gpt-4o-mini" 모델을 사용합니다.
64 | - `LANGSMITH_API_KEY`: LangSmith API 키를 입력할 경우 LangSmith tracing을 사용합니다.
65 |
66 | ```bash
67 | ANTHROPIC_API_KEY=your_anthropic_api_key
68 | OPENAI_API_KEY=your_openai_api_key
69 | LANGSMITH_API_KEY=your_langsmith_api_key
70 | LANGSMITH_PROJECT=LangGraph-MCP-Agents
71 | LANGSMITH_TRACING=true
72 | LANGSMITH_ENDPOINT=https://api.smith.langchain.com
73 | ```
74 |
75 | (신규 기능) 로그인/로그아웃 기능 사용
76 |
77 | 로그인 기능을 사용시 `USE_LOGIN`을 `true`로 설정하고, `USER_ID`와 `USER_PASSWORD`를 입력합니다.
78 |
79 | ```bash
80 | USE_LOGIN=true
81 | USER_ID=admin
82 | USER_PASSWORD=admin123
83 | ```
84 |
85 | 만약, 로그인 기능을 사용하고 싶지 않다면, `USE_LOGIN`을 `false`로 설정합니다.
86 |
87 | ```bash
88 | USE_LOGIN=false
89 | ```
90 |
91 | 3. 시스템 아키텍처에 맞는 Docker Compose 파일 선택.
92 |
93 | **AMD64/x86_64 아키텍처(Intel/AMD 프로세서)**
94 |
95 | ```bash
96 | # 컨테이너 실행
97 | docker compose -f docker-compose-KOR.yaml up -d
98 | ```
99 |
100 | **ARM64 아키텍처(Apple Silicon M1/M2/M3/M4)**
101 |
102 | ```bash
103 | # 컨테이너 실행
104 | docker compose -f docker-compose-KOR-mac.yaml up -d
105 | ```
106 |
107 | 4. 브라우저에서 http://localhost:8585 로 애플리케이션 접속
108 |
109 | (참고)
110 | - 포트나 다른 설정을 수정해야 하는 경우, 빌드 전에 해당 docker-compose-KOR.yaml 파일을 편집하세요.
111 |
112 | ## 소스코드로 부터 직접 설치
113 |
114 | 1. 이 저장소를 클론합니다
115 |
116 | ```bash
117 | git clone https://github.com/teddynote-lab/langgraph-mcp-agents.git
118 | cd langgraph-mcp-agents
119 | ```
120 |
121 | 2. 가상 환경을 생성하고 uv를 사용하여 의존성을 설치합니다
122 |
123 | ```bash
124 | uv venv
125 | uv pip install -r requirements.txt
126 | source .venv/bin/activate # Windows의 경우: .venv\Scripts\activate
127 | ```
128 |
129 | 3. API 키가 포함된 `.env` 파일을 생성합니다(`.env.example` 에서 복사)
130 |
131 | ```bash
132 | cp .env.example .env
133 | ```
134 |
135 | 발급 받은 API 키를 `.env` 파일에 입력합니다.
136 |
137 | (참고) 모든 API 키가 필요하지 않습니다. 필요한 경우에만 입력하세요.
138 | - `ANTHROPIC_API_KEY`: Anthropic API 키를 입력할 경우 "claude-3-7-sonnet-latest", "claude-3-5-sonnet-latest", "claude-3-haiku-latest" 모델을 사용합니다.
139 | - `OPENAI_API_KEY`: OpenAI API 키를 입력할 경우 "gpt-4o", "gpt-4o-mini" 모델을 사용합니다.
140 | - `LANGSMITH_API_KEY`: LangSmith API 키를 입력할 경우 LangSmith tracing을 사용합니다.
141 |
142 | ```bash
143 | ANTHROPIC_API_KEY=your_anthropic_api_key
144 | OPENAI_API_KEY=your_openai_api_key(optional)
145 | LANGSMITH_API_KEY=your_langsmith_api_key
146 | LANGSMITH_PROJECT=LangGraph-MCP-Agents
147 | LANGSMITH_TRACING=true
148 | LANGSMITH_ENDPOINT=https://api.smith.langchain.com
149 | ```
150 |
151 | 4. (신규 기능) 로그인/로그아웃 기능 사용
152 |
153 | 로그인 기능을 사용시 `USE_LOGIN`을 `true`로 설정하고, `USER_ID`와 `USER_PASSWORD`를 입력합니다.
154 |
155 | ```bash
156 | USE_LOGIN=true
157 | USER_ID=admin
158 | USER_PASSWORD=admin123
159 | ```
160 |
161 | 만약, 로그인 기능을 사용하고 싶지 않다면, `USE_LOGIN`을 `false`로 설정합니다.
162 |
163 | ```bash
164 | USE_LOGIN=false
165 | ```
166 |
167 | ## 사용법
168 |
169 | 1. Streamlit 애플리케이션을 시작합니다. (한국어 버전 파일은 `app_KOR.py` 입니다.)
170 |
171 | ```bash
172 | streamlit run app_KOR.py
173 | ```
174 |
175 | 2. 애플리케이션이 브라우저에서 실행되어 메인 인터페이스를 표시합니다.
176 |
177 | 3. 사이드바를 사용하여 MCP 도구를 추가하고 구성합니다
178 |
179 | 유용한 MCP 서버를 찾으려면 [Smithery](https://smithery.ai/)를 방문하세요.
180 |
181 | 먼저, 사용하고자 하는 도구를 선택합니다.
182 |
183 | 오른쪽의 JSON 구성에서 COPY 버튼을 누릅니다.
184 |
185 | ![Smithery JSON 복사](./assets/smithery-copy-json.png)
186 |
187 | 복사된 JSON 문자열을 `Tool JSON` 섹션에 붙여넣습니다.
188 |
189 | ![Tool JSON](./assets/smithery-json.png)
190 |
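참고로, 붙여넣는 항목은 보통 다음과 같은 형태입니다. (핸즈온 노트북에서 사용한 Sequential Thinking 서버 기준의 예시이며, `your_smithery_api_key` 는 본인의 Smithery 키로 교체해야 하는 자리표시자입니다. 이 프로젝트에서 필수인 `"transport": "stdio"` 필드가 추가된 점에 유의하세요.)

```json
{
  "server-sequential-thinking": {
    "command": "npx",
    "args": [
      "-y",
      "@smithery/cli@latest",
      "run",
      "@smithery-ai/server-sequential-thinking",
      "--key",
      "your_smithery_api_key"
    ],
    "transport": "stdio"
  }
}
```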
191 | `Add Tool` 버튼을 눌러 "Registered Tools List" 섹션에 추가합니다.
192 |
193 | 마지막으로, "Apply" 버튼을 눌러 새로운 도구로 에이전트를 초기화하도록 변경사항을 적용합니다.
194 |
195 | ![도구 설정 적용](./assets/apply-tool-configuration.png)
196 |
197 | 4. 에이전트의 상태를 확인합니다.
198 |
199 | ![상태 확인](./assets/check-status.png)
200 |
201 | 5. 채팅 인터페이스에서 질문을 하여 구성된 MCP 도구를 활용하는 ReAct 에이전트와 상호작용합니다.
202 |
203 | ![프로젝트 데모](./assets/project-demo.png)
204 |
205 | ## 핸즈온 튜토리얼
206 |
207 | 개발자가 MCP와 LangGraph의 통합 작동 방식에 대해 더 깊이 알아보려면, 포괄적인 Jupyter 노트북 튜토리얼을 제공합니다:
208 |
209 | - 링크: [MCP-HandsOn-KOR.ipynb](./MCP-HandsOn-KOR.ipynb)
210 |
211 | 이 핸즈온 튜토리얼은 다음 내용을 다룹니다.
212 |
213 | 1. **MCP 클라이언트 설정** - MCP 서버에 연결하기 위한 MultiServerMCPClient 구성 및 초기화 방법 학습
214 | 2. **로컬 MCP 서버 통합** - SSE 및 Stdio 메서드를 통해 로컬에서 실행 중인 MCP 서버에 연결
215 | 3. **RAG 통합** - 문서 검색 기능을 위해 MCP를 사용하여 리트리버 도구 접근
216 | 4. **혼합 전송 방법** - 하나의 에이전트에서 다양한 전송 프로토콜(SSE 및 Stdio) 결합 (아래 스케치 참고)
217 | 5. **LangChain 도구 + MCP** - MCP 도구와 함께 네이티브 LangChain 도구 통합
218 |
219 | 이 튜토리얼은 MCP 도구를 LangGraph 에이전트에 구축하고 통합하는 방법을 이해하는 데 도움이 되는 단계별 설명이 포함된 실용적인 예제를 제공합니다.
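노트북에서 다루는 혼합 전송 설정을 요약하면 다음과 같습니다. (스크립트 경로와 SSE URL 은 노트북의 예시 그대로입니다.)

```python
from langchain_mcp_adapters.client import MultiServerMCPClient

# 하나의 클라이언트에 두 가지 전송 방식: 로컬 stdio 서버 + 원격 SSE 서버
client = MultiServerMCPClient(
    {
        "document-retriever": {
            "command": "./.venv/bin/python",
            "args": ["./mcp_server_rag.py"],  # stdio 방식으로 서브프로세스로 실행됨
            "transport": "stdio",
        },
        "langchain-dev-docs": {
            "url": "https://teddynote.io/mcp/langchain/sse",  # 실행 중인 서버여야 함
            "transport": "sse",
        },
    }
)
```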
220 |
221 | ## License
222 |
223 | MIT License
224 |
225 | ## Watch the Tutorial Video (Korean)
226 |
227 | [![Watch on YouTube](https://img.youtube.com/vi/ISrYHGg2C2c/maxresdefault.jpg)](https://youtu.be/ISrYHGg2C2c?si=eWmKFVUS1BLtPm5U)
228 |
229 | ## References
230 |
231 | - https://github.com/langchain-ai/langchain-mcp-adapters
232 |
233 |
--------------------------------------------------------------------------------
/app.py:
--------------------------------------------------------------------------------
1 | import streamlit as st
2 | import asyncio
3 | import nest_asyncio
4 | import json
5 | import os
6 | import platform
7 |
8 | if platform.system() == "Windows":
9 | asyncio.set_event_loop_policy(asyncio.WindowsProactorEventLoopPolicy())
10 |
11 | # Apply nest_asyncio: Allow nested calls within an already running event loop
12 | nest_asyncio.apply()
13 |
14 | # Create and reuse global event loop (create once and continue using)
15 | if "event_loop" not in st.session_state:
16 | loop = asyncio.new_event_loop()
17 | st.session_state.event_loop = loop
18 | asyncio.set_event_loop(loop)
19 |
20 | from langgraph.prebuilt import create_react_agent
21 | from langchain_anthropic import ChatAnthropic
22 | from langchain_openai import ChatOpenAI
23 | from langchain_core.messages import HumanMessage
24 | from dotenv import load_dotenv
25 | from langchain_mcp_adapters.client import MultiServerMCPClient
26 | from utils import astream_graph, random_uuid
27 | from langchain_core.messages.ai import AIMessageChunk
28 | from langchain_core.messages.tool import ToolMessage
29 | from langgraph.checkpoint.memory import MemorySaver
30 | from langchain_core.runnables import RunnableConfig
31 |
32 | # Load environment variables (get API keys and settings from .env file)
33 | load_dotenv(override=True)
34 |
35 | # config.json file path setting
36 | CONFIG_FILE_PATH = "config.json"
37 |
38 | # Function to load settings from JSON file
39 | def load_config_from_json():
40 | """
41 | Loads settings from config.json file.
42 | Creates a file with default settings if it doesn't exist.
43 |
44 | Returns:
45 | dict: Loaded settings
46 | """
47 | default_config = {
48 | "get_current_time": {
49 | "command": "python",
50 | "args": ["./mcp_server_time.py"],
51 | "transport": "stdio"
52 | }
53 | }
54 |
55 | try:
56 | if os.path.exists(CONFIG_FILE_PATH):
57 | with open(CONFIG_FILE_PATH, "r", encoding="utf-8") as f:
58 | return json.load(f)
59 | else:
60 | # Create file with default settings if it doesn't exist
61 | save_config_to_json(default_config)
62 | return default_config
63 | except Exception as e:
64 | st.error(f"Error loading settings file: {str(e)}")
65 | return default_config
66 |
67 | # Function to save settings to JSON file
68 | def save_config_to_json(config):
69 | """
70 | Saves settings to config.json file.
71 |
72 | Args:
73 | config (dict): Settings to save
74 |
75 | Returns:
76 | bool: Save success status
77 | """
78 | try:
79 | with open(CONFIG_FILE_PATH, "w", encoding="utf-8") as f:
80 | json.dump(config, f, indent=2, ensure_ascii=False)
81 | return True
82 | except Exception as e:
83 | st.error(f"Error saving settings file: {str(e)}")
84 | return False
85 |
86 | # Initialize login session variables
87 | if "authenticated" not in st.session_state:
88 | st.session_state.authenticated = False
89 |
90 | # Check if login is required
91 | use_login = os.environ.get("USE_LOGIN", "false").lower() == "true"
92 |
93 | # Change page settings based on login status
94 | if use_login and not st.session_state.authenticated:
95 | # Login page uses default (narrow) layout
96 | st.set_page_config(page_title="Agent with MCP Tools", page_icon="🧠")
97 | else:
98 | # Main app uses wide layout
99 | st.set_page_config(page_title="Agent with MCP Tools", page_icon="🧠", layout="wide")
100 |
101 | # Display login screen if login feature is enabled and not yet authenticated
102 | if use_login and not st.session_state.authenticated:
103 | st.title("🔐 Login")
104 | st.markdown("Login is required to use the system.")
105 |
106 | # Place login form in the center of the screen with narrow width
107 | with st.form("login_form"):
108 | username = st.text_input("Username")
109 | password = st.text_input("Password", type="password")
110 | submit_button = st.form_submit_button("Login")
111 |
112 | if submit_button:
113 | expected_username = os.environ.get("USER_ID")
114 | expected_password = os.environ.get("USER_PASSWORD")
115 |
116 | if username == expected_username and password == expected_password:
117 | st.session_state.authenticated = True
118 | st.success("✅ Login successful! Please wait...")
119 | st.rerun()
120 | else:
121 | st.error("❌ Username or password is incorrect.")
122 |
123 | # Don't display the main app on the login screen
124 | st.stop()
125 |
126 | # Add author information at the top of the sidebar (placed before other sidebar elements)
127 | st.sidebar.markdown("### ✍️ Made by [TeddyNote](https://youtube.com/c/teddynote) 🚀")
128 | st.sidebar.markdown(
129 | "### 💻 [Project Page](https://github.com/teddynote-lab/langgraph-mcp-agents)"
130 | )
131 |
132 | st.sidebar.divider() # Add divider
133 |
134 | # Existing page title and description
135 | st.title("💬 MCP Tool Utilization Agent")
136 | st.markdown("✨ Ask questions to the ReAct agent that utilizes MCP tools.")
137 |
138 | SYSTEM_PROMPT = """
139 | You are a smart agent with an ability to use tools.
140 | You will be given a question and you will use the tools to answer the question.
141 | Pick the most relevant tool to answer the question.
142 | If you fail to answer the question, try different tools to get context.
143 | Your answer should be very polite and professional.
144 |
145 |
146 | ----
147 |
148 |
149 | Step 1: Analyze the question
150 | - Analyze user's question and final goal.
151 | - If the user's question consists of multiple sub-questions, split it into smaller sub-questions.
152 |
153 | Step 2: Pick the most relevant tool
154 | - Pick the most relevant tool to answer the question.
155 | - If you fail to answer the question, try different tools to get context.
156 |
157 | Step 3: Answer the question
158 | - Answer the question in the same language as the question.
159 | - Your answer should be very polite and professional.
160 |
161 | Step 4: Provide the source of the answer(if applicable)
162 | - If you've used the tool, provide the source of the answer.
163 | - Valid sources are either a website(URL) or a document(PDF, etc).
164 |
165 | Guidelines:
166 | - If you've used a tool, your answer should be based on the tool's output (the tool's output matters more than your own knowledge).
167 | - If you've used a tool and the source is a valid URL, provide the source (URL) of the answer.
168 | - Skip providing the source if it is not a URL.
169 | - Answer in the same language as the question.
170 | - Answer should be concise and to the point.
171 | - Do not include anything in your output other than the answer and the source.
172 |
173 |
174 | ----
175 |
176 |
177 | (concise answer to the question)
178 |
179 | **Source**(if applicable)
180 | - (source1: valid URL)
181 | - (source2: valid URL)
182 | - ...
183 |
184 | """
185 |
186 | OUTPUT_TOKEN_INFO = {
187 | "claude-3-5-sonnet-latest": {"max_tokens": 8192},
188 | "claude-3-5-haiku-latest": {"max_tokens": 8192},
189 | "claude-3-7-sonnet-latest": {"max_tokens": 64000},
190 | "gpt-4o": {"max_tokens": 16000},
191 | "gpt-4o-mini": {"max_tokens": 16000},
192 | }
193 |
194 | # Initialize session state
195 | if "session_initialized" not in st.session_state:
196 | st.session_state.session_initialized = False # Session initialization flag
197 | st.session_state.agent = None # Storage for ReAct agent object
198 | st.session_state.history = [] # List for storing conversation history
199 | st.session_state.mcp_client = None # Storage for MCP client object
200 | st.session_state.timeout_seconds = (
201 | 120 # Response generation time limit (seconds), default 120 seconds
202 | )
203 | st.session_state.selected_model = (
204 | "claude-3-7-sonnet-latest" # Default model selection
205 | )
206 | st.session_state.recursion_limit = 100 # Recursion call limit, default 100
207 |
208 | if "thread_id" not in st.session_state:
209 | st.session_state.thread_id = random_uuid()
210 |
211 |
212 | # --- Function Definitions ---
213 |
214 |
215 | async def cleanup_mcp_client():
216 | """
217 | Safely terminates the existing MCP client.
218 |
219 | Properly releases resources if an existing client exists.
220 | """
221 | if "mcp_client" in st.session_state and st.session_state.mcp_client is not None:
222 | try:
223 |
224 | await st.session_state.mcp_client.__aexit__(None, None, None)
225 | st.session_state.mcp_client = None
226 |         except Exception as e:  # cleanup errors are non-fatal; the client is discarded anyway
227 |             import traceback  # used by the optional debug lines below
228 |
229 |             # st.warning(f"Error while terminating MCP client: {str(e)}")
230 |             # st.warning(traceback.format_exc())
231 |
232 |
233 | def print_message():
234 | """
235 | Displays chat history on the screen.
236 |
237 | Distinguishes between user and assistant messages on the screen,
238 | and displays tool call information within the assistant message container.
239 | """
240 | i = 0
241 | while i < len(st.session_state.history):
242 | message = st.session_state.history[i]
243 |
244 | if message["role"] == "user":
245 |             st.chat_message("user", avatar="🧑‍💻").markdown(message["content"])
246 | i += 1
247 | elif message["role"] == "assistant":
248 | # Create assistant message container
249 | with st.chat_message("assistant", avatar="🤖"):
250 | # Display assistant message content
251 | st.markdown(message["content"])
252 |
253 | # Check if the next message is tool call information
254 | if (
255 | i + 1 < len(st.session_state.history)
256 | and st.session_state.history[i + 1]["role"] == "assistant_tool"
257 | ):
258 | # Display tool call information in the same container as an expander
259 | with st.expander("🔧 Tool Call Information", expanded=False):
260 | st.markdown(st.session_state.history[i + 1]["content"])
261 | i += 2 # Increment by 2 as we processed two messages together
262 | else:
263 | i += 1 # Increment by 1 as we only processed a regular message
264 | else:
265 | # Skip assistant_tool messages as they are handled above
266 | i += 1
267 |
268 |
269 | def get_streaming_callback(text_placeholder, tool_placeholder):
270 | """
271 | Creates a streaming callback function.
272 |
273 | This function creates a callback function to display responses generated from the LLM in real-time.
274 | It displays text responses and tool call information in separate areas.
275 |
276 | Args:
277 | text_placeholder: Streamlit component to display text responses
278 | tool_placeholder: Streamlit component to display tool call information
279 |
280 | Returns:
281 | callback_func: Streaming callback function
282 | accumulated_text: List to store accumulated text responses
283 | accumulated_tool: List to store accumulated tool call information
284 | """
285 | accumulated_text = []
286 | accumulated_tool = []
287 |
288 | def callback_func(message: dict):
289 | nonlocal accumulated_text, accumulated_tool
290 | message_content = message.get("content", None)
291 |
292 | if isinstance(message_content, AIMessageChunk):
293 | content = message_content.content
294 | # If content is in list form (mainly occurs in Claude models)
295 | if isinstance(content, list) and len(content) > 0:
296 | message_chunk = content[0]
297 | # Process text type
298 | if message_chunk["type"] == "text":
299 | accumulated_text.append(message_chunk["text"])
300 | text_placeholder.markdown("".join(accumulated_text))
301 | # Process tool use type
302 | elif message_chunk["type"] == "tool_use":
303 | if "partial_json" in message_chunk:
304 | accumulated_tool.append(message_chunk["partial_json"])
305 | else:
306 | tool_call_chunks = message_content.tool_call_chunks
307 | tool_call_chunk = tool_call_chunks[0]
308 | accumulated_tool.append(
309 | "\n```json\n" + str(tool_call_chunk) + "\n```\n"
310 | )
311 | with tool_placeholder.expander(
312 | "🔧 Tool Call Information", expanded=True
313 | ):
314 | st.markdown("".join(accumulated_tool))
315 | # Process if tool_calls attribute exists (mainly occurs in OpenAI models)
316 | elif (
317 | hasattr(message_content, "tool_calls")
318 | and message_content.tool_calls
319 | and len(message_content.tool_calls[0]["name"]) > 0
320 | ):
321 | tool_call_info = message_content.tool_calls[0]
322 | accumulated_tool.append("\n```json\n" + str(tool_call_info) + "\n```\n")
323 | with tool_placeholder.expander(
324 | "🔧 Tool Call Information", expanded=True
325 | ):
326 | st.markdown("".join(accumulated_tool))
327 | # Process if content is a simple string
328 | elif isinstance(content, str):
329 | accumulated_text.append(content)
330 | text_placeholder.markdown("".join(accumulated_text))
331 | # Process if invalid tool call information exists
332 | elif (
333 | hasattr(message_content, "invalid_tool_calls")
334 | and message_content.invalid_tool_calls
335 | ):
336 | tool_call_info = message_content.invalid_tool_calls[0]
337 | accumulated_tool.append("\n```json\n" + str(tool_call_info) + "\n```\n")
338 | with tool_placeholder.expander(
339 | "🔧 Tool Call Information (Invalid)", expanded=True
340 | ):
341 | st.markdown("".join(accumulated_tool))
342 | # Process if tool_call_chunks attribute exists
343 | elif (
344 | hasattr(message_content, "tool_call_chunks")
345 | and message_content.tool_call_chunks
346 | ):
347 | tool_call_chunk = message_content.tool_call_chunks[0]
348 | accumulated_tool.append(
349 | "\n```json\n" + str(tool_call_chunk) + "\n```\n"
350 | )
351 | with tool_placeholder.expander(
352 | "🔧 Tool Call Information", expanded=True
353 | ):
354 | st.markdown("".join(accumulated_tool))
355 | # Process if tool_calls exists in additional_kwargs (supports various model compatibility)
356 | elif (
357 | hasattr(message_content, "additional_kwargs")
358 | and "tool_calls" in message_content.additional_kwargs
359 | ):
360 | tool_call_info = message_content.additional_kwargs["tool_calls"][0]
361 | accumulated_tool.append("\n```json\n" + str(tool_call_info) + "\n```\n")
362 | with tool_placeholder.expander(
363 | "🔧 Tool Call Information", expanded=True
364 | ):
365 | st.markdown("".join(accumulated_tool))
366 | # Process if it's a tool message (tool response)
367 | elif isinstance(message_content, ToolMessage):
368 | accumulated_tool.append(
369 | "\n```json\n" + str(message_content.content) + "\n```\n"
370 | )
371 | with tool_placeholder.expander("🔧 Tool Call Information", expanded=True):
372 | st.markdown("".join(accumulated_tool))
373 | return None
374 |
375 | return callback_func, accumulated_text, accumulated_tool
376 |
377 |
378 | async def process_query(query, text_placeholder, tool_placeholder, timeout_seconds=60):
379 | """
380 | Processes user questions and generates responses.
381 |
382 | This function passes the user's question to the agent and streams the response in real-time.
383 | Returns a timeout error if the response is not completed within the specified time.
384 |
385 | Args:
386 | query: Text of the question entered by the user
387 | text_placeholder: Streamlit component to display text responses
388 | tool_placeholder: Streamlit component to display tool call information
389 | timeout_seconds: Response generation time limit (seconds)
390 |
391 | Returns:
392 | response: Agent's response object
393 | final_text: Final text response
394 | final_tool: Final tool call information
395 | """
396 | try:
397 | if st.session_state.agent:
398 | streaming_callback, accumulated_text_obj, accumulated_tool_obj = (
399 | get_streaming_callback(text_placeholder, tool_placeholder)
400 | )
401 | try:
402 | response = await asyncio.wait_for(
403 | astream_graph(
404 | st.session_state.agent,
405 | {"messages": [HumanMessage(content=query)]},
406 | callback=streaming_callback,
407 | config=RunnableConfig(
408 | recursion_limit=st.session_state.recursion_limit,
409 | thread_id=st.session_state.thread_id,
410 | ),
411 | ),
412 | timeout=timeout_seconds,
413 | )
414 | except asyncio.TimeoutError:
415 | error_msg = f"⏱️ Request time exceeded {timeout_seconds} seconds. Please try again later."
416 | return {"error": error_msg}, error_msg, ""
417 |
418 | final_text = "".join(accumulated_text_obj)
419 | final_tool = "".join(accumulated_tool_obj)
420 | return response, final_text, final_tool
421 | else:
422 | return (
423 | {"error": "🚫 Agent has not been initialized."},
424 | "🚫 Agent has not been initialized.",
425 | "",
426 | )
427 | except Exception as e:
428 | import traceback
429 |
430 | error_msg = f"❌ Error occurred during query processing: {str(e)}\n{traceback.format_exc()}"
431 | return {"error": error_msg}, error_msg, ""
432 |
433 |
434 | async def initialize_session(mcp_config=None):
435 | """
436 | Initializes MCP session and agent.
437 |
438 | Args:
439 | mcp_config: MCP tool configuration information (JSON). Uses default settings if None
440 |
441 | Returns:
442 | bool: Initialization success status
443 | """
444 | with st.spinner("🔄 Connecting to MCP server..."):
445 | # First safely clean up existing client
446 | await cleanup_mcp_client()
447 |
448 | if mcp_config is None:
449 | # Load settings from config.json file
450 | mcp_config = load_config_from_json()
451 | client = MultiServerMCPClient(mcp_config)
452 | await client.__aenter__()
453 | tools = client.get_tools()
454 | st.session_state.tool_count = len(tools)
455 | st.session_state.mcp_client = client
456 |
457 | # Initialize appropriate model based on selection
458 | selected_model = st.session_state.selected_model
459 |
460 | if selected_model in [
461 | "claude-3-7-sonnet-latest",
462 | "claude-3-5-sonnet-latest",
463 | "claude-3-5-haiku-latest",
464 | ]:
465 | model = ChatAnthropic(
466 | model=selected_model,
467 | temperature=0.1,
468 | max_tokens=OUTPUT_TOKEN_INFO[selected_model]["max_tokens"],
469 | )
470 | else: # Use OpenAI model
471 | model = ChatOpenAI(
472 | model=selected_model,
473 | temperature=0.1,
474 | max_tokens=OUTPUT_TOKEN_INFO[selected_model]["max_tokens"],
475 | )
476 | agent = create_react_agent(
477 | model,
478 | tools,
479 | checkpointer=MemorySaver(),
480 | prompt=SYSTEM_PROMPT,
481 | )
482 | st.session_state.agent = agent
483 | st.session_state.session_initialized = True
484 | return True
485 |
486 |
487 | # --- Sidebar: System Settings Section ---
488 | with st.sidebar:
489 | st.subheader("⚙️ System Settings")
490 |
491 | # Model selection feature
492 | # Create list of available models
493 | available_models = []
494 |
495 | # Check Anthropic API key
496 | has_anthropic_key = os.environ.get("ANTHROPIC_API_KEY") is not None
497 | if has_anthropic_key:
498 | available_models.extend(
499 | [
500 | "claude-3-7-sonnet-latest",
501 | "claude-3-5-sonnet-latest",
502 | "claude-3-5-haiku-latest",
503 | ]
504 | )
505 |
506 | # Check OpenAI API key
507 | has_openai_key = os.environ.get("OPENAI_API_KEY") is not None
508 | if has_openai_key:
509 | available_models.extend(["gpt-4o", "gpt-4o-mini"])
510 |
511 | # Display message if no models are available
512 | if not available_models:
513 | st.warning(
514 | "⚠️ API keys are not configured. Please add ANTHROPIC_API_KEY or OPENAI_API_KEY to your .env file."
515 | )
516 | # Add Claude model as default (to show UI even without keys)
517 | available_models = ["claude-3-7-sonnet-latest"]
518 |
519 | # Model selection dropdown
520 | previous_model = st.session_state.selected_model
521 | st.session_state.selected_model = st.selectbox(
522 | "🤖 Select model to use",
523 | options=available_models,
524 | index=(
525 | available_models.index(st.session_state.selected_model)
526 | if st.session_state.selected_model in available_models
527 | else 0
528 | ),
529 | help="Anthropic models require ANTHROPIC_API_KEY and OpenAI models require OPENAI_API_KEY to be set as environment variables.",
530 | )
531 |
532 | # Notify when model is changed and session needs to be reinitialized
533 | if (
534 | previous_model != st.session_state.selected_model
535 | and st.session_state.session_initialized
536 | ):
537 | st.warning(
538 | "⚠️ Model has been changed. Click 'Apply Settings' button to apply changes."
539 | )
540 |
541 | # Add timeout setting slider
542 | st.session_state.timeout_seconds = st.slider(
543 | "⏱️ Response generation time limit (seconds)",
544 | min_value=60,
545 | max_value=300,
546 | value=st.session_state.timeout_seconds,
547 | step=10,
548 | help="Set the maximum time for the agent to generate a response. Complex tasks may require more time.",
549 | )
550 |
551 | st.session_state.recursion_limit = st.slider(
552 | "⏱️ Recursion call limit (count)",
553 | min_value=10,
554 | max_value=200,
555 | value=st.session_state.recursion_limit,
556 | step=10,
557 | help="Set the recursion call limit. Setting too high a value may cause memory issues.",
558 | )
559 |
560 | st.divider() # Add divider
561 |
562 | # Tool settings section
563 | st.subheader("🔧 Tool Settings")
564 |
565 | # Manage expander state in session state
566 | if "mcp_tools_expander" not in st.session_state:
567 | st.session_state.mcp_tools_expander = False
568 |
569 | # MCP tool addition interface
570 | with st.expander("🧰 Add MCP Tools", expanded=st.session_state.mcp_tools_expander):
571 | # Load settings from config.json file
572 | loaded_config = load_config_from_json()
573 | default_config_text = json.dumps(loaded_config, indent=2, ensure_ascii=False)
574 |
575 | # Create pending config based on existing mcp_config_text if not present
576 | if "pending_mcp_config" not in st.session_state:
577 | try:
578 | st.session_state.pending_mcp_config = loaded_config
579 | except Exception as e:
580 | st.error(f"Failed to set initial pending config: {e}")
581 |
582 | # UI for adding individual tools
583 | st.subheader("Add Tool(JSON format)")
584 | st.markdown(
585 | """
586 | Please insert **ONE tool** in JSON format.
587 |
588 | [How to Set Up?](https://teddylee777.notion.site/MCP-Tool-Setup-Guide-English-1d324f35d1298030a831dfb56045906a)
589 |
590 | ⚠️ **Important**: JSON must be wrapped in curly braces (`{}`).
591 | """
592 | )
593 |
594 | # Provide clearer example
595 | example_json = {
596 | "github": {
597 | "command": "npx",
598 | "args": [
599 | "-y",
600 | "@smithery/cli@latest",
601 | "run",
602 | "@smithery-ai/github",
603 | "--config",
604 | '{"githubPersonalAccessToken":"your_token_here"}',
605 | ],
606 | "transport": "stdio",
607 | }
608 | }
609 |
610 | default_text = json.dumps(example_json, indent=2, ensure_ascii=False)
611 |
612 | new_tool_json = st.text_area(
613 | "Tool JSON",
614 | default_text,
615 | height=250,
616 | )
617 |
618 | # Add button
619 | if st.button(
620 | "Add Tool",
621 | type="primary",
622 | key="add_tool_button",
623 | use_container_width=True,
624 | ):
625 | try:
626 | # Validate input
627 | if not new_tool_json.strip().startswith(
628 | "{"
629 | ) or not new_tool_json.strip().endswith("}"):
630 | st.error("JSON must start and end with curly braces ({}).")
631 | st.markdown('Correct format: `{ "tool_name": { ... } }`')
632 | else:
633 | # Parse JSON
634 | parsed_tool = json.loads(new_tool_json)
635 |
636 | # Check if it's in mcpServers format and process accordingly
637 | if "mcpServers" in parsed_tool:
638 | # Move contents of mcpServers to top level
639 | parsed_tool = parsed_tool["mcpServers"]
640 | st.info(
641 | "'mcpServers' format detected. Converting automatically."
642 | )
643 |
644 | # Check number of tools entered
645 | if len(parsed_tool) == 0:
646 | st.error("Please enter at least one tool.")
647 | else:
648 | # Process all tools
649 | success_tools = []
650 | for tool_name, tool_config in parsed_tool.items():
651 | # Check URL field and set transport
652 | if "url" in tool_config:
653 | # Set transport to "sse" if URL exists
654 | tool_config["transport"] = "sse"
655 | st.info(
656 | f"URL detected in '{tool_name}' tool, setting transport to 'sse'."
657 | )
658 | elif "transport" not in tool_config:
659 | # Set default "stdio" if URL doesn't exist and transport isn't specified
660 | tool_config["transport"] = "stdio"
661 |
662 | # Check required fields
663 | if (
664 | "command" not in tool_config
665 | and "url" not in tool_config
666 | ):
667 | st.error(
668 | f"'{tool_name}' tool configuration requires either 'command' or 'url' field."
669 | )
670 | elif "command" in tool_config and "args" not in tool_config:
671 | st.error(
672 | f"'{tool_name}' tool configuration requires 'args' field."
673 | )
674 | elif "command" in tool_config and not isinstance(
675 | tool_config["args"], list
676 | ):
677 | st.error(
678 | f"'args' field in '{tool_name}' tool must be an array ([]) format."
679 | )
680 | else:
681 | # Add tool to pending_mcp_config
682 | st.session_state.pending_mcp_config[tool_name] = (
683 | tool_config
684 | )
685 | success_tools.append(tool_name)
686 |
687 | # Success message
688 | if success_tools:
689 | if len(success_tools) == 1:
690 | st.success(
691 | f"{success_tools[0]} tool has been added. Click 'Apply Settings' button to apply."
692 | )
693 | else:
694 | tool_names = ", ".join(success_tools)
695 | st.success(
696 | f"Total {len(success_tools)} tools ({tool_names}) have been added. Click 'Apply Settings' button to apply."
697 | )
698 | # Collapse expander after adding
699 | st.session_state.mcp_tools_expander = False
700 | st.rerun()
701 | except json.JSONDecodeError as e:
702 | st.error(f"JSON parsing error: {e}")
703 | st.markdown(
704 | f"""
705 | **How to fix**:
706 | 1. Check that your JSON format is correct.
707 | 2. All keys must be wrapped in double quotes (").
708 | 3. String values must also be wrapped in double quotes (").
709 | 4. When using double quotes within a string, they must be escaped (\\").
710 | """
711 | )
712 | except Exception as e:
713 | st.error(f"Error occurred: {e}")
714 |
715 | # Display registered tools list and add delete buttons
716 | with st.expander("📋 Registered Tools List", expanded=True):
717 | try:
718 | pending_config = st.session_state.pending_mcp_config
719 | except Exception as e:
720 | st.error("Not a valid MCP tool configuration.")
721 | else:
722 | # Iterate through keys (tool names) in pending config
723 | for tool_name in list(pending_config.keys()):
724 | col1, col2 = st.columns([8, 2])
725 | col1.markdown(f"- **{tool_name}**")
726 | if col2.button("Delete", key=f"delete_{tool_name}"):
727 | # Delete tool from pending config (not applied immediately)
728 | del st.session_state.pending_mcp_config[tool_name]
729 | st.success(
730 | f"{tool_name} tool has been deleted. Click 'Apply Settings' button to apply."
731 | )
732 |
733 | st.divider() # Add divider
734 |
735 | # --- Sidebar: System Information and Action Buttons Section ---
736 | with st.sidebar:
737 | st.subheader("📊 System Information")
738 | st.write(
739 | f"🛠️ MCP Tools Count: {st.session_state.get('tool_count', 'Initializing...')}"
740 | )
741 | selected_model_name = st.session_state.selected_model
742 | st.write(f"🧠 Current Model: {selected_model_name}")
743 |
744 | # Move Apply Settings button here
745 | if st.button(
746 | "Apply Settings",
747 | key="apply_button",
748 | type="primary",
749 | use_container_width=True,
750 | ):
751 | # Display applying message
752 | apply_status = st.empty()
753 | with apply_status.container():
754 | st.warning("🔄 Applying changes. Please wait...")
755 | progress_bar = st.progress(0)
756 |
757 | # Save settings
758 | st.session_state.mcp_config_text = json.dumps(
759 | st.session_state.pending_mcp_config, indent=2, ensure_ascii=False
760 | )
761 |
762 | # Save settings to config.json file
763 | save_result = save_config_to_json(st.session_state.pending_mcp_config)
764 | if not save_result:
765 | st.error("❌ Failed to save settings file.")
766 |
767 | progress_bar.progress(15)
768 |
769 | # Prepare session initialization
770 | st.session_state.session_initialized = False
771 | st.session_state.agent = None
772 |
773 | # Update progress
774 | progress_bar.progress(30)
775 |
776 | # Run initialization
777 | success = st.session_state.event_loop.run_until_complete(
778 | initialize_session(st.session_state.pending_mcp_config)
779 | )
780 |
781 | # Update progress
782 | progress_bar.progress(100)
783 |
784 | if success:
785 | st.success("✅ New settings have been applied.")
786 | # Collapse tool addition expander
787 | if "mcp_tools_expander" in st.session_state:
788 | st.session_state.mcp_tools_expander = False
789 | else:
790 | st.error("❌ Failed to apply settings.")
791 |
792 | # Refresh page
793 | st.rerun()
794 |
795 | st.divider() # Add divider
796 |
797 | # Action buttons section
798 | st.subheader("🔄 Actions")
799 |
800 | # Reset conversation button
801 | if st.button("Reset Conversation", use_container_width=True, type="primary"):
802 | # Reset thread_id
803 | st.session_state.thread_id = random_uuid()
804 |
805 | # Reset conversation history
806 | st.session_state.history = []
807 |
808 | # Notification message
809 | st.success("✅ Conversation has been reset.")
810 |
811 | # Refresh page
812 | st.rerun()
813 |
814 | # Show logout button only if login feature is enabled
815 | if use_login and st.session_state.authenticated:
816 | st.divider() # Add divider
817 | if st.button("Logout", use_container_width=True, type="secondary"):
818 | st.session_state.authenticated = False
819 | st.success("✅ You have been logged out.")
820 | st.rerun()
821 |
822 | # --- Initialize default session (if not initialized) ---
823 | if not st.session_state.session_initialized:
824 | st.info(
825 | "MCP server and agent are not initialized. Please click the 'Apply Settings' button in the left sidebar to initialize."
826 | )
827 |
828 |
829 | # --- Print conversation history ---
830 | print_message()
831 |
832 | # --- User input and processing ---
833 | user_query = st.chat_input("💬 Enter your question")
834 | if user_query:
835 | if st.session_state.session_initialized:
836 |         st.chat_message("user", avatar="🧑‍💻").markdown(user_query)
837 | with st.chat_message("assistant", avatar="🤖"):
838 | tool_placeholder = st.empty()
839 | text_placeholder = st.empty()
840 | resp, final_text, final_tool = (
841 | st.session_state.event_loop.run_until_complete(
842 | process_query(
843 | user_query,
844 | text_placeholder,
845 | tool_placeholder,
846 | st.session_state.timeout_seconds,
847 | )
848 | )
849 | )
850 | if "error" in resp:
851 | st.error(resp["error"])
852 | else:
853 | st.session_state.history.append({"role": "user", "content": user_query})
854 | st.session_state.history.append(
855 | {"role": "assistant", "content": final_text}
856 | )
857 | if final_tool.strip():
858 | st.session_state.history.append(
859 | {"role": "assistant_tool", "content": final_tool}
860 | )
861 | st.rerun()
862 | else:
863 | st.warning(
864 | "⚠️ MCP server and agent are not initialized. Please click the 'Apply Settings' button in the left sidebar to initialize."
865 | )
866 |
--------------------------------------------------------------------------------
/app_KOR.py:
--------------------------------------------------------------------------------
1 | import streamlit as st
2 | import asyncio
3 | import nest_asyncio
4 | import json
5 | import os
6 | import platform
7 |
8 | if platform.system() == "Windows":
9 | asyncio.set_event_loop_policy(asyncio.WindowsProactorEventLoopPolicy())
10 |
11 | # nest_asyncio 적용: 이미 실행 중인 이벤트 루프 내에서 중첩 호출 허용
12 | nest_asyncio.apply()
13 |
14 | # 전역 이벤트 루프 생성 및 재사용 (한번 생성한 후 계속 사용)
15 | if "event_loop" not in st.session_state:
16 | loop = asyncio.new_event_loop()
17 | st.session_state.event_loop = loop
18 | asyncio.set_event_loop(loop)
19 |
20 | from langgraph.prebuilt import create_react_agent
21 | from langchain_anthropic import ChatAnthropic
22 | from langchain_openai import ChatOpenAI
23 | from langchain_core.messages import HumanMessage
24 | from dotenv import load_dotenv
25 | from langchain_mcp_adapters.client import MultiServerMCPClient
26 | from utils import astream_graph, random_uuid
27 | from langchain_core.messages.ai import AIMessageChunk
28 | from langchain_core.messages.tool import ToolMessage
29 | from langgraph.checkpoint.memory import MemorySaver
30 | from langchain_core.runnables import RunnableConfig
31 |
32 | # 환경 변수 로드 (.env 파일에서 API 키 등의 설정을 가져옴)
33 | load_dotenv(override=True)
34 |
35 | # config.json 파일 경로 설정
36 | CONFIG_FILE_PATH = "config.json"
37 |
38 | # JSON 설정 파일 로드 함수
39 | def load_config_from_json():
40 | """
41 | config.json 파일에서 설정을 로드합니다.
42 | 파일이 없는 경우 기본 설정으로 파일을 생성합니다.
43 |
44 | 반환값:
45 | dict: 로드된 설정
46 | """
47 | default_config = {
48 | "get_current_time": {
49 | "command": "python",
50 | "args": ["./mcp_server_time.py"],
51 | "transport": "stdio"
52 | }
53 | }
54 |
55 | try:
56 | if os.path.exists(CONFIG_FILE_PATH):
57 | with open(CONFIG_FILE_PATH, "r", encoding="utf-8") as f:
58 | return json.load(f)
59 | else:
60 | # 파일이 없는 경우 기본 설정으로 파일 생성
61 | save_config_to_json(default_config)
62 | return default_config
63 | except Exception as e:
64 | st.error(f"설정 파일 로드 중 오류 발생: {str(e)}")
65 | return default_config
66 |
67 | # JSON 설정 파일 저장 함수
68 | def save_config_to_json(config):
69 | """
70 | 설정을 config.json 파일에 저장합니다.
71 |
72 | 매개변수:
73 | config (dict): 저장할 설정
74 |
75 | 반환값:
76 | bool: 저장 성공 여부
77 | """
78 | try:
79 | with open(CONFIG_FILE_PATH, "w", encoding="utf-8") as f:
80 | json.dump(config, f, indent=2, ensure_ascii=False)
81 | return True
82 | except Exception as e:
83 | st.error(f"설정 파일 저장 중 오류 발생: {str(e)}")
84 | return False
85 |
86 | # 로그인 세션 변수 초기화
87 | if "authenticated" not in st.session_state:
88 | st.session_state.authenticated = False
89 |
90 | # 로그인 필요 여부 확인
91 | use_login = os.environ.get("USE_LOGIN", "false").lower() == "true"
92 |
93 | # 로그인 상태에 따라 페이지 설정 변경
94 | if use_login and not st.session_state.authenticated:
95 | # 로그인 페이지는 기본(narrow) 레이아웃 사용
96 | st.set_page_config(page_title="Agent with MCP Tools", page_icon="🧠")
97 | else:
98 | # 메인 앱은 wide 레이아웃 사용
99 | st.set_page_config(page_title="Agent with MCP Tools", page_icon="🧠", layout="wide")
100 |
101 | # 로그인 기능이 활성화되어 있고 아직 인증되지 않은 경우 로그인 화면 표시
102 | if use_login and not st.session_state.authenticated:
103 | st.title("🔐 로그인")
104 | st.markdown("시스템을 사용하려면 로그인이 필요합니다.")
105 |
106 | # 로그인 폼을 화면 중앙에 좁게 배치
107 | with st.form("login_form"):
108 | username = st.text_input("아이디")
109 | password = st.text_input("비밀번호", type="password")
110 | submit_button = st.form_submit_button("로그인")
111 |
112 | if submit_button:
113 | expected_username = os.environ.get("USER_ID")
114 | expected_password = os.environ.get("USER_PASSWORD")
115 |
116 | if username == expected_username and password == expected_password:
117 | st.session_state.authenticated = True
118 | st.success("✅ 로그인 성공! 잠시만 기다려주세요...")
119 | st.rerun()
120 | else:
121 | st.error("❌ 아이디 또는 비밀번호가 올바르지 않습니다.")
122 |
123 | # 로그인 화면에서는 메인 앱을 표시하지 않음
124 | st.stop()
125 |
126 | # 사이드바 최상단에 저자 정보 추가 (다른 사이드바 요소보다 먼저 배치)
127 | st.sidebar.markdown("### ✍️ Made by [테디노트](https://youtube.com/c/teddynote) 🚀")
128 | st.sidebar.markdown(
129 | "### 💻 [Project Page](https://github.com/teddynote-lab/langgraph-mcp-agents)"
130 | )
131 |
132 | st.sidebar.divider() # 구분선 추가
133 |
134 | # 기존 페이지 타이틀 및 설명
135 | st.title("💬 MCP 도구 활용 에이전트")
136 | st.markdown("✨ MCP 도구를 활용한 ReAct 에이전트에게 질문해보세요.")
137 |
138 | SYSTEM_PROMPT = """
139 | You are a smart agent with an ability to use tools.
140 | You will be given a question and you will use the tools to answer the question.
141 | Pick the most relevant tool to answer the question.
142 | If you fail to answer the question, try different tools to get context.
143 | Your answer should be very polite and professional.
144 |
145 |
146 | ----
147 |
148 |
149 | Step 1: Analyze the question
150 | - Analyze user's question and final goal.
151 | - If the user's question consists of multiple sub-questions, split it into smaller sub-questions.
152 |
153 | Step 2: Pick the most relevant tool
154 | - Pick the most relevant tool to answer the question.
155 | - If you fail to answer the question, try different tools to get context.
156 |
157 | Step 3: Answer the question
158 | - Answer the question in the same language as the question.
159 | - Your answer should be very polite and professional.
160 |
161 | Step 4: Provide the source of the answer(if applicable)
162 | - If you've used the tool, provide the source of the answer.
163 | - Valid sources are either a website(URL) or a document(PDF, etc).
164 |
165 | Guidelines:
166 | - If you've used a tool, your answer should be based on the tool's output (the tool's output matters more than your own knowledge).
167 | - If you've used a tool and the source is a valid URL, provide the source (URL) of the answer.
168 | - Skip providing the source if it is not a URL.
169 | - Answer in the same language as the question.
170 | - Answer should be concise and to the point.
171 | - Do not include anything in your output other than the answer and the source.
172 |
173 |
174 | ----
175 |
176 |
177 | (concise answer to the question)
178 |
179 | **Source**(if applicable)
180 | - (source1: valid URL)
181 | - (source2: valid URL)
182 | - ...
183 |
184 | """
185 |
186 | OUTPUT_TOKEN_INFO = {
187 | "claude-3-5-sonnet-latest": {"max_tokens": 8192},
188 | "claude-3-5-haiku-latest": {"max_tokens": 8192},
189 | "claude-3-7-sonnet-latest": {"max_tokens": 64000},
190 | "gpt-4o": {"max_tokens": 16000},
191 | "gpt-4o-mini": {"max_tokens": 16000},
192 | }
193 |
194 | # 세션 상태 초기화
195 | if "session_initialized" not in st.session_state:
196 | st.session_state.session_initialized = False # 세션 초기화 상태 플래그
197 | st.session_state.agent = None # ReAct 에이전트 객체 저장 공간
198 | st.session_state.history = [] # 대화 기록 저장 리스트
199 | st.session_state.mcp_client = None # MCP 클라이언트 객체 저장 공간
200 | st.session_state.timeout_seconds = 120 # 응답 생성 제한 시간(초), 기본값 120초
201 | st.session_state.selected_model = "claude-3-7-sonnet-latest" # 기본 모델 선택
202 | st.session_state.recursion_limit = 100 # 재귀 호출 제한, 기본값 100
203 |
204 | if "thread_id" not in st.session_state:
205 | st.session_state.thread_id = random_uuid()
206 |
207 |
208 | # --- 함수 정의 부분 ---
209 |
210 |
211 | async def cleanup_mcp_client():
212 | """
213 | 기존 MCP 클라이언트를 안전하게 종료합니다.
214 |
215 | 기존 클라이언트가 있는 경우 정상적으로 리소스를 해제합니다.
216 | """
217 | if "mcp_client" in st.session_state and st.session_state.mcp_client is not None:
218 | try:
219 |
220 | await st.session_state.mcp_client.__aexit__(None, None, None)
221 | st.session_state.mcp_client = None
222 |         except Exception as e:  # 정리 중 오류는 치명적이지 않으므로 무시합니다
223 |             import traceback  # 아래 디버그용 주석 라인에서 사용
224 |
225 | # st.warning(f"MCP 클라이언트 종료 중 오류: {str(e)}")
226 | # st.warning(traceback.format_exc())
227 |
228 |
229 | def print_message():
230 | """
231 | 채팅 기록을 화면에 출력합니다.
232 |
233 | 사용자와 어시스턴트의 메시지를 구분하여 화면에 표시하고,
234 | 도구 호출 정보는 어시스턴트 메시지 컨테이너 내에 표시합니다.
235 | """
236 | i = 0
237 | while i < len(st.session_state.history):
238 | message = st.session_state.history[i]
239 |
240 | if message["role"] == "user":
241 |             st.chat_message("user", avatar="🧑‍💻").markdown(message["content"])
242 | i += 1
243 | elif message["role"] == "assistant":
244 | # 어시스턴트 메시지 컨테이너 생성
245 | with st.chat_message("assistant", avatar="🤖"):
246 | # 어시스턴트 메시지 내용 표시
247 | st.markdown(message["content"])
248 |
249 | # 다음 메시지가 도구 호출 정보인지 확인
250 | if (
251 | i + 1 < len(st.session_state.history)
252 | and st.session_state.history[i + 1]["role"] == "assistant_tool"
253 | ):
254 | # 도구 호출 정보를 동일한 컨테이너 내에 expander로 표시
255 | with st.expander("🔧 도구 호출 정보", expanded=False):
256 | st.markdown(st.session_state.history[i + 1]["content"])
257 | i += 2 # 두 메시지를 함께 처리했으므로 2 증가
258 | else:
259 | i += 1 # 일반 메시지만 처리했으므로 1 증가
260 | else:
261 | # assistant_tool 메시지는 위에서 처리되므로 건너뜀
262 | i += 1
263 |
264 |
265 | def get_streaming_callback(text_placeholder, tool_placeholder):
266 | """
267 | 스트리밍 콜백 함수를 생성합니다.
268 |
269 | 이 함수는 LLM에서 생성되는 응답을 실시간으로 화면에 표시하기 위한 콜백 함수를 생성합니다.
270 | 텍스트 응답과 도구 호출 정보를 각각 다른 영역에 표시합니다.
271 |
272 | 매개변수:
273 | text_placeholder: 텍스트 응답을 표시할 Streamlit 컴포넌트
274 | tool_placeholder: 도구 호출 정보를 표시할 Streamlit 컴포넌트
275 |
276 | 반환값:
277 | callback_func: 스트리밍 콜백 함수
278 | accumulated_text: 누적된 텍스트 응답을 저장하는 리스트
279 | accumulated_tool: 누적된 도구 호출 정보를 저장하는 리스트
280 | """
281 | accumulated_text = []
282 | accumulated_tool = []
283 |
284 | def callback_func(message: dict):
285 | nonlocal accumulated_text, accumulated_tool
286 | message_content = message.get("content", None)
287 |
288 | if isinstance(message_content, AIMessageChunk):
289 | content = message_content.content
290 | # 콘텐츠가 리스트 형태인 경우 (Claude 모델 등에서 주로 발생)
291 | if isinstance(content, list) and len(content) > 0:
292 | message_chunk = content[0]
293 | # 텍스트 타입인 경우 처리
294 | if message_chunk["type"] == "text":
295 | accumulated_text.append(message_chunk["text"])
296 | text_placeholder.markdown("".join(accumulated_text))
297 | # 도구 사용 타입인 경우 처리
298 | elif message_chunk["type"] == "tool_use":
299 | if "partial_json" in message_chunk:
300 | accumulated_tool.append(message_chunk["partial_json"])
301 | else:
302 | tool_call_chunks = message_content.tool_call_chunks
303 | tool_call_chunk = tool_call_chunks[0]
304 | accumulated_tool.append(
305 | "\n```json\n" + str(tool_call_chunk) + "\n```\n"
306 | )
307 | with tool_placeholder.expander("🔧 도구 호출 정보", expanded=True):
308 | st.markdown("".join(accumulated_tool))
309 | # tool_calls 속성이 있는 경우 처리 (OpenAI 모델 등에서 주로 발생)
310 | elif (
311 | hasattr(message_content, "tool_calls")
312 | and message_content.tool_calls
313 | and len(message_content.tool_calls[0]["name"]) > 0
314 | ):
315 | tool_call_info = message_content.tool_calls[0]
316 | accumulated_tool.append("\n```json\n" + str(tool_call_info) + "\n```\n")
317 | with tool_placeholder.expander("🔧 도구 호출 정보", expanded=True):
318 | st.markdown("".join(accumulated_tool))
319 | # 단순 문자열인 경우 처리
320 | elif isinstance(content, str):
321 | accumulated_text.append(content)
322 | text_placeholder.markdown("".join(accumulated_text))
323 | # 유효하지 않은 도구 호출 정보가 있는 경우 처리
324 | elif (
325 | hasattr(message_content, "invalid_tool_calls")
326 | and message_content.invalid_tool_calls
327 | ):
328 | tool_call_info = message_content.invalid_tool_calls[0]
329 | accumulated_tool.append("\n```json\n" + str(tool_call_info) + "\n```\n")
330 | with tool_placeholder.expander(
331 | "🔧 도구 호출 정보 (유효하지 않음)", expanded=True
332 | ):
333 | st.markdown("".join(accumulated_tool))
334 | # tool_call_chunks 속성이 있는 경우 처리
335 | elif (
336 | hasattr(message_content, "tool_call_chunks")
337 | and message_content.tool_call_chunks
338 | ):
339 | tool_call_chunk = message_content.tool_call_chunks[0]
340 | accumulated_tool.append(
341 | "\n```json\n" + str(tool_call_chunk) + "\n```\n"
342 | )
343 | with tool_placeholder.expander("🔧 도구 호출 정보", expanded=True):
344 | st.markdown("".join(accumulated_tool))
345 | # additional_kwargs에 tool_calls가 있는 경우 처리 (다양한 모델 호환성 지원)
346 | elif (
347 | hasattr(message_content, "additional_kwargs")
348 | and "tool_calls" in message_content.additional_kwargs
349 | ):
350 | tool_call_info = message_content.additional_kwargs["tool_calls"][0]
351 | accumulated_tool.append("\n```json\n" + str(tool_call_info) + "\n```\n")
352 | with tool_placeholder.expander("🔧 도구 호출 정보", expanded=True):
353 | st.markdown("".join(accumulated_tool))
354 | # 도구 메시지인 경우 처리 (도구의 응답)
355 | elif isinstance(message_content, ToolMessage):
356 | accumulated_tool.append(
357 | "\n```json\n" + str(message_content.content) + "\n```\n"
358 | )
359 | with tool_placeholder.expander("🔧 도구 호출 정보", expanded=True):
360 | st.markdown("".join(accumulated_tool))
361 | return None
362 |
363 | return callback_func, accumulated_text, accumulated_tool
364 |
365 |
366 | async def process_query(query, text_placeholder, tool_placeholder, timeout_seconds=60):
367 | """
368 | 사용자 질문을 처리하고 응답을 생성합니다.
369 |
370 | 이 함수는 사용자의 질문을 에이전트에 전달하고, 응답을 실시간으로 스트리밍하여 표시합니다.
371 | 지정된 시간 내에 응답이 완료되지 않으면 타임아웃 오류를 반환합니다.
372 |
373 | 매개변수:
374 | query: 사용자가 입력한 질문 텍스트
375 | text_placeholder: 텍스트 응답을 표시할 Streamlit 컴포넌트
376 | tool_placeholder: 도구 호출 정보를 표시할 Streamlit 컴포넌트
377 | timeout_seconds: 응답 생성 제한 시간(초)
378 |
379 | 반환값:
380 | response: 에이전트의 응답 객체
381 | final_text: 최종 텍스트 응답
382 | final_tool: 최종 도구 호출 정보
383 | """
384 | try:
385 | if st.session_state.agent:
386 | streaming_callback, accumulated_text_obj, accumulated_tool_obj = (
387 | get_streaming_callback(text_placeholder, tool_placeholder)
388 | )
389 | try:
390 | response = await asyncio.wait_for(
391 | astream_graph(
392 | st.session_state.agent,
393 | {"messages": [HumanMessage(content=query)]},
394 | callback=streaming_callback,
395 | config=RunnableConfig(
396 | recursion_limit=st.session_state.recursion_limit,
397 | thread_id=st.session_state.thread_id,
398 | ),
399 | ),
400 | timeout=timeout_seconds,
401 | )
402 | except asyncio.TimeoutError:
403 | error_msg = f"⏱️ 요청 시간이 {timeout_seconds}초를 초과했습니다. 나중에 다시 시도해 주세요."
404 | return {"error": error_msg}, error_msg, ""
405 |
406 | final_text = "".join(accumulated_text_obj)
407 | final_tool = "".join(accumulated_tool_obj)
408 | return response, final_text, final_tool
409 | else:
410 | return (
411 | {"error": "🚫 에이전트가 초기화되지 않았습니다."},
412 | "🚫 에이전트가 초기화되지 않았습니다.",
413 | "",
414 | )
415 | except Exception as e:
416 | import traceback
417 |
418 | error_msg = f"❌ 쿼리 처리 중 오류 발생: {str(e)}\n{traceback.format_exc()}"
419 | return {"error": error_msg}, error_msg, ""
420 |
421 |
422 | async def initialize_session(mcp_config=None):
423 | """
424 | MCP 세션과 에이전트를 초기화합니다.
425 |
426 | 매개변수:
427 | mcp_config: MCP 도구 설정 정보(JSON). None인 경우 기본 설정 사용
428 |
429 | 반환값:
430 | bool: 초기화 성공 여부
431 | """
432 | with st.spinner("🔄 MCP 서버에 연결 중..."):
433 | # 먼저 기존 클라이언트를 안전하게 정리
434 | await cleanup_mcp_client()
435 |
436 | if mcp_config is None:
437 | # config.json 파일에서 설정 로드
438 | mcp_config = load_config_from_json()
439 | client = MultiServerMCPClient(mcp_config)
440 | await client.__aenter__()
441 | tools = client.get_tools()
442 | st.session_state.tool_count = len(tools)
443 | st.session_state.mcp_client = client
444 |
445 | # 선택된 모델에 따라 적절한 모델 초기화
446 | selected_model = st.session_state.selected_model
447 |
448 | if selected_model in [
449 | "claude-3-7-sonnet-latest",
450 | "claude-3-5-sonnet-latest",
451 | "claude-3-5-haiku-latest",
452 | ]:
453 | model = ChatAnthropic(
454 | model=selected_model,
455 | temperature=0.1,
456 | max_tokens=OUTPUT_TOKEN_INFO[selected_model]["max_tokens"],
457 | )
458 | else: # OpenAI 모델 사용
459 | model = ChatOpenAI(
460 | model=selected_model,
461 | temperature=0.1,
462 | max_tokens=OUTPUT_TOKEN_INFO[selected_model]["max_tokens"],
463 | )
464 | agent = create_react_agent(
465 | model,
466 | tools,
467 | checkpointer=MemorySaver(),
468 | prompt=SYSTEM_PROMPT,
469 | )
470 | st.session_state.agent = agent
471 | st.session_state.session_initialized = True
472 | return True
473 |
474 |
475 | # --- 사이드바: 시스템 설정 섹션 ---
476 | with st.sidebar:
477 | st.subheader("⚙️ 시스템 설정")
478 |
479 | # 모델 선택 기능
480 | # 사용 가능한 모델 목록 생성
481 | available_models = []
482 |
483 | # Anthropic API 키 확인
484 | has_anthropic_key = os.environ.get("ANTHROPIC_API_KEY") is not None
485 | if has_anthropic_key:
486 | available_models.extend(
487 | [
488 | "claude-3-7-sonnet-latest",
489 | "claude-3-5-sonnet-latest",
490 | "claude-3-5-haiku-latest",
491 | ]
492 | )
493 |
494 | # OpenAI API 키 확인
495 | has_openai_key = os.environ.get("OPENAI_API_KEY") is not None
496 | if has_openai_key:
497 | available_models.extend(["gpt-4o", "gpt-4o-mini"])
498 |
499 | # 사용 가능한 모델이 없는 경우 메시지 표시
500 | if not available_models:
501 | st.warning(
502 | "⚠️ API 키가 설정되지 않았습니다. .env 파일에 ANTHROPIC_API_KEY 또는 OPENAI_API_KEY를 추가해주세요."
503 | )
504 | # 기본값으로 Claude 모델 추가 (키가 없어도 UI를 보여주기 위함)
505 | available_models = ["claude-3-7-sonnet-latest"]
506 |
507 | # 모델 선택 드롭다운
508 | previous_model = st.session_state.selected_model
509 | st.session_state.selected_model = st.selectbox(
510 | "🤖 사용할 모델 선택",
511 | options=available_models,
512 | index=(
513 | available_models.index(st.session_state.selected_model)
514 | if st.session_state.selected_model in available_models
515 | else 0
516 | ),
517 | help="Anthropic 모델은 ANTHROPIC_API_KEY가, OpenAI 모델은 OPENAI_API_KEY가 환경변수로 설정되어야 합니다.",
518 | )
519 |
520 | # 모델이 변경되었을 때 세션 초기화 필요 알림
521 | if (
522 | previous_model != st.session_state.selected_model
523 | and st.session_state.session_initialized
524 | ):
525 | st.warning(
526 | "⚠️ 모델이 변경되었습니다. '설정 적용하기' 버튼을 눌러 변경사항을 적용하세요."
527 | )
528 |
529 | # 타임아웃 설정 슬라이더 추가
530 | st.session_state.timeout_seconds = st.slider(
531 | "⏱️ 응답 생성 제한 시간(초)",
532 | min_value=60,
533 | max_value=300,
534 | value=st.session_state.timeout_seconds,
535 | step=10,
536 | help="에이전트가 응답을 생성하는 최대 시간을 설정합니다. 복잡한 작업은 더 긴 시간이 필요할 수 있습니다.",
537 | )
538 |
539 | st.session_state.recursion_limit = st.slider(
540 | "⏱️ 재귀 호출 제한(횟수)",
541 | min_value=10,
542 | max_value=200,
543 | value=st.session_state.recursion_limit,
544 | step=10,
545 | help="재귀 호출 제한 횟수를 설정합니다. 너무 높은 값을 설정하면 메모리 부족 문제가 발생할 수 있습니다.",
546 | )
547 |
548 | st.divider() # 구분선 추가
549 |
550 | # 도구 설정 섹션 추가
551 | st.subheader("🔧 도구 설정")
552 |
553 | # expander 상태를 세션 상태로 관리
554 | if "mcp_tools_expander" not in st.session_state:
555 | st.session_state.mcp_tools_expander = False
556 |
557 | # MCP 도구 추가 인터페이스
558 | with st.expander("🧰 MCP 도구 추가", expanded=st.session_state.mcp_tools_expander):
559 | # config.json 파일에서 설정 로드하여 표시
560 | loaded_config = load_config_from_json()
561 | default_config_text = json.dumps(loaded_config, indent=2, ensure_ascii=False)
562 |
563 | # pending config가 없으면 기존 mcp_config_text 기반으로 생성
564 | if "pending_mcp_config" not in st.session_state:
565 | try:
566 | st.session_state.pending_mcp_config = loaded_config
567 | except Exception as e:
568 | st.error(f"초기 pending config 설정 실패: {e}")
569 |
570 | # 개별 도구 추가를 위한 UI
571 | st.subheader("도구 추가")
572 | st.markdown(
573 | """
574 | [어떻게 설정 하나요?](https://teddylee777.notion.site/MCP-1d324f35d12980c8b018e12afdf545a1?pvs=4)
575 |
576 | ⚠️ **중요**: JSON을 반드시 중괄호(`{}`)로 감싸야 합니다."""
577 | )
578 |
579 | # 보다 명확한 예시 제공
580 | example_json = {
581 | "github": {
582 | "command": "npx",
583 | "args": [
584 | "-y",
585 | "@smithery/cli@latest",
586 | "run",
587 | "@smithery-ai/github",
588 | "--config",
589 | '{"githubPersonalAccessToken":"your_token_here"}',
590 | ],
591 | "transport": "stdio",
592 | }
593 | }
594 |
595 | default_text = json.dumps(example_json, indent=2, ensure_ascii=False)
596 |
597 | new_tool_json = st.text_area(
598 | "도구 JSON",
599 | default_text,
600 | height=250,
601 | )
602 |
603 | # 추가하기 버튼
604 | if st.button(
605 | "도구 추가",
606 | type="primary",
607 | key="add_tool_button",
608 | use_container_width=True,
609 | ):
610 | try:
611 | # 입력값 검증
612 | if not new_tool_json.strip().startswith(
613 | "{"
614 | ) or not new_tool_json.strip().endswith("}"):
615 | st.error("JSON은 중괄호({})로 시작하고 끝나야 합니다.")
616 | st.markdown('올바른 형식: `{ "도구이름": { ... } }`')
617 | else:
618 | # JSON 파싱
619 | parsed_tool = json.loads(new_tool_json)
620 |
621 | # mcpServers 형식인지 확인하고 처리
622 | if "mcpServers" in parsed_tool:
623 | # mcpServers 안의 내용을 최상위로 이동
624 | parsed_tool = parsed_tool["mcpServers"]
625 | st.info(
626 | "'mcpServers' 형식이 감지되었습니다. 자동으로 변환합니다."
627 | )
628 |
629 | # 입력된 도구 수 확인
630 | if len(parsed_tool) == 0:
631 | st.error("최소 하나 이상의 도구를 입력해주세요.")
632 | else:
633 | # 모든 도구에 대해 처리
634 | success_tools = []
635 | for tool_name, tool_config in parsed_tool.items():
636 | # URL 필드 확인 및 transport 설정
637 | if "url" in tool_config:
638 | # URL이 있는 경우 transport를 "sse"로 설정
639 | tool_config["transport"] = "sse"
640 | st.info(
641 | f"'{tool_name}' 도구에 URL이 감지되어 transport를 'sse'로 설정했습니다."
642 | )
643 | elif "transport" not in tool_config:
644 | # URL이 없고 transport도 없는 경우 기본값 "stdio" 설정
645 | tool_config["transport"] = "stdio"
646 |
647 | # 필수 필드 확인
648 | if (
649 | "command" not in tool_config
650 | and "url" not in tool_config
651 | ):
652 | st.error(
653 | f"'{tool_name}' 도구 설정에는 'command' 또는 'url' 필드가 필요합니다."
654 | )
655 | elif "command" in tool_config and "args" not in tool_config:
656 | st.error(
657 | f"'{tool_name}' 도구 설정에는 'args' 필드가 필요합니다."
658 | )
659 | elif "command" in tool_config and not isinstance(
660 | tool_config["args"], list
661 | ):
662 | st.error(
663 | f"'{tool_name}' 도구의 'args' 필드는 반드시 배열([]) 형식이어야 합니다."
664 | )
665 | else:
666 | # pending_mcp_config에 도구 추가
667 | st.session_state.pending_mcp_config[tool_name] = (
668 | tool_config
669 | )
670 | success_tools.append(tool_name)
671 |
672 | # 성공 메시지
673 | if success_tools:
674 | if len(success_tools) == 1:
675 | st.success(
676 | f"{success_tools[0]} 도구가 추가되었습니다. 적용하려면 '설정 적용하기' 버튼을 눌러주세요."
677 | )
678 | else:
679 | tool_names = ", ".join(success_tools)
680 | st.success(
681 | f"총 {len(success_tools)}개 도구({tool_names})가 추가되었습니다. 적용하려면 '설정 적용하기' 버튼을 눌러주세요."
682 | )
683 | # 추가되면 expander를 접어줌
684 | st.session_state.mcp_tools_expander = False
685 | st.rerun()
686 | except json.JSONDecodeError as e:
687 | st.error(f"JSON 파싱 에러: {e}")
688 | st.markdown(
689 | f"""
690 | **수정 방법**:
691 | 1. JSON 형식이 올바른지 확인하세요.
692 | 2. 모든 키는 큰따옴표(")로 감싸야 합니다.
693 | 3. 문자열 값도 큰따옴표(")로 감싸야 합니다.
694 | 4. 문자열 내에서 큰따옴표를 사용할 경우 이스케이프(\\")해야 합니다.
695 | """
696 | )
697 | except Exception as e:
698 | st.error(f"오류 발생: {e}")
699 |
700 | # 등록된 도구 목록 표시 및 삭제 버튼 추가
701 | with st.expander("📋 등록된 도구 목록", expanded=True):
702 | try:
703 | pending_config = st.session_state.pending_mcp_config
704 | except Exception as e:
705 | st.error("유효한 MCP 도구 설정이 아닙니다.")
706 | else:
707 | # pending config의 키(도구 이름) 목록을 순회하며 표시
708 | for tool_name in list(pending_config.keys()):
709 | col1, col2 = st.columns([8, 2])
710 | col1.markdown(f"- **{tool_name}**")
711 | if col2.button("삭제", key=f"delete_{tool_name}"):
712 | # pending config에서 해당 도구 삭제 (즉시 적용되지는 않음)
713 | del st.session_state.pending_mcp_config[tool_name]
714 | st.success(
715 | f"{tool_name} 도구가 삭제되었습니다. 적용하려면 '설정 적용하기' 버튼을 눌러주세요."
716 | )
717 |
718 | st.divider() # 구분선 추가
719 |
720 | # --- 사이드바: 시스템 정보 및 작업 버튼 섹션 ---
721 | with st.sidebar:
722 | st.subheader("📊 시스템 정보")
723 | st.write(f"🛠️ MCP 도구 수: {st.session_state.get('tool_count', '초기화 중...')}")
724 | selected_model_name = st.session_state.selected_model
725 | st.write(f"🧠 현재 모델: {selected_model_name}")
726 |
727 | # 설정 적용하기 버튼을 여기로 이동
728 | if st.button(
729 | "설정 적용하기",
730 | key="apply_button",
731 | type="primary",
732 | use_container_width=True,
733 | ):
734 | # 적용 중 메시지 표시
735 | apply_status = st.empty()
736 | with apply_status.container():
737 | st.warning("🔄 변경사항을 적용하고 있습니다. 잠시만 기다려주세요...")
738 | progress_bar = st.progress(0)
739 |
740 | # 설정 저장
741 | st.session_state.mcp_config_text = json.dumps(
742 | st.session_state.pending_mcp_config, indent=2, ensure_ascii=False
743 | )
744 |
745 | # config.json 파일에 설정 저장
746 | save_result = save_config_to_json(st.session_state.pending_mcp_config)
747 | if not save_result:
748 | st.error("❌ 설정 파일 저장에 실패했습니다.")
749 |
750 | progress_bar.progress(15)
751 |
752 | # 세션 초기화 준비
753 | st.session_state.session_initialized = False
754 | st.session_state.agent = None
755 |
756 | # 진행 상태 업데이트
757 | progress_bar.progress(30)
758 |
759 | # 초기화 실행
760 | success = st.session_state.event_loop.run_until_complete(
761 | initialize_session(st.session_state.pending_mcp_config)
762 | )
763 |
764 | # 진행 상태 업데이트
765 | progress_bar.progress(100)
766 |
767 | if success:
768 | st.success("✅ 새로운 설정이 적용되었습니다.")
769 | # 도구 추가 expander 접기
770 | if "mcp_tools_expander" in st.session_state:
771 | st.session_state.mcp_tools_expander = False
772 | else:
773 | st.error("❌ 설정 적용에 실패하였습니다.")
774 |
775 | # 페이지 새로고침
776 | st.rerun()
777 |
778 | st.divider() # 구분선 추가
779 |
780 | # 작업 버튼 섹션
781 | st.subheader("🔄 작업")
782 |
783 | # 대화 초기화 버튼
784 | if st.button("대화 초기화", use_container_width=True, type="primary"):
785 | # thread_id 초기화
786 | st.session_state.thread_id = random_uuid()
787 |
788 | # 대화 히스토리 초기화
789 | st.session_state.history = []
790 |
791 | # 알림 메시지
792 | st.success("✅ 대화가 초기화되었습니다.")
793 |
794 | # 페이지 새로고침
795 | st.rerun()
796 |
797 | # 로그인 기능이 활성화된 경우에만 로그아웃 버튼 표시
798 | if use_login and st.session_state.authenticated:
799 | st.divider() # 구분선 추가
800 | if st.button("로그아웃", use_container_width=True, type="secondary"):
801 | st.session_state.authenticated = False
802 | st.success("✅ 로그아웃 되었습니다.")
803 | st.rerun()
804 |
805 | # --- 기본 세션 초기화 (초기화되지 않은 경우) ---
806 | if not st.session_state.session_initialized:
807 | st.info(
808 | "MCP 서버와 에이전트가 초기화되지 않았습니다. 왼쪽 사이드바의 '설정 적용하기' 버튼을 클릭하여 초기화해주세요."
809 | )
810 |
811 |
812 | # --- 대화 기록 출력 ---
813 | print_message()
814 |
815 | # --- 사용자 입력 및 처리 ---
816 | user_query = st.chat_input("💬 질문을 입력하세요")
817 | if user_query:
818 | if st.session_state.session_initialized:
819 |         st.chat_message("user", avatar="🧑‍💻").markdown(user_query)
820 | with st.chat_message("assistant", avatar="🤖"):
821 | tool_placeholder = st.empty()
822 | text_placeholder = st.empty()
823 | resp, final_text, final_tool = (
824 | st.session_state.event_loop.run_until_complete(
825 | process_query(
826 | user_query,
827 | text_placeholder,
828 | tool_placeholder,
829 | st.session_state.timeout_seconds,
830 | )
831 | )
832 | )
833 | if "error" in resp:
834 | st.error(resp["error"])
835 | else:
836 | st.session_state.history.append({"role": "user", "content": user_query})
837 | st.session_state.history.append(
838 | {"role": "assistant", "content": final_text}
839 | )
840 | if final_tool.strip():
841 | st.session_state.history.append(
842 | {"role": "assistant_tool", "content": final_tool}
843 | )
844 | st.rerun()
845 | else:
846 | st.warning(
847 | "⚠️ MCP 서버와 에이전트가 초기화되지 않았습니다. 왼쪽 사이드바의 '설정 적용하기' 버튼을 클릭하여 초기화해주세요."
848 | )
849 |
--------------------------------------------------------------------------------
/assets/add-tools.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/teddynote-lab/langgraph-mcp-agents/d4694e9db841343a2bd4dd3a30175a02b63d9144/assets/add-tools.png
--------------------------------------------------------------------------------
/assets/apply-tool-configuration.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/teddynote-lab/langgraph-mcp-agents/d4694e9db841343a2bd4dd3a30175a02b63d9144/assets/apply-tool-configuration.png
--------------------------------------------------------------------------------
/assets/architecture.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/teddynote-lab/langgraph-mcp-agents/d4694e9db841343a2bd4dd3a30175a02b63d9144/assets/architecture.png
--------------------------------------------------------------------------------
/assets/check-status.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/teddynote-lab/langgraph-mcp-agents/d4694e9db841343a2bd4dd3a30175a02b63d9144/assets/check-status.png
--------------------------------------------------------------------------------
/assets/project-demo.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/teddynote-lab/langgraph-mcp-agents/d4694e9db841343a2bd4dd3a30175a02b63d9144/assets/project-demo.png
--------------------------------------------------------------------------------
/assets/smithery-copy-json.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/teddynote-lab/langgraph-mcp-agents/d4694e9db841343a2bd4dd3a30175a02b63d9144/assets/smithery-copy-json.png
--------------------------------------------------------------------------------
/assets/smithery-json.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/teddynote-lab/langgraph-mcp-agents/d4694e9db841343a2bd4dd3a30175a02b63d9144/assets/smithery-json.png
--------------------------------------------------------------------------------
/config.json:
--------------------------------------------------------------------------------
1 | {
2 | "get_current_time": {
3 | "command": "python",
4 | "args": [
5 | "./mcp_server_time.py"
6 | ],
7 | "transport": "stdio"
8 | }
9 | }
--------------------------------------------------------------------------------
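Note: each top-level key in `config.json` names an MCP tool, and its value tells the client how to launch or reach that server (`command`/`args` for stdio servers, `url` for SSE servers). As a minimal sketch, a config that also registers an SSE server could look like the following — the `weather` entry and its URL are hypothetical, assuming something like `mcp_server_remote.py` is listening on port 8005:

```json
{
  "get_current_time": {
    "command": "python",
    "args": ["./mcp_server_time.py"],
    "transport": "stdio"
  },
  "weather": {
    "url": "http://localhost:8005/sse",
    "transport": "sse"
  }
}
```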
/dockers/.env.example:
--------------------------------------------------------------------------------
1 | ANTHROPIC_API_KEY=sk-ant-api03...
2 | OPENAI_API_KEY=sk-proj-o0gulL2J2a...
3 | LANGSMITH_API_KEY=lsv2_sk_ed22...
4 | LANGSMITH_TRACING=true
5 | LANGSMITH_ENDPOINT=https://api.smith.langchain.com
6 | LANGSMITH_PROJECT=LangGraph-MCP-Agents
7 |
8 | USE_LOGIN=true
9 | USER_ID=admin
10 | USER_PASSWORD=admin1234
--------------------------------------------------------------------------------
/dockers/config.json:
--------------------------------------------------------------------------------
1 | {
2 | "get_current_time": {
3 | "command": "python",
4 | "args": [
5 | "./mcp_server_time.py"
6 | ],
7 | "transport": "stdio"
8 | }
9 | }
--------------------------------------------------------------------------------
/dockers/docker-compose-KOR-mac.yaml:
--------------------------------------------------------------------------------
1 | services:
2 | app:
3 | build:
4 | context: .
5 | dockerfile: Dockerfile
6 | args:
7 | BUILDPLATFORM: ${BUILDPLATFORM:-linux/arm64}
8 | TARGETPLATFORM: "linux/arm64"
9 | image: teddylee777/langgraph-mcp-agents:KOR-0.2.1
10 | platform: "linux/arm64"
11 | ports:
12 | - "8585:8585"
13 | env_file:
14 | - ./.env
15 | environment:
16 | - PYTHONUNBUFFERED=1
17 |       # Mac-specific optimizations
18 |       - NODE_OPTIONS=${NODE_OPTIONS:---max_old_space_size=2048}
19 |       # Use the system malloc allocator on macOS
20 |       - PYTHONMALLOC=malloc
21 |       - USE_LOGIN=${USE_LOGIN:-false}
22 |       - USER_ID=${USER_ID:-}
23 |       - USER_PASSWORD=${USER_PASSWORD:-}
24 |       - ANTHROPIC_API_KEY=${ANTHROPIC_API_KEY}
25 |       - OPENAI_API_KEY=${OPENAI_API_KEY}
26 |       # (NODE_OPTIONS is set above; it can still be overridden via the environment)
27 | volumes:
28 | - ./data:/app/data:cached
29 | - ./config.json:/app/config.json
30 | restart: unless-stopped
31 | healthcheck:
32 | test: ["CMD", "curl", "--fail", "http://localhost:8585/_stcore/health"]
33 | interval: 30s
34 | timeout: 10s
35 | retries: 3
36 | start_period: 40s
37 |
--------------------------------------------------------------------------------
/dockers/docker-compose-KOR.yaml:
--------------------------------------------------------------------------------
1 | services:
2 | app:
3 | build:
4 | context: .
5 | dockerfile: Dockerfile
6 | args:
7 | BUILDPLATFORM: ${BUILDPLATFORM:-linux/amd64}
8 | TARGETPLATFORM: ${TARGETPLATFORM:-linux/amd64}
9 | image: teddylee777/langgraph-mcp-agents:KOR-0.2.1
10 | platform: ${TARGETPLATFORM:-linux/amd64}
11 | ports:
12 | - "8585:8585"
13 | volumes:
14 | - ./.env:/app/.env:ro
15 | - ./data:/app/data:rw
16 | - ./config.json:/app/config.json
17 | env_file:
18 | - ./.env
19 | environment:
20 | - PYTHONUNBUFFERED=1
21 | - USE_LOGIN=${USE_LOGIN:-false}
22 | - USER_ID=${USER_ID:-}
23 | - USER_PASSWORD=${USER_PASSWORD:-}
24 | - ANTHROPIC_API_KEY=${ANTHROPIC_API_KEY}
25 | - OPENAI_API_KEY=${OPENAI_API_KEY}
26 | - NODE_OPTIONS=${NODE_OPTIONS:-}
27 | restart: unless-stopped
28 | healthcheck:
29 | test: ["CMD", "curl", "--fail", "http://localhost:8585/_stcore/health"]
30 | interval: 30s
31 | timeout: 10s
32 | retries: 3
33 | start_period: 40s
34 |
--------------------------------------------------------------------------------
/dockers/docker-compose-mac.yaml:
--------------------------------------------------------------------------------
1 | services:
2 | app:
3 | build:
4 | context: .
5 | dockerfile: Dockerfile
6 | args:
7 | BUILDPLATFORM: ${BUILDPLATFORM:-linux/arm64}
8 | TARGETPLATFORM: "linux/arm64"
9 | image: teddylee777/langgraph-mcp-agents:0.2.1
10 | platform: "linux/arm64"
11 | ports:
12 | - "8585:8585"
13 | env_file:
14 | - ./.env
15 | environment:
16 | - PYTHONUNBUFFERED=1
17 |       # Mac-specific optimizations
18 |       - NODE_OPTIONS=${NODE_OPTIONS:---max_old_space_size=2048}
19 |       # Use the system malloc allocator on macOS
20 |       - PYTHONMALLOC=malloc
21 |       - USE_LOGIN=${USE_LOGIN:-false}
22 |       - USER_ID=${USER_ID:-}
23 |       - USER_PASSWORD=${USER_PASSWORD:-}
24 |       - ANTHROPIC_API_KEY=${ANTHROPIC_API_KEY}
25 |       - OPENAI_API_KEY=${OPENAI_API_KEY}
26 |       # (NODE_OPTIONS is set above; it can still be overridden via the environment)
27 | volumes:
28 | - ./data:/app/data:cached
29 | - ./config.json:/app/config.json
30 | restart: unless-stopped
31 | healthcheck:
32 | test: ["CMD", "curl", "--fail", "http://localhost:8585/_stcore/health"]
33 | interval: 30s
34 | timeout: 10s
35 | retries: 3
36 | start_period: 40s
37 |
--------------------------------------------------------------------------------
/dockers/docker-compose.yaml:
--------------------------------------------------------------------------------
1 | services:
2 | app:
3 | build:
4 | context: .
5 | dockerfile: Dockerfile
6 | args:
7 | BUILDPLATFORM: ${BUILDPLATFORM:-linux/amd64}
8 | TARGETPLATFORM: ${TARGETPLATFORM:-linux/amd64}
9 | image: teddylee777/langgraph-mcp-agents:0.2.1
10 | platform: ${TARGETPLATFORM:-linux/amd64}
11 | ports:
12 | - "8585:8585"
13 | volumes:
14 | # Optionally, you can remove this volume if you don’t need the file at runtime
15 | - ./.env:/app/.env:ro
16 | - ./data:/app/data:rw
17 | - ./config.json:/app/config.json
18 | env_file:
19 | - ./.env
20 | environment:
21 | - PYTHONUNBUFFERED=1
22 | - USE_LOGIN=${USE_LOGIN:-false}
23 | - USER_ID=${USER_ID:-}
24 | - USER_PASSWORD=${USER_PASSWORD:-}
25 | - ANTHROPIC_API_KEY=${ANTHROPIC_API_KEY}
26 | - OPENAI_API_KEY=${OPENAI_API_KEY}
27 | - NODE_OPTIONS=${NODE_OPTIONS:-}
28 | restart: unless-stopped
29 | healthcheck:
30 | test: ["CMD", "curl", "--fail", "http://localhost:8585/_stcore/health"]
31 | interval: 30s
32 | timeout: 10s
33 | retries: 3
34 | start_period: 40s
35 |
--------------------------------------------------------------------------------
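Note: all four compose files expose the Streamlit app on port 8585 and read secrets from `dockers/.env`. A minimal sketch of bringing the stack up with standard Docker Compose commands (pick the compose file matching your platform and language):

```bash
cd dockers
cp .env.example .env   # then fill in your API keys
docker compose -f docker-compose.yaml up -d
docker compose -f docker-compose.yaml logs -f app   # app serves http://localhost:8585
```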
/example_config.json:
--------------------------------------------------------------------------------
1 | {
2 | "get_current_time": {
3 | "command": "python",
4 | "args": ["./mcp_server_time.py"],
5 | "transport": "stdio"
6 | }
7 | }
--------------------------------------------------------------------------------
/mcp_server_local.py:
--------------------------------------------------------------------------------
1 | from mcp.server.fastmcp import FastMCP
2 |
3 | # Initialize FastMCP server with configuration
4 | mcp = FastMCP(
5 | "Weather", # Name of the MCP server
6 | instructions="You are a weather assistant that can answer questions about the weather in a given location.", # Instructions for the LLM on how to use this tool
7 | host="0.0.0.0", # Host address (0.0.0.0 allows connections from any IP)
8 | port=8005, # Port number for the server
9 | )
10 |
11 |
12 | @mcp.tool()
13 | async def get_weather(location: str) -> str:
14 | """
15 | Get current weather information for the specified location.
16 |
17 | This function simulates a weather service by returning a fixed response.
18 | In a production environment, this would connect to a real weather API.
19 |
20 | Args:
21 | location (str): The name of the location (city, region, etc.) to get weather for
22 |
23 | Returns:
24 | str: A string containing the weather information for the specified location
25 | """
26 | # Return a mock weather response
27 | # In a real implementation, this would call a weather API
28 | return f"It's always Sunny in {location}"
29 |
30 |
31 | if __name__ == "__main__":
32 | # Start the MCP server with stdio transport
33 | # stdio transport allows the server to communicate with clients
34 | # through standard input/output streams, making it suitable for
35 | # local development and testing
36 | mcp.run(transport="stdio")
37 |
--------------------------------------------------------------------------------
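Note: a stdio server like the one above is spawned by the client rather than run standalone. A minimal sketch of listing its tools with `MultiServerMCPClient`, assuming the langchain-mcp-adapters 0.0.x context-manager API pinned in `requirements.txt`:

```python
import asyncio

from langchain_mcp_adapters.client import MultiServerMCPClient


async def main():
    # The client launches ./mcp_server_local.py as a subprocess over stdio
    async with MultiServerMCPClient(
        {
            "weather": {
                "command": "python",
                "args": ["./mcp_server_local.py"],
                "transport": "stdio",
            }
        }
    ) as client:
        # get_tools() wraps each MCP tool as a LangChain-compatible tool
        for tool in client.get_tools():
            print(tool.name, "-", tool.description)


asyncio.run(main())
```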
/mcp_server_rag.py:
--------------------------------------------------------------------------------
1 | from langchain_text_splitters import RecursiveCharacterTextSplitter
2 | from langchain_community.document_loaders import PyMuPDFLoader
3 | from langchain_community.vectorstores import FAISS
4 | from langchain_openai import OpenAIEmbeddings
5 | from mcp.server.fastmcp import FastMCP
6 | from dotenv import load_dotenv
7 | from typing import Any
8 |
9 | # Load environment variables from .env file (contains API keys)
10 | load_dotenv(override=True)
11 |
12 |
13 | def create_retriever() -> Any:
14 | """
15 | Creates and returns a document retriever based on FAISS vector store.
16 |
17 | This function performs the following steps:
18 |     1. Loads a PDF document (place your PDF file in the data folder)
19 | 2. Splits the document into manageable chunks
20 | 3. Creates embeddings for each chunk
21 | 4. Builds a FAISS vector store from the embeddings
22 | 5. Returns a retriever interface to the vector store
23 |
24 | Returns:
25 | Any: A retriever object that can be used to query the document database
26 | """
27 | # Step 1: Load Documents
28 | # PyMuPDFLoader is used to extract text from PDF files
29 | loader = PyMuPDFLoader("data/sample.pdf")
30 | docs = loader.load()
31 |
32 | # Step 2: Split Documents
33 | # Recursive splitter divides documents into chunks with some overlap to maintain context
34 | text_splitter = RecursiveCharacterTextSplitter(chunk_size=1000, chunk_overlap=50)
35 | split_documents = text_splitter.split_documents(docs)
36 |
37 | # Step 3: Create Embeddings
38 | # OpenAI's text-embedding-3-small model is used to convert text chunks into vector embeddings
39 | embeddings = OpenAIEmbeddings(model="text-embedding-3-small")
40 |
41 | # Step 4: Create Vector Database
42 | # FAISS is an efficient similarity search library that stores vector embeddings
43 | # and allows for fast retrieval of similar vectors
44 | vectorstore = FAISS.from_documents(documents=split_documents, embedding=embeddings)
45 |
46 | # Step 5: Create Retriever
47 | # The retriever provides an interface to search the vector database
48 | # and retrieve documents relevant to a query
49 | retriever = vectorstore.as_retriever()
50 | return retriever
51 |
52 |
53 | # Initialize FastMCP server with configuration
54 | mcp = FastMCP(
55 | "Retriever",
56 | instructions="A Retriever that can retrieve information from the database.",
57 | host="0.0.0.0",
58 | port=8005,
59 | )
60 |
61 |
62 | @mcp.tool()
63 | async def retrieve(query: str) -> str:
64 | """
65 | Retrieves information from the document database based on the query.
66 |
67 | This function creates a retriever, queries it with the provided input,
68 | and returns the concatenated content of all retrieved documents.
69 |
70 | Args:
71 | query (str): The search query to find relevant information
72 |
73 | Returns:
74 | str: Concatenated text content from all retrieved documents
75 | """
76 | # Create a new retriever instance for each query
77 | # Note: In production, consider caching the retriever for better performance
78 | retriever = create_retriever()
79 |
80 | # Use the invoke() method to get relevant documents based on the query
81 | retrieved_docs = retriever.invoke(query)
82 |
83 | # Join all document contents with newlines and return as a single string
84 | return "\n".join([doc.page_content for doc in retrieved_docs])
85 |
86 |
87 | if __name__ == "__main__":
88 | # Run the MCP server with stdio transport for integration with MCP clients
89 | mcp.run(transport="stdio")
90 |
--------------------------------------------------------------------------------
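Note: as the `retrieve` docstring warns, rebuilding the retriever on every call re-loads and re-embeds the PDF each time. A minimal caching sketch (hypothetical, not part of the file above; assumes `data/sample.pdf` exists and `OPENAI_API_KEY` is set):

```python
from functools import lru_cache

from mcp_server_rag import create_retriever  # the module shown above


@lru_cache(maxsize=1)
def get_cached_retriever():
    # Build the FAISS retriever once; later calls reuse the cached instance
    return create_retriever()


if __name__ == "__main__":
    docs = get_cached_retriever().invoke("sample query")
    print(f"retrieved {len(docs)} chunks")
```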
/mcp_server_remote.py:
--------------------------------------------------------------------------------
1 | from mcp.server.fastmcp import FastMCP
2 |
3 | mcp = FastMCP(
4 | "Weather", # Name of the MCP server
5 | instructions="You are a weather assistant that can answer questions about the weather in a given location.", # Instructions for the LLM on how to use this tool
6 | host="0.0.0.0", # Host address (0.0.0.0 allows connections from any IP)
7 | port=8005, # Port number for the server
8 | )
9 |
10 |
11 | @mcp.tool()
12 | async def get_weather(location: str) -> str:
13 | """
14 | Get current weather information for the specified location.
15 |
16 | This function simulates a weather service by returning a fixed response.
17 | In a production environment, this would connect to a real weather API.
18 |
19 | Args:
20 | location (str): The name of the location (city, region, etc.) to get weather for
21 |
22 | Returns:
23 | str: A string containing the weather information for the specified location
24 | """
25 | # Return a mock weather response
26 | # In a real implementation, this would call a weather API
27 | return f"It's always Sunny in {location}"
28 |
29 |
30 | if __name__ == "__main__":
31 | # Print a message indicating the server is starting
32 | print("mcp remote server is running...")
33 |
34 | # Start the MCP server with SSE transport
35 | # Server-Sent Events (SSE) transport allows the server to communicate with clients
36 | # over HTTP, making it suitable for remote/distributed deployments
37 | mcp.run(transport="sse")
38 |
--------------------------------------------------------------------------------
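Note: unlike the stdio variant, this server must be started first (`python mcp_server_remote.py`); clients then connect over HTTP. A minimal sketch of calling its tool via SSE, again assuming the langchain-mcp-adapters 0.0.x API:

```python
import asyncio

from langchain_mcp_adapters.client import MultiServerMCPClient


async def main():
    async with MultiServerMCPClient(
        {"weather": {"url": "http://localhost:8005/sse", "transport": "sse"}}
    ) as client:
        # Look up the tool registered above and invoke it asynchronously
        weather = next(t for t in client.get_tools() if t.name == "get_weather")
        print(await weather.ainvoke({"location": "Seoul"}))


asyncio.run(main())
```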
/mcp_server_time.py:
--------------------------------------------------------------------------------
1 | from mcp.server.fastmcp import FastMCP
2 | from datetime import datetime
3 | import pytz
4 | from typing import Optional
5 |
6 | # Initialize FastMCP server with configuration
7 | mcp = FastMCP(
8 | "TimeService", # Name of the MCP server
9 | instructions="You are a time assistant that can provide the current time for different timezones.", # Instructions for the LLM on how to use this tool
10 | host="0.0.0.0", # Host address (0.0.0.0 allows connections from any IP)
11 | port=8005, # Port number for the server
12 | )
13 |
14 |
15 | @mcp.tool()
16 | async def get_current_time(timezone: Optional[str] = "Asia/Seoul") -> str:
17 | """
18 | Get current time information for the specified timezone.
19 |
20 | This function returns the current system time for the requested timezone.
21 |
22 | Args:
23 | timezone (str, optional): The timezone to get current time for. Defaults to "Asia/Seoul".
24 |
25 | Returns:
26 | str: A string containing the current time information for the specified timezone
27 | """
28 | try:
29 | # Get the timezone object
30 | tz = pytz.timezone(timezone)
31 |
32 | # Get current time in the specified timezone
33 | current_time = datetime.now(tz)
34 |
35 | # Format the time as a string
36 | formatted_time = current_time.strftime("%Y-%m-%d %H:%M:%S %Z")
37 |
38 | return f"Current time in {timezone} is: {formatted_time}"
39 | except pytz.exceptions.UnknownTimeZoneError:
40 | return f"Error: Unknown timezone '{timezone}'. Please provide a valid timezone."
41 | except Exception as e:
42 | return f"Error getting time: {str(e)}"
43 |
44 |
45 | if __name__ == "__main__":
46 | # Start the MCP server with stdio transport
47 | # stdio transport allows the server to communicate with clients
48 | # through standard input/output streams, making it suitable for
49 | # local development and testing
50 | mcp.run(transport="stdio")
51 |
--------------------------------------------------------------------------------
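Note: `get_current_time` accepts any IANA timezone name; invalid names fall through to the `UnknownTimeZoneError` branch above. A quick way to check what pytz will accept:

```python
import pytz

# pytz ships the full IANA tz database
print("Asia/Seoul" in pytz.all_timezones)  # True

# An unknown name raises the exception the server catches
try:
    pytz.timezone("Mars/Olympus_Mons")
except pytz.exceptions.UnknownTimeZoneError as exc:
    print("rejected:", exc)
```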
/packages.txt:
--------------------------------------------------------------------------------
1 | curl
2 | gnupg
3 | ca-certificates
4 | nodejs
5 | npm
--------------------------------------------------------------------------------
/pyproject.toml:
--------------------------------------------------------------------------------
1 | [project]
2 | name = "langgraph-mcp-agents"
3 | version = "0.1.0"
4 | description = "LangGraph Agent with MCP Adapters"
5 | readme = "README.md"
6 | requires-python = ">=3.12"
7 | dependencies = [
8 | "nest-asyncio>=1.6.0",
9 | "faiss-cpu>=1.10.0",
10 | "jupyter>=1.1.1",
11 | "langchain-anthropic>=0.3.10",
12 | "langchain-community>=0.3.20",
13 | "langchain-mcp-adapters>=0.0.7",
14 | "langchain-openai>=0.3.11",
15 | "langgraph>=0.3.21",
16 | "mcp[cli]>=1.6.0",
17 | "notebook>=7.3.3",
18 | "pymupdf>=1.25.4",
19 | "python-dotenv>=1.1.0",
20 | "streamlit>=1.44.1",
21 | ]
22 |
--------------------------------------------------------------------------------
/requirements.txt:
--------------------------------------------------------------------------------
1 | faiss-cpu>=1.10.0
2 | jupyter>=1.1.1
3 | langchain-anthropic>=0.3.10
4 | langchain-community>=0.3.20
5 | langchain-mcp-adapters>=0.0.7
6 | langchain-openai>=0.3.11
7 | langgraph>=0.3.21
8 | mcp>=1.6.0
9 | notebook>=7.3.3
10 | pymupdf>=1.25.4
11 | python-dotenv>=1.1.0
12 | streamlit>=1.44.1
13 | nest-asyncio>=1.6.0
--------------------------------------------------------------------------------
/utils.py:
--------------------------------------------------------------------------------
1 | from typing import Any, Dict, List, Callable, Optional
2 | from langchain_core.messages import BaseMessage
3 | from langchain_core.runnables import RunnableConfig
4 | from langgraph.graph.state import CompiledStateGraph
5 | import uuid
6 |
7 |
8 | def random_uuid():
9 | return str(uuid.uuid4())
10 |
11 |
12 | async def astream_graph(
13 | graph: CompiledStateGraph,
14 | inputs: dict,
15 | config: Optional[RunnableConfig] = None,
16 | node_names: List[str] = [],
17 | callback: Optional[Callable] = None,
18 | stream_mode: str = "messages",
19 | include_subgraphs: bool = False,
20 | ) -> Dict[str, Any]:
21 |     """
22 |     Asynchronously streams the execution results of a LangGraph and prints them directly.
23 | 
24 |     Args:
25 |         graph (CompiledStateGraph): The compiled LangGraph object to execute
26 |         inputs (dict): Dictionary of input values to pass to the graph
27 |         config (Optional[RunnableConfig]): Execution configuration (optional)
28 |         node_names (List[str], optional): List of node names to print. Defaults to an empty list
29 |         callback (Optional[Callable], optional): Callback function for processing each chunk. Defaults to None
30 |             The callback function receives a dictionary of the form {"node": str, "content": Any}.
31 |         stream_mode (str, optional): Streaming mode ("messages" or "updates"). Defaults to "messages"
32 |         include_subgraphs (bool, optional): Whether to include subgraphs. Defaults to False
33 | 
34 |     Returns:
35 |         Dict[str, Any]: Final result (optional)
36 |     """
37 | config = config or {}
38 | final_result = {}
39 |
40 | def format_namespace(namespace):
41 | return namespace[-1].split(":")[0] if len(namespace) > 0 else "root graph"
42 |
43 | prev_node = ""
44 |
45 | if stream_mode == "messages":
46 | async for chunk_msg, metadata in graph.astream(
47 | inputs, config, stream_mode=stream_mode
48 | ):
49 | curr_node = metadata["langgraph_node"]
50 | final_result = {
51 | "node": curr_node,
52 | "content": chunk_msg,
53 | "metadata": metadata,
54 | }
55 |
56 |             # Process only when node_names is empty or the current node is in node_names
57 | if not node_names or curr_node in node_names:
58 |                 # Run the callback if one was provided
59 | if callback:
60 | result = callback({"node": curr_node, "content": chunk_msg})
61 | if hasattr(result, "__await__"):
62 | await result
63 |                 # Default output when no callback is given
64 |                 else:
65 |                     # Print a separator only when the node changes
66 | if curr_node != prev_node:
67 | print("\n" + "=" * 50)
68 | print(f"🔄 Node: \033[1;36m{curr_node}\033[0m 🔄")
69 | print("- " * 25)
70 |
71 |                     # Handle token chunks from Claude/Anthropic models - always extract text only
72 | if hasattr(chunk_msg, "content"):
73 |                         # List-form content (Anthropic/Claude style)
74 | if isinstance(chunk_msg.content, list):
75 | for item in chunk_msg.content:
76 | if isinstance(item, dict) and "text" in item:
77 | print(item["text"], end="", flush=True)
78 |                         # String-form content
79 | elif isinstance(chunk_msg.content, str):
80 | print(chunk_msg.content, end="", flush=True)
81 |                     # Handle any other form of chunk_msg
82 | else:
83 | print(chunk_msg, end="", flush=True)
84 |
85 | prev_node = curr_node
86 |
87 | elif stream_mode == "updates":
88 |         # Bug fix: the unpacking logic was changed because
89 |         # some graphs (e.g., ReAct agents) return only a single dictionary
90 | async for chunk in graph.astream(
91 | inputs, config, stream_mode=stream_mode, subgraphs=include_subgraphs
92 | ):
93 |             # Branch on the shape of the returned value
94 | if isinstance(chunk, tuple) and len(chunk) == 2:
95 |                 # Previously expected format: (namespace, chunk_dict)
96 | namespace, node_chunks = chunk
97 | else:
98 |                 # Case where only a single dictionary is returned (e.g., ReAct agents)
99 |                 namespace = []  # Empty namespace (root graph)
100 |                 node_chunks = chunk  # The chunk itself is the node-chunk dictionary
101 |
102 |             # Make sure it is a dictionary, then process its items
103 | if isinstance(node_chunks, dict):
104 | for node_name, node_chunk in node_chunks.items():
105 | final_result = {
106 | "node": node_name,
107 | "content": node_chunk,
108 | "namespace": namespace,
109 | }
110 |
111 |                     # Filter only when node_names is non-empty
112 | if len(node_names) > 0 and node_name not in node_names:
113 | continue
114 |
115 |                     # Run the callback if one was provided
116 | if callback is not None:
117 | result = callback({"node": node_name, "content": node_chunk})
118 | if hasattr(result, "__await__"):
119 | await result
120 |                     # Default output when no callback is given
121 |                     else:
122 |                         # Print a separator only when the node changes (same as messages mode)
123 | if node_name != prev_node:
124 | print("\n" + "=" * 50)
125 | print(f"🔄 Node: \033[1;36m{node_name}\033[0m 🔄")
126 | print("- " * 25)
127 |
128 |                         # Print the node's chunk data, focusing on text content
129 | if isinstance(node_chunk, dict):
130 | for k, v in node_chunk.items():
131 | if isinstance(v, BaseMessage):
132 |                                     # Handle BaseMessage content that is text or a list
133 | if hasattr(v, "content"):
134 | if isinstance(v.content, list):
135 | for item in v.content:
136 | if (
137 | isinstance(item, dict)
138 | and "text" in item
139 | ):
140 | print(
141 | item["text"], end="", flush=True
142 | )
143 | else:
144 | print(v.content, end="", flush=True)
145 | else:
146 | v.pretty_print()
147 | elif isinstance(v, list):
148 | for list_item in v:
149 | if isinstance(list_item, BaseMessage):
150 | if hasattr(list_item, "content"):
151 | if isinstance(list_item.content, list):
152 | for item in list_item.content:
153 | if (
154 | isinstance(item, dict)
155 | and "text" in item
156 | ):
157 | print(
158 | item["text"],
159 | end="",
160 | flush=True,
161 | )
162 | else:
163 | print(
164 | list_item.content,
165 | end="",
166 | flush=True,
167 | )
168 | else:
169 | list_item.pretty_print()
170 | elif (
171 | isinstance(list_item, dict)
172 | and "text" in list_item
173 | ):
174 | print(list_item["text"], end="", flush=True)
175 | else:
176 | print(list_item, end="", flush=True)
177 | elif isinstance(v, dict) and "text" in v:
178 | print(v["text"], end="", flush=True)
179 | else:
180 | print(v, end="", flush=True)
181 | elif node_chunk is not None:
182 | if hasattr(node_chunk, "__iter__") and not isinstance(
183 | node_chunk, str
184 | ):
185 | for item in node_chunk:
186 | if isinstance(item, dict) and "text" in item:
187 | print(item["text"], end="", flush=True)
188 | else:
189 | print(item, end="", flush=True)
190 | else:
191 | print(node_chunk, end="", flush=True)
192 |
193 |                         # No separator printed here (same as messages mode)
194 |
195 | prev_node = node_name
196 | else:
197 |                 # Print the entire chunk when it is not a dictionary
198 |                 print("\n" + "=" * 50)
199 |                 print("🔄 Raw output 🔄")
200 | print("- " * 25)
201 | print(node_chunks, end="", flush=True)
202 |                 # No separator printed here
203 | final_result = {"content": node_chunks}
204 |
205 | else:
206 | raise ValueError(
207 | f"Invalid stream_mode: {stream_mode}. Must be 'messages' or 'updates'."
208 | )
209 |
210 |     # Return the final result as needed
211 | return final_result
212 |
213 |
214 | async def ainvoke_graph(
215 | graph: CompiledStateGraph,
216 | inputs: dict,
217 | config: Optional[RunnableConfig] = None,
218 | node_names: List[str] = [],
219 | callback: Optional[Callable] = None,
220 | include_subgraphs: bool = True,
221 | ) -> Dict[str, Any]:
222 |     """
223 |     Asynchronously streams and prints the execution results of a LangGraph app.
224 | 
225 |     Args:
226 |         graph (CompiledStateGraph): The compiled LangGraph object to execute
227 |         inputs (dict): Dictionary of input values to pass to the graph
228 |         config (Optional[RunnableConfig]): Execution configuration (optional)
229 |         node_names (List[str], optional): List of node names to print. Defaults to an empty list
230 |         callback (Optional[Callable], optional): Callback function for processing each chunk. Defaults to None
231 |             The callback function receives a dictionary of the form {"node": str, "content": Any}.
232 |         include_subgraphs (bool, optional): Whether to include subgraphs. Defaults to True
233 | 
234 |     Returns:
235 |         Dict[str, Any]: Final result (output of the last node)
236 |     """
237 | config = config or {}
238 | final_result = {}
239 |
240 | def format_namespace(namespace):
241 | return namespace[-1].split(":")[0] if len(namespace) > 0 else "root graph"
242 |
243 |     # Include subgraph output via the subgraphs parameter
244 | async for chunk in graph.astream(
245 | inputs, config, stream_mode="updates", subgraphs=include_subgraphs
246 | ):
247 |         # Branch on the shape of the returned value
248 |         if isinstance(chunk, tuple) and len(chunk) == 2:
249 |             # Previously expected format: (namespace, chunk_dict)
250 |             namespace, node_chunks = chunk
251 |         else:
252 |             # Case where only a single dictionary is returned (e.g., ReAct agents)
253 |             namespace = []  # Empty namespace (root graph)
254 |             node_chunks = chunk  # The chunk itself is the node-chunk dictionary
255 |
256 |         # Make sure it is a dictionary, then process its items
257 | if isinstance(node_chunks, dict):
258 | for node_name, node_chunk in node_chunks.items():
259 | final_result = {
260 | "node": node_name,
261 | "content": node_chunk,
262 | "namespace": namespace,
263 | }
264 |
265 |             # Filter only when node_names is non-empty
266 | if node_names and node_name not in node_names:
267 | continue
268 |
269 |                 # Run the callback if one was provided
270 |                 if callback is not None:
271 |                     result = callback({"node": node_name, "content": node_chunk})
272 |                     # Await the result if it is a coroutine
273 | if hasattr(result, "__await__"):
274 | await result
275 |                 # Default output when no callback is given
276 | else:
277 | print("\n" + "=" * 50)
278 | formatted_namespace = format_namespace(namespace)
279 | if formatted_namespace == "root graph":
280 | print(f"🔄 Node: \033[1;36m{node_name}\033[0m 🔄")
281 | else:
282 | print(
283 | f"🔄 Node: \033[1;36m{node_name}\033[0m in [\033[1;33m{formatted_namespace}\033[0m] 🔄"
284 | )
285 | print("- " * 25)
286 |
287 |                     # Print the node's chunk data
288 | if isinstance(node_chunk, dict):
289 | for k, v in node_chunk.items():
290 | if isinstance(v, BaseMessage):
291 | v.pretty_print()
292 | elif isinstance(v, list):
293 | for list_item in v:
294 | if isinstance(list_item, BaseMessage):
295 | list_item.pretty_print()
296 | else:
297 | print(list_item)
298 | elif isinstance(v, dict):
299 | for node_chunk_key, node_chunk_value in v.items():
300 | print(f"{node_chunk_key}:\n{node_chunk_value}")
301 | else:
302 | print(f"\033[1;32m{k}\033[0m:\n{v}")
303 | elif node_chunk is not None:
304 | if hasattr(node_chunk, "__iter__") and not isinstance(
305 | node_chunk, str
306 | ):
307 | for item in node_chunk:
308 | print(item)
309 | else:
310 | print(node_chunk)
311 | print("=" * 50)
312 | else:
313 |             # Print the entire chunk when it is not a dictionary
314 |             print("\n" + "=" * 50)
315 |             print("🔄 Raw output 🔄")
316 | print("- " * 25)
317 | print(node_chunks)
318 | print("=" * 50)
319 | final_result = {"content": node_chunks}
320 |
321 |     # Return the final result
322 | return final_result
323 |
--------------------------------------------------------------------------------
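Note: for reference, a minimal sketch of driving `astream_graph` with a prebuilt ReAct agent. The model name and prompt are placeholders, and the snippet assumes `OPENAI_API_KEY` is set in `.env`:

```python
import asyncio

from dotenv import load_dotenv
from langchain_openai import ChatOpenAI
from langgraph.prebuilt import create_react_agent

from utils import astream_graph

load_dotenv(override=True)

# A tool-less agent is enough to exercise the token-by-token printer
agent = create_react_agent(ChatOpenAI(model="gpt-4o-mini"), tools=[])

asyncio.run(
    astream_graph(agent, {"messages": [("user", "Say hello in one sentence.")]})
)
```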