├── .gitignore ├── LICENSE ├── README.md ├── agent ├── .env.example ├── .gitignore ├── README.md ├── app │ ├── __init__.py │ ├── agent │ │ ├── __init__.py │ │ └── graph.py │ ├── server.py │ └── utils.py ├── langgraph.json ├── poetry.lock └── pyproject.toml ├── client ├── .env ├── .gitignore ├── README.md ├── app │ ├── api │ │ └── agent │ │ │ └── route.ts │ ├── chat │ │ ├── [id] │ │ │ ├── agent-types.ts │ │ │ ├── components │ │ │ │ ├── chatbot-node.tsx │ │ │ │ ├── checkpoint-card.tsx │ │ │ │ ├── node-card.tsx │ │ │ │ ├── reminder.tsx │ │ │ │ └── weather │ │ │ │ │ ├── cloudy.tsx │ │ │ │ │ ├── rainy.tsx │ │ │ │ │ ├── snowy.tsx │ │ │ │ │ ├── sunny.tsx │ │ │ │ │ └── weather-node.tsx │ │ │ └── page.tsx │ │ └── page.tsx │ ├── favicon.ico │ ├── globals.css │ ├── layout.tsx │ └── page.tsx ├── components.json ├── components │ ├── app-sidebar.tsx │ ├── theme-provider.tsx │ ├── theme-switcher.tsx │ └── ui │ │ ├── badge.tsx │ │ ├── button.tsx │ │ ├── card.tsx │ │ ├── checkbox.tsx │ │ ├── input.tsx │ │ ├── popover.tsx │ │ ├── separator.tsx │ │ ├── sheet.tsx │ │ ├── sidebar.tsx │ │ ├── skeleton.tsx │ │ ├── textarea.tsx │ │ └── tooltip.tsx ├── eslint.config.mjs ├── hooks │ ├── use-mobile.tsx │ └── useLangGraphAgent │ │ ├── actions.ts │ │ ├── api.ts │ │ ├── ascii-tree.ts │ │ ├── types.ts │ │ └── useLangGraphAgent.tsx ├── lib │ └── utils.ts ├── next.config.ts ├── package-lock.json ├── package.json ├── postcss.config.mjs ├── public │ ├── file.svg │ ├── globe.svg │ ├── next.svg │ ├── vercel.svg │ └── window.svg ├── stores │ └── chat-store.tsx ├── tailwind.config.ts └── tsconfig.json ├── images ├── header.jpeg └── langgraph-nextjs.jpeg └── mcp-servers ├── booking-mcp ├── .gitignore ├── README.md ├── bun.lock ├── package.json ├── src │ ├── booking-mcp-server.ts │ ├── sse-server.ts │ └── stdio-server.ts └── tsconfig.json └── calendar-mcp ├── .python-version ├── README.md ├── calendar-mcp-server.py ├── pyproject.toml └── uv.lock /.gitignore: -------------------------------------------------------------------------------- 1 | .DS_Store 2 | **/.DS_Store 3 | .DS_Store? 4 | ._* 5 | .Spotlight-V100 6 | .Trashes 7 | ehthumbs.db 8 | Thumbs.db 9 | 10 | .idea/ 11 | .vscode/ 12 | *.swp 13 | *.swo 14 | *~ -------------------------------------------------------------------------------- /LICENSE: -------------------------------------------------------------------------------- 1 | MIT License 2 | 3 | Copyright (c) 2025 Akveo 4 | 5 | Permission is hereby granted, free of charge, to any person obtaining a copy 6 | of this software and associated documentation files (the "Software"), to deal 7 | in the Software without restriction, including without limitation the rights 8 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell 9 | copies of the Software, and to permit persons to whom the Software is 10 | furnished to do so, subject to the following conditions: 11 | 12 | The above copyright notice and this permission notice shall be included in all 13 | copies or substantial portions of the Software. 14 | 15 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 16 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 17 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE 18 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 19 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, 20 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE 21 | SOFTWARE. 
22 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | ![Header image](./images/header.jpeg) 2 | 3 | # AI cookbook 4 | 5 | This repository contains a set of use cases demonstrating how to build AI-powered applications. 6 | - [LangGraph + Next.js](#langgraph--nextjs) 7 | - [MCP (Model Context Protocol)](#mcp-model-context-protocol) 8 | 9 | # LangGraph + Next.js 10 | This demo shows how to create a simple AI agent using LangGraph and integrate it into a Next.js application. [LangGraph](https://langchain-ai.github.io/langgraph/) is a robust framework for building agent and multi-agent workflows. It provides the flexibility to build complex logic and has great tooling for debugging (LangGraph Studio) and monitoring (LangSmith). [Next.js](https://nextjs.org/) is a popular framework for building web applications. 11 | 12 | The demo includes the following capabilities: 13 | ### Capabilities 14 | - Streaming. The agent streams LLM tokens to the client application. 15 | - Generative UI. Renders components based on agent state, for example a weather widget. 16 | - Human in the loop. The agent can ask users for clarification to proceed with tasks, for example confirming reminder creation. 17 | - Persistence. LangGraph has a built-in persistence layer that can be used to persist agent state between sessions. In the demo app, state is persisted in memory. See [LangGraph Persistence](https://langchain-ai.github.io/langgraph/how-tos/#persistence) for how to use PostgreSQL or MongoDB. 18 | - Replay and Fork. The agent can be replayed or forked from any checkpoint. 19 | - Agent state replication. Agent state is fully replicated on the client side based on the graph checkpoints. 20 | - Error handling. The app displays global agent errors, such as when the agent is not accessible, as well as errors that occur at the graph node level. 21 | - Stop agent. Agent execution can be stopped and resumed later. 22 | - No dependencies. The integration has no third-party library dependencies, so you can adjust it to your needs. 23 | - Clean UI. The app is based on [shadcn](https://ui.shadcn.com/) components and has dark and light theme support. 24 | 25 | ### Limitations 26 | Some features are not implemented yet: 27 | - Graph interruption (human in the loop) in parallel nodes. 28 | - Sending custom events from parallel runs of the same node. E.g., when checking the weather for multiple cities at the same time, it is not possible to distinguish between the events on the client side. 29 | 30 | ![LangGraph NextJS Demo](./images/langgraph-nextjs.jpeg) 31 | 32 | # MCP (Model Context Protocol) 33 | This demo shows how to create and use the [Model Context Protocol (MCP)](https://github.com/modelcontextprotocol) in your application. The Model Context Protocol is a standard for integrating external data sources and services into your LLM application.
The demo includes the following: 34 | - TypeScript and Python MCP server implementations 35 | - `STDIO` and `SSE` transport protocols 36 | - Integration of MCP servers with LangGraph servers 37 | 38 | # Starter kit 39 | You can use this project as a starting point for your own projects: 40 | - Clone the repository 41 | - Adjust the AI agent logic in the [`graph.py`](/agent/app/agent/graph.py) file or create a brand new one 42 | - Adjust the agent state in the [`agent-types.ts`](/client/app/chat/[id]/agent-types.ts) file 43 | - In the client app, call the agent using the [`useLangGraphAgent`](/client/hooks/useLangGraphAgent/useLangGraphAgent.tsx) hook in your components (see the usage sketch at the end of this README) 44 | 45 | # How to run examples 46 | Add a `.env` file to the [`/agent`](/agent) directory and set your `OPENAI_API_KEY` (see [`.env.example`](/agent/.env.example)) 47 | 48 | ## LangGraph server 49 | ```bash 50 | cd agent/ 51 | poetry install 52 | poetry run server 53 | ``` 54 | ### MCP Servers 55 | To run the agent server with MCP tools using the `SSE` protocol, first start the MCP servers. MCP servers using the `STDIO` protocol are started automatically. 56 | - Start the Booking MCP demo server 57 | ```bash 58 | cd mcp-servers/booking-mcp 59 | bun install 60 | npm start 61 | ``` 62 | - Start the Calendar MCP demo server 63 | ```bash 64 | cd mcp-servers/calendar-mcp 65 | uv sync 66 | uv run python calendar-mcp-server.py sse 67 | ``` 68 | - Edit the MCP server configuration in [graph.py](/agent/app/agent/graph.py) 69 | - Run the agent server with the `--mcp` flag 70 | ```bash 71 | cd agent/ 72 | poetry install 73 | poetry run server --mcp 74 | ``` 75 | 76 | ## Next.js client 77 | ```bash 78 | cd client/ 79 | npm install 80 | npm run dev 81 | ``` 82 | The application will start at http://localhost:3000 by default 83 | 84 | # Building AI-powered apps? 85 | Get expert support with [`Akveo's AI development services`](https://www.akveo.com/services/ai-development-services).
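## Client usage example
As a quick reference, here is a minimal sketch of calling the agent from a client component. It follows the demo client in [`page.tsx`](/client/app/chat/[id]/page.tsx); the generic parameters, the optionality of the callbacks, and the import paths (which assume the demo's `@/` alias) are illustrative assumptions — check the hook's signature in [`useLangGraphAgent.tsx`](/client/hooks/useLangGraphAgent/useLangGraphAgent.tsx) and adjust as needed.

```tsx
'use client';

import { useLangGraphAgent } from '@/hooks/useLangGraphAgent/useLangGraphAgent';
import { AgentState, InterruptValue, ResumeValue } from '@/app/chat/[id]/agent-types';

export function MinimalChat({ threadId }: { threadId: string }) {
  // run() starts the agent; appCheckpoints mirrors the graph checkpoints on the client.
  const { status, appCheckpoints, run } = useLangGraphAgent<AgentState, InterruptValue, ResumeValue>({
    onCheckpointEnd: (checkpoint) => console.log('Checkpoint ended:', checkpoint.nodes),
  });

  const send = (content: string) =>
    run({ thread_id: threadId, state: { messages: [{ type: 'user', content }] } });

  return (
    <button disabled={status === 'running'} onClick={() => send('What can you do?')}>
      Ask the agent ({appCheckpoints.length} checkpoints so far)
    </button>
  );
}
```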
-------------------------------------------------------------------------------- /agent/.env.example: -------------------------------------------------------------------------------- 1 | OPENAI_API_KEY=your_openai_api_key -------------------------------------------------------------------------------- /agent/.gitignore: -------------------------------------------------------------------------------- 1 | __pycache__/ 2 | *.pyc 3 | 4 | .env 5 | 6 | .DS_Store 7 | **/.DS_Store 8 | -------------------------------------------------------------------------------- /agent/README.md: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/akveo/ai-cookbook/cc751016ab51e4d70a5f252624d048292dccc0a8/agent/README.md -------------------------------------------------------------------------------- /agent/app/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/akveo/ai-cookbook/cc751016ab51e4d70a5f252624d048292dccc0a8/agent/app/__init__.py -------------------------------------------------------------------------------- /agent/app/agent/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/akveo/ai-cookbook/cc751016ab51e4d70a5f252624d048292dccc0a8/agent/app/agent/__init__.py -------------------------------------------------------------------------------- /agent/app/agent/graph.py: -------------------------------------------------------------------------------- 1 | import operator 2 | from typing import Literal, TypedDict, Any, Annotated 3 | from dotenv import load_dotenv 4 | from langchain_openai import ChatOpenAI 5 | from langgraph.graph import StateGraph, MessagesState, START, END 6 | from langgraph.checkpoint.memory import MemorySaver 7 | from langgraph.types import StreamWriter, interrupt, Send 8 | from langchain_core.messages import ToolMessage 9 | from langchain_core.tools import tool 10 | from langchain_mcp_adapters.tools import load_mcp_tools 11 | from langchain_mcp_adapters.client import MultiServerMCPClient 12 | from mcp import ClientSession, StdioServerParameters 13 | from mcp.client.sse import sse_client 14 | from mcp.client.stdio import stdio_client 15 | import random 16 | import asyncio 17 | 18 | 19 | 20 | 21 | 22 | load_dotenv() 23 | 24 | # MCP servers to connect to 25 | mcp_servers = { 26 | # Booking MCP server with sse transport protocol 27 | # "booking": { 28 | # "url": "http://localhost:3001/sse", 29 | # "transport": "sse", 30 | # }, 31 | 32 | # Booking MCP server with stdio transport protocol 33 | # Make sure to update this to the full absolute path to the script file 34 | # "booking": { 35 | # "command": "npx", 36 | # "args": ["tsx", "/stdio-server.ts"], 37 | # "transport": "stdio", 38 | # }, 39 | 40 | # Calendar MCP server with sse transport protocol 41 | # "calendar": { 42 | # "url": "http://localhost:3002/sse", 43 | # "transport": "sse", 44 | # }, 45 | 46 | # Calendar MCP server example with stdio transport protocol 47 | # Make sure to update this to the full absolute path to the script file 48 | # "calendar": { 49 | # "command": "uvx", 50 | # "args": ["--with", "mcp", "python", "/calendar-mcp-server.py"], 51 | # "transport": "stdio", 52 | # }, 53 | } 54 | 55 | # Global variable to store MCP servers and their tools.
Populated by initialize_mcp_tools() 56 | mcp_servers_with_tools = {} 57 | # Global variable to store tool name to server name mapping 58 | tool_to_server_lookup = {} 59 | 60 | 61 | class Weather(TypedDict): 62 | location: str 63 | search_status: str 64 | result: str 65 | 66 | 67 | class State(MessagesState): 68 | weather_forecast: Annotated[list[Weather], operator.add] 69 | 70 | 71 | # Tool nodes receive the raw tool call payload (name, args, id) — see ToolNodeArgs below and the assign_tool router. 72 | 73 | 74 | 75 | 76 | class ToolNodeArgs(TypedDict): 77 | name: str 78 | args: dict[str, Any] 79 | id: str 80 | 81 | 82 | class McpToolNodeArgs(TypedDict): 83 | server_name: str 84 | name: str 85 | args: dict[str, Any] 86 | id: str 87 | 88 | 89 | @tool 90 | async def weather_tool(query: str) -> str: 91 | """Call to get current weather""" 92 | return "Sunny" 93 | 94 | 95 | @tool 96 | async def create_reminder_tool(reminder_text: str) -> str: 97 | """Call to create a reminder""" 98 | return "Reminder created" 99 | 100 | 101 | async def weather(input: ToolNodeArgs, writer: StreamWriter): 102 | location = input["args"]["query"] 103 | 104 | # Send custom event to the client. It will update the state of the last checkpoint and all child nodes. 105 | # Note: if there are multiple child nodes (e.g. parallel nodes), the state will be updated for all of them. 106 | writer({"weather_forecast": [ 107 | {"location": location, "search_status": f"Checking weather in {location}"}]}) 108 | 109 | await asyncio.sleep(2) 110 | weather = random.choice(["Sunny", "Cloudy", "Rainy", "Snowy"]) 111 | 112 | return {"messages": [ToolMessage(content=weather, tool_call_id=input["id"])], "weather_forecast": [{"location": location, "search_status": "", "result": weather}]} 113 | 114 | 115 | async def reminder(input: ToolNodeArgs): 116 | res = interrupt(input['args']['reminder_text']) 117 | 118 | tool_answer = "Reminder created." if res == 'approve' else "Reminder creation cancelled by user."
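# interrupt() above paused the graph and surfaced the reminder text to the client as an interrupt event;
# the value the client sends back (via Command(resume=...) in server.py) is returned from interrupt() as res.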
119 | 120 | return {"messages": [ToolMessage(content=tool_answer, tool_call_id=input["id"])]} 121 | 122 | 123 | async def mcp_tool(input: McpToolNodeArgs): 124 | if input["server_name"] not in mcp_servers: 125 | raise ValueError( 126 | f"Server with name {input['server_name']} not found in MCP servers list") 127 | 128 | protocol = mcp_servers[input["server_name"]]["transport"] 129 | 130 | tool_result = None 131 | if protocol == "sse": 132 | async with sse_client(mcp_servers[input["server_name"]]["url"]) as (reader, writer): 133 | async with ClientSession(reader, writer) as session: 134 | await session.initialize() 135 | tool_result = await session.call_tool(input["name"], input["args"]) 136 | elif protocol == "stdio": 137 | server_params = StdioServerParameters( 138 | command=mcp_servers[input["server_name"]]["command"], 139 | args=mcp_servers[input["server_name"]]["args"], 140 | env=None, # Optional environment variables 141 | ) 142 | async with stdio_client(server_params) as (reader, writer): 143 | async with ClientSession(reader, writer) as session: 144 | await session.initialize() 145 | tool_result = await session.call_tool(input["name"], input["args"]) 146 | 147 | if not tool_result or tool_result.isError or not tool_result.content: 148 | return {"messages": [ToolMessage(content="Error calling tool", tool_call_id=input["id"])]} 149 | 150 | return {"messages": [ToolMessage(content=tool_result.content[0].text, tool_call_id=input["id"])]} 151 | 152 | 153 | async def chatbot(state: State): 154 | tools = [ 155 | weather_tool, 156 | create_reminder_tool, 157 | ] + [tool for tools_list in mcp_servers_with_tools.values() for tool in tools_list] 158 | 159 | llm = ChatOpenAI(model="gpt-4o-mini").bind_tools(tools) 160 | response = await llm.ainvoke(state["messages"]) 161 | return {"messages": [response]} 162 | 163 | 164 | # Chatbot node router. Based on tool calls, creates the list of the next parallel nodes. 
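# Returning a list of Send objects fans execution out into parallel branches: each Send names the target node and passes the tool call payload (name/args/id) as that node's input.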
165 | def assign_tool(state: State) -> list[Send] | Literal["__end__"]: 166 | messages = state["messages"] 167 | last_message = messages[-1] 168 | if last_message.tool_calls: 169 | send_list = [] 170 | for tool in last_message.tool_calls: 171 | if tool["name"] == 'weather_tool': 172 | send_list.append(Send('weather', tool)) 173 | elif tool["name"] == 'create_reminder_tool': 174 | send_list.append(Send('reminder', tool)) 175 | elif any(tool["name"] == mcp_tool.name for mcp_tool in [tool for tools_list in mcp_servers_with_tools.values() for tool in tools_list]): 176 | server_name = tool_to_server_lookup.get(tool["name"], None) 177 | args = McpToolNodeArgs( 178 | server_name=server_name, 179 | name=tool["name"], 180 | args=tool["args"], 181 | id=tool["id"] 182 | ) 183 | send_list.append(Send('mcp_tool', args)) 184 | return send_list if len(send_list) > 0 else "__end__" 185 | return "__end__" 186 | 187 | 188 | async def initialize_mcp_tools(): 189 | global mcp_servers_with_tools, tool_to_server_lookup 190 | try: 191 | async with MultiServerMCPClient(mcp_servers) as client: 192 | mcp_servers_with_tools = client.server_name_to_tools 193 | tool_to_server_lookup = {} 194 | for server_name, tools in mcp_servers_with_tools.items(): 195 | for tool in tools: 196 | tool_to_server_lookup[tool.name] = server_name 197 | except Exception as e: 198 | print(f"Error initializing MCP tools: {str(e)}") 199 | 200 | 201 | async def init_agent(use_mcp: bool): 202 | if use_mcp: 203 | await initialize_mcp_tools() 204 | 205 | builder = StateGraph(State) 206 | 207 | builder.add_node("chatbot", chatbot) 208 | builder.add_node("weather", weather) 209 | builder.add_node("reminder", reminder) 210 | builder.add_node("mcp_tool", mcp_tool) 211 | 212 | builder.add_edge(START, "chatbot") 213 | builder.add_conditional_edges("chatbot", assign_tool) 214 | builder.add_edge("weather", "chatbot") 215 | builder.add_edge("reminder", "chatbot") 216 | builder.add_edge("mcp_tool", "chatbot") 217 | 218 | builder.add_edge("chatbot", END) 219 | 220 | memory = MemorySaver() 221 | graph = builder.compile(checkpointer=memory) 222 | graph.name = "LangGraph Agent" 223 | return graph 224 | 225 | # To execute graph in LangGraph Studio uncomment the following line 226 | # graph = asyncio.run(init_agent(use_mcp=False)) 227 | -------------------------------------------------------------------------------- /agent/app/server.py: -------------------------------------------------------------------------------- 1 | import uvicorn 2 | from langgraph.types import Command, Interrupt 3 | from fastapi import FastAPI, Request, HTTPException 4 | from fastapi.middleware.cors import CORSMiddleware 5 | from sse_starlette.sse import EventSourceResponse 6 | from typing import AsyncGenerator, Dict 7 | from app.utils import message_chunk_event, interrupt_event, custom_event, checkpoint_event, format_state_snapshot 8 | from contextlib import asynccontextmanager 9 | import asyncio 10 | import argparse 11 | 12 | from app.agent.graph import init_agent 13 | 14 | # Track active connections 15 | active_connections: Dict[str, asyncio.Event] = {} 16 | 17 | graph = None 18 | use_mcp = False 19 | 20 | parser = argparse.ArgumentParser(description='Agent Server') 21 | parser.add_argument('--mcp', action='store_true') 22 | args = parser.parse_args() 23 | use_mcp = args.mcp 24 | 25 | 26 | @asynccontextmanager 27 | async def lifespan(app: FastAPI): 28 | global graph 29 | graph = await init_agent(use_mcp=use_mcp) 30 | yield 31 | 32 | app = FastAPI( 33 | title="LangGraph API",
34 | description="API for LangGraph interactions", 35 | version="0.1.0", 36 | lifespan=lifespan 37 | ) 38 | 39 | # Configure CORS 40 | app.add_middleware( 41 | CORSMiddleware, 42 | allow_origins=["*"], # In production, replace with specific origins 43 | allow_credentials=True, 44 | allow_methods=["*"], 45 | allow_headers=["*"], 46 | ) 47 | 48 | 49 | @app.get("/state") 50 | async def state(thread_id: str | None = None): 51 | """Endpoint returning current graph state.""" 52 | if not thread_id: 53 | raise HTTPException(status_code=400, detail="thread_id is required") 54 | 55 | config = {"configurable": {"thread_id": thread_id}} 56 | 57 | state = await graph.aget_state(config) 58 | return format_state_snapshot(state) 59 | 60 | 61 | @app.get("/history") 62 | async def history(thread_id: str | None = None): 63 | """Endpoint returning complete state history. Used for restoring graph.""" 64 | if not thread_id: 65 | raise HTTPException(status_code=400, detail="thread_id is required") 66 | 67 | config = {"configurable": {"thread_id": thread_id}} 68 | 69 | records = [] 70 | async for state in graph.aget_state_history(config): 71 | records.append(format_state_snapshot(state)) 72 | return records 73 | 74 | 75 | @app.post("/agent/stop") 76 | async def stop_agent(request: Request): 77 | """Endpoint for stopping the running agent.""" 78 | body = await request.json() 79 | thread_id = body.get("thread_id") 80 | if not thread_id: 81 | raise HTTPException(status_code=400, detail="thread_id is required") 82 | 83 | if thread_id in active_connections: 84 | active_connections[thread_id].set() 85 | return {"status": "stopped", "thread_id": thread_id} 86 | raise HTTPException(status_code=404, detail="Thread is not running") 87 | 88 | 89 | @app.post("/agent") 90 | async def agent(request: Request): 91 | """Endpoint for running the agent.""" 92 | body = await request.json() 93 | 94 | request_type = body.get("type") 95 | if not request_type: 96 | raise HTTPException(status_code=400, detail="type is required") 97 | 98 | thread_id = body.get("thread_id") 99 | if not thread_id: 100 | raise HTTPException(status_code=400, detail="thread_id is required") 101 | 102 | stop_event = asyncio.Event() 103 | active_connections[thread_id] = stop_event 104 | 105 | config = {"configurable": {"thread_id": thread_id}} 106 | 107 | if request_type == "run": 108 | input = body.get("state", None) 109 | elif request_type == "resume": 110 | resume = body.get("resume") 111 | if not resume: 112 | raise HTTPException(status_code=400, detail="resume is required") 113 | input = Command(resume=resume) 114 | elif request_type == "fork": 115 | config = body.get("config") 116 | if not config: 117 | raise HTTPException(status_code=400, detail="config is required") 118 | input = body.get("state", None) 119 | print("input before update:", input) 120 | print("config before update:", config) 121 | config = await graph.aupdate_state(config, input) 122 | input = None 123 | elif request_type == "replay": 124 | config = body.get("config") 125 | if not config: 126 | raise HTTPException(status_code=400, detail="config is required") 127 | input = None 128 | else: 129 | raise HTTPException(status_code=400, detail="invalid request type") 130 | 131 | print("request_type:", request_type) 132 | print("thread_id:", thread_id) 133 | print("input:", input) 134 | print("config:", config) 135 | 136 | async def generate_events() -> AsyncGenerator[dict, None]: 137 | try: 138 | async for chunk in graph.astream( 139 | input, 140 | config, 141 | stream_mode=["debug", 
"messages", "updates", "custom"], 142 | ): 143 | if stop_event.is_set(): 144 | break 145 | 146 | chunk_type, chunk_data = chunk 147 | 148 | if chunk_type == "debug": 149 | # type can be checkpoint, task, task_result 150 | debug_type = chunk_data["type"] 151 | if debug_type == "checkpoint": 152 | yield checkpoint_event(chunk_data) 153 | elif debug_type == "task_result": 154 | interrupts = chunk_data["payload"].get( 155 | "interrupts", []) 156 | if interrupts and len(interrupts) > 0: 157 | yield interrupt_event(interrupts) 158 | elif chunk_type == "messages": 159 | yield message_chunk_event(chunk_data[1]["langgraph_node"], chunk_data[0]) 160 | elif chunk_type == "custom": 161 | yield custom_event(chunk_data) 162 | finally: 163 | if thread_id in active_connections: 164 | del active_connections[thread_id] 165 | 166 | return EventSourceResponse(generate_events()) 167 | 168 | 169 | def main(): 170 | uvicorn.run("app.server:app", host="0.0.0.0", port=8000, reload=True) 171 | 172 | 173 | if __name__ == "__main__": 174 | main() 175 | -------------------------------------------------------------------------------- /agent/app/utils.py: -------------------------------------------------------------------------------- 1 | import json 2 | from langgraph.types import StateSnapshot 3 | 4 | 5 | def checkpoint_event(value): 6 | """Create a checkpoint event for the client.""" 7 | 8 | def format_values(values: dict): 9 | formatted_values = values.copy() 10 | if "messages" in formatted_values: 11 | formatted_values["messages"] = [ 12 | { 13 | "type": msg.get("type") if isinstance(msg, dict) else msg.type, 14 | "content": msg.get("content") if isinstance(msg, dict) else msg.content, 15 | "id": msg.get("id") if isinstance(msg, dict) else msg.id, 16 | "tool_calls": msg.get("tool_calls") if isinstance(msg, dict) else (msg.tool_calls if hasattr(msg, 'tool_calls') else None) 17 | } 18 | for msg in formatted_values["messages"] 19 | ] 20 | return formatted_values 21 | 22 | def format_writes(writes: dict): 23 | if writes is None: 24 | return None 25 | formatted_writes = {} 26 | for key, value in writes.items(): 27 | if isinstance(value, dict): 28 | formatted_writes[key] = format_values(value) 29 | elif isinstance(value, list): 30 | formatted_writes[key] = [format_values(item) if isinstance( 31 | item, dict) else item for item in value] 32 | else: 33 | formatted_writes[key] = value 34 | return formatted_writes 35 | 36 | configurable = value["payload"]["config"]["configurable"] 37 | data = { 38 | "next": value["payload"]["next"], 39 | "values": format_values(value["payload"]["values"]), 40 | "config": { 41 | "configurable": { 42 | "checkpoint_id": configurable["checkpoint_id"], 43 | "checkpoint_ns": configurable["checkpoint_ns"], 44 | "thread_id": configurable["thread_id"] 45 | } 46 | }, 47 | "metadata": { 48 | "source": value["payload"]["metadata"]["source"], 49 | "step": value["payload"]["metadata"]["step"], 50 | "writes": format_writes(value["payload"]["metadata"]["writes"]), 51 | "parents": value["payload"]["metadata"]["parents"] 52 | } 53 | } 54 | return { 55 | "event": "checkpoint", 56 | "data": json.dumps(data) 57 | } 58 | 59 | 60 | def message_chunk_event(node_name, message_chunk): 61 | """Create a message chunk event for the client.""" 62 | 63 | def format_messages(value): 64 | """Format message chunk into a serializable dictionary. 65 | This is needed because the message class is not serializable. 
66 | """ 67 | return { 68 | "content": value.content, 69 | "id": value.id, 70 | "tool_calls": value.tool_calls if hasattr(value, 'tool_calls') else None, 71 | "tool_call_chunks": value.tool_call_chunks if hasattr(value, 'tool_call_chunks') else None 72 | } 73 | 74 | return { 75 | "event": "message_chunk", 76 | "data": json.dumps({ 77 | "node_name": node_name, 78 | "message_chunk": format_messages(message_chunk) 79 | }) 80 | } 81 | 82 | 83 | def interrupt_event(interrupts): 84 | """Create an interrupt event for the client.""" 85 | formatted_interrupts = [{"value": interrupt["value"]} 86 | for interrupt in interrupts] 87 | return { 88 | "event": "interrupt", 89 | "data": json.dumps(formatted_interrupts) 90 | } 91 | 92 | 93 | def custom_event(value): 94 | """Create a custom event for the client.""" 95 | return { 96 | "event": "custom", 97 | "data": json.dumps(value) 98 | } 99 | 100 | 101 | def format_state_snapshot(snapshot: StateSnapshot): 102 | interrupts = [] 103 | for task in snapshot.tasks: 104 | for interrupt in task.interrupts: 105 | interrupts.append({"value": interrupt.value}) 106 | return { 107 | "values": snapshot.values, 108 | "next": snapshot.next, 109 | "config": snapshot.config, 110 | "interrupts": interrupts, 111 | "parent_config": snapshot.parent_config, 112 | "metadata": snapshot.metadata 113 | } 114 | -------------------------------------------------------------------------------- /agent/langgraph.json: -------------------------------------------------------------------------------- 1 | { 2 | "graphs": { 3 | "agent": "./app/agent/graph.py:graph" 4 | }, 5 | "env": ".env", 6 | "python_version": "3.12", 7 | "dependencies": [ 8 | "." 9 | ] 10 | } -------------------------------------------------------------------------------- /agent/pyproject.toml: -------------------------------------------------------------------------------- 1 | [tool.poetry] 2 | name = "langgraph-agent" 3 | version = "0.1.0" 4 | description = "" 5 | authors = ["elupanov "] 6 | readme = "README.md" 7 | packages = [ 8 | { include = "app" }, 9 | ] 10 | 11 | [tool.poetry.dependencies] 12 | python = "^3.12" 13 | langgraph = "^0.3.22" 14 | langchain = "^0.3.14" 15 | langchain-openai = "^0.3.11" 16 | python-dotenv = "^1.0.1" 17 | fastapi = "^0.115.5" 18 | uvicorn = "^0.34.0" 19 | sse-starlette = ">=2.1.0" 20 | langchain-mcp-adapters = "^0.0.7" 21 | mcp = "^1.6.0" 22 | 23 | [build-system] 24 | requires = ["poetry-core"] 25 | build-backend = "poetry.core.masonry.api" 26 | 27 | [tool.poetry.scripts] 28 | server = "app.server:main" 29 | -------------------------------------------------------------------------------- /client/.env: -------------------------------------------------------------------------------- 1 | NEXT_PUBLIC_AGENT_URL=http://localhost:8000 -------------------------------------------------------------------------------- /client/.gitignore: -------------------------------------------------------------------------------- 1 | # See https://help.github.com/articles/ignoring-files/ for more about ignoring files. 
2 | 3 | # dependencies 4 | /node_modules 5 | /.pnp 6 | .pnp.* 7 | .yarn/* 8 | !.yarn/patches 9 | !.yarn/plugins 10 | !.yarn/releases 11 | !.yarn/versions 12 | 13 | # testing 14 | /coverage 15 | 16 | # next.js 17 | /.next/ 18 | /out/ 19 | 20 | # production 21 | /build 22 | 23 | # misc 24 | .DS_Store 25 | *.pem 26 | 27 | # debug 28 | npm-debug.log* 29 | yarn-debug.log* 30 | yarn-error.log* 31 | .pnpm-debug.log* 32 | 33 | # env files (can opt-in for committing if needed) 34 | .env.* 35 | 36 | # vercel 37 | .vercel 38 | 39 | # typescript 40 | *.tsbuildinfo 41 | next-env.d.ts 42 | -------------------------------------------------------------------------------- /client/README.md: -------------------------------------------------------------------------------- 1 | This is a [Next.js](https://nextjs.org) project bootstrapped with [`create-next-app`](https://nextjs.org/docs/app/api-reference/cli/create-next-app). 2 | 3 | ## Getting Started 4 | 5 | First, run the development server: 6 | 7 | ```bash 8 | npm run dev 9 | # or 10 | yarn dev 11 | # or 12 | pnpm dev 13 | # or 14 | bun dev 15 | ``` 16 | 17 | Open [http://localhost:3000](http://localhost:3000) with your browser to see the result. 18 | 19 | You can start editing the page by modifying `app/page.tsx`. The page auto-updates as you edit the file. 20 | 21 | This project uses [`next/font`](https://nextjs.org/docs/app/building-your-application/optimizing/fonts) to automatically optimize and load [Geist](https://vercel.com/font), a new font family for Vercel. 22 | 23 | ## Learn More 24 | 25 | To learn more about Next.js, take a look at the following resources: 26 | 27 | - [Next.js Documentation](https://nextjs.org/docs) - learn about Next.js features and API. 28 | - [Learn Next.js](https://nextjs.org/learn) - an interactive Next.js tutorial. 29 | 30 | You can check out [the Next.js GitHub repository](https://github.com/vercel/next.js) - your feedback and contributions are welcome! 31 | 32 | ## Deploy on Vercel 33 | 34 | The easiest way to deploy your Next.js app is to use the [Vercel Platform](https://vercel.com/new?utm_medium=default-template&filter=next.js&utm_source=create-next-app&utm_campaign=create-next-app-readme) from the creators of Next.js. 35 | 36 | Check out our [Next.js deployment documentation](https://nextjs.org/docs/app/building-your-application/deploying) for more details. 37 | -------------------------------------------------------------------------------- /client/app/api/agent/route.ts: -------------------------------------------------------------------------------- 1 | import { NextRequest, NextResponse } from 'next/server'; 2 | 3 | // This API route serves as a proxy to the agent endpoint of the AI service. 4 | // It is necessary to send requests from the Next.js backend rather than the client. 5 | // This approach prevents exposing the AI service as a public endpoint and eliminates the need to implement authentication logic. 6 | // A more elegant way would be to use server actions, but they do not support streaming responses.
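// The handler below simply re-streams the upstream SSE bytes to the browser without parsing individual events.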
7 | 8 | const AGENT_URL = process.env.NEXT_PUBLIC_AGENT_URL; 9 | 10 | export async function POST(request: NextRequest) { 11 | const body = await request.json(); 12 | 13 | try { 14 | const response = await fetch(`${AGENT_URL}/agent`, { 15 | method: 'POST', 16 | headers: { 17 | 'Content-Type': 'application/json', 18 | 'Accept': 'text/event-stream', 19 | }, 20 | body: JSON.stringify(body), 21 | }); 22 | 23 | if (!response.ok) { 24 | const error = await response.json(); 25 | throw new Error(error.detail || 'Failed to call agent'); 26 | } 27 | 28 | const stream = new TransformStream(); 29 | const writer = stream.writable.getWriter(); 30 | 31 | (async () => { 32 | try { 33 | const reader = response.body?.getReader(); 34 | if (!reader) throw new Error('No reader available'); 35 | 36 | while (true) { 37 | const { done, value } = await reader.read(); 38 | if (done) { 39 | await writer.close(); 40 | break; 41 | } 42 | 43 | // Just forward the raw chunks 44 | await writer.write(value); 45 | } 46 | } catch (error) { 47 | console.error('Stream processing error:', error); 48 | 49 | // Write an error message to the stream before closing 50 | const errorData = JSON.stringify({ error: "Error in agent" }); 51 | await writer.write(new TextEncoder().encode(`event: error\ndata: ${errorData}\n\n`)); 52 | await writer.close(); 53 | } 54 | })(); 55 | 56 | return new Response(stream.readable, { 57 | headers: { 58 | 'Content-Type': 'text/event-stream', 59 | 'Cache-Control': 'no-cache', 60 | 'Connection': 'keep-alive', 61 | }, 62 | }); 63 | 64 | } catch (error) { 65 | console.error('Error in agent route', error); 66 | return NextResponse.json( 67 | { error: 'Failed to process /agent request' }, 68 | { status: 500 } 69 | ); 70 | } 71 | } -------------------------------------------------------------------------------- /client/app/chat/[id]/agent-types.ts: -------------------------------------------------------------------------------- 1 | import { WithMessages } from "@/hooks/useLangGraphAgent/types"; 2 | 3 | // The agent state which mirrors the LangGraph state. If your state has messages, extend the WithMessages interface. 4 | export interface AgentState extends WithMessages { 5 | weather_forecast: WeatherForecast[]; 6 | } 7 | 8 | export interface WeatherForecast { 9 | location: string; 10 | search_status: string; 11 | result: "Sunny" | "Cloudy" | "Rainy" | "Snowy"; 12 | } 13 | 14 | // All possible interrupt types from the graph. We use a string for the reminder node 15 | export type InterruptValue = string | number | { "question": string }; 16 | 17 | // All possible resume types to send to the graph.
We use a string for the reminder node. 18 | export type ResumeValue = string | number; 19 | -------------------------------------------------------------------------------- /client/app/chat/[id]/components/chatbot-node.tsx: -------------------------------------------------------------------------------- 1 | import { AgentState } from '../agent-types'; 2 | import { Bot, User } from 'lucide-react'; 3 | import { cn } from '@/lib/utils'; 4 | import ReactMarkdown from 'react-markdown'; 5 | import remarkGfm from 'remark-gfm'; 6 | import { Badge } from '@/components/ui/badge'; 7 | 8 | interface ChatbotNodeProps { 9 | nodeState: Partial<AgentState>; 10 | } 11 | 12 | export function ChatbotNode({ nodeState }: ChatbotNodeProps) { 13 | 14 | const getMessageIcon = (type: string) => { 15 | const baseClasses = "bg-gray-100 dark:bg-gray-800 text-gray-600 dark:text-gray-300 border-gray-200 dark:border-gray-700"; 16 | 17 | switch (type) { 18 | case 'ai': 19 | return { 20 | icon: , 21 | className: baseClasses 22 | }; 23 | case 'user': 24 | case 'human': 25 | return { 26 | icon: , 27 | className: baseClasses 28 | }; 29 | default: 30 | return { 31 | icon: , 32 | className: baseClasses 33 | }; 34 | } 35 | }; 36 | 37 | return ( 38 |
39 | {nodeState?.messages?.map((msg, index) => ( 40 | // When restoring data from checkpoint history, user input messages do not have an id. 41 | // Use index as key to avoid React warnings. 42 |
43 |
47 | {getMessageIcon(msg.type).icon} 48 |
49 |
50 |
51 |

{children}

, 55 | code: ({ children, className }) => { 56 | const isInline = !className?.includes('language-'); 57 | return ( 58 | 62 | {children} 63 | 64 | ); 65 | }, 66 | pre: ({ children }) =>
{children}
, 67 | ul: ({ children }) =>
    {children}
, 68 | ol: ({ children }) =>
    {children}
, 69 | }} 70 | > 71 | {msg.content} 72 |
73 |
74 | {msg.tool_calls && msg.tool_calls.length > 0 && ( 75 |
76 | Tool calls: 77 | {msg.tool_calls?.map((toolCall) => ( 78 |
79 | {toolCall.name} 80 |
81 | ))} 82 |
83 | )} 84 |
85 |
86 | ))} 87 |
88 | ) 89 | } -------------------------------------------------------------------------------- /client/app/chat/[id]/components/checkpoint-card.tsx: -------------------------------------------------------------------------------- 1 | import { Button } from '@/components/ui/button'; 2 | import { AppCheckpoint, ReplayAgentInput } from '@/hooks/useLangGraphAgent/types'; 3 | import { AgentState, InterruptValue } from '../agent-types'; 4 | import { Check, Redo, AlertCircle } from 'lucide-react'; 5 | import { 6 | Popover, 7 | PopoverContent, 8 | PopoverTrigger, 9 | } from "@/components/ui/popover" 10 | import { JsonView, defaultStyles } from 'react-json-view-lite'; 11 | import { cn } from '@/lib/utils'; 12 | 13 | interface CheckpointCardProps { 14 | thread_id: string; 15 | appCheckpoint: AppCheckpoint; 16 | replayHandler: (agentInput: ReplayAgentInput) => void; 17 | } 18 | 19 | export function CheckpointCard({ thread_id, appCheckpoint: node, replayHandler }: CheckpointCardProps) { 20 | return ( 21 |
25 | {node.error ? ( 26 | 27 | ) : ( 28 | 29 | )} 30 |
31 | checkpoint id: {node.checkpointConfig.configurable.checkpoint_id} 32 |
33 | next nodes: {node.nodes.map(n => n.name).join(', ')} 34 |
35 | 36 | 37 | 40 | 41 | 42 |
43 | 50 |
51 |
52 |
53 | 54 | 55 | 58 | 59 | 60 |
61 | 68 |
69 |
70 |
71 | 80 |
81 |
82 |
83 |
84 | ) 85 | } -------------------------------------------------------------------------------- /client/app/chat/[id]/components/node-card.tsx: -------------------------------------------------------------------------------- 1 | import { GraphNode } from "@/hooks/useLangGraphAgent/types"; 2 | import { AgentState } from "../agent-types"; 3 | import { Button } from '@/components/ui/button'; 4 | import { 5 | Popover, 6 | PopoverContent, 7 | PopoverTrigger, 8 | } from "@/components/ui/popover" 9 | import { JsonView, defaultStyles } from 'react-json-view-lite'; 10 | 11 | export function NodeCard({ node }: { node: GraphNode }) { 12 | return ( 13 |
14 | node: {node.name} 15 |
16 | 17 | 18 | 21 | 22 | 23 |
24 | 31 |
32 |
33 |
34 |
35 |
36 | ); 37 | } -------------------------------------------------------------------------------- /client/app/chat/[id]/components/reminder.tsx: -------------------------------------------------------------------------------- 1 | import { Card, CardHeader, CardFooter, CardTitle } from "@/components/ui/card"; 2 | import { Button } from "@/components/ui/button"; 3 | import { useState } from "react"; 4 | import { Loader2 } from "lucide-react"; 5 | 6 | interface ReminderProps { 7 | interruptValue: string; 8 | onResume: (resumeValue: string) => void; 9 | } 10 | 11 | export default function Reminder({ interruptValue, onResume }: ReminderProps) { 12 | const [isLoading, setIsLoading] = useState(false); 13 | 14 | // Do not show the confirmation after user action 15 | if (!interruptValue) { 16 | return null; 17 | } 18 | 19 | const handleAction = (action: "approve" | "cancel") => { 20 | setIsLoading(true); 21 | onResume(action); 22 | }; 23 | 24 | return ( 25 |
26 | 27 | 28 | {interruptValue} 29 |

Are you sure you want to create a reminder?

30 |
31 | 32 | {isLoading && } 33 |
34 | 41 | 47 |
48 |
49 |
50 |
51 | ); 52 | } -------------------------------------------------------------------------------- /client/app/chat/[id]/components/weather/cloudy.tsx: -------------------------------------------------------------------------------- 1 | "use client" 2 | 3 | import { Cloud, Droplets, Wind } from "lucide-react" 4 | import { Card } from "@/components/ui/card" 5 | 6 | export default function Cloudy() { 7 | return ( 8 | 9 | {/* Gradient Background */} 10 |
11 | 12 | {/* Content Container */} 13 |
14 | {/* Top Section */} 15 |
16 |
17 | 18 |
19 |
20 |

Cloudy

21 |

Today's Forecast

22 |
23 |
24 | 25 | {/* Bottom Section */} 26 |
27 |
28 | 29 | 60% 30 |
31 |
32 | 33 | 15 km/h 34 |
35 |
22°C
36 |
37 |
38 | 39 | ) 40 | } 41 | 42 | -------------------------------------------------------------------------------- /client/app/chat/[id]/components/weather/rainy.tsx: -------------------------------------------------------------------------------- 1 | "use client" 2 | 3 | import { Cloud, Droplets, Wind } from "lucide-react" 4 | import { Card } from "@/components/ui/card" 5 | import { motion } from "framer-motion" 6 | 7 | export default function Rainy() { 8 | return ( 9 | 10 | {/* Gradient Background */} 11 |
12 | 13 | {/* Content Container */} 14 |
15 | {/* Top Section */} 16 |
17 |
18 | 19 |
20 |
21 |

Rainy

22 |

Today's Forecast

23 |
24 |
25 | 26 | {/* Bottom Section */} 27 |
28 |
29 | 30 | 75% 31 |
32 |
33 | 34 | 12 km/h 35 |
36 |
18°C
37 |
38 | 39 | {/* Animated Rain Effect */} 40 |
41 | {[...Array(10)].map((_, i) => ( 42 | 58 | ))} 59 | {[...Array(10)].map((_, i) => ( 60 | 76 | ))} 77 |
78 |
79 | 80 | ) 81 | } 82 | 83 | -------------------------------------------------------------------------------- /client/app/chat/[id]/components/weather/snowy.tsx: -------------------------------------------------------------------------------- 1 | "use client" 2 | 3 | import { Snowflake, Thermometer, Wind } from "lucide-react" 4 | import { Card } from "@/components/ui/card" 5 | import { motion } from "framer-motion" 6 | 7 | export default function Snowy() { 8 | return ( 9 | 10 | {/* Gradient Background */} 11 |
12 | 13 | {/* Content Container */} 14 |
15 | {/* Top Section */} 16 |
17 |
18 | 19 |
20 |
21 |

Snowy

22 |

Today's Forecast

23 |
24 |
25 | 26 | {/* Bottom Section */} 27 |
28 |
29 | 30 | -2°C 31 |
32 |
33 | 34 | 10 km/h 35 |
36 |
5 cm
37 |
38 | 39 | {/* Animated Snowfall Effect */} 40 |
41 | {[...Array(15)].map((_, i) => ( 42 | 65 | 66 | 67 | 68 | 69 | 70 | ))} 71 | {[...Array(15)].map((_, i) => ( 72 | 95 | 96 | 97 | 98 | 99 | 100 | ))} 101 |
102 |
103 | 104 | ) 105 | } 106 | 107 | -------------------------------------------------------------------------------- /client/app/chat/[id]/components/weather/sunny.tsx: -------------------------------------------------------------------------------- 1 | "use client" 2 | 3 | import { Sun, Thermometer, Wind } from "lucide-react" 4 | import { Card } from "@/components/ui/card" 5 | import { motion } from "framer-motion" 6 | 7 | export default function Sunny() { 8 | return ( 9 | 10 | {/* Gradient Background */} 11 |
12 | 13 | {/* Content Container */} 14 |
15 | {/* Top Section */} 16 |
17 |
18 | {/* Animated Sun Rays Effect */} 19 | {[...Array(6)].map((_, i) => ( 20 | 41 | ))} 42 | 43 |
44 |
45 |

Sunny

46 |

Today's Forecast

47 |
48 |
49 | 50 | {/* Bottom Section */} 51 |
52 |
53 | 54 | UV 8 55 |
56 |
57 | 58 | 8 km/h 59 |
60 |
28°C
61 |
62 |
63 | 64 | ) 65 | } 66 | 67 | -------------------------------------------------------------------------------- /client/app/chat/[id]/components/weather/weather-node.tsx: -------------------------------------------------------------------------------- 1 | import { AgentState } from "../../agent-types"; 2 | import { Loader2 } from "lucide-react"; 3 | import { Card, CardContent } from "@/components/ui/card"; 4 | import Rainy from "./rainy"; 5 | import Sunny from "./sunny"; 6 | import Cloudy from "./cloudy"; 7 | import Snowy from "./snowy"; 8 | 9 | interface WeatherNodeProps { 10 | nodeState: Partial<AgentState>; 11 | } 12 | 13 | export default function WeatherNode({ nodeState }: WeatherNodeProps) { 14 | if (nodeState?.weather_forecast?.[0]?.search_status) { 15 | return ( 16 |
17 | 18 | 19 |
20 | 21 |
{nodeState?.weather_forecast?.[0]?.search_status}
22 |
23 |
24 |
25 |
26 | ); 27 | } 28 | 29 | if (!nodeState?.weather_forecast?.[0]?.result) { 30 | return null; 31 | } 32 | 33 | const WeatherComponents = { 34 | Sunny, 35 | Cloudy, 36 | Rainy, 37 | Snowy, 38 | } as const; 39 | 40 | const WeatherComponent = WeatherComponents[nodeState?.weather_forecast?.[0].result]; 41 | 42 | return ( 43 |
44 | 45 |
46 | ); 47 | } -------------------------------------------------------------------------------- /client/app/chat/[id]/page.tsx: -------------------------------------------------------------------------------- 1 | 'use client'; 2 | 3 | import { useState, useEffect, useRef } from 'react'; 4 | import { useParams } from 'next/navigation'; 5 | import { Button } from '@/components/ui/button'; 6 | import { Textarea } from "@/components/ui/textarea"; 7 | import { ArrowUp, Square, ArrowDown, Ellipsis, AlertTriangle } from "lucide-react"; 8 | import { useLangGraphAgent } from '@/hooks/useLangGraphAgent/useLangGraphAgent'; 9 | import { AppCheckpoint, GraphNode } from '@/hooks/useLangGraphAgent/types'; 10 | import { AgentState, InterruptValue, ResumeValue } from './agent-types'; 11 | import { CheckpointCard } from './components/checkpoint-card'; 12 | import { ChatbotNode } from './components/chatbot-node'; 13 | import { Checkbox } from "@/components/ui/checkbox"; 14 | import WeatherNode from './components/weather/weather-node'; 15 | import Reminder from './components/reminder'; 16 | import { NodeCard } from './components/node-card'; 17 | 18 | export default function ChatPage() { 19 | const params = useParams<{ id: string }>(); 20 | const messagesContainerRef = useRef(null); 21 | const inputRef = useRef(null); 22 | 23 | const [threadId] = useState(params.id); 24 | const [inputValue, setInputValue] = useState(''); 25 | const [showScrollButton, setShowScrollButton] = useState(false); 26 | const [shouldAutoScroll, setShouldAutoScroll] = useState(true); 27 | const [showNodesinfo, setShowNodesinfo] = useState(false); 28 | const [restoreError, setRestoreError] = useState(false); 29 | 30 | const exampleMessages = [ 31 | "What's the weather in SF today?", 32 | "Set a reminder to call John", 33 | "Tell me a joke", 34 | "What can you do?" 35 | ]; 36 | 37 | const onCheckpointStart = (checkpoint: AppCheckpoint) => { 38 | console.log('Checkpoint started:', checkpoint.nodes); 39 | } 40 | 41 | const onCheckpointEnd = (checkpoint: AppCheckpoint) => { 42 | console.log('Checkpoint ended:', checkpoint.nodes); 43 | 44 | // Example of how to run application logic based on the agent flow, e.g. maintaining a reminders list.
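// checkpoint.nodes contains the graph nodes executed for this checkpoint.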
45 | if (checkpoint.nodes.some(n => n.name === 'reminder')) { 46 | console.log('Reminder created'); 47 | } 48 | } 49 | 50 | const onCheckpointStateUpdate = (checkpoint: AppCheckpoint) => { 51 | console.log('Checkpoint intermediate state updated:', checkpoint.nodes, checkpoint.state); 52 | } 53 | 54 | const { status, appCheckpoints, run, resume, replay, restore, stop, restoring } = useLangGraphAgent({ onCheckpointStart, onCheckpointEnd, onCheckpointStateUpdate }); 55 | 56 | // Restore chat on page open 57 | useEffect(() => { 58 | if (threadId) { 59 | restore(threadId).catch(() => { 60 | setRestoreError(true); 61 | }); 62 | } 63 | }, [threadId]); 64 | 65 | // Focus input on page load and after message is sent 66 | useEffect(() => { 67 | const isInputEnabled = status !== 'running' && !restoring; 68 | if (inputRef.current && isInputEnabled) { 69 | inputRef.current.focus(); 70 | } 71 | }, [status, restoring]); 72 | 73 | // Add scroll event listener 74 | useEffect(() => { 75 | const messagesContainer = messagesContainerRef.current; 76 | if (messagesContainer) { 77 | messagesContainer.addEventListener('scroll', handleScrollUpdate); 78 | return () => messagesContainer.removeEventListener('scroll', handleScrollUpdate); 79 | } 80 | }, []); 81 | 82 | // Auto-scroll when new nodes appear 83 | useEffect(() => { 84 | if (shouldAutoScroll) { 85 | scrollToBottom(); 86 | } 87 | }, [appCheckpoints, shouldAutoScroll]); 88 | 89 | const handleScrollUpdate = () => { 90 | if (messagesContainerRef.current) { 91 | const { scrollTop, scrollHeight, clientHeight } = messagesContainerRef.current; 92 | const isAtBottom = scrollHeight - scrollTop - clientHeight < 100; // 100px threshold 93 | setShowScrollButton(!isAtBottom); 94 | 95 | if (isAtBottom) { 96 | setShouldAutoScroll(true); 97 | } else { 98 | setShouldAutoScroll(false); 99 | } 100 | } 101 | }; 102 | 103 | const scrollToBottom = () => { 104 | if (messagesContainerRef.current) { 105 | messagesContainerRef.current.scrollTo({ 106 | top: messagesContainerRef.current.scrollHeight, 107 | behavior: 'smooth' 108 | }); 109 | } 110 | }; 111 | 112 | const handleExampleClick = (message: string) => { 113 | if (status !== 'running' && !restoring) { 114 | setRestoreError(false); 115 | run({ thread_id: threadId, state: { "messages": [{ type: 'user', content: message }] } }); 116 | } 117 | }; 118 | 119 | const handleResume = (resumeValue: ResumeValue) => { 120 | resume({ thread_id: threadId, resume: resumeValue }); 121 | } 122 | 123 | const renderCheckpointError = (checkpoint: AppCheckpoint): React.ReactNode => { 124 | return ( 125 |
126 | 127 | Error in {checkpoint.checkpointConfig.configurable.checkpoint_id} 128 |
129 | ); 130 | } 131 | 132 | const renderNode = (checkpoint: AppCheckpoint, node: GraphNode): React.ReactNode => { 133 | switch (node.name) { 134 | case '__start__': 135 | case 'chatbot': 136 | return ; 137 | case 'weather': 138 | return ; 139 | case 'reminder': 140 | return ; 141 | default: 142 | return null; 143 | } 144 | } 145 | 146 | return ( 147 |
148 |
149 |
150 | setShowNodesinfo(checked === true)} 154 | /> 155 | 161 |
162 |
163 | 164 |
168 |
169 | {appCheckpoints.map((checkpoint) => ( 170 |
171 | {showNodesinfo && ( 172 | 177 | )} 178 | {checkpoint.error ? renderCheckpointError(checkpoint) : checkpoint.nodes.map((node, nodeIndex) => ( 179 |
180 | {showNodesinfo && } 181 | {renderNode(checkpoint, node)} 182 |
183 | ))} 184 |
185 | ))} 186 | {(status === 'running' || restoring) && ( 187 |
188 | 189 |
190 | )} 191 | {(status === 'error') && ( 192 |
193 | 194 | Error running agent. 195 |
196 | )} 197 | {restoreError && ( 198 |
199 | 200 | Error restoring agent. Check if agent server is running. 201 |
202 | )} 203 |
204 | 205 | {showScrollButton && ( 206 | 214 | )} 215 |
216 | 217 |
218 |
219 |
220 | {exampleMessages.map((message, index) => ( 221 | 231 | ))} 232 |
233 |
234 |