├── my_agent
│   ├── __init__.py
│   ├── utils
│   │   ├── __init__.py
│   │   ├── tools.py
│   │   ├── state.py
│   │   └── nodes.py
│   ├── requirements.txt
│   └── agent.py
├── .gitignore
├── .env.example
├── static
│   └── agent_ui.png
├── langgraph.json
└── README.md
--------------------------------------------------------------------------------
/my_agent/__init__.py:
--------------------------------------------------------------------------------

--------------------------------------------------------------------------------
/my_agent/utils/__init__.py:
--------------------------------------------------------------------------------

--------------------------------------------------------------------------------
/.gitignore:
--------------------------------------------------------------------------------
.env
.ipynb_checkpoints
.langgraph-data
--------------------------------------------------------------------------------
/.env.example:
--------------------------------------------------------------------------------
ANTHROPIC_API_KEY=...
TAVILY_API_KEY=...
OPENAI_API_KEY=...
--------------------------------------------------------------------------------
/static/agent_ui.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/langchain-ai/langgraph-example/HEAD/static/agent_ui.png
--------------------------------------------------------------------------------
/my_agent/requirements.txt:
--------------------------------------------------------------------------------
langgraph
langchain_anthropic
tavily-python
langchain_community
langchain_openai
--------------------------------------------------------------------------------
/my_agent/utils/tools.py:
--------------------------------------------------------------------------------
from langchain_community.tools.tavily_search import TavilySearchResults

# The single tool available to the agent: a Tavily web search returning
# at most one result per query.
tools = [TavilySearchResults(max_results=1)]
--------------------------------------------------------------------------------
/langgraph.json:
--------------------------------------------------------------------------------
{
  "dependencies": ["./my_agent"],
  "graphs": {
    "agent": "./my_agent/agent.py:graph"
  },
  "env": ".env"
}
--------------------------------------------------------------------------------
/my_agent/utils/state.py:
--------------------------------------------------------------------------------
from langgraph.graph import add_messages
from langchain_core.messages import BaseMessage
from typing import TypedDict, Annotated, Sequence

# The graph state: a message history that `add_messages` appends to
# (rather than overwrites) on each node update.
class AgentState(TypedDict):
    messages: Annotated[Sequence[BaseMessage], add_messages]
--------------------------------------------------------------------------------
/README.md:
--------------------------------------------------------------------------------
# LangGraph Cloud Example

![](static/agent_ui.png)

This is an example agent to deploy with LangGraph Cloud.

> [!TIP]
> If you would rather use `pyproject.toml` for managing dependencies in your LangGraph Cloud project, please check out [this repository](https://github.com/langchain-ai/langgraph-example-pyproject).

[LangGraph](https://github.com/langchain-ai/langgraph) is a library for building stateful, multi-actor applications with LLMs. Its main use cases are conversational agents; long-running, multi-step LLM applications; and any LLM application that would benefit from built-in support for persistent checkpoints, cycles, and human-in-the-loop interactions (i.e., LLM and human collaboration).

LangGraph Cloud shortens time-to-market for developers using LangGraph: a single command starts a production-ready HTTP microservice for your LangGraph applications, with built-in persistence. This lets you focus on the logic of your graph and leave the scaling and API design to us. The API is inspired by the OpenAI Assistants API and is designed to fit in alongside your existing services.

To deploy this agent to LangGraph Cloud, first fork this repo, then follow the instructions [here](https://langchain-ai.github.io/langgraph/cloud/).
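
Once deployed, you can call the service with the [LangGraph SDK](https://langchain-ai.github.io/langgraph/cloud/). The snippet below is a minimal sketch rather than part of this example: the deployment URL is a placeholder, and it assumes the `agent` graph name from `langgraph.json` doubles as the assistant ID.

```python
import asyncio

from langgraph_sdk import get_client


async def main():
    # Placeholder URL: substitute your own deployment's URL.
    client = get_client(url="https://my-deployment.example.com")

    # Threads persist conversation state between runs.
    thread = await client.threads.create()

    # Stream state updates from the `agent` graph as it processes the message.
    async for chunk in client.runs.stream(
        thread["thread_id"],
        "agent",  # graph name registered in langgraph.json
        input={"messages": [{"role": "user", "content": "What is LangGraph?"}]},
        stream_mode="updates",
    ):
        print(chunk.event, chunk.data)


asyncio.run(main())
```
--------------------------------------------------------------------------------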
/my_agent/utils/nodes.py:
--------------------------------------------------------------------------------
from functools import lru_cache
from langchain_anthropic import ChatAnthropic
from langchain_openai import ChatOpenAI
from my_agent.utils.tools import tools
from langgraph.prebuilt import ToolNode


@lru_cache(maxsize=4)
def _get_model(model_name: str):
    if model_name == "openai":
        model = ChatOpenAI(temperature=0, model_name="gpt-4o")
    elif model_name == "anthropic":
        model = ChatAnthropic(temperature=0, model_name="claude-3-sonnet-20240229")
    else:
        raise ValueError(f"Unsupported model type: {model_name}")

    model = model.bind_tools(tools)
    return model


# Define the function that determines whether to continue or not
def should_continue(state):
    messages = state["messages"]
    last_message = messages[-1]
    # If there are no tool calls, then we finish
    if not last_message.tool_calls:
        return "end"
    # Otherwise, if there are, we continue
    else:
        return "continue"


system_prompt = """Be a helpful assistant"""


# Define the function that calls the model
def call_model(state, config):
    messages = state["messages"]
    messages = [{"role": "system", "content": system_prompt}] + messages
    model_name = config.get("configurable", {}).get("model_name", "anthropic")
    model = _get_model(model_name)
    response = model.invoke(messages)
    # We return a list, because this will get added to the existing list
    return {"messages": [response]}


# Define the node that executes tools
tool_node = ToolNode(tools)
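
# ---------------------------------------------------------------------------
# Illustrative routing check (an addition, not part of the original example).
# A minimal sketch of how `should_continue` reads the last message: an AI
# reply with no tool calls ends the run, while a pending tool call routes
# back to the tool node. The tool-call payload below is made up for
# illustration.
if __name__ == "__main__":
    from langchain_core.messages import AIMessage

    print(should_continue({"messages": [AIMessage(content="Done!")]}))  # -> "end"

    tool_call = {"name": "tavily_search_results_json", "args": {"query": "LangGraph"}, "id": "call_1"}
    print(should_continue({"messages": [AIMessage(content="", tool_calls=[tool_call])]}))  # -> "continue"
--------------------------------------------------------------------------------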
/my_agent/agent.py:
--------------------------------------------------------------------------------
from typing import TypedDict, Literal

from langgraph.graph import StateGraph, END
from my_agent.utils.nodes import call_model, should_continue, tool_node
from my_agent.utils.state import AgentState


# Define the config
class GraphConfig(TypedDict):
    model_name: Literal["anthropic", "openai"]


# Define a new graph
workflow = StateGraph(AgentState, config_schema=GraphConfig)

# Define the two nodes we will cycle between
workflow.add_node("agent", call_model)
workflow.add_node("action", tool_node)

# Set the entrypoint as `agent`
# This means that this node is the first one called
workflow.set_entry_point("agent")

# We now add a conditional edge
workflow.add_conditional_edges(
    # First, we define the start node. We use `agent`.
    # This means these are the edges taken after the `agent` node is called.
    "agent",
    # Next, we pass in the function that will determine which node is called next.
    should_continue,
    # Finally we pass in a mapping.
    # The keys are strings, and the values are other nodes.
    # END is a special node marking that the graph should finish.
    # What will happen is we will call `should_continue`, and then the output of that
    # will be matched against the keys in this mapping.
    # Based on which one it matches, that node will then be called.
    {
        # If `continue`, then we call the tool node.
        "continue": "action",
        # Otherwise we finish.
        "end": END,
    },
)

# We now add a normal edge from `action` to `agent`.
# This means that after `action` is called, the `agent` node is called next.
workflow.add_edge("action", "agent")

# Finally, we compile it!
# This compiles it into a LangChain Runnable,
# meaning you can use it as you would any other runnable
graph = workflow.compile()
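
# ---------------------------------------------------------------------------
# Illustrative local smoke test (an addition, not part of the original
# example). Assumes ANTHROPIC_API_KEY and TAVILY_API_KEY are set in the
# environment; the `model_name` value shows how GraphConfig overrides the
# "anthropic" default in call_model.
if __name__ == "__main__":
    from langchain_core.messages import HumanMessage

    result = graph.invoke(
        {"messages": [HumanMessage(content="What is LangGraph?")]},
        config={"configurable": {"model_name": "anthropic"}},
    )
    print(result["messages"][-1].content)
--------------------------------------------------------------------------------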