├── .github ├── actions │ └── uv_setup │ │ └── action.yml └── workflows │ ├── _lint.yml │ ├── _test.yml │ ├── ci.yml │ └── release.yml ├── .gitignore ├── LICENSE ├── Makefile ├── README.md ├── examples ├── customer_support │ ├── README.md │ ├── langgraph.json │ ├── pyproject.toml │ └── src │ │ └── agent │ │ ├── customer_support.ipynb │ │ └── customer_support.py └── research │ ├── README.md │ ├── langgraph.json │ ├── pyproject.toml │ └── src │ └── agent │ ├── agent.ipynb │ ├── agent.py │ ├── configuration.py │ ├── prompts.py │ └── utils.py ├── langgraph_swarm ├── __init__.py ├── handoff.py ├── py.typed └── swarm.py ├── pyproject.toml ├── static └── img │ └── swarm.png ├── tests ├── __init__.py ├── test_import.py └── test_swarm.py └── uv.lock /.github/actions/uv_setup/action.yml: -------------------------------------------------------------------------------- 1 | # TODO: https://docs.astral.sh/uv/guides/integration/github/#caching 2 | 3 | name: uv-install 4 | description: Set up Python and uv 5 | 6 | inputs: 7 | python-version: 8 | description: Python version, supporting MAJOR.MINOR only 9 | required: true 10 | 11 | env: 12 | UV_VERSION: "0.5.25" 13 | 14 | runs: 15 | using: composite 16 | steps: 17 | - name: Install uv and set the python version 18 | uses: astral-sh/setup-uv@v5 19 | with: 20 | version: ${{ env.UV_VERSION }} 21 | python-version: ${{ inputs.python-version }} 22 | -------------------------------------------------------------------------------- /.github/workflows/_lint.yml: -------------------------------------------------------------------------------- 1 | name: lint 2 | 3 | on: 4 | workflow_call: 5 | inputs: 6 | working-directory: 7 | required: true 8 | type: string 9 | description: "From which folder this pipeline executes" 10 | python-version: 11 | required: true 12 | type: string 13 | description: "Python version to use" 14 | 15 | env: 16 | WORKDIR: ${{ inputs.working-directory == '' && '.' || inputs.working-directory }} 17 | 18 | # This env var allows us to get inline annotations when ruff has complaints. 
19 | RUFF_OUTPUT_FORMAT: github 20 | 21 | UV_FROZEN: "true" 22 | 23 | jobs: 24 | build: 25 | name: "make lint #${{ inputs.python-version }}" 26 | runs-on: ubuntu-latest 27 | timeout-minutes: 20 28 | steps: 29 | - uses: actions/checkout@v4 30 | 31 | - name: Set up Python ${{ inputs.python-version }} + uv 32 | uses: "./.github/actions/uv_setup" 33 | with: 34 | python-version: ${{ inputs.python-version }} 35 | 36 | - name: Install dependencies 37 | working-directory: ${{ inputs.working-directory }} 38 | run: | 39 | uv sync --group test 40 | 41 | - name: Analysing the code with our lint 42 | working-directory: ${{ inputs.working-directory }} 43 | run: | 44 | make lint 45 | -------------------------------------------------------------------------------- /.github/workflows/_test.yml: -------------------------------------------------------------------------------- 1 | name: test 2 | 3 | on: 4 | workflow_call: 5 | inputs: 6 | working-directory: 7 | required: true 8 | type: string 9 | description: "From which folder this pipeline executes" 10 | python-version: 11 | required: true 12 | type: string 13 | description: "Python version to use" 14 | 15 | env: 16 | UV_FROZEN: "true" 17 | UV_NO_SYNC: "true" 18 | 19 | jobs: 20 | build: 21 | defaults: 22 | run: 23 | working-directory: ${{ inputs.working-directory }} 24 | runs-on: ubuntu-latest 25 | timeout-minutes: 20 26 | name: "make test #${{ inputs.python-version }}" 27 | steps: 28 | - uses: actions/checkout@v4 29 | 30 | - name: Set up Python ${{ inputs.python-version }} + uv 31 | uses: "./.github/actions/uv_setup" 32 | id: setup-python 33 | with: 34 | python-version: ${{ inputs.python-version }} 35 | - name: Install dependencies 36 | shell: bash 37 | run: uv sync --group test 38 | 39 | - name: Run core tests 40 | shell: bash 41 | run: | 42 | make test 43 | -------------------------------------------------------------------------------- /.github/workflows/ci.yml: -------------------------------------------------------------------------------- 1 | --- 2 | name: Run CI Tests 3 | 4 | on: 5 | push: 6 | branches: [ main ] 7 | pull_request: 8 | workflow_dispatch: # Allows to trigger the workflow manually in GitHub UI 9 | 10 | # If another push to the same PR or branch happens while this workflow is still running, 11 | # cancel the earlier run in favor of the next run. 12 | # 13 | # There's no point in testing an outdated version of the code. GitHub only allows 14 | # a limited number of job runners to be active at the same time, so it's better to cancel 15 | # pointless jobs early so that more useful jobs can run sooner. 16 | concurrency: 17 | group: ${{ github.workflow }}-${{ github.ref }} 18 | cancel-in-progress: true 19 | 20 | jobs: 21 | lint: 22 | strategy: 23 | matrix: 24 | # Only lint on the min and max supported Python versions. 25 | # It's extremely unlikely that there's a lint issue on any version in between 26 | # that doesn't show up on the min or max versions. 27 | # 28 | # GitHub rate-limits how many jobs can be running at any one time. 29 | # Starting new jobs is also relatively slow, 30 | # so linting on fewer versions makes CI faster. 31 | python-version: 32 | - "3.12" 33 | uses: 34 | ./.github/workflows/_lint.yml 35 | with: 36 | working-directory: . 37 | python-version: ${{ matrix.python-version }} 38 | secrets: inherit 39 | test: 40 | strategy: 41 | matrix: 42 | # Only lint on the min and max supported Python versions. 
43 | # It's extremely unlikely that there's a lint issue on any version in between 44 | # that doesn't show up on the min or max versions. 45 | # 46 | # GitHub rate-limits how many jobs can be running at any one time. 47 | # Starting new jobs is also relatively slow, 48 | # so linting on fewer versions makes CI faster. 49 | python-version: 50 | - "3.10" 51 | - "3.12" 52 | uses: 53 | ./.github/workflows/_test.yml 54 | with: 55 | working-directory: . 56 | python-version: ${{ matrix.python-version }} 57 | secrets: inherit 58 | 59 | -------------------------------------------------------------------------------- /.github/workflows/release.yml: -------------------------------------------------------------------------------- 1 | name: release 2 | run-name: Release ${{ inputs.working-directory }} by @${{ github.actor }} 3 | on: 4 | workflow_call: 5 | inputs: 6 | working-directory: 7 | required: true 8 | type: string 9 | description: "From which folder this pipeline executes" 10 | workflow_dispatch: 11 | inputs: 12 | working-directory: 13 | description: "From which folder this pipeline executes" 14 | default: "." 15 | dangerous-nonmain-release: 16 | required: false 17 | type: boolean 18 | default: false 19 | description: "Release from a non-main branch (danger!)" 20 | 21 | env: 22 | PYTHON_VERSION: "3.11" 23 | UV_FROZEN: "true" 24 | UV_NO_SYNC: "true" 25 | 26 | jobs: 27 | build: 28 | if: github.ref == 'refs/heads/main' || inputs.dangerous-nonmain-release 29 | environment: Scheduled testing 30 | runs-on: ubuntu-latest 31 | 32 | outputs: 33 | pkg-name: ${{ steps.check-version.outputs.pkg-name }} 34 | version: ${{ steps.check-version.outputs.version }} 35 | 36 | steps: 37 | - uses: actions/checkout@v4 38 | 39 | - name: Set up Python + uv 40 | uses: "./.github/actions/uv_setup" 41 | with: 42 | python-version: ${{ env.PYTHON_VERSION }} 43 | 44 | # We want to keep this build stage *separate* from the release stage, 45 | # so that there's no sharing of permissions between them. 46 | # The release stage has trusted publishing and GitHub repo contents write access, 47 | # and we want to keep the scope of that access limited just to the release job. 48 | # Otherwise, a malicious `build` step (e.g. via a compromised dependency) 49 | # could get access to our GitHub or PyPI credentials. 50 | # 51 | # Per the trusted publishing GitHub Action: 52 | # > It is strongly advised to separate jobs for building [...] 53 | # > from the publish job. 
54 | # https://github.com/pypa/gh-action-pypi-publish#non-goals 55 | - name: Build project for distribution 56 | run: uv build 57 | - name: Upload build 58 | uses: actions/upload-artifact@v4 59 | with: 60 | name: dist 61 | path: ${{ inputs.working-directory }}/dist/ 62 | 63 | - name: Check Version 64 | id: check-version 65 | shell: python 66 | working-directory: ${{ inputs.working-directory }} 67 | run: | 68 | import os 69 | import tomllib 70 | with open("pyproject.toml", "rb") as f: 71 | data = tomllib.load(f) 72 | pkg_name = data["project"]["name"] 73 | version = data["project"]["version"] 74 | with open(os.environ["GITHUB_OUTPUT"], "a") as f: 75 | f.write(f"pkg-name={pkg_name}\n") 76 | f.write(f"version={version}\n") 77 | publish: 78 | needs: 79 | - build 80 | runs-on: ubuntu-latest 81 | permissions: 82 | # This permission is used for trusted publishing: 83 | # https://blog.pypi.org/posts/2023-04-20-introducing-trusted-publishers/ 84 | # 85 | # Trusted publishing has to also be configured on PyPI for each package: 86 | # https://docs.pypi.org/trusted-publishers/adding-a-publisher/ 87 | id-token: write 88 | 89 | defaults: 90 | run: 91 | working-directory: ${{ inputs.working-directory }} 92 | 93 | steps: 94 | - uses: actions/checkout@v4 95 | 96 | - name: Set up Python + uv 97 | uses: "./.github/actions/uv_setup" 98 | with: 99 | python-version: ${{ env.PYTHON_VERSION }} 100 | 101 | - uses: actions/download-artifact@v4 102 | with: 103 | name: dist 104 | path: ${{ inputs.working-directory }}/dist/ 105 | 106 | - name: Publish package distributions to PyPI 107 | uses: pypa/gh-action-pypi-publish@release/v1 108 | with: 109 | packages-dir: ${{ inputs.working-directory }}/dist/ 110 | verbose: true 111 | print-hash: true 112 | # Temp workaround since attestations are on by default as of gh-action-pypi-publish v1.11.0 113 | attestations: false 114 | 115 | mark-release: 116 | needs: 117 | - build 118 | - publish 119 | runs-on: ubuntu-latest 120 | permissions: 121 | # This permission is needed by `ncipollo/release-action` to 122 | # create the GitHub release. 
123 | contents: write 124 | 125 | defaults: 126 | run: 127 | working-directory: ${{ inputs.working-directory }} 128 | 129 | steps: 130 | - uses: actions/checkout@v4 131 | 132 | - name: Set up Python + uv 133 | uses: "./.github/actions/uv_setup" 134 | with: 135 | python-version: ${{ env.PYTHON_VERSION }} 136 | 137 | - uses: actions/download-artifact@v4 138 | with: 139 | name: dist 140 | path: ${{ inputs.working-directory }}/dist/ 141 | 142 | - name: Create Tag 143 | uses: ncipollo/release-action@v1 144 | with: 145 | artifacts: "dist/*" 146 | token: ${{ secrets.GITHUB_TOKEN }} 147 | generateReleaseNotes: true 148 | tag: ${{needs.build.outputs.pkg-name}}==${{ needs.build.outputs.version }} 149 | body: ${{ needs.release-notes.outputs.release-body }} 150 | commit: main 151 | makeLatest: true -------------------------------------------------------------------------------- /.gitignore: -------------------------------------------------------------------------------- 1 | # Pyenv 2 | .python-version 3 | 4 | # Byte-compiled / optimized / DLL files 5 | __pycache__/ 6 | *.py[cod] 7 | *$py.class 8 | 9 | # C extensions 10 | *.so 11 | 12 | # Distribution / packaging 13 | .Python 14 | build/ 15 | develop-eggs/ 16 | dist/ 17 | downloads/ 18 | eggs/ 19 | .eggs/ 20 | lib/ 21 | lib64/ 22 | parts/ 23 | sdist/ 24 | var/ 25 | wheels/ 26 | share/python-wheels/ 27 | *.egg-info/ 28 | .installed.cfg 29 | *.egg 30 | MANIFEST 31 | 32 | # Environments 33 | .venv 34 | .env 35 | 36 | # mypy 37 | .mypy_cache/ 38 | .dmypy.json 39 | dmypy.json 40 | 41 | .langgraph_api 42 | -------------------------------------------------------------------------------- /LICENSE: -------------------------------------------------------------------------------- 1 | MIT License 2 | 3 | Copyright (c) 2025 LangChain, Inc. 4 | 5 | Permission is hereby granted, free of charge, to any person obtaining a copy 6 | of this software and associated documentation files (the "Software"), to deal 7 | in the Software without restriction, including without limitation the rights 8 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell 9 | copies of the Software, and to permit persons to whom the Software is 10 | furnished to do so, subject to the following conditions: 11 | 12 | The above copyright notice and this permission notice shall be included in all 13 | copies or substantial portions of the Software. 14 | 15 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 16 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 17 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE 18 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 19 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, 20 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE 21 | SOFTWARE. 22 | -------------------------------------------------------------------------------- /Makefile: -------------------------------------------------------------------------------- 1 | .PHONY: all lint format test help 2 | 3 | # Default target executed when no arguments are given to make. 4 | all: help 5 | 6 | ###################### 7 | # TESTING AND COVERAGE 8 | ###################### 9 | 10 | # Define a variable for the test file path. 11 | TEST_FILE ?= tests/ 12 | 13 | test: 14 | uv run pytest --disable-socket --allow-unix-socket $(TEST_FILE) 15 | 16 | test_watch: 17 | uv run ptw . 
-- $(TEST_FILE) 18 | 19 | 20 | ###################### 21 | # LINTING AND FORMATTING 22 | ###################### 23 | 24 | # Define a variable for Python and notebook files. 25 | lint format: PYTHON_FILES=langgraph_swarm/ tests/ 26 | lint_diff format_diff: PYTHON_FILES=$(shell git diff --relative=langgraph_swarm/ tests/ --name-only --diff-filter=d master | grep -E '\.py$$|\.ipynb$$') 27 | 28 | lint lint_diff: 29 | [ "$(PYTHON_FILES)" = "" ] || uv run ruff format $(PYTHON_FILES) --diff 30 | [ "$(PYTHON_FILES)" = "" ] || uv run ruff check $(PYTHON_FILES) --diff 31 | # [ "$(PYTHON_FILES)" = "" ] || uv run mypy $(PYTHON_FILES) 32 | 33 | format format_diff: 34 | [ "$(PYTHON_FILES)" = "" ] || uv run ruff check --fix $(PYTHON_FILES) 35 | [ "$(PYTHON_FILES)" = "" ] || uv run ruff format $(PYTHON_FILES) 36 | 37 | 38 | 39 | ###################### 40 | # HELP 41 | ###################### 42 | 43 | help: 44 | @echo '====================' 45 | @echo '-- LINTING --' 46 | @echo 'format - run code formatters' 47 | @echo 'lint - run linters' 48 | @echo '-- TESTS --' 49 | @echo 'test - run unit tests' 50 | @echo 'test TEST_FILE= - run all tests in file' 51 | @echo '-- DOCUMENTATION tasks are from the top-level Makefile --' 52 | 53 | 54 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | # 🤖 LangGraph Multi-Agent Swarm 2 | 3 | A Python library for creating swarm-style multi-agent systems using [LangGraph](https://github.com/langchain-ai/langgraph). A swarm is a type of [multi-agent](https://langchain-ai.github.io/langgraph/concepts/multi_agent) architecture where agents dynamically hand off control to one another based on their specializations. The system remembers which agent was last active, ensuring that on subsequent interactions, the conversation resumes with that agent. 
4 | 5 | ![Swarm](static/img/swarm.png) 6 | 7 | ## Features 8 | 9 | - 🤖 **Multi-agent collaboration** - Enable specialized agents to work together and hand off context to each other 10 | - 🛠️ **Customizable handoff tools** - Built-in tools for communication between agents 11 | 12 | This library is built on top of [LangGraph](https://github.com/langchain-ai/langgraph), a powerful framework for building agent applications, and comes with out-of-box support for [streaming](https://langchain-ai.github.io/langgraph/how-tos/#streaming), [short-term and long-term memory](https://langchain-ai.github.io/langgraph/concepts/memory/) and [human-in-the-loop](https://langchain-ai.github.io/langgraph/concepts/human_in_the_loop/) 13 | 14 | ## Installation 15 | 16 | ```bash 17 | pip install langgraph-swarm 18 | ``` 19 | 20 | ## Quickstart 21 | 22 | ```bash 23 | pip install langgraph-swarm langchain-openai 24 | 25 | export OPENAI_API_KEY= 26 | ``` 27 | 28 | ```python 29 | from langchain_openai import ChatOpenAI 30 | 31 | from langgraph.checkpoint.memory import InMemorySaver 32 | from langgraph.prebuilt import create_react_agent 33 | from langgraph_swarm import create_handoff_tool, create_swarm 34 | 35 | model = ChatOpenAI(model="gpt-4o") 36 | 37 | def add(a: int, b: int) -> int: 38 | """Add two numbers""" 39 | return a + b 40 | 41 | alice = create_react_agent( 42 | model, 43 | [add, create_handoff_tool(agent_name="Bob")], 44 | prompt="You are Alice, an addition expert.", 45 | name="Alice", 46 | ) 47 | 48 | bob = create_react_agent( 49 | model, 50 | [create_handoff_tool(agent_name="Alice", description="Transfer to Alice, she can help with math")], 51 | prompt="You are Bob, you speak like a pirate.", 52 | name="Bob", 53 | ) 54 | 55 | checkpointer = InMemorySaver() 56 | workflow = create_swarm( 57 | [alice, bob], 58 | default_active_agent="Alice" 59 | ) 60 | app = workflow.compile(checkpointer=checkpointer) 61 | 62 | config = {"configurable": {"thread_id": "1"}} 63 | turn_1 = app.invoke( 64 | {"messages": [{"role": "user", "content": "i'd like to speak to Bob"}]}, 65 | config, 66 | ) 67 | print(turn_1) 68 | turn_2 = app.invoke( 69 | {"messages": [{"role": "user", "content": "what's 5 + 7?"}]}, 70 | config, 71 | ) 72 | print(turn_2) 73 | ``` 74 | 75 | ## Memory 76 | 77 | You can add [short-term](https://langchain-ai.github.io/langgraph/how-tos/persistence/) and [long-term](https://langchain-ai.github.io/langgraph/how-tos/cross-thread-persistence/) [memory](https://langchain-ai.github.io/langgraph/concepts/memory/) to your swarm multi-agent system. Since `create_swarm()` returns an instance of `StateGraph` that needs to be compiled before use, you can directly pass a [checkpointer](https://langchain-ai.github.io/langgraph/reference/checkpoints/#langgraph.checkpoint.base.BaseCheckpointSaver) or a [store](https://langchain-ai.github.io/langgraph/reference/store/#langgraph.store.base.BaseStore) instance to the `.compile()` method: 78 | 79 | ```python 80 | from langgraph.checkpoint.memory import InMemorySaver 81 | from langgraph.store.memory import InMemoryStore 82 | 83 | # short-term memory 84 | checkpointer = InMemorySaver() 85 | # long-term memory 86 | store = InMemoryStore() 87 | 88 | model = ... 89 | alice = ... 90 | bob = ... 
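# (model, alice, and bob here stand for the same objects built in the
#  Quickstart above: a chat model plus two create_react_agent agents
#  equipped with handoff tools)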
91 | 92 | workflow = create_swarm( 93 | [alice, bob], 94 | default_active_agent="Alice" 95 | ) 96 | 97 | # Compile with checkpointer/store 98 | app = workflow.compile( 99 | checkpointer=checkpointer, 100 | store=store 101 | ) 102 | ``` 103 | 104 | > [!IMPORTANT] 105 | > Adding [short-term memory](https://langchain-ai.github.io/langgraph/concepts/persistence/) is crucial for maintaining conversation state across multiple interactions. Without it, the swarm would "forget" which agent was last active and lose the conversation history. Make sure to always compile the swarm with a checkpointer if you plan to use it in multi-turn conversations; e.g., `workflow.compile(checkpointer=checkpointer)`. 106 | 107 | ## How to customize 108 | 109 | You can customize multi-agent swarm by changing either the [handoff tools](#customizing-handoff-tools) implementation or the [agent implementation](#customizing-agent-implementation). 110 | 111 | ### Customizing handoff tools 112 | 113 | By default, the agents in the swarm are assumed to use handoff tools created with the prebuilt `create_handoff_tool`. You can also create your own, custom handoff tools. Here are some ideas on how you can modify the default implementation: 114 | 115 | * change tool name and/or description 116 | * add tool call arguments for the LLM to populate, for example a task description for the next agent 117 | * change what data is passed to the next agent as part of the handoff: by default `create_handoff_tool` passes **full** message history (all of the messages generated in the swarm up to this point), as well as a tool message indicating successful handoff. 118 | 119 | Here is an example of what a custom handoff tool might look like: 120 | 121 | ```python 122 | from typing import Annotated 123 | 124 | from langchain_core.tools import tool, BaseTool, InjectedToolCallId 125 | from langchain_core.messages import ToolMessage 126 | from langgraph.types import Command 127 | from langgraph.prebuilt import InjectedState 128 | 129 | def create_custom_handoff_tool(*, agent_name: str, name: str | None, description: str | None) -> BaseTool: 130 | 131 | @tool(name, description=description) 132 | def handoff_to_agent( 133 | # you can add additional tool call arguments for the LLM to populate 134 | # for example, you can ask the LLM to populate a task description for the next agent 135 | task_description: Annotated[str, "Detailed description of what the next agent should do, including all of the relevant context."], 136 | # you can inject the state of the agent that is calling the tool 137 | state: Annotated[dict, InjectedState], 138 | tool_call_id: Annotated[str, InjectedToolCallId], 139 | ): 140 | tool_message = ToolMessage( 141 | content=f"Successfully transferred to {agent_name}", 142 | name=name, 143 | tool_call_id=tool_call_id, 144 | ) 145 | # you can use a different messages state key here, if your agent uses a different schema 146 | # e.g., "alice_messages" instead of "messages" 147 | messages = state["messages"] 148 | return Command( 149 | goto=agent_name, 150 | graph=Command.PARENT, 151 | # NOTE: this is a state update that will be applied to the swarm multi-agent graph (i.e., the PARENT graph) 152 | update={ 153 | "messages": messages + [tool_message], 154 | "active_agent": agent_name, 155 | # optionally pass the task description to the next agent 156 | "task_description": task_description, 157 | }, 158 | ) 159 | 160 | return handoff_to_agent 161 | ``` 162 | 163 | > [!IMPORTANT] 164 | > If you are implementing custom handoff tools that 
return `Command`, you need to ensure that: 165 | (1) your agent has a tool-calling node that can handle tools returning `Command` (like LangGraph's prebuilt [`ToolNode`](https://langchain-ai.github.io/langgraph/reference/prebuilt/#langgraph.prebuilt.tool_node.ToolNode)) 166 | (2) both the swarm graph and the next agent graph have the [state schema](https://langchain-ai.github.io/langgraph/concepts/low_level#schema) containing the keys you want to update in `Command.update` 167 | 168 | ### Customizing agent implementation 169 | 170 | By default, individual agents are expected to communicate over a single `messages` key that is shared by all agents and the overall multi-agent swarm graph. This means that messages from **all** of the agents will be combined into a single, shared list of messages. This might not be desirable if you don't want to expose an agent's internal history of messages. To change this, you can customize the agent by taking the following steps: 171 | 172 | 1. use custom [state schema](https://langchain-ai.github.io/langgraph/concepts/low_level#schema) with a different key for messages, for example `alice_messages` 173 | 1. write a wrapper that converts the parent graph state to the child agent state and back (see this [how-to](https://langchain-ai.github.io/langgraph/how-tos/subgraph-transform-state/) guide) 174 | 175 | ```python 176 | from typing_extensions import TypedDict, Annotated 177 | 178 | from langchain_core.messages import AnyMessage 179 | from langgraph.graph import StateGraph, add_messages 180 | from langgraph_swarm import SwarmState 181 | 182 | class AliceState(TypedDict): 183 | alice_messages: Annotated[list[AnyMessage], add_messages] 184 | 185 | # see this guide to learn how you can implement a custom tool-calling agent 186 | # https://langchain-ai.github.io/langgraph/how-tos/react-agent-from-scratch/ 187 | alice = ( 188 | StateGraph(AliceState) 189 | .add_node("model", ...) 190 | .add_node("tools", ...) 191 | .add_edge(...) 192 | ... 193 | .compile() 194 | ) 195 | 196 | # wrapper calling the agent 197 | def call_alice(state: SwarmState): 198 | # you can put any input transformation from parent state -> agent state 199 | # for example, you can invoke "alice" with "task_description" populated by the LLM 200 | response = alice.invoke({"alice_messages": state["messages"]}) 201 | # you can put any output transformation from agent state -> parent state 202 | return {"messages": response["alice_messages"]} 203 | 204 | def call_bob(state: SwarmState): 205 | ... 206 | ``` 207 | 208 | Then, you can create the swarm manually in the following way: 209 | 210 | ```python 211 | from langgraph_swarm import add_active_agent_router 212 | 213 | workflow = ( 214 | StateGraph(SwarmState) 215 | .add_node("Alice", call_alice, destinations=("Bob",)) 216 | .add_node("Bob", call_bob, destinations=("Alice",)) 217 | ) 218 | # this is the router that enables us to keep track of the last active agent 219 | workflow = add_active_agent_router( 220 | builder=workflow, 221 | route_to=["Alice", "Bob"], 222 | default_active_agent="Alice", 223 | ) 224 | 225 | # compile the workflow 226 | app = workflow.compile() 227 | ``` 228 | 229 | -------------------------------------------------------------------------------- /examples/customer_support/README.md: -------------------------------------------------------------------------------- 1 | # Customer Support Example 2 | 3 | A simple example of building a customer support system using LangGraph Swarm. 
This example demonstrates how to create a system where agents can hand off conversations to other specialized agents. 4 | 5 | ## Overview 6 | 7 | The system consists of two specialized agents: 8 | - **Flight Assistant**: Handles flight search and booking 9 | - **Hotel Assistant**: Handles hotel search and booking 10 | 11 | These agents can transfer control to each other using handoff tools, allowing for a seamless customer experience. 12 | 13 | ## Quickstart 14 | 15 | ```bash 16 | uvx --refresh --from "langgraph-cli[inmem]" --with-editable . --python 3.11 langgraph dev 17 | ``` 18 | 19 | ## Features 20 | 21 | - Agent handoff between specialized services 22 | - Mock data for flights and hotels 23 | - Reservation tracking by user ID 24 | - Built with LangGraph Swarm for agent orchestration 25 | 26 | ## How It Works 27 | 28 | 1. The system starts with the Flight Assistant as the default agent 29 | 2. Agents can pass control using handoff tools (`transfer_to_hotel_assistant` and `transfer_to_flight_assistant`) 30 | 3. User context and reservation information is maintained throughout handoffs 31 | 4. Agents have access to specific tools related to their domain 32 | 33 | 34 | 35 | 36 | -------------------------------------------------------------------------------- /examples/customer_support/langgraph.json: -------------------------------------------------------------------------------- 1 | { 2 | "dependencies": ["."], 3 | "graphs": { 4 | "agent": "./src/agent/customer_support.py:app" 5 | } 6 | } -------------------------------------------------------------------------------- /examples/customer_support/pyproject.toml: -------------------------------------------------------------------------------- 1 | [project] 2 | name = "swarm-customer-support" 3 | version = "0.0.1" 4 | description = "Simple customer support example using LangGraph Swarm." 5 | authors = [ 6 | { name = "Lance Martin" } 7 | ] 8 | readme = "README.md" 9 | license = { text = "MIT" } 10 | requires-python = ">=3.9" 11 | dependencies = [ 12 | "langchain-openai>=0.3.11", 13 | "langgraph>=0.3.21", 14 | "langgraph-swarm>=0.0.7", 15 | "langchain>=0.3.21", 16 | ] 17 | 18 | [project.optional-dependencies] 19 | dev = ["mypy>=1.11.1", "ruff>=0.6.1"] 20 | 21 | [build-system] 22 | requires = ["setuptools>=73.0.0", "wheel"] 23 | build-backend = "setuptools.build_meta" 24 | 25 | [tool.setuptools] 26 | packages = ["customer_support"] 27 | 28 | [tool.setuptools.package-dir] 29 | "customer_support" = "src/agent" 30 | 31 | [tool.setuptools.package-data] 32 | "*" = ["py.typed"] 33 | 34 | [tool.ruff] 35 | lint.select = [ 36 | "E", # pycodestyle 37 | "F", # pyflakes 38 | "I", # isort 39 | "D", # pydocstyle 40 | "D401", # First line should be in imperative mood 41 | "T201", 42 | "UP", 43 | ] 44 | lint.ignore = [ 45 | "UP006", 46 | "UP007", 47 | "UP035", 48 | "D417", 49 | "E501", 50 | ] 51 | 52 | [tool.ruff.lint.per-file-ignores] 53 | "tests/*" = ["D", "UP"] 54 | 55 | [tool.ruff.lint.pydocstyle] 56 | convention = "google" -------------------------------------------------------------------------------- /examples/customer_support/src/agent/customer_support.ipynb: -------------------------------------------------------------------------------- 1 | { 2 | "cells": [ 3 | { 4 | "cell_type": "code", 5 | "execution_count": null, 6 | "id": "6f1257c4-11f2-4f2c-b7a9-3ff7d88a844e", 7 | "metadata": {}, 8 | "outputs": [], 9 | "source": [ 10 | "! 
pip install langchain_openai langgraph langgraph-swarm" 11 | ] 12 | }, 13 | { 14 | "cell_type": "code", 15 | "execution_count": 1, 16 | "id": "52d21ef9-1a36-4616-bf59-6e8a0dc053cd", 17 | "metadata": {}, 18 | "outputs": [], 19 | "source": [ 20 | "import os\n", 21 | "\n", 22 | "os.environ[\"ANTHROPIC_API_KEY\"] = \"\"\n", 23 | "os.environ[\"OPENAI_API_KEY\"] = \"\"\n", 24 | "\n", 25 | "# turn on langsmith tracing\n", 26 | "os.environ[\"LANGSMITH_API_KEY\"] = \"\"\n", 27 | "os.environ[\"LANGSMITH_TRACING_V2\"] = \"true\"" 28 | ] 29 | }, 30 | { 31 | "cell_type": "code", 32 | "execution_count": 1, 33 | "id": "b00fae2b-5c83-4e23-838f-46e8fe30bffd", 34 | "metadata": {}, 35 | "outputs": [], 36 | "source": [ 37 | "from langchain_openai import ChatOpenAI\n", 38 | "\n", 39 | "model = ChatOpenAI(model=\"gpt-4o\")" 40 | ] 41 | }, 42 | { 43 | "cell_type": "markdown", 44 | "id": "bbf49378-d2b2-4e4f-a1a5-1f308c82febe", 45 | "metadata": {}, 46 | "source": [ 47 | "## Customer support tools" 48 | ] 49 | }, 50 | { 51 | "cell_type": "code", 52 | "execution_count": 4, 53 | "id": "6011da74-ad1e-472c-b8f3-57ecbf9fd6ba", 54 | "metadata": {}, 55 | "outputs": [], 56 | "source": [ 57 | "import datetime\n", 58 | "from collections import defaultdict\n", 59 | "from typing import Callable\n", 60 | "\n", 61 | "from langchain_core.runnables import RunnableConfig\n", 62 | "from langgraph.checkpoint.memory import MemorySaver\n", 63 | "from langgraph.prebuilt import create_react_agent\n", 64 | "from langgraph_swarm import create_handoff_tool, create_swarm\n", 65 | "\n", 66 | "# Mock data for tools\n", 67 | "RESERVATIONS = defaultdict(lambda: {\"flight_info\": {}, \"hotel_info\": {}})\n", 68 | "TOMORROW = (datetime.date.today() + datetime.timedelta(days=1)).isoformat()\n", 69 | "FLIGHTS = [\n", 70 | " {\n", 71 | " \"departure_airport\": \"BOS\",\n", 72 | " \"arrival_airport\": \"JFK\",\n", 73 | " \"airline\": \"Jet Blue\",\n", 74 | " \"date\": TOMORROW,\n", 75 | " \"id\": \"1\",\n", 76 | " }\n", 77 | "]\n", 78 | "HOTELS = [\n", 79 | " {\n", 80 | " \"location\": \"New York\",\n", 81 | " \"name\": \"McKittrick Hotel\",\n", 82 | " \"neighborhood\": \"Chelsea\",\n", 83 | " \"id\": \"1\",\n", 84 | " }\n", 85 | "]\n", 86 | "\n", 87 | "\n", 88 | "# Flight tools\n", 89 | "def search_flights(\n", 90 | " departure_airport: str,\n", 91 | " arrival_airport: str,\n", 92 | " date: str,\n", 93 | ") -> list[dict]:\n", 94 | " \"\"\"Search flights.\n", 95 | "\n", 96 | " Args:\n", 97 | " departure_airport: 3-letter airport code for the departure airport. If unsure, use the biggest airport in the area\n", 98 | " arrival_airport: 3-letter airport code for the arrival airport. 
If unsure, use the biggest airport in the area\n", 99 | " date: YYYY-MM-DD date\n", 100 | " \"\"\"\n", 101 | " # return all flights for simplicity\n", 102 | " return FLIGHTS\n", 103 | "\n", 104 | "\n", 105 | "def book_flight(\n", 106 | " flight_id: str,\n", 107 | " config: RunnableConfig,\n", 108 | ") -> str:\n", 109 | " \"\"\"Book a flight.\"\"\"\n", 110 | " user_id = config[\"configurable\"].get(\"user_id\")\n", 111 | " flight = [flight for flight in FLIGHTS if flight[\"id\"] == flight_id][0]\n", 112 | " RESERVATIONS[user_id][\"flight_info\"] = flight\n", 113 | " return \"Successfully booked flight\"\n", 114 | "\n", 115 | "\n", 116 | "# Hotel tools\n", 117 | "def search_hotels(location: str) -> list[dict]:\n", 118 | " \"\"\"Search hotels.\n", 119 | "\n", 120 | " Args:\n", 121 | " location: offical, legal city name (proper noun)\n", 122 | " \"\"\"\n", 123 | " # return all hotels for simplicity\n", 124 | " return HOTELS\n", 125 | "\n", 126 | "\n", 127 | "def book_hotel(\n", 128 | " hotel_id: str,\n", 129 | " config: RunnableConfig,\n", 130 | ") -> str:\n", 131 | " \"\"\"Book a hotel\"\"\"\n", 132 | " user_id = config[\"configurable\"].get(\"user_id\")\n", 133 | " hotel = [hotel for hotel in HOTELS if hotel[\"id\"] == hotel_id][0]\n", 134 | " RESERVATIONS[user_id][\"hotel_info\"] = hotel\n", 135 | " return \"Successfully booked hotel\"\n", 136 | "\n", 137 | "\n", 138 | "# Handoff tools\n", 139 | "transfer_to_hotel_assistant = create_handoff_tool(\n", 140 | " agent_name=\"hotel_assistant\",\n", 141 | " description=\"Transfer user to the hotel-booking assistant that can search for and book hotels.\",\n", 142 | ")\n", 143 | "transfer_to_flight_assistant = create_handoff_tool(\n", 144 | " agent_name=\"flight_assistant\",\n", 145 | " description=\"Transfer user to the flight-booking assistant that can search for and book flights.\",\n", 146 | ")\n", 147 | "\n", 148 | "\n", 149 | "# Define agent prompt\n", 150 | "def make_prompt(base_system_prompt: str) -> Callable[[dict, RunnableConfig], list]:\n", 151 | " def prompt(state: dict, config: RunnableConfig) -> list:\n", 152 | " user_id = config[\"configurable\"].get(\"user_id\")\n", 153 | " current_reservation = RESERVATIONS[user_id]\n", 154 | " system_prompt = (\n", 155 | " base_system_prompt\n", 156 | " + f\"\\n\\nUser's active reservation: {current_reservation}\"\n", 157 | " + f\"Today is: {datetime.datetime.now()}\"\n", 158 | " )\n", 159 | " return [{\"role\": \"system\", \"content\": system_prompt}] + state[\"messages\"]\n", 160 | "\n", 161 | " return prompt\n", 162 | "\n", 163 | "\n", 164 | "# Define agents\n", 165 | "flight_assistant = create_react_agent(\n", 166 | " model,\n", 167 | " [search_flights, book_flight, transfer_to_hotel_assistant],\n", 168 | " prompt=make_prompt(\"You are a flight booking assistant\"),\n", 169 | " name=\"flight_assistant\",\n", 170 | ")\n", 171 | "\n", 172 | "hotel_assistant = create_react_agent(\n", 173 | " model,\n", 174 | " [search_hotels, book_hotel, transfer_to_flight_assistant],\n", 175 | " prompt=make_prompt(\"You are a hotel booking assistant\"),\n", 176 | " name=\"hotel_assistant\",\n", 177 | ")\n", 178 | "\n", 179 | "# Compile and run!\n", 180 | "checkpointer = MemorySaver()\n", 181 | "builder = create_swarm(\n", 182 | " [flight_assistant, hotel_assistant], default_active_agent=\"flight_assistant\"\n", 183 | ")\n", 184 | "\n", 185 | "# Important: compile the swarm with a checkpointer to remember\n", 186 | "# previous interactions and last active agent\n", 187 | "app = 
builder.compile(checkpointer=checkpointer)" 188 | ] 189 | }, 190 | { 191 | "cell_type": "code", 192 | "execution_count": 5, 193 | "id": "1e87a927-4f47-4ef8-a6ce-1094b31d6f24", 194 | "metadata": {}, 195 | "outputs": [], 196 | "source": [ 197 | "from IPython.display import Image, display" 198 | ] 199 | }, 200 | { 201 | "cell_type": "code", 202 | "execution_count": 6, 203 | "id": "d48b7dd4-3bbf-46b9-b1a6-df608be11b7f", 204 | "metadata": {}, 205 | "outputs": [ 206 | { 207 | "data": { 208 | "image/png": "iVBORw0KGgoAAAANSUhEUgAAAMUAAAD5CAIAAAAY4mZxAAAAAXNSR0IArs4c6QAAIABJREFUeJztnXdcE0n/xycFSEhC7yBNBKkCoqKHHRsCdg8BOe+xn9jALiqictbTQ8W7E7GgiOfpiR27p3jCPSoKikiTIgQlAUIgPfn9sf7yeEhbsptNYN8v/8Ds7Mwny4dpO/MdgkwmAzg4CEHEWgBOtwL3Ew6S4H7CQRLcTzhIgvsJB0lwP+EgCVlpJTHL+M0ccXOjRCKWCXhSpZWrCFpUoqYWUVuHpK1DNrbUwlqOGoC6n97+l1Oa21SS12TrrA0IBG0GSd9UE6jJnJdYKPv0gdfMkVBoxMp3PDt3mr07zaYvDWtdqgsBvfnMV4/rs26wbJ3pdu40ezcakURAqSDl0MQRl+Y2Mcv5tZWCIUFG1n21sVakiqDip48V/BsnmLbOtCFBhhpa3a2L9umD4MmVWiqDNDbMDGstKgfyfnqTxcl93DBxrjldT3mdM+VTXcK7eOjDrDXWBmaaWGtRIRD2U9FLbtmbptGzTBHMU2WRSmVnd5dPWmTZvf9yYIGkn/65xa7/KBwT3rNagdRd5aO+NTGzpWAtRCVArHNTmtdUU87vaWYCAISutb6U+EEkVI8ZELRBxk8NLGF+NidwngUiuakdoeusb6UwsVahEiDjp8eXWH0HMBDJSh3RMdCg62q8elyPtRDsQcBPzPf85kaxvTsdCT3qypAgwydXWFirwB4E/PT674ahk42QEKPGaGgRfQMMX/7V06soRf3Eb5KU5DWZ2VIR0tMBXC737du3WN3ePpa9qfnZHJQyVxcU9VNpXpOdm/LeZ4WEhKSnp2N1e/sYW2nxm6SNdSKU8lcLFPVTdSmvj5fyek5CobBrN0LTbF2+vZM4D2SU5TejWoSKo7Cf3vMZehoIifkXJ06cCAgI8PPzmzt3bnZ2NgAgMDCQzWafP3/ex8cnMDAQ8sfhw4eDg4MHDRo0ceLExMREiUQC3b5r166xY8f+9ddfU6ZM8fHx+eeff76+HXGodBKrGl3LqjiKvihobpRoM0gIifkf2dnZhw4dGj9+/JAhQ548edLc3AwA2L17d2RkZP/+/cPCwjQ1NQEAJBIpKytr2LBhVlZWBQUFycnJOjo64eHhUCZcLjcxMXHdunU8Hm/AgAFf34442jrkD0U8NHJWFxTyk0QiE/GlFBryfqqqqgIAzJw508PDIyAgAPrQxcWFTCYbGRl5enpCn5BIpJMnTxIIn1fCVFZW3rt3T+4noVAYExPj5ubW1u2IQ9MhNXEkKGWuFijkJ6lIStVB3kwAAD8/Px0dnU2bNq1evdrPz6+dlGw2++jRo0+fPuVwOAAABuN/06oUCkVuJuVAIhPIGuq9zEtBFOo/aVBIQp5UyEf+1ZWRkVFycrKNjc2KFSvmzp378ePHVpOxWKywsLDs7OzFixcfPHjQ2dlZ3n8CAGhrK3vJG7de3P3We8FC0S+vzSA3N4oREvMvbG1tExISjhw5UlRUFBsbK//8ywURFy5cYLPZiYmJ48aNc3V1NTPr+G00qvvrmzkSbXQqbHVBUT9ZOlCaG1HpMUBj+wEDBgwdOlQ+CUmlUmtra+Vp6uvr9fX15Taqr69v3y4tbkcckVBq2LOX15G+/NPvAhyWuPo938YZ4SnN169fz58/XywWFxYWXrx40cXFBeqVFxQU3Lt3j0wml5SUaGho0Gi0y5cvSyQSkUh08uTJu3fvNjU1zZgxg0KhZGZmlpaWzp49+8tsW9xuYGCArOz7v3/yGqmnzei5y+sU9ZO2DunvqyzP4XrISQIAgIaGhnfv3t26dSs7O9vb23vDhg10Oh0A4OHhUVBQcP369bdv37q6uo4aNUoqlZ4/f/7u3bu9evXatGnTixcvmpubfXx8WvVTi9vt7OyQ1Fwrys/mDJ7Yo19lIrA+88aJ6kETDA1Me3Q9DwB485TD5YgHjkW4zlMvEKiZnfoz/r7KmjjXvK0EcXFx9+7d+/pzU1PTmpqarz/X1dVF7y2bnMePH8fExLR6ycrKqrKy8uvPU1NTLSzaXDP46M9P38chWeGpI8isHz9/oGLoZOO21lDX1dXxeK3MGotEIg2NVt7VEInEzozUFITP57PZ7FYvEQitPxYTExMyufW/wH9usSVimW+AIdIy1Qxk/FRVwnv7T+Oob02QkKR+yKSyP498mLrECmsh2IPM5JuFPVXfVOPxJRSH4qpM2t6KYVOMsVahEiA2mes1Qp/fLHl2t/UWpBtzNanKe5S+kQUeLQMgv58z6wZLQ5PoPVofwTxVmWvHqr1G6lrY47EMPoPwy6ZBEwybGsV3UlsZtXUz+M3i0z+WOXrTcTN9CSrxMPKzOY8ufRoy0cjtG13EM8ccqUT25ArrYyV/5AwT/R4/69YCtOL1CPnSzCu1le94roN17Nxo+ibd4blXlfCqinlZN9lDggy9RvSUNh0WKMZ/AgBw2MLcx5zSvCYAgK2LNlmTSNMl6xhoSCQtC62vr9fTQ/ilDSI0skTcBjGBCF7/zTEw1ezjRfcYqoo6VQR0/SSn7qOQ+Z7PrRc3NYiJJEJj3b+WuHz8+JHL5drb2ytBSVv8/fff3t7eWlr/GqbRdUkEEoGuS2bok3s5aqOxErW7IVMB9u3bh7UEmUgkSkhIwFqF2qOk+qktzp079+2332Io4GtSUlJarErA6TxYLk719fUdNmwYhgJaxcjIaPfu3VirUFewqZ/evHnj5OQklUpbfR+MOUVFRQ4ODjU1NaamPSLQHoJgUD8lJSWVlpaSSCTVNBMAwMHBAQCwY8eOvLw8rLWoGRj4icFgTJw4UfnlwiUhISEnJwdrFWqGUtu7y5cvBwcHK604pDh16lRERATWKtQDJdVPMpls0KBBo0aNUk5xyNK/f/9FixZhrUI9UEb9VF9fTyaTKRRKW4sbVZ/KykorKyuVncRXHVCvn4qKim7evEmn09XXTNCKcgBAcnLyy5cvsdai0qDup507d4aEhKBdinKIiopKS0vDWoVKg/H8uJqSlZU1aN
AgrFWoImjVT0wmc8OGDShljjkikejYsWNYq1BFUPFTU1PTwYMH4+Pj0chcFYDCCWGtQhXB27uu8+nTJy6Xi+ymdXUH+fppzZo1ZWVliGerghgbG7969SouLg5rISoEwvVTSkpKnz59fH19EcxTxeFyuXw+38ioR4fBkIO3dwhQVFTU3Nzs4eGBtRDsQay9Y7FYBw4cQCo39cLBweHevXspKSlYC8EexOqnadOm7du3z9bWFpHc1BEWi0WlUpUftFOlQMZPYrGYQCCQSD19uf6LFy8cHR1pNOUdQKJqINDecbncZ8+e4WYCADg7O48dOxZrFViCgJ+WLVtGoeCn5wIo4jkUTBFrIZihaHtXWVlZU1PTv39/5CSpPfX19SQS6cvI+j0HfL4AFYYPH37t2jUohmyPQqH2LiMjIykpCTkx3YfDhw9fvnwZaxUYoFD9FBAQkJKSYmjY04NG4sjB2zu0KC4uzsnJmTZtGtZClErX27vq6mqBQIComG5F7969T58+XV5ejrUQpdJFPxUXFy9fvrxFNBKcFpw6daqnzW12sb27du0ag8FQwegDONiC95/QZfPmzTNmzHB3d8daiJLoSntXW1t769YtFMR0QxwdHe/cuYO1CuXRlT1xFy5ckB/Zi9M+YWFhYjEq502qJl3xk7m5+eDBg1EQ0w0hEAhEYg86AbYrXzU4ONjYGD9eorOEhYUVFhZirUJJwPZTWVnZlStX0BHTPenVq1dFRQXWKpQE7Pbuxo0b+FInWOzZswdrCcoDtp969+7t6uqKjpjuiVgslslkKhuMD1nw+SfUSU1Nra6ujo6OxlqIMoDXf+Jyufv27UNNTPdEQ0NDJBJhrUJJwGvv8vPze85QRUGmTJkikUjk1X9wcLBMJuPxeN17ehOen8zNzVeuXImamG6Fm5vbtWvXvpx8kslkTk5OmIpCHXjtnZWVVbd/IkgRERFhbv6vM9+1tLTCwsKwU6QM4PkpKSkJD8ndSfr06ePt7f3lcMfa2lotAmUrAjw/3b59G98a1Xlmz55tYvL5zHdtbe2eEHQanp+WLl3ak3eUw8XR0XHgwIHQz9bW1gEBAVgrQh14fvLz81PrML3KJzw83MTERFtbu4ecWQXDHGw2Ozk5edWqVWjqaUkTR8yqFopF6jrpSgIWg/tNrqys7GPpV5LXhLWcLkIkAn0TTV2jjqf4YfipqqoqNzdXMWEwaKwTPbxQW1POt3GmNXMkSisXcfrZBvWzBXmZHKyFdB26Prmi4JOukYb3KH1rp/YCyMB431JbW1tRUeHl5YWQyPbg1ov/TPwwcqa5rnF3OMi6eyASSG+nfPCbZGjp0KalVPT93eGoovBNvYlEfBWoynH5SPnY2abGlq1vbYLRH79///4ff/yBnLA2ybrJ8g00xs2kmgwOMn52p66tqzD8VFxc/OnTJ4RUtUdVCZ+u3yNWd6gjusZaZfltDixg9McDAwOVM1kgkwAd3E+qiqYWUc9Yq7lRos1oZVklDH+YmZkhKqxNmjhiqSp26nA+01gnbGuPBYz2LjExMTMzEzFRON0RGH4qLS3FA2DgtA+M9i4qKkpXVxdNMThqDww/tVjNg4PzNTDau02bNhUUFKApBkftgeGn8vLynrOuHqdrwGjv4uLiTE1N0RSDo/bA8JONjQ2aSnC6AzDau/Xr11dWVqIpBkftgeGnwsJCvP+E0z4w/LR9+3Z8ygCnfWD4qW/fvqq8ueXBwzsRc6YFBA49fuKXBw/vjBztU17+HrokFovDI6Yc+aW94x4LiwpGjvb5++9HrV7lcrnvCpV0ys/1G+mTp/rX1DDbSiCRSHJzcxQviMmsrmZWKZ7Pl8DwU2xsrHLWq3SB0tLi7Ts2erh7xW7ZPca/5TYSAoHAYOgo8scwb0HIjRvpCsvsFJqaWjQavZ2odnv2bfvpQLyCpXyoqgwNDy4oeKNgPi2AMb7Lycnh8/nIFo8Uz55nkUikqJUboF9DUfG7L6+SSKQjh0+2eqNMJquq/mBpYdV+/kKhEFG97eE/erz/6PHtiUHiLapELEZjaS4MP8XExKhmmMPoVYufv/gHADB6zMBhQ0dtjd395dVqZlVoWDAAIDzsP3P/8wMA4E1+3uHEfSUlhYYGRrZ2vYuKCk6duAglLn1fnPb7qYKCN1ZW1suXrnV39wQAhIQG1tWxL6Wfv5R+3tTULC31ajticnNzUk4n5eblAAD6OrkuWrTCydEZAMDn8w8k7Hzy5C8AgIeHV+QPq8zMzJ8+ffxb0sGqqkozM4vgoOlTp3y7c3dsRsZVAMDtjKdkMrnVBPcf3AYAjBztAwBIPXPZ3Mzixs3Lly79XlJaRKVqDxwwOHLJKj09fQDAHxdS792/NWN62LFjh1ns2j59+q6KirG2tq1mVn33/XQAwNa4dVsBGDcucN2aWER+FzDaOx8fH9XsP30/Z9GI4f5kMnlb3N6QkO9aXNXXM9gWt1e+ErCmhrlq9WIymbxx/XYvrwGZmQ+Dg6Zran7e9XD6zDEvzwErlq8TCoUbN0VxuVwAQOyW3QyGzlC/kQkHkmK37G5Z/L9hMqsEQsHs8HnfRSxgMqvWrV8GVeqpZ49nZFydPi104YJlHE4DlUptbm6OjVurqaEZHRUzZPAwFusTAGDqlJAxYz63160mCA/9j7fXAHMzi4QDSQkHkgwNjAAAb97kWlvbLlywLChwauaTh7v2bJXryc/P+/33lOjomLitez99rPlx1xYAgKGB0cYN26FHl3AgKTz0P0j9LmDUT/v27ZszZ44Knibl5tYvKzuTQCD4fTPi66sUCsXvmxHyANe371zn8XhbNu00MDD85pvhL189f5r1OHTWHOjq8qVrx40LBADYWNv9EDnn2fOs4cNG93VyIZPJhoZGUHXVPv7+E+SGcHJyiYpelJuXM8DHt5pZRaVSQ2fNIZPJEwMmQz0YgUAwdOioMf4T5Lc79ulra2MP/VxXz/46gZWVta6uHruO9aWYqJUb5F+QTCafPpMsEAjkp6Hs2L7fwMAQADB1akjikf0NnAZdHV3HPn0BANbWtp35Up0Hhp9KSkqam5tV0E+w+PSphkajQc+XQCBYWFjV1FTLr+rofF6QY2vbG0oMN38CgfDo8f3fz58uKyuFzjqvY7MAAP6jJ9y9e3PtuqVLfoi2t3cAAFiYW7q6epw+c4xCoQYFTpXXkXI6TCBHJBJd/DPt9p3rHz8ytbQoUqm0vr7O1PTzeloKhQr9YGpqDgBg1X7S1UFr3RGM9m7atGkGBgYo6VAalpa9mpqaSkqKoF9DUVFB796OXyeD+vUSCex9pKdSkjZvWe3k6LJj20+LFq4AAEhlUgDAoIFDfoz/mV3Hmjs/ZO++7dCJ8DvjE8aNDfzl1wMRc6a+fPm8RVYdJoCQyWQbNq44k5o8YXzwrp2HoOEtVGgLNMgaAACJFMXNsTD8NGrUqG5wWtK4sYFGRsYbYlYkHz8SvXqxWCyeE7GgMzd2ZjQkEAhSzx6fGDA5ckm0u7uni/O/jm0ZNHDIsaNpPyxeee36pbNpJwEAdDp9xfJ1J
09coNHoMZuimpubW2TYVoIvxbx8+fzZ8+zly9ZNnxbq4uxmb+fQuSeBCjD8dPTo0fr6ejTFIIamhiYAgMNp+PqSrq5e5JJVWlqU0tJin/6+R39NtbKy7jBDKoXKYtV2mIzP5wkEAkdHZ+i/DZx6AIBUKpXPOBCJxBnTw4yMjAsL30L+g9q1qVNCuE1c5lezi60moFCobDYLylZeCtQfalFoO2hpUaC2r8MvBQsY/adr166NHz9eT08PWQVoYGfvQCQS9//8Y+SSVV6ePl9eyn/7eveercsi15A1NIhEYnX1BwMDww4jqru7e929dzP17AkGQ8fVxQPqAH2Nrq6evb3DxT/TDAwMm7jck6d+IxKJUNt68c+0zCcPx/gHsFifams/OTm5iESi776fNmL4GDvb3unp5+k0usW/p8HaStDPw/vGzcs/7Y93d/NkMHRcnN01NTWPJh2aOHFKSUlh6tnjAIDSkqL2J9VMTEwtzC1//+M0hUrlcBpmTA9DZDMcjPopIiJCLcwEADA3s1i7eotAIHj69HGLS2am5ubmlrv2bN2+Y2PctvXLV85f/ENEh/O0Cxcs8/L0STmdlJp6/ENVe4cdbNoYT6VQ47atP3c+ZfHilbPD52ZkXBGJRBYWViKh8Mgv+69dvzR1asi3M2fz+DwvzwF37t44kLCTrKERv+NAi+mYthKMGRMwZfLMBw9v/5Z08PWbV8bGJjEbdxQWvY3duubZs6yf9v3q6+t38c+09r8RgUCIiYnX1qYdOrz3ZsaVpiZuRw+1U6hi/IKUHWWjQi10DNDa0imRSKAKSSKRPHp8f2vcun17j3h7DUCpuO7HuT0l4ettKDTF9nOeO3du/Pjx6r7Fpbz8/fKV8wf7DnXo7SgQCv766y6FQrGy7LgLJYfL5c4KC2z10sIFywMnTkFOrPoBw09paWmDBw9Wdz/RaPTRo8Y/ffro9p3rdDrD3c1zxYr1JiYw1jFra2v/9mtqq5d0GOr9cBQHhp+mT5+uo6ODphhlYGhoFLkkOnJJ10+/IBKJ5mYWiIrqPsDwU7ePnY2jODDGdzdu3FCX+SccrIDhpzNnzlRXV3ciIU7PBYaf1GUyEwdDYPSfwsPD0VSC0x2AUT89fPiwpgb2+g2cHgUMP128eBE//A6nfWD4yd/fH49fgNM+MPpPQUFBaCrB6Q7AqJ+ysrLw9g6nfWD46cmTJ1lZWWiK+YyBmRZQvVUPOHIMzLQIbRgHRns3bNgwxBS1C1kD1FYJdAzxk1tUEQ5byK0Xa1FbX4EIw0/9+/dHTlV72LnRPhQL7N0ZyikOBxY1Zbw+3vS2rsJo73Jzc2/evImQqvZw9GaIRZKchywllIUDi6qS5rdZDYMD2twzB2N95oMHD65cubJv3z7k5LXHndQakibJ0FzLyJKCnw2EOWymoJEtLH7ZGLK6Vzu/Dhh+YrPZb9++HTJkCHIiO+Dd88aS3CaRUMaqwjKOvkQslgGA1Nk1IpFIIpGo5s79tjA01wIEmbWTdr9hHbzAVcX14ypFVlZWRkbG5s2bkcpw7dq1jx49io6OnjZtGlJ5qg4w+k98Pj8hIQFNMarIoEGDEDSTRCLJz88XCoXJycndMpg7DD9RKJRz586pbAgoxJFKpTt37kQ2z7y8PB6PBwCoqanZuHFjh7su1Q4YfgIArFmzpueEZN22bducOXOQzfP58+dsNhv6+f3791FRUcjmjzl4/0mpLFiw4NmzZ/LYOlpaWhEREQsXLsRaF2LAq58yMjLy8vJQE6MqXLp06eHDh4hny2azmUym3ExQeIJLly4hXhCGwPNTdXX1/fv3UROjEly7dq2goGD48OGI5/z69WsOhwNFR4F6ToaGhvKoX90DeO1dVVVVZWXlwIED0ZTUnRk6dKihoeGlS5dev35tY2NDp7f54kJNwftP/0MikRw7dmzBgk6Fg1KQ2NjY/v37d78lZfDaO2iI14WobWpBaGjoqFGjlFPWwIEDm5raPHVefYFdP0VERKxdu9bV1RU1SdgABSDsMBAUTvvA9lNRURGdTjczM0NNEgYwmUw2m+3i4qK0ErlcbklJiYeHh9JKVA6w2zsHB4fuZ6a5c+cq00zQ3MGWLVuUWaJygO2n2tra+HhFDw9RKSorKy9cuKDkQvX09Ozs7JRcqBLoyvguKCjo119/tbDoDjFreDwekUjsZpNAGAK7fgIAJCQkaGigFYxQmWRlZUVHR2NiJrFY/Px56/HE1Zqu+MnOzk41DwaCS2ZmJlYrcEpKSvbs2YNJ0ajSFT9BseqQVoIBUVFRSK26hItIJPLz88OkaFTpop8cHR0zMjKQFqM8iouLDxxo77hOtHF1dV2yZAmGAlCih75vmT59+p49ezAcYVVUVGhra6v74Upf08X6CXo3rKYvXiQSSVpaGrbD9cjISGihZjej6366ffv24cOHERWjJPLz87HqNkHU19ePGDHCyqqDQ2bVka77KSIiQh1X1B89evTx45aHcCgZPT29lStXYqsBJbruJwKBoI71k1AoxHx9bXp6eneNlNx1P0GcPNn6ueEqy5IlS75ccat8oA193TWyraJ+0tDQUNoOdMVJTk6uq6vDVoNQKIyNReYwcRUEgfmCvLy8Pn36qP4rsFevXu3fv//48eNYC+nOKFo/QXObajH0pVAomK+MmDdvXlVVy0M4uxPIzGfu3bvX0tJy1qxZSEjqtqSnp/P5/G+//RZrISiCQP0EAFi1ahWbzW5oaOW8XhWhrKwM8824kyZN6t5mQsxP0LhJlY/Ge/LkCbYLti5fvoz5UEAJIDlNnJmZWVhYiPief0QYOXIkg4FZAMXdu3fb2Njo6+tjJUBpIPw++MyZM6ampv7+/gjmqe7w+Xwej9cTzNRT1hfweLw5c+acO3dO+UXz+fzc3NwBA3rKYdeI9Z++ZPPmzSo1Ki4rK8PqBfDIkSP79euHSdGYgFb9tGbNmg0bNqjIWwWRSCQUCmk0mpLLLSgosLGxUa9QmQqCbntXX1+vIpZSPgUFBWZmZqo85kUDVNo7OYcOHVKFhi81NTUtLU2ZJW7atKm4uLinmQl1P8XExCQnJ6NaRGeorq5WZqTK8vLyJUuWBAQEKK1E1UFJ47u7d++OHj1aCQW1SnNzM5lM1tRUxoEwFRUVBAKhW6697Azo1k9yiouLMVwppa2trRwzJSYm3rp1q8eaSanzT1evXg0MDGzxYURExKlTp1AqccaMGSQSiUAg1NXVUSgUCoVCIBAIBEJqaioaxUGRtHvUaO5rlFQ/AQAgM8XHx8tfY40fP76iogK9M/WEQmFRUVFhYWFtbW1lZWVRUVFBQYGBgQEaZR0/fhyyLBqZqxHK8xPEsmXLEhMToR1wtbW1jY2N6I28goKCWizt1dXVnTt3LuIF3b17F8OXgyqFsv1Ep9M3btwIACgtLYU+ef369evXr9EoKyQkpEVXxsXFxcvLC9lS6uvr9fX1u8cGfMVRtp8gfH195TVHbW0tSh0aOp0eEBBAJH7+jgYGBvPnz0e2iDVr1mhqanp7eyObrfqCgZ9GjhwpFov/
p4BIfPHiRVlZGRplhYaG2tjYQD97eHh4enoimPmLFy/GjRunra2NYJ7qjrL99N1331GpVDKZLJPJ5ENLJpN55swZNIqj0WhBQUEkEsnAwCAiIgLBnIuLi21sbDCcVFNNOjVfIBZJeVzE5pfz8/PLy8sLCwsrKioaGhoEAkFDQwOdTv/555/RWCTE4/GWLl1qZ2cH9dsQITIycs+ePVQqFakM20cqlekaqkcAtw78lJ/NefWogc0UUumoBFKWyWRSiUQqk6nRkZUSiYRAIMi7ZUpA11CjqpRn70brP0bfxEqln1J7fsq+xa6tEnkON2AYqMcfRzdGKpU11AofXagZMd3Y0kFJ9WIXaNNPWTfZHJbYN9BE6ZJw2uPqbxXDpxlZ2KuopVqvtOs+Cms/CHAzqSCjZpk/u6O6+2Ra91PtB4FMhh9Rr4poM8jMMj6Pq6KR3Fr3E7dBYtxLpft9PRnrvnQ2U4i1itZpfZW+SCAV9ZRjp9UPbp3qHuGMzfsWnO4K7iccJMH9hIMkuJ9wkAT3Ew6S4H7CQRLcTzhIgvsJB0lwP+EgCe4nHCTB/YSDJIj5KWjSiCO/wD6hkMmsrmZ2KgDLg4d3Ro72KS9/3yV1MLh+I33yVP+aGmY7ad7k5wkEAgUL4nK57wrfKpiJqoFl/fShqjI0PLig4A2GGr5GU1OLRqO3s5z3ZsaVJZFz+HxFQ/jPWxBy40a6gpmoGlgeAycRi1Uweqf/6PH+o8e3k0DxmglCKFTRNSeKgKSfuNzGHT9uysx8oKujFxLy3aTgz1tmWazaI7/sz8rOFIvF7m6eixausLd3qGZWfff9dADA1rgh7SNuAAAH/ElEQVR1WwEYNy5w3ZpYKKpE0rHDd+/dFAoFvaxsZs6cPWrk2M5r+Pix5tjxxKyszKYmbq9eNqGzvpebI/XsiUvpvzc2chwcnOZ8t7C/98CKirL9B37Mf5vHYOj4DvJbsXzd7r1xGRlXAQC3M56SyeSvE9y6fe3AzzsBAJOn+gMA1q7ZMn5cUG5uTsrppNy8HABAXyfXRYtWODk6AwAKiwqWLvvPzviE35IOFhe/MzU1Xzh/2TffDAcAhIQG1tWxL6Wfv5R+3tTULC31KoK/CAxB0k83bl4eNzZw5YoN9+5nHPh5p51tbw8PLz6fH7VqEYfTsGD+MooW5ey5k1GrFqWc+tPQwGjjhu074mO+n7PIy9NHX98AACCVSjfGrGQyq8JCv9fTM8jJ+e+27Rv4fF7AhEmd1CCWiN++fT0peLqujt5fj+/tiI+xtOzl3Nf12fPso0mHRo8eP2jAkOx/nvCamwEAe/ZtKy9/v+SH6Obmphc5/yUSiVOnhEil0tu3r0O5fZ1g0MBvZs4I//386R93HKDR6FZW1gAAJrNKIBTMDp9HJBLT08+vW7/s7Jkr0HYdgUCwddu6pZGrzc0sjp/4ZXv8xrTUq7q6erFbdq9ZG+nZr/+M6WEaSoklpByQ9NPYMRPXrtkCABjqN3LmtxMePLzt4eF1+8718vL3+/Ye8fYaAABwd/cKDQ++eDHtu4j5jn36AgCsrW3d3T9v2/3r0b1XuS/OnrliZGQMNT08XvOFi2c77ycLc8sTyeehzewTJkyaMs0/M/OBc19XJrMKADBl0kxXV48xYz5HjmMyqxz79A2cOAUAMHNGOADAsU9fWxt7eW5fJ9DXN7CwsAIAODu76ep+Dg3q7z9BnqeTk0tU9KLcvJwBPr7QJ0sjV0NV7Lx5kQsXhb989XzY0FF9nVzIZLKhoZH8u3cPkPST/PlSKBQLC6uPn2oAAC9fPqPT6JCZAABmZubW1rYF71rvgz99+lgsFoeGB8s/kUgkNBodloyi4ncnTv4KdfMlEgmbzQIA+A7yYzB04n/ctDRyta+vH5RyjH9A6tkTCQd3zw6fB1WQLegwAQSBQHj0+P7v50+XlZVC28/r2Cz5VSrl814UU1NzAEBt7SdYX0e9QKs/TiSRoNPPuU1cXb1/7frV0dFltfFM6+pYhoZGP+395csPSXBChz9/8c/adUu9PH3WrN5C06Ztjl0tlUkBAIaGRocSkg8f+Wn9xhVubv02x/xobGwyb+4SfX2D02eSb9y8vGD+simTZ7bIrcMEEKdSko6f+GXa1FkL5i1lsWu3xq2DCm2BBlkDACCVquhWAkRAfb7A2MiEw/nXuVNsNotObz1aEoOhU19fZ2pqbm1tK/9naQEjfGBKSpKFhVX8jgMDBwx2dfWQ1w1Qw7rrx4R9e4+Ulhbt2h0L1SvTp4WeSUn/ZsjwhIO7c3NzWuTWTgL5yFQgEKSePT4xYHLkkmh3d08XZ/fOq1XB4a2CoO4nV1ePxkZOfn4e9N/i4sIPHyqgToOWFgUA8GVd5e09UCKRXL7yh/wT+UmNmhqaAIAW1vyaBk69Q29H6DQEoVDYzGuWR/aFxufeXgN8fYdCE4nQyJ9Go82ZswgA8PXsYqsJII/Kmy0+nycQCBwdneUCoIFFh0+GSqGyWLWdeITqBOrzT/6jJ5xJPR4btxYa/qSkJOnp6U8KngEAMDExtTC3/P2P0xQqlcNpmDolZIx/wJWrF3/59edqZpVjn75FRe8eZ94/kfwHhUKxs3cgEon7f/4xcskqL0+ftorz9PTJyLhy/Ua6DkP3/IUzjY2c96XFMpnsbcGbrXFrJ0+aSaVqZ2c/6evkAgCIjVtLp9F9+vs+zXoMAHD6f0/IaTWBq1s/Eol0KHHvhHHBAqEgOGiavb3DxT/TDAwMm7jck6d+IxKJJSVFHT4Zd3evu/dupp49wWDouLp42Ns7KP60MQf1+olMJu/ZddjJ0eXIL/sPHtpjbW378/6jUN+WQCDExMRra9MOHd57M+NKXR1bQ0Njz67DgROn3LuX8dP++OcvsoODpkOVjbmZxdrVWwQCwdOnj9sp7j9zFg/wGXzw0J6EQ7v7ew+K3byLxa59kfNfTQ1NG2u71NTjSUmHPDy8VkVvAgA493V7k5/304H4d4Vvo6M2urm1PGil1QSWFlbRURsrKsoOHd774MFtAMCmjfFUCjVu2/pz51MWL145O3xuRsYVkaiDXU0LFyzz8vRJOZ2Umnr8Q1UFEg8be1qPX5CdwRbyQb8RqIQuxVGQWyc/+AYYqGZUDCzft3QNLpc7K6xl3GmIhQuWQ3NFOFihfn7S1tb+7dfW423qMHrceSmqhvr5iUgkmptheRIwTjvg6+lwkAT3Ew6S4H7CQRLcTzhIgvsJB0lwP+EgCe4nHCTB/YSDJLifcJAE9xMOkrT+vkWTQpACPP64isIw1CCoaj3Qui6GvsanMkX3v+KgxPs8rqG5im6xat1PJr20CHj1pJI01gktHahaVFSO+1KcNusnSwfKXxfaiwmBgwl3TlcPmqC66xzbO6/s9d8NhTncfsMN9U01SWRVbbF7BvwmSX2t4PHFmuBFFoZmWljLaZMOzlMsfd2U87CeWconkfH2DzP0TTUaakV2brSB4wwY+ip9FmGnznsFAAh4iJ33igMXmRRQaOrRPnTWTzg4nUE9XI+jLuB+wkES3E84SIL7CQd
JcD/hIAnuJxwk+T8Xr77UVHOcCgAAAABJRU5ErkJggg==", 209 | "text/plain": [ 210 | "" 211 | ] 212 | }, 213 | "metadata": {}, 214 | "output_type": "display_data" 215 | } 216 | ], 217 | "source": [ 218 | "display(Image(app.get_graph().draw_mermaid_png()))" 219 | ] 220 | }, 221 | { 222 | "cell_type": "code", 223 | "execution_count": 7, 224 | "id": "9b54c131-b1a5-4f30-b8a4-61a950ea543a", 225 | "metadata": {}, 226 | "outputs": [], 227 | "source": [ 228 | "import uuid\n", 229 | "\n", 230 | "config = {\"configurable\": {\"thread_id\": str(uuid.uuid4()), \"user_id\": \"1\"}}" 231 | ] 232 | }, 233 | { 234 | "cell_type": "code", 235 | "execution_count": 8, 236 | "id": "5930b465-3692-4971-b00d-cda356754f75", 237 | "metadata": {}, 238 | "outputs": [], 239 | "source": [ 240 | "def print_stream(stream):\n", 241 | " for ns, update in stream:\n", 242 | " print(f\"Namespace '{ns}'\")\n", 243 | " for node, node_updates in update.items():\n", 244 | " if node_updates is None:\n", 245 | " continue\n", 246 | "\n", 247 | " if isinstance(node_updates, (dict, tuple)):\n", 248 | " node_updates_list = [node_updates]\n", 249 | " elif isinstance(node_updates, list):\n", 250 | " node_updates_list = node_updates\n", 251 | " else:\n", 252 | " raise ValueError(node_updates)\n", 253 | "\n", 254 | " for node_updates in node_updates_list:\n", 255 | " print(f\"Update from node '{node}'\")\n", 256 | " if isinstance(node_updates, tuple):\n", 257 | " print(node_updates)\n", 258 | " continue\n", 259 | " messages_key = next(\n", 260 | " (k for k in node_updates.keys() if \"messages\" in k), None\n", 261 | " )\n", 262 | " if messages_key is not None:\n", 263 | " node_updates[messages_key][-1].pretty_print()\n", 264 | " else:\n", 265 | " print(node_updates)\n", 266 | "\n", 267 | " print(\"\\n\\n\")\n", 268 | "\n", 269 | " print(\"\\n===\\n\")" 270 | ] 271 | }, 272 | { 273 | "cell_type": "code", 274 | "execution_count": 9, 275 | "id": "b61f97ff", 276 | "metadata": {}, 277 | "outputs": [ 278 | { 279 | "name": "stdout", 280 | "output_type": "stream", 281 | "text": [ 282 | "Namespace '('flight_assistant:84744415-e427-8fd1-f8cd-7709705ba2ba',)'\n", 283 | "Update from node 'agent'\n", 284 | "==================================\u001b[1m Ai Message \u001b[0m==================================\n", 285 | "Name: flight_assistant\n", 286 | "Tool Calls:\n", 287 | " search_flights (call_W97unU1M0IiCu0DrEmGDn5bm)\n", 288 | " Call ID: call_W97unU1M0IiCu0DrEmGDn5bm\n", 289 | " Args:\n", 290 | " departure_airport: BOS\n", 291 | " arrival_airport: JFK\n", 292 | " date: 2025-02-26\n", 293 | "\n", 294 | "\n", 295 | "\n", 296 | "Namespace '('flight_assistant:84744415-e427-8fd1-f8cd-7709705ba2ba',)'\n", 297 | "Update from node 'tools'\n", 298 | "=================================\u001b[1m Tool Message \u001b[0m=================================\n", 299 | "Name: search_flights\n", 300 | "\n", 301 | "[{\"departure_airport\": \"BOS\", \"arrival_airport\": \"JFK\", \"airline\": \"Jet Blue\", \"date\": \"2025-02-26\", \"id\": \"1\"}]\n", 302 | "\n", 303 | "\n", 304 | "\n", 305 | "Namespace '('flight_assistant:84744415-e427-8fd1-f8cd-7709705ba2ba',)'\n", 306 | "Update from node 'agent'\n", 307 | "==================================\u001b[1m Ai Message \u001b[0m==================================\n", 308 | "Name: flight_assistant\n", 309 | "\n", 310 | "I found a flight for you from Boston (BOS) to New York (JFK) for tomorrow, February 26, 2025:\n", 311 | "\n", 312 | "- **Airline**: Jet Blue\n", 313 | "\n", 314 | "Would you like to book this flight?\n", 315 | "\n", 316 | 
"\n", 317 | "\n", 318 | "Namespace '()'\n", 319 | "Update from node 'flight_assistant'\n", 320 | "==================================\u001b[1m Ai Message \u001b[0m==================================\n", 321 | "Name: flight_assistant\n", 322 | "\n", 323 | "I found a flight for you from Boston (BOS) to New York (JFK) for tomorrow, February 26, 2025:\n", 324 | "\n", 325 | "- **Airline**: Jet Blue\n", 326 | "\n", 327 | "Would you like to book this flight?\n", 328 | "\n", 329 | "\n", 330 | "\n", 331 | "\n", 332 | "===\n", 333 | "\n" 334 | ] 335 | } 336 | ], 337 | "source": [ 338 | "print_stream(\n", 339 | " app.stream(\n", 340 | " {\n", 341 | " \"messages\": [\n", 342 | " {\n", 343 | " \"role\": \"user\",\n", 344 | " \"content\": \"i am looking for a flight from boston to ny tomorrow\",\n", 345 | " }\n", 346 | " ]\n", 347 | " },\n", 348 | " config,\n", 349 | " subgraphs=True,\n", 350 | " )\n", 351 | ")" 352 | ] 353 | }, 354 | { 355 | "cell_type": "code", 356 | "execution_count": 10, 357 | "id": "0d8dc373-e4d8-4c9d-92d6-4f3dbf6a5465", 358 | "metadata": {}, 359 | "outputs": [ 360 | { 361 | "name": "stdout", 362 | "output_type": "stream", 363 | "text": [ 364 | "Namespace '('flight_assistant:e4a8a360-6a10-ffc1-9775-2db618706a62',)'\n", 365 | "Update from node 'agent'\n", 366 | "==================================\u001b[1m Ai Message \u001b[0m==================================\n", 367 | "Name: flight_assistant\n", 368 | "Tool Calls:\n", 369 | " book_flight (call_0yBH8MVZF8CZauwa0VrSIjDO)\n", 370 | " Call ID: call_0yBH8MVZF8CZauwa0VrSIjDO\n", 371 | " Args:\n", 372 | " flight_id: 1\n", 373 | "\n", 374 | "\n", 375 | "\n", 376 | "Namespace '('flight_assistant:e4a8a360-6a10-ffc1-9775-2db618706a62',)'\n", 377 | "Update from node 'tools'\n", 378 | "=================================\u001b[1m Tool Message \u001b[0m=================================\n", 379 | "Name: book_flight\n", 380 | "\n", 381 | "Successfully booked flight\n", 382 | "\n", 383 | "\n", 384 | "\n", 385 | "Namespace '('flight_assistant:e4a8a360-6a10-ffc1-9775-2db618706a62',)'\n", 386 | "Update from node 'agent'\n", 387 | "==================================\u001b[1m Ai Message \u001b[0m==================================\n", 388 | "Name: flight_assistant\n", 389 | "\n", 390 | "Your flight from Boston to New York with Jet Blue on February 26, 2025, has been successfully booked. Safe travels!\n", 391 | "\n", 392 | "\n", 393 | "\n", 394 | "Namespace '()'\n", 395 | "Update from node 'flight_assistant'\n", 396 | "==================================\u001b[1m Ai Message \u001b[0m==================================\n", 397 | "Name: flight_assistant\n", 398 | "\n", 399 | "Your flight from Boston to New York with Jet Blue on February 26, 2025, has been successfully booked. 
Safe travels!\n", 400 | "\n", 401 | "\n", 402 | "\n", 403 | "\n", 404 | "===\n", 405 | "\n" 406 | ] 407 | } 408 | ], 409 | "source": [ 410 | "print_stream(\n", 411 | " app.stream(\n", 412 | " {\"messages\": [{\"role\": \"user\", \"content\": \"yes please\"}]},\n", 413 | " config,\n", 414 | " subgraphs=True,\n", 415 | " )\n", 416 | ")" 417 | ] 418 | }, 419 | { 420 | "cell_type": "code", 421 | "execution_count": 11, 422 | "id": "b6338b25", 423 | "metadata": {}, 424 | "outputs": [ 425 | { 426 | "name": "stdout", 427 | "output_type": "stream", 428 | "text": [ 429 | "Namespace '('flight_assistant:470ea1ba-b5a2-6164-d6e1-b6c98519aefc',)'\n", 430 | "Update from node 'agent'\n", 431 | "==================================\u001b[1m Ai Message \u001b[0m==================================\n", 432 | "Name: flight_assistant\n", 433 | "Tool Calls:\n", 434 | " transfer_to_hotel_assistant (call_Ufumk8HdnMnO8ll294NLOyqI)\n", 435 | " Call ID: call_Ufumk8HdnMnO8ll294NLOyqI\n", 436 | " Args:\n", 437 | "\n", 438 | "\n", 439 | "\n", 440 | "Namespace '()'\n", 441 | "Update from node 'flight_assistant'\n", 442 | "==================================\u001b[1m Ai Message \u001b[0m==================================\n", 443 | "Name: flight_assistant\n", 444 | "Tool Calls:\n", 445 | " transfer_to_hotel_assistant (call_Ufumk8HdnMnO8ll294NLOyqI)\n", 446 | " Call ID: call_Ufumk8HdnMnO8ll294NLOyqI\n", 447 | " Args:\n", 448 | "Update from node 'flight_assistant'\n", 449 | "=================================\u001b[1m Tool Message \u001b[0m=================================\n", 450 | "Name: transfer_to_hotel_assistant\n", 451 | "\n", 452 | "Successfully transferred to hotel_assistant\n", 453 | "Update from node 'flight_assistant'\n", 454 | "{'active_agent': 'hotel_assistant'}\n", 455 | "\n", 456 | "\n", 457 | "\n", 458 | "Namespace '('hotel_assistant:96312da9-559b-cfc4-43eb-73ffd7549e5a',)'\n", 459 | "Update from node 'agent'\n", 460 | "==================================\u001b[1m Ai Message \u001b[0m==================================\n", 461 | "Name: hotel_assistant\n", 462 | "Tool Calls:\n", 463 | " search_hotels (call_UbFeu8XUunTfuVLnbMgyJUPP)\n", 464 | " Call ID: call_UbFeu8XUunTfuVLnbMgyJUPP\n", 465 | " Args:\n", 466 | " location: New York\n", 467 | "\n", 468 | "\n", 469 | "\n", 470 | "Namespace '('hotel_assistant:96312da9-559b-cfc4-43eb-73ffd7549e5a',)'\n", 471 | "Update from node 'tools'\n", 472 | "=================================\u001b[1m Tool Message \u001b[0m=================================\n", 473 | "Name: search_hotels\n", 474 | "\n", 475 | "[{\"location\": \"New York\", \"name\": \"McKittrick Hotel\", \"neighborhood\": \"Chelsea\", \"id\": \"1\"}]\n", 476 | "\n", 477 | "\n", 478 | "\n", 479 | "Namespace '('hotel_assistant:96312da9-559b-cfc4-43eb-73ffd7549e5a',)'\n", 480 | "Update from node 'agent'\n", 481 | "==================================\u001b[1m Ai Message \u001b[0m==================================\n", 482 | "Name: hotel_assistant\n", 483 | "\n", 484 | "I found a hotel for you in New York:\n", 485 | "\n", 486 | "- **Hotel Name**: McKittrick Hotel\n", 487 | "- **Neighborhood**: Chelsea\n", 488 | "\n", 489 | "Would you like to book a room at this hotel?\n", 490 | "\n", 491 | "\n", 492 | "\n", 493 | "Namespace '()'\n", 494 | "Update from node 'hotel_assistant'\n", 495 | "==================================\u001b[1m Ai Message \u001b[0m==================================\n", 496 | "Name: hotel_assistant\n", 497 | "\n", 498 | "I found a hotel for you in New York:\n", 499 | "\n", 500 | "- **Hotel Name**: McKittrick 
Hotel\n", 501 | "- **Neighborhood**: Chelsea\n", 502 | "\n", 503 | "Would you like to book a room at this hotel?\n", 504 | "\n", 505 | "\n", 506 | "\n", 507 | "\n", 508 | "===\n", 509 | "\n" 510 | ] 511 | } 512 | ], 513 | "source": [ 514 | "print_stream(\n", 515 | " app.stream(\n", 516 | " {\n", 517 | " \"messages\": [\n", 518 | " {\"role\": \"user\", \"content\": \"now i'd like to book a hotel as well\"}\n", 519 | " ]\n", 520 | " },\n", 521 | " config,\n", 522 | " subgraphs=True,\n", 523 | " )\n", 524 | ")" 525 | ] 526 | }, 527 | { 528 | "cell_type": "code", 529 | "execution_count": 12, 530 | "id": "7ed3b7c1", 531 | "metadata": {}, 532 | "outputs": [ 533 | { 534 | "name": "stdout", 535 | "output_type": "stream", 536 | "text": [ 537 | "Namespace '('hotel_assistant:dafeb86b-a3cd-bd65-21b7-250fd7daf8f2',)'\n", 538 | "Update from node 'agent'\n", 539 | "==================================\u001b[1m Ai Message \u001b[0m==================================\n", 540 | "Name: hotel_assistant\n", 541 | "Tool Calls:\n", 542 | " book_hotel (call_AyZooDJt5yGaiZikmIbzoLKk)\n", 543 | " Call ID: call_AyZooDJt5yGaiZikmIbzoLKk\n", 544 | " Args:\n", 545 | " hotel_id: 1\n", 546 | "\n", 547 | "\n", 548 | "\n", 549 | "Namespace '('hotel_assistant:dafeb86b-a3cd-bd65-21b7-250fd7daf8f2',)'\n", 550 | "Update from node 'tools'\n", 551 | "=================================\u001b[1m Tool Message \u001b[0m=================================\n", 552 | "Name: book_hotel\n", 553 | "\n", 554 | "Successfully booked hotel\n", 555 | "\n", 556 | "\n", 557 | "\n", 558 | "Namespace '('hotel_assistant:dafeb86b-a3cd-bd65-21b7-250fd7daf8f2',)'\n", 559 | "Update from node 'agent'\n", 560 | "==================================\u001b[1m Ai Message \u001b[0m==================================\n", 561 | "Name: hotel_assistant\n", 562 | "\n", 563 | "Your reservation at the McKittrick Hotel in Chelsea, New York, has been successfully booked. Enjoy your stay!\n", 564 | "\n", 565 | "\n", 566 | "\n", 567 | "Namespace '()'\n", 568 | "Update from node 'hotel_assistant'\n", 569 | "==================================\u001b[1m Ai Message \u001b[0m==================================\n", 570 | "Name: hotel_assistant\n", 571 | "\n", 572 | "Your reservation at the McKittrick Hotel in Chelsea, New York, has been successfully booked. 
Enjoy your stay!\n", 573 | "\n", 574 | "\n", 575 | "\n", 576 | "\n", 577 | "===\n", 578 | "\n" 579 | ] 580 | } 581 | ], 582 | "source": [ 583 | "print_stream(\n", 584 | " app.stream(\n", 585 | " {\"messages\": [{\"role\": \"user\", \"content\": \"yes please\"}]},\n", 586 | " config,\n", 587 | " subgraphs=True,\n", 588 | " )\n", 589 | ")" 590 | ] 591 | }, 592 | { 593 | "cell_type": "code", 594 | "execution_count": 13, 595 | "id": "8b890ead", 596 | "metadata": {}, 597 | "outputs": [ 598 | { 599 | "name": "stdout", 600 | "output_type": "stream", 601 | "text": [ 602 | "Namespace '('hotel_assistant:813f3e95-62fe-2095-d6c7-07d52a6de1ad',)'\n", 603 | "Update from node 'agent'\n", 604 | "==================================\u001b[1m Ai Message \u001b[0m==================================\n", 605 | "Name: hotel_assistant\n", 606 | "Tool Calls:\n", 607 | " transfer_to_flight_assistant (call_MpFQrEcnXvXdNBr2zCs0xgRk)\n", 608 | " Call ID: call_MpFQrEcnXvXdNBr2zCs0xgRk\n", 609 | " Args:\n", 610 | "\n", 611 | "\n", 612 | "\n", 613 | "Namespace '()'\n", 614 | "Update from node 'hotel_assistant'\n", 615 | "==================================\u001b[1m Ai Message \u001b[0m==================================\n", 616 | "Name: hotel_assistant\n", 617 | "Tool Calls:\n", 618 | " transfer_to_flight_assistant (call_MpFQrEcnXvXdNBr2zCs0xgRk)\n", 619 | " Call ID: call_MpFQrEcnXvXdNBr2zCs0xgRk\n", 620 | " Args:\n", 621 | "Update from node 'hotel_assistant'\n", 622 | "=================================\u001b[1m Tool Message \u001b[0m=================================\n", 623 | "Name: transfer_to_flight_assistant\n", 624 | "\n", 625 | "Successfully transferred to flight_assistant\n", 626 | "Update from node 'hotel_assistant'\n", 627 | "{'active_agent': 'flight_assistant'}\n", 628 | "\n", 629 | "\n", 630 | "\n", 631 | "Namespace '('flight_assistant:56838261-9788-8e53-f605-c875e9c0336a',)'\n", 632 | "Update from node 'agent'\n", 633 | "==================================\u001b[1m Ai Message \u001b[0m==================================\n", 634 | "Name: flight_assistant\n", 635 | "\n", 636 | "Hello! I'm here to assist you with your flight needs. How can I help you today?\n", 637 | "\n", 638 | "\n", 639 | "\n", 640 | "Namespace '()'\n", 641 | "Update from node 'flight_assistant'\n", 642 | "==================================\u001b[1m Ai Message \u001b[0m==================================\n", 643 | "Name: flight_assistant\n", 644 | "\n", 645 | "Hello! I'm here to assist you with your flight needs. 
How can I help you today?\n", 646 | "\n", 647 | "\n", 648 | "\n", 649 | "\n", 650 | "===\n", 651 | "\n" 652 | ] 653 | } 654 | ], 655 | "source": [ 656 | "print_stream(\n", 657 | " app.stream(\n", 658 | " {\n", 659 | " \"messages\": [\n", 660 | " {\"role\": \"user\", \"content\": \"i wanna talk to flight assistant now\"}\n", 661 | " ]\n", 662 | " },\n", 663 | " config,\n", 664 | " subgraphs=True,\n", 665 | " )\n", 666 | ")" 667 | ] 668 | }, 669 | { 670 | "cell_type": "code", 671 | "execution_count": null, 672 | "id": "7d6d1752", 673 | "metadata": {}, 674 | "outputs": [], 675 | "source": [] 676 | } 677 | ], 678 | "metadata": { 679 | "kernelspec": { 680 | "display_name": "test", 681 | "language": "python", 682 | "name": "python3" 683 | }, 684 | "language_info": { 685 | "codemirror_mode": { 686 | "name": "ipython", 687 | "version": 3 688 | }, 689 | "file_extension": ".py", 690 | "mimetype": "text/x-python", 691 | "name": "python", 692 | "nbconvert_exporter": "python", 693 | "pygments_lexer": "ipython3", 694 | "version": "3.13.1" 695 | } 696 | }, 697 | "nbformat": 4, 698 | "nbformat_minor": 5 699 | } 700 | -------------------------------------------------------------------------------- /examples/customer_support/src/agent/customer_support.py: -------------------------------------------------------------------------------- 1 | import datetime 2 | from collections import defaultdict 3 | from typing import Callable 4 | 5 | from langchain_core.runnables import RunnableConfig 6 | from langchain_openai import ChatOpenAI 7 | from langgraph.prebuilt import create_react_agent 8 | from langgraph_swarm import create_handoff_tool, create_swarm 9 | 10 | model = ChatOpenAI(model="gpt-4o") 11 | 12 | # Mock data for tools 13 | RESERVATIONS = defaultdict(lambda: {"flight_info": {}, "hotel_info": {}}) 14 | 15 | TOMORROW = (datetime.date.today() + datetime.timedelta(days=1)).isoformat() 16 | FLIGHTS = [ 17 | { 18 | "departure_airport": "BOS", 19 | "arrival_airport": "JFK", 20 | "airline": "Jet Blue", 21 | "date": TOMORROW, 22 | "id": "1", 23 | } 24 | ] 25 | HOTELS = [ 26 | { 27 | "location": "New York", 28 | "name": "McKittrick Hotel", 29 | "neighborhood": "Chelsea", 30 | "id": "1", 31 | } 32 | ] 33 | 34 | 35 | # Flight tools 36 | def search_flights( 37 | departure_airport: str, 38 | arrival_airport: str, 39 | date: str, 40 | ) -> list[dict]: 41 | """Search flights. 42 | 43 | Args: 44 | departure_airport: 3-letter airport code for the departure airport. If unsure, use the biggest airport in the area 45 | arrival_airport: 3-letter airport code for the arrival airport. If unsure, use the biggest airport in the area 46 | date: YYYY-MM-DD date 47 | """ 48 | # return all flights for simplicity 49 | return FLIGHTS 50 | 51 | 52 | def book_flight( 53 | flight_id: str, 54 | config: RunnableConfig, 55 | ) -> str: 56 | """Book a flight.""" 57 | user_id = config["configurable"].get("user_id") 58 | flight = [flight for flight in FLIGHTS if flight["id"] == flight_id][0] 59 | RESERVATIONS[user_id]["flight_info"] = flight 60 | return "Successfully booked flight" 61 | 62 | 63 | # Hotel tools 64 | def search_hotels(location: str) -> list[dict]: 65 | """Search hotels. 
66 | 67 | Args: 68 | location: official, legal city name (proper noun) 69 | """ 70 | # return all hotels for simplicity 71 | return HOTELS 72 | 73 | 74 | def book_hotel( 75 | hotel_id: str, 76 | config: RunnableConfig, 77 | ) -> str: 78 | """Book a hotel.""" 79 | user_id = config["configurable"].get("user_id") 80 | hotel = [hotel for hotel in HOTELS if hotel["id"] == hotel_id][0] 81 | RESERVATIONS[user_id]["hotel_info"] = hotel 82 | return "Successfully booked hotel" 83 | 84 | 85 | # Define handoff tools 86 | transfer_to_hotel_assistant = create_handoff_tool( 87 | agent_name="hotel_assistant", 88 | description="Transfer user to the hotel-booking assistant that can search for and book hotels.", 89 | ) 90 | transfer_to_flight_assistant = create_handoff_tool( 91 | agent_name="flight_assistant", 92 | description="Transfer user to the flight-booking assistant that can search for and book flights.", 93 | ) 94 | 95 | 96 | # Define agent prompt 97 | def make_prompt(base_system_prompt: str) -> Callable[[dict, RunnableConfig], list]: 98 | def prompt(state: dict, config: RunnableConfig) -> list: 99 | user_id = config["configurable"].get("user_id") 100 | current_reservation = RESERVATIONS[user_id] 101 | system_prompt = ( 102 | base_system_prompt 103 | + f"\n\nUser's active reservation: {current_reservation}" 104 | + f"\nToday is: {datetime.datetime.now()}" 105 | ) 106 | return [{"role": "system", "content": system_prompt}] + state["messages"] 107 | 108 | return prompt 109 | 110 | 111 | # Define agents 112 | flight_assistant = create_react_agent( 113 | model, 114 | [search_flights, book_flight, transfer_to_hotel_assistant], 115 | prompt=make_prompt("You are a flight booking assistant"), 116 | name="flight_assistant", 117 | ) 118 | 119 | hotel_assistant = create_react_agent( 120 | model, 121 | [search_hotels, book_hotel, transfer_to_flight_assistant], 122 | prompt=make_prompt("You are a hotel booking assistant"), 123 | name="hotel_assistant", 124 | ) 125 | 126 | # Compile and run! 127 | builder = create_swarm( 128 | [flight_assistant, hotel_assistant], default_active_agent="flight_assistant" 129 | ) 130 | app = builder.compile() 131 | -------------------------------------------------------------------------------- /examples/research/README.md: -------------------------------------------------------------------------------- 1 | # Swarm Researcher Example 2 | 3 | A two-phase multi-agent system that demonstrates an effective collaborative approach to planning and research tasks. This example showcases a pattern used in many deep research systems: 4 | 5 | 1. **Planning Phase**: A dedicated planner agent clarifies requirements, reads documentation, and develops a structured approach 6 | 2. **Research Phase**: A researcher agent implements the solution based on the planner's guidance 7 | 8 | ## Quickstart 9 | 10 | ```bash 11 | uvx --refresh --from "langgraph-cli[inmem]" --with-editable . 
--python 3.11 langgraph dev 12 | ``` 13 | 14 | ## How It Works 15 | 16 | - The system starts with the **planner agent** that: 17 | - Analyzes the user's request 18 | - Reads relevant documentation 19 | - Asks clarifying questions to refine scope 20 | - Creates a structured plan with clear objectives 21 | - Identifies the most relevant resources for implementation 22 | - Hands off to the researcher agent 23 | 24 | - The **researcher agent** then: 25 | - Follows the structured plan from the planner 26 | - Reads the recommended documentation sources 27 | - Implements the solution to satisfy all requirements 28 | - Can request additional planning if needed 29 | 30 | This pattern demonstrates how breaking complex tasks into planning and execution phases can lead to more thorough, well-researched outcomes. 31 | 32 | -------------------------------------------------------------------------------- /examples/research/langgraph.json: -------------------------------------------------------------------------------- 1 | { 2 | "dependencies": ["."], 3 | "graphs": { 4 | "agent": "./src/agent/agent.py:app" 5 | } 6 | } -------------------------------------------------------------------------------- /examples/research/pyproject.toml: -------------------------------------------------------------------------------- 1 | [project] 2 | name = "swarm-researcher" 3 | version = "0.0.1" 4 | description = "Simple multi-agent example for planning and research." 5 | authors = [ 6 | { name = "Lance Martin" } 7 | ] 8 | readme = "README.md" 9 | license = { text = "MIT" } 10 | requires-python = ">=3.9" 11 | dependencies = [ 12 | "httpx>=0.28.1", 13 | "markdownify>=1.1.0", 14 | "langchain-anthropic>=0.3.10", 15 | "langchain-openai>=0.3.11", 16 | "langchain-mcp-adapters>=0.0.5", 17 | "langgraph>=0.3.21", 18 | "langgraph-swarm>=0.0.7", 19 | "langchain>=0.3.21", 20 | ] 21 | 22 | [project.optional-dependencies] 23 | dev = ["mypy>=1.11.1", "ruff>=0.6.1"] 24 | 25 | [build-system] 26 | requires = ["setuptools>=73.0.0", "wheel"] 27 | build-backend = "setuptools.build_meta" 28 | 29 | [tool.setuptools] 30 | packages = ["swarm_researcher"] 31 | 32 | [tool.setuptools.package-dir] 33 | "swarm_researcher" = "src/agent" 34 | 35 | [tool.setuptools.package-data] 36 | "*" = ["py.typed"] 37 | 38 | [tool.ruff] 39 | lint.select = [ 40 | "E", # pycodestyle 41 | "F", # pyflakes 42 | "I", # isort 43 | "D", # pydocstyle 44 | "D401", # First line should be in imperative mood 45 | "T201", 46 | "UP", 47 | ] 48 | lint.ignore = [ 49 | "UP006", 50 | "UP007", 51 | "UP035", 52 | "D417", 53 | "E501", 54 | ] 55 | 56 | [tool.ruff.lint.per-file-ignores] 57 | "tests/*" = ["D", "UP"] 58 | 59 | [tool.ruff.lint.pydocstyle] 60 | convention = "google" -------------------------------------------------------------------------------- /examples/research/src/agent/agent.py: -------------------------------------------------------------------------------- 1 | from langchain.chat_models import init_chat_model 2 | from langgraph.prebuilt import create_react_agent 3 | from langgraph_swarm import create_handoff_tool, create_swarm 4 | 5 | # from swarm_researcher.configuration import Configuration 6 | from swarm_researcher.prompts import planner_prompt, researcher_prompt 7 | from swarm_researcher.utils import fetch_doc 8 | 9 | # LLM 10 | model = init_chat_model(model="gpt-4o", model_provider="openai") 11 | 12 | # Handoff tools 13 | transfer_to_planner_agent = create_handoff_tool( 14 | agent_name="planner_agent", 15 | description="Transfer the user to the planner_agent for 
clarifying questions related to the user's request.", 16 | ) 17 | transfer_to_researcher_agent = create_handoff_tool( 18 | agent_name="researcher_agent", 19 | description="Transfer the user to the researcher_agent to perform research and implement the solution to the user's request.", 20 | ) 21 | 22 | # LLMS.txt 23 | llms_txt = "LangGraph:https://langchain-ai.github.io/langgraph/llms.txt" 24 | num_urls = 3 25 | planner_prompt_formatted = planner_prompt.format(llms_txt=llms_txt, num_urls=num_urls) 26 | 27 | # Planner agent 28 | planner_agent = create_react_agent( 29 | model, 30 | prompt=planner_prompt_formatted, 31 | tools=[fetch_doc, transfer_to_researcher_agent], 32 | name="planner_agent", 33 | ) 34 | 35 | # Researcher agent 36 | researcher_agent = create_react_agent( 37 | model, 38 | prompt=researcher_prompt, 39 | tools=[fetch_doc, transfer_to_planner_agent], 40 | name="researcher_agent", 41 | ) 42 | 43 | # Swarm 44 | agent_swarm = create_swarm( 45 | [planner_agent, researcher_agent], default_active_agent="planner_agent" 46 | ) 47 | app = agent_swarm.compile() 48 | -------------------------------------------------------------------------------- /examples/research/src/agent/configuration.py: -------------------------------------------------------------------------------- 1 | import os 2 | from typing import Any, Optional 3 | 4 | from langchain_core.runnables import RunnableConfig 5 | from pydantic import BaseModel, Field 6 | 7 | 8 | class Configuration(BaseModel): 9 | """The configurable fields for the research assistant.""" 10 | 11 | llms_txt: str = Field( 12 | default="https://langchain-ai.github.io/langgraph/llms.txt", 13 | title="llms.txt URL", 14 | description="llms.txt URL to use for research", 15 | ) 16 | 17 | @classmethod 18 | def from_runnable_config( 19 | cls, config: Optional[RunnableConfig] = None 20 | ) -> "Configuration": 21 | """Create a Configuration instance from a RunnableConfig.""" 22 | configurable = ( 23 | config["configurable"] if config and "configurable" in config else {} 24 | ) 25 | 26 | # Get raw values from environment or config 27 | raw_values: dict[str, Any] = { 28 | name: os.environ.get(name.upper(), configurable.get(name)) 29 | for name in cls.model_fields.keys() 30 | } 31 | 32 | # Filter out None values 33 | values = {k: v for k, v in raw_values.items() if v is not None} 34 | 35 | return cls(**values) 36 | -------------------------------------------------------------------------------- /examples/research/src/agent/prompts.py: -------------------------------------------------------------------------------- 1 | planner_prompt = """ 2 | 3 | You will help plan the steps to implement a LangGraph application based on the user's request. 4 | 5 | 6 | 7 | 1. Reflect on the user's request and the project scope 8 | 2. Use the fetch_doc tool to read this llms.txt file, which gives you access to the LangGraph documentation: {llms_txt} 9 | 3. [IMPORTANT]: After reading the llms.txt file, ask the user for clarifications to help refine the project scope. 10 | 4. Once you have a clear project scope based on the user's feedback, select the most relevant URLs from the llms.txt file to reference in order to implement the project. 11 | 5. Then, produce a short summary with two markdown sections: 12 | - ## Scope: A short description that lays out the scope of the project with up to 5 bullet points 13 | - ## URLs: A list of the {num_urls} relevant URLs to reference in order to implement the project 14 | 6. 
Finally, transfer to the researcher agent using the transfer_to_researcher_agent tool. 15 | 16 | """ 17 | 18 | researcher_prompt = """ 19 | 20 | You will implement the solution to the user's request. 21 | 22 | 23 | 24 | 1. First, reflect on the project Scope as provided by the planner agent. 25 | 2. Then, use the fetch_doc tool to fetch and read each URL in the list of URLs provided by the planner agent. 26 | 3. Reflect on the information in the URLs. 27 | 4. Think carefully. 28 | 5. Implement the solution to the user's request using the information in the URLs. 29 | 6. If you need further clarification or additional sources to implement the solution, then transfer to transfer_to_planner_agent. 30 | 31 | 32 | 33 | Check that your solution satisfies all bullet points in the project Scope. 34 | 35 | """ 36 | -------------------------------------------------------------------------------- /examples/research/src/agent/utils.py: -------------------------------------------------------------------------------- 1 | import httpx 2 | from markdownify import markdownify 3 | 4 | httpx_client = httpx.Client(follow_redirects=False, timeout=10) 5 | 6 | 7 | def print_stream(stream): 8 | for ns, update in stream: 9 | print(f"Namespace '{ns}'") 10 | for node, node_updates in update.items(): 11 | if node_updates is None: 12 | continue 13 | 14 | if isinstance(node_updates, (dict, tuple)): 15 | node_updates_list = [node_updates] 16 | elif isinstance(node_updates, list): 17 | node_updates_list = node_updates 18 | else: 19 | raise ValueError(node_updates) 20 | 21 | for node_updates in node_updates_list: 22 | print(f"Update from node '{node}'") 23 | if isinstance(node_updates, tuple): 24 | print(node_updates) 25 | continue 26 | messages_key = next( 27 | (k for k in node_updates.keys() if "messages" in k), None 28 | ) 29 | if messages_key is not None: 30 | node_updates[messages_key][-1].pretty_print() 31 | else: 32 | print(node_updates) 33 | 34 | print("\n\n") 35 | 36 | print("\n===\n") 37 | 38 | 39 | def fetch_doc(url: str) -> str: 40 | """Fetch a document from a URL and return the markdownified text. 41 | 42 | Args: 43 | url (str): The URL of the document to fetch. 44 | 45 | Returns: 46 | str: The markdownified text of the document. 
47 | """ 48 | try: 49 | response = httpx_client.get(url, timeout=10) 50 | response.raise_for_status() 51 | return markdownify(response.text) 52 | except (httpx.HTTPStatusError, httpx.RequestError) as e: 53 | return f"Encountered an HTTP error: {str(e)}" 54 | -------------------------------------------------------------------------------- /langgraph_swarm/__init__.py: -------------------------------------------------------------------------------- 1 | from langgraph_swarm.handoff import create_handoff_tool 2 | from langgraph_swarm.swarm import SwarmState, add_active_agent_router, create_swarm 3 | 4 | __all__ = ["create_swarm", "add_active_agent_router", "create_handoff_tool", "SwarmState"] 5 | -------------------------------------------------------------------------------- /langgraph_swarm/handoff.py: -------------------------------------------------------------------------------- 1 | import re 2 | 3 | from langchain_core.messages import ToolMessage 4 | from langchain_core.tools import BaseTool, InjectedToolCallId, tool 5 | from langgraph.graph.state import CompiledStateGraph 6 | from langgraph.prebuilt import InjectedState, ToolNode 7 | from langgraph.types import Command 8 | from typing_extensions import Annotated 9 | 10 | WHITESPACE_RE = re.compile(r"\s+") 11 | METADATA_KEY_HANDOFF_DESTINATION = "__handoff_destination" 12 | 13 | 14 | def _normalize_agent_name(agent_name: str) -> str: 15 | """Normalize an agent name to be used inside the tool name.""" 16 | return WHITESPACE_RE.sub("_", agent_name.strip()).lower() 17 | 18 | 19 | def create_handoff_tool( 20 | *, agent_name: str, name: str | None = None, description: str | None = None 21 | ) -> BaseTool: 22 | """Create a tool that can handoff control to the requested agent. 23 | 24 | Args: 25 | agent_name: The name of the agent to handoff control to, i.e. 26 | the name of the agent node in the multi-agent graph. 27 | Agent names should be simple, clear and unique, preferably in snake_case, 28 | although you are only limited to the names accepted by LangGraph 29 | nodes as well as the tool names accepted by LLM providers 30 | (the tool name will look like this: `transfer_to_`). 31 | name: Optional name of the tool to use for the handoff. 32 | If not provided, the tool name will be `transfer_to_`. 33 | description: Optional description for the handoff tool. 34 | If not provided, the tool description will be `Ask agent for help`. 
35 | """ 36 | if name is None: 37 | name = f"transfer_to_{_normalize_agent_name(agent_name)}" 38 | 39 | if description is None: 40 | description = f"Ask agent '{agent_name}' for help" 41 | 42 | @tool(name, description=description) 43 | def handoff_to_agent( 44 | state: Annotated[dict, InjectedState], 45 | tool_call_id: Annotated[str, InjectedToolCallId], 46 | ): 47 | tool_message = ToolMessage( 48 | content=f"Successfully transferred to {agent_name}", 49 | name=name, 50 | tool_call_id=tool_call_id, 51 | ) 52 | return Command( 53 | goto=agent_name, 54 | graph=Command.PARENT, 55 | update={"messages": state["messages"] + [tool_message], "active_agent": agent_name}, 56 | ) 57 | 58 | handoff_to_agent.metadata = {METADATA_KEY_HANDOFF_DESTINATION: agent_name} 59 | return handoff_to_agent 60 | 61 | 62 | def get_handoff_destinations(agent: CompiledStateGraph, tool_node_name: str = "tools") -> list[str]: 63 | """Get a list of destinations from agent's handoff tools.""" 64 | nodes = agent.get_graph().nodes 65 | if tool_node_name not in nodes: 66 | return [] 67 | 68 | tool_node = nodes[tool_node_name].data 69 | if not isinstance(tool_node, ToolNode): 70 | return [] 71 | 72 | tools = tool_node.tools_by_name.values() 73 | return [ 74 | tool.metadata[METADATA_KEY_HANDOFF_DESTINATION] 75 | for tool in tools 76 | if tool.metadata is not None and METADATA_KEY_HANDOFF_DESTINATION in tool.metadata 77 | ] 78 | -------------------------------------------------------------------------------- /langgraph_swarm/py.typed: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/langchain-ai/langgraph-swarm-py/8879cd91cf799da9e217e382a2c56b5033575d37/langgraph_swarm/py.typed -------------------------------------------------------------------------------- /langgraph_swarm/swarm.py: -------------------------------------------------------------------------------- 1 | from langgraph.graph import START, MessagesState, StateGraph 2 | from langgraph.pregel import Pregel 3 | from typing_extensions import Any, Literal, Optional, Type, TypeVar, Union, get_args, get_origin 4 | 5 | from langgraph_swarm.handoff import get_handoff_destinations 6 | 7 | 8 | class SwarmState(MessagesState): 9 | """State schema for the multi-agent swarm.""" 10 | 11 | # NOTE: this state field is optional and is not expected to be provided by the user. 12 | # If a user does provide it, the graph will start from the specified active agent. 13 | # If active agent is typed as a `str`, we turn it into enum of all active agent names. 
14 | active_agent: Optional[str] 15 | 16 | 17 | StateSchema = TypeVar("StateSchema", bound=SwarmState) 18 | StateSchemaType = Type[StateSchema] 19 | 20 | 21 | def _update_state_schema_agent_names( 22 | state_schema: StateSchemaType, agent_names: list[str] 23 | ) -> StateSchemaType: 24 | """Update the state schema to use Literal with agent names for 'active_agent'.""" 25 | 26 | active_agent_annotation = state_schema.__annotations__["active_agent"] 27 | 28 | # Check if the annotation is str or Optional[str] 29 | is_str_type = active_agent_annotation is str 30 | is_optional_str = ( 31 | get_origin(active_agent_annotation) is Union and get_args(active_agent_annotation)[0] is str 32 | ) 33 | 34 | # We only update if the 'active_agent' is a str or Optional[str] 35 | if not (is_str_type or is_optional_str): 36 | return state_schema 37 | 38 | updated_schema = type( 39 | f"{state_schema.__name__}", 40 | (state_schema,), 41 | {"__annotations__": {**state_schema.__annotations__}}, 42 | ) 43 | 44 | # Create the Literal type with agent names 45 | literal_type = Literal.__getitem__(tuple(agent_names)) 46 | 47 | # If it was Optional[str], make it Optional[Literal[...]] 48 | if is_optional_str: 49 | updated_schema.__annotations__["active_agent"] = Optional[literal_type] 50 | else: 51 | updated_schema.__annotations__["active_agent"] = literal_type 52 | 53 | return updated_schema 54 | 55 | 56 | def add_active_agent_router( 57 | builder: StateGraph, 58 | *, 59 | route_to: list[str], 60 | default_active_agent: str, 61 | ) -> StateGraph: 62 | """Add a router to the currently active agent to the StateGraph. 63 | 64 | Args: 65 | builder: The graph builder (StateGraph) to add the router to. 66 | route_to: A list of agent (node) names to route to. 67 | default_active_agent: Name of the agent to route to by default (if no agents are currently active). 68 | 69 | Returns: 70 | StateGraph with the router added. 
71 | 72 | Example: 73 | ```python 74 | from langgraph.checkpoint.memory import InMemorySaver 75 | from langgraph.prebuilt import create_react_agent 76 | from langgraph.graph import StateGraph 77 | from langgraph_swarm import SwarmState, create_handoff_tool, add_active_agent_router 78 | 79 | def add(a: int, b: int) -> int: 80 | '''Add two numbers''' 81 | return a + b 82 | 83 | alice = create_react_agent( 84 | "openai:gpt-4o", 85 | [add, create_handoff_tool(agent_name="Bob")], 86 | prompt="You are Alice, an addition expert.", 87 | name="Alice", 88 | ) 89 | 90 | bob = create_react_agent( 91 | "openai:gpt-4o", 92 | [create_handoff_tool(agent_name="Alice", description="Transfer to Alice, she can help with math")], 93 | prompt="You are Bob, you speak like a pirate.", 94 | name="Bob", 95 | ) 96 | 97 | checkpointer = InMemorySaver() 98 | workflow = ( 99 | StateGraph(SwarmState) 100 | .add_node(alice, destinations=("Bob",)) 101 | .add_node(bob, destinations=("Alice",)) 102 | ) 103 | # this is the router that enables us to keep track of the last active agent 104 | workflow = add_active_agent_router( 105 | builder=workflow, 106 | route_to=["Alice", "Bob"], 107 | default_active_agent="Alice", 108 | ) 109 | 110 | # compile the workflow 111 | app = workflow.compile(checkpointer=checkpointer) 112 | 113 | config = {"configurable": {"thread_id": "1"}} 114 | turn_1 = app.invoke( 115 | {"messages": [{"role": "user", "content": "i'd like to speak to Bob"}]}, 116 | config, 117 | ) 118 | turn_2 = app.invoke( 119 | {"messages": [{"role": "user", "content": "what's 5 + 7?"}]}, 120 | config, 121 | ) 122 | ``` 123 | """ 124 | channels = builder.schemas[builder.schema] 125 | if "active_agent" not in channels: 126 | raise ValueError("Missing required key 'active_agent' in builder's state_schema") 127 | 128 | if default_active_agent not in route_to: 129 | raise ValueError( 130 | f"Default active agent '{default_active_agent}' not found in routes {route_to}" 131 | ) 132 | 133 | def route_to_active_agent(state: dict): 134 | return state.get("active_agent", default_active_agent) 135 | 136 | builder.add_conditional_edges(START, route_to_active_agent, path_map=route_to) 137 | return builder 138 | 139 | 140 | def create_swarm( 141 | agents: list[Pregel], 142 | *, 143 | default_active_agent: str, 144 | state_schema: StateSchemaType = SwarmState, 145 | config_schema: Type[Any] | None = None, 146 | ) -> StateGraph: 147 | """Create a multi-agent swarm. 148 | 149 | Args: 150 | agents: List of agents to add to the swarm. 151 | An agent can be a LangGraph [CompiledStateGraph](https://langchain-ai.github.io/langgraph/reference/graphs/#langgraph.graph.state.CompiledStateGraph), 152 | a functional API [workflow](https://langchain-ai.github.io/langgraph/reference/func/#langgraph.func.entrypoint), 153 | or any other [Pregel](https://langchain-ai.github.io/langgraph/reference/pregel/#langgraph.pregel.Pregel) object. 154 | default_active_agent: Name of the agent to route to by default (if no agents are currently active). 155 | state_schema: State schema to use for the multi-agent graph. 156 | config_schema: An optional schema for configuration. 157 | Use this to expose configurable parameters via `swarm.config_specs`. 158 | 159 | Returns: 160 | A multi-agent swarm StateGraph. 
161 | 162 | Example: 163 | ```python 164 | from langgraph.checkpoint.memory import InMemorySaver 165 | from langgraph.prebuilt import create_react_agent 166 | from langgraph_swarm import create_handoff_tool, create_swarm 167 | 168 | def add(a: int, b: int) -> int: 169 | '''Add two numbers''' 170 | return a + b 171 | 172 | alice = create_react_agent( 173 | "openai:gpt-4o", 174 | [add, create_handoff_tool(agent_name="Bob")], 175 | prompt="You are Alice, an addition expert.", 176 | name="Alice", 177 | ) 178 | 179 | bob = create_react_agent( 180 | "openai:gpt-4o", 181 | [create_handoff_tool(agent_name="Alice", description="Transfer to Alice, she can help with math")], 182 | prompt="You are Bob, you speak like a pirate.", 183 | name="Bob", 184 | ) 185 | 186 | checkpointer = InMemorySaver() 187 | workflow = create_swarm( 188 | [alice, bob], 189 | default_active_agent="Alice" 190 | ) 191 | app = workflow.compile(checkpointer=checkpointer) 192 | 193 | config = {"configurable": {"thread_id": "1"}} 194 | turn_1 = app.invoke( 195 | {"messages": [{"role": "user", "content": "i'd like to speak to Bob"}]}, 196 | config, 197 | ) 198 | turn_2 = app.invoke( 199 | {"messages": [{"role": "user", "content": "what's 5 + 7?"}]}, 200 | config, 201 | ) 202 | ``` 203 | """ 204 | active_agent_annotation = state_schema.__annotations__.get("active_agent") 205 | if active_agent_annotation is None: 206 | raise ValueError("Missing required key 'active_agent' in state_schema") 207 | 208 | agent_names = [agent.name for agent in agents] 209 | state_schema = _update_state_schema_agent_names(state_schema, agent_names) 210 | builder = StateGraph(state_schema, config_schema) 211 | add_active_agent_router( 212 | builder, 213 | route_to=agent_names, 214 | default_active_agent=default_active_agent, 215 | ) 216 | for agent in agents: 217 | builder.add_node( 218 | agent.name, 219 | agent, 220 | destinations=tuple(get_handoff_destinations(agent)), 221 | ) 222 | 223 | return builder 224 | -------------------------------------------------------------------------------- /pyproject.toml: -------------------------------------------------------------------------------- 1 | [build-system] 2 | requires = ["pdm-backend"] 3 | build-backend = "pdm.backend" 4 | 5 | [project] 6 | name = "langgraph-swarm" 7 | version = "0.0.11" 8 | description = "An implementation of a multi-agent swarm using LangGraph" 9 | authors = [ 10 | {name = "Vadym Barda", email = "19161700+vbarda@users.noreply.github.com "} 11 | ] 12 | license = "MIT" 13 | license-files = ["LICENSE"] 14 | readme = "README.md" 15 | requires-python = ">=3.10" 16 | dependencies = [ 17 | "langgraph>=0.3.5", 18 | "langchain-core>=0.3.40,<0.4.0" 19 | ] 20 | 21 | [dependency-groups] 22 | test = [ 23 | "pytest>=8.0.0", 24 | "ruff>=0.9.4", 25 | "mypy>=1.8.0", 26 | "pytest-socket>=0.7.0", 27 | "types-setuptools>=69.0.0", 28 | ] 29 | 30 | [tool.pytest.ini_options] 31 | minversion = "8.0" 32 | addopts = "-ra -q -v" 33 | testpaths = [ 34 | "tests", 35 | ] 36 | python_files = ["test_*.py"] 37 | python_functions = ["test_*"] 38 | 39 | [tool.ruff] 40 | line-length = 100 41 | target-version = "py310" 42 | 43 | [tool.ruff.lint] 44 | select = [ 45 | "E", # pycodestyle errors 46 | "W", # pycodestyle warnings 47 | "F", # pyflakes 48 | "I", # isort 49 | "B", # flake8-bugbear 50 | ] 51 | ignore = [ 52 | "E501" # line-length 53 | ] 54 | 55 | 56 | [tool.mypy] 57 | python_version = "3.11" 58 | warn_return_any = true 59 | warn_unused_configs = true 60 | disallow_untyped_defs = true 61 | check_untyped_defs = true 62 
| -------------------------------------------------------------------------------- /static/img/swarm.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/langchain-ai/langgraph-swarm-py/8879cd91cf799da9e217e382a2c56b5033575d37/static/img/swarm.png -------------------------------------------------------------------------------- /tests/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/langchain-ai/langgraph-swarm-py/8879cd91cf799da9e217e382a2c56b5033575d37/tests/__init__.py -------------------------------------------------------------------------------- /tests/test_import.py: -------------------------------------------------------------------------------- 1 | def test_import() -> None: 2 | """Test that the code can be imported""" 3 | from langgraph_swarm import ( # noqa: F401 4 | add_active_agent_router, 5 | create_handoff_tool, 6 | create_swarm, 7 | ) 8 | -------------------------------------------------------------------------------- /tests/test_swarm.py: -------------------------------------------------------------------------------- 1 | from typing import Optional 2 | 3 | from langchain_core.callbacks.manager import CallbackManagerForLLMRun 4 | from langchain_core.language_models.chat_models import BaseChatModel 5 | from langchain_core.messages import AIMessage, BaseMessage 6 | from langchain_core.outputs import ChatGeneration, ChatResult 7 | from langgraph.checkpoint.memory import MemorySaver 8 | from langgraph.prebuilt import create_react_agent 9 | 10 | from langgraph_swarm import create_handoff_tool, create_swarm 11 | 12 | 13 | class FakeChatModel(BaseChatModel): 14 | idx: int = 0 15 | responses: list[BaseMessage] 16 | 17 | @property 18 | def _llm_type(self) -> str: 19 | return "fake-tool-call-model" 20 | 21 | def _generate( 22 | self, 23 | messages: list[BaseMessage], 24 | stop: Optional[list[str]] = None, 25 | run_manager: Optional[CallbackManagerForLLMRun] = None, 26 | **kwargs, 27 | ) -> ChatResult: 28 | generation = ChatGeneration(message=self.responses[self.idx]) 29 | self.idx += 1 30 | return ChatResult(generations=[generation]) 31 | 32 | def bind_tools(self, tools: list[any]) -> "FakeChatModel": 33 | return self 34 | 35 | 36 | def test_basic_swarm() -> None: 37 | # Create fake responses for the model 38 | recorded_messages = [ 39 | AIMessage( 40 | content="", 41 | name="Alice", 42 | tool_calls=[ 43 | { 44 | "name": "transfer_to_bob", 45 | "args": {}, 46 | "id": "call_1LlFyjm6iIhDjdn7juWuPYr4", 47 | } 48 | ], 49 | ), 50 | AIMessage( 51 | content="Ahoy, matey! Bob the pirate be at yer service. What be ye needin' help with today on the high seas? 
Arrr!", 52 | name="Bob", 53 | ), 54 | AIMessage( 55 | content="", 56 | name="Bob", 57 | tool_calls=[ 58 | { 59 | "name": "transfer_to_alice", 60 | "args": {}, 61 | "id": "call_T6pNmo2jTfZEK3a9avQ14f8Q", 62 | } 63 | ], 64 | ), 65 | AIMessage( 66 | content="", 67 | name="Alice", 68 | tool_calls=[ 69 | { 70 | "name": "add", 71 | "args": { 72 | "a": 5, 73 | "b": 7, 74 | }, 75 | "id": "call_4kLYO1amR2NfhAxfECkALCr1", 76 | } 77 | ], 78 | ), 79 | AIMessage( 80 | content="The sum of 5 and 7 is 12.", 81 | name="Alice", 82 | ), 83 | ] 84 | 85 | model = FakeChatModel(responses=recorded_messages) 86 | 87 | def add(a: int, b: int) -> int: 88 | """Add two numbers""" 89 | return a + b 90 | 91 | alice = create_react_agent( 92 | model, 93 | [add, create_handoff_tool(agent_name="Bob")], 94 | prompt="You are Alice, an addition expert.", 95 | name="Alice", 96 | ) 97 | 98 | bob = create_react_agent( 99 | model, 100 | [ 101 | create_handoff_tool( 102 | agent_name="Alice", description="Transfer to Alice, she can help with math" 103 | ) 104 | ], 105 | prompt="You are Bob, you speak like a pirate.", 106 | name="Bob", 107 | ) 108 | 109 | checkpointer = MemorySaver() 110 | workflow = create_swarm([alice, bob], default_active_agent="Alice") 111 | app = workflow.compile(checkpointer=checkpointer) 112 | 113 | config = {"configurable": {"thread_id": "1"}} 114 | turn_1 = app.invoke( 115 | {"messages": [{"role": "user", "content": "i'd like to speak to Bob"}]}, 116 | config, 117 | ) 118 | 119 | # Verify turn 1 results 120 | assert len(turn_1["messages"]) == 4 121 | assert turn_1["messages"][-2].content == "Successfully transferred to Bob" 122 | assert turn_1["messages"][-1].content == recorded_messages[1].content 123 | assert turn_1["active_agent"] == "Bob" 124 | 125 | turn_2 = app.invoke( 126 | {"messages": [{"role": "user", "content": "what's 5 + 7?"}]}, 127 | config, 128 | ) 129 | 130 | # Verify turn 2 results 131 | assert len(turn_2["messages"]) == 10 132 | assert turn_2["messages"][-4].content == "Successfully transferred to Alice" 133 | assert turn_2["messages"][-2].content == "12" 134 | assert turn_2["messages"][-1].content == recorded_messages[4].content 135 | assert turn_2["active_agent"] == "Alice" 136 | --------------------------------------------------------------------------------