├── .env.example
├── .gitignore
├── README.md
├── langgraph.json
├── pyproject.toml
├── src
└── langgraph_engineer
│ ├── __init__.py
│ ├── agent.py
│ ├── check.py
│ ├── critique.py
│ ├── draft.py
│ ├── gather_requirements.py
│ ├── loader.py
│ ├── model.py
│ └── state.py
└── static
└── agent_ui.png
/.env.example:
--------------------------------------------------------------------------------
1 | ANTHROPIC_API_KEY=...
2 | TAVILY_API_KEY=...
3 | OPENAI_API_KEY=...
4 |
--------------------------------------------------------------------------------
/.gitignore:
--------------------------------------------------------------------------------
1 | .env
2 | .ipynb_checkpoints
3 | .langgraph-data
4 |
--------------------------------------------------------------------------------
/README.md:
--------------------------------------------------------------------------------
1 | # LangGraph Engineer
2 |
3 | [Try out the deployed version](https://smith.langchain.com/studio/thread?baseUrl=https://langgraph-engineer-23dacb3822e3589d80ff57de9ee94e1c.default.us.langgraph.app)
4 |
5 | 
6 |
7 | This is an alpha version of an agent that can help bootstrap [LangGraph](https://github.com/langchain-ai/langgraph) applications. It will focus on creating the correct nodes and edges, but will not attempt to write the logic to fill in the nodes and edges - rather will leave that for you.
8 |
9 | ## Agent Details
10 |
11 | The agent consists of a few steps:
12 |
13 | 1. Converse with the user to gather all requirements
14 | 2. Write a draft
15 | 3. Run programmatic checks against the generated draft (right now just checking that the response has the right format). If it fails, then go back to step 2. If it passes, then continue to step 4.
16 | 4. Run an LLM critique against the generated draft. If it fails, go back to step 2. If it passes, then continue to the end.
17 |
18 | ## How to run
19 |
20 | [Try out the deployed version](https://smith.langchain.com/studio/thread?baseUrl=https://langgraph-engineer-23dacb3822e3589d80ff57de9ee94e1c.default.us.langgraph.app)
21 |
22 | You can run this code locally with [LangGraph Studio](https://github.com/langchain-ai/langgraph-studio)
23 |
24 | You can deploy the code yourself to [LangGraph Cloud](https://langchain-ai.github.io/langgraph/cloud/#overview)
25 |
26 |
27 | ## Future direction:
28 |
29 | - Run more programmatic checks (linting, checking imports)
30 | - Try to run the generated code
31 | - Attempt to generate code for the nodes and edges
--------------------------------------------------------------------------------
/langgraph.json:
--------------------------------------------------------------------------------
1 | {
2 | "python_version": "3.11",
3 | "dockerfile_lines": [],
4 | "dependencies": [
5 | "."
6 | ],
7 | "graphs": {
8 | "engineer": "./src/langgraph_engineer/agent.py:graph"
9 | },
10 | "env": [
11 | "ANTHROPIC_API_KEY",
12 | "OPENAI_API_KEY"
13 | ]
14 | }
--------------------------------------------------------------------------------
/pyproject.toml:
--------------------------------------------------------------------------------
1 | [project]
2 | name = "langgraph_eng_package"
3 | version = "0.0.1"
4 | dependencies = [
5 | "langgraph",
6 | "langchain_anthropic",
7 | "langchain_core",
8 | "langchain_openai"
9 | ]
10 |
11 | [build-system]
12 | requires = ["setuptools >= 61.0"]
13 | build-backend = "setuptools.build_meta"
--------------------------------------------------------------------------------
/src/langgraph_engineer/__init__.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/hwchase17/langgraph-engineer/585cbbb2dbe0f20567337bed34734d1c7518ba95/src/langgraph_engineer/__init__.py
--------------------------------------------------------------------------------
/src/langgraph_engineer/agent.py:
--------------------------------------------------------------------------------
1 | from typing import Literal
2 |
3 | from langgraph.graph import StateGraph, END, MessagesState
4 | from langchain_core.messages import AIMessage
5 |
6 |
7 | from langgraph_engineer.check import check
8 | from langgraph_engineer.critique import critique
9 | from langgraph_engineer.draft import draft_answer
10 | from langgraph_engineer.gather_requirements import gather_requirements
11 | from langgraph_engineer.state import AgentState, OutputState, GraphConfig
12 |
13 |
14 |
def route_critique(state: AgentState) -> Literal["draft_answer", END]:
    """After critique: finish if the draft was accepted, otherwise redraft."""
    return END if state['accepted'] else "draft_answer"
20 |
def route_check(state: AgentState) -> Literal["critique", "draft_answer"]:
    """After the format check: an AI answer means the check passed, so go to
    critique; anything else (the check appended a user error message) redrafts."""
    last = state['messages'][-1]
    if not isinstance(last, AIMessage):
        return "draft_answer"
    return "critique"
26 |
27 |
def route_start(state: AgentState) -> Literal["draft_answer", "gather_requirements"]:
    """Entry router: skip requirement gathering when requirements already exist."""
    return "draft_answer" if state.get('requirements') else "gather_requirements"
33 |
34 |
def route_gather(state: AgentState) -> Literal["draft_answer", END]:
    """After gathering: draft once requirements exist; otherwise end the turn
    so the user can answer the follow-up question."""
    return "draft_answer" if state.get('requirements') else END
40 |
41 |
# Define a new graph.
# Flow: [entry] -> gather_requirements -> draft_answer -> check -> critique -> END,
# with loops back to draft_answer whenever check or critique rejects the draft.
workflow = StateGraph(AgentState, input=MessagesState, output=OutputState, config_schema=GraphConfig)
workflow.add_node(draft_answer)  # node names are inferred from the function names
workflow.add_node(gather_requirements)
workflow.add_node(critique)
workflow.add_node(check)
# Start by gathering requirements unless they are already present in state.
workflow.set_conditional_entry_point(route_start)
workflow.add_conditional_edges("gather_requirements", route_gather)
workflow.add_edge("draft_answer", "check")
workflow.add_conditional_edges("check", route_check)
workflow.add_conditional_edges("critique", route_critique)
graph = workflow.compile()
54 |
--------------------------------------------------------------------------------
/src/langgraph_engineer/check.py:
--------------------------------------------------------------------------------
1 | import re
2 | from langgraph_engineer.state import AgentState
3 |
4 |
def extract_python_code(text):
    """Return every ```python fenced block found in *text*.

    Each element is a (code, closing_fence) tuple because the pattern has two
    groups; a final block with no closing ``` still matches via the $ branch.
    """
    fence_re = re.compile(r'```python\s*(.*?)\s*(```|$)', re.DOTALL)
    return fence_re.findall(text)
9 |
10 |
# Feedback sent back to the drafting model when its reply does not contain
# exactly one well-formed ```python``` code block; {error} carries the reason.
error_parsing = """Make sure your response contains a code block in the following format:

```python
...
```

When trying to parse out that code block, got this error: {error}"""
18 |
19 |
def check(state: AgentState):
    """Validate that the latest answer contains exactly one python code block.

    On success, stores the re-fenced block under ``code``; otherwise appends a
    user message asking the model to fix its formatting.
    """
    def _complain(reason):
        # Route the formatting error back to the model as a user turn.
        return {"messages": [{"role": "user", "content": error_parsing.format(error=reason)}]}

    answer = state['messages'][-1]
    try:
        blocks = extract_python_code(answer.content)
    except Exception as e:
        return _complain(str(e))
    if not blocks:
        return _complain("Did not find a code block!")
    if len(blocks) > 1:
        return _complain("Found multiple code blocks!")
    return {"code": f"```python\n{blocks[0][0]}\n```"}
31 |
--------------------------------------------------------------------------------
/src/langgraph_engineer/critique.py:
--------------------------------------------------------------------------------
1 | from langgraph_engineer.loader import load_github_file
2 | from langgraph_engineer.model import _get_model
3 | from langgraph_engineer.state import AgentState
4 | from langchain_core.messages import AIMessage
5 | from langchain_core.pydantic_v1 import BaseModel
6 |
# System prompt for the critique step; {file} is filled with the LangGraph
# unit-test file fetched at runtime as reference material.
critique_prompt = """You are tasked with critiquing a junior developers first attempt at building a LangGraph application. \
Here is a long unit test file for LangGraph. This should contain a lot (but possibly not all) \
relevant information on how to use LangGraph.


{file}


Based on the conversation below, attempt to critique the developer. If it seems like the written solution is fine, then call the `Accept` tool.

Do NOT critique the internal logic of the nodes too much - just make sure the flow (the nodes and edges) are correct and make sense. \
It's totally fine to use dummy LLMs or dummy retrieval steps."""
19 |
20 |
class Accept(BaseModel):
    # Structured output for the critique model: reasoning plus a verdict.
    logic: str  # the critique / reasoning fed back to the drafting step
    accept: bool  # True when the draft's graph structure looks correct
24 |
25 |
def _swap_messages(messages):
    """Mirror a conversation: AI turns become 'user', everything else 'assistant'.

    This lets the critique model review the drafting agent's answers as if they
    were coming from a user it is talking to.
    """
    def flipped_role(m):
        return "user" if isinstance(m, AIMessage) else "assistant"

    return [{"role": flipped_role(m), "content": m.content} for m in messages]
34 |
35 |
def critique(state: AgentState, config):
    """Ask an LLM to review the drafted graph; record accept/reject in state."""
    github_url = "https://github.com/langchain-ai/langgraph/blob/main/libs/langgraph/tests/test_pregel.py"
    reference = load_github_file(github_url)
    conversation = [
        {"role": "user", "content": critique_prompt.format(file=reference)},
        {"role": "assistant", "content": state.get('requirements')},
    ]
    # Flip roles so the critic sees the draft agent's answers as user turns.
    conversation += _swap_messages(state['messages'])
    model = _get_model(config, "openai", "critique_model").with_structured_output(Accept)
    verdict = model.invoke(conversation)
    feedback = [{"role": "user", "content": verdict.logic}]
    if verdict.accept:
        feedback.append({"role": "assistant", "content": "okay, sending to user"})
    return {"messages": feedback, "accepted": verdict.accept}
61 |
--------------------------------------------------------------------------------
/src/langgraph_engineer/draft.py:
--------------------------------------------------------------------------------
1 | from langgraph_engineer.loader import load_github_file
2 | from langgraph_engineer.model import _get_model
3 | from langgraph_engineer.state import AgentState
4 |
5 | prompt = """You are tasked with answering questions about LangGraph functionality and bugs.
6 | Here is a long unit test file for LangGraph. This should contain a lot (but possibly not all) \
7 | relevant information on how to use LangGraph.
8 |
9 |
10 | {file}
11 |
12 |
13 | Based on the information above, attempt to answer the user's questions. If you generate a code block, only \
14 | generate a single code block - eg lump all the code together (rather than splitting up). \
15 | You should encode helpful comments as part of that code block to understand what is going on. \
16 | ALWAYS just generate the simplest possible example - don't make assumptions that make it more complicated. \
17 | For "messages", these are a special object that looks like: {{"role": .., "content": ....}}
18 |
19 | If users ask for a messages key, use MessagesState which comes with a built in `messages` key. \
20 | You can import MessagesState from `langgraph.graph` and it is a TypedDict, so you can subclass it and add new keys to use as the graph state.
21 |
22 | Make sure any generated graphs have at least one edge that leads to the END node - you need to define a stopping criteria!
23 |
24 | You generate code using markdown python syntax, eg:
25 |
26 | ```python
27 | ...
28 | ```
29 |
30 | Remember, only generate one of those code blocks!"""
31 |
32 |
def draft_answer(state: AgentState, config):
    """Draft (or redraft) the LangGraph solution for the gathered requirements."""
    github_url = "https://github.com/langchain-ai/langgraph/blob/main/libs/langgraph/tests/test_pregel.py"
    reference = load_github_file(github_url)
    conversation = [
        {"role": "system", "content": prompt.format(file=reference)},
        {"role": "user", "content": state.get('requirements')},
    ]
    conversation.extend(state['messages'])
    model = _get_model(config, "openai", "draft_model")
    return {"messages": [model.invoke(conversation)]}
43 |
--------------------------------------------------------------------------------
/src/langgraph_engineer/gather_requirements.py:
--------------------------------------------------------------------------------
1 | from langgraph_engineer.model import _get_model
2 | from langgraph_engineer.state import AgentState
3 | from typing import TypedDict
4 | from langchain_core.messages import RemoveMessage
5 |
# System prompt for the requirements-gathering step. The model asks one
# question at a time and ends by calling the `Build` tool.
gather_prompt = """You are tasked with helping build LangGraph applications. \
LangGraph is a framework for developing LLM applications. \
It represents agents as graphs. These graphs can contain cycles and often contain branching logic.

Your first job is to gather all the user requirements about the topology of the graph. \
You should have a clear sense of all the nodes of the graph/agent, and all the edges.

You are conversing with a user. Ask as many follow up questions as necessary - but only ask ONE question at a time. \
Only gather information about the topology of the graph, not about the components (prompts, LLMs, vector DBs). \
If you have a good idea of what they are trying to build, call the `Build` tool with a detailed description.

Do not ask unnecessary questions! Do not ask them to confirm your understanding or the structure! The user will be able to \
correct you even after you call the Build tool, so just do enough to get an MVP."""
19 |
20 |
class Build(TypedDict):
    # Tool schema the gathering model calls once it understands the graph.
    requirements: str  # detailed description of the desired graph topology
23 |
24 |
def gather_requirements(state: AgentState, config):
    """Converse with the user until the model calls `Build` with requirements.

    While no tool call is made, keep asking questions; once `Build` is called,
    capture the requirements and delete the gathering conversation so the
    drafting step starts from a clean message history.
    """
    conversation = [{"role": "system", "content": gather_prompt}] + state['messages']
    model = _get_model(config, "openai", "gather_model").bind_tools([Build])
    response = model.invoke(conversation)
    if not response.tool_calls:
        return {"messages": [response]}
    requirements = response.tool_calls[0]['args']['requirements']
    cleanup = [RemoveMessage(id=m.id) for m in state['messages']]
    return {"requirements": requirements, "messages": cleanup}
37 |
--------------------------------------------------------------------------------
/src/langgraph_engineer/loader.py:
--------------------------------------------------------------------------------
1 | import functools
2 | from functools import lru_cache
3 | import time
4 | import requests
5 |
6 |
7 | CACHE_DURATION = 24 * 60 * 60
8 |
9 |
def time_based_cache(seconds):
    """Decorator: memoize like ``lru_cache`` but flush everything every *seconds*.

    Expiry is tracked for the whole cache, not per entry: once the deadline
    passes, the next call clears all cached values and resets the clock.
    """
    def decorate(func):
        cached = lru_cache(maxsize=None)(func)
        cached.lifetime = seconds
        cached.expiration = time.time() + cached.lifetime

        @functools.wraps(cached)
        def inner(*args, **kwargs):
            if time.time() >= cached.expiration:
                cached.cache_clear()
                cached.expiration = time.time() + cached.lifetime
            return cached(*args, **kwargs)

        return inner

    return decorate
26 |
27 |
@time_based_cache(CACHE_DURATION)
def load_github_file(url):
    """Fetch the raw text of a file given its github.com blob URL.

    Results are cached for CACHE_DURATION seconds. On a non-200 response the
    error is returned as a string (callers embed the text in a prompt either
    way rather than handling exceptions).
    """
    # Convert the GitHub web UI URL to the raw-content host.
    raw_url = url.replace("github.com", "raw.githubusercontent.com").replace("/blob/", "/")

    # Without a timeout, a stalled connection would hang the whole graph run.
    response = requests.get(raw_url, timeout=30)

    # Check if the request was successful
    if response.status_code == 200:
        return response.text
    else:
        return f"Failed to load file. Status code: {response.status_code}"
41 |
--------------------------------------------------------------------------------
/src/langgraph_engineer/model.py:
--------------------------------------------------------------------------------
1 | from langchain_openai import ChatOpenAI
2 | from langchain_anthropic import ChatAnthropic
3 |
4 |
5 | def _get_model(config, default, key):
6 | model = config['configurable'].get(key, default)
7 | if model == "openai":
8 | return ChatOpenAI(temperature=0, model_name="gpt-4o-2024-08-06")
9 | elif model == "anthropic":
10 | return ChatAnthropic(temperature=0, model_name="claude-3-5-sonnet-20240620")
11 | else:
12 | raise ValueError
13 |
--------------------------------------------------------------------------------
/src/langgraph_engineer/state.py:
--------------------------------------------------------------------------------
1 | from langgraph.graph import MessagesState
2 | from typing import TypedDict, Literal
class AgentState(MessagesState):
    # Full working state of the engineer graph; `messages` comes from MessagesState.
    requirements: str  # graph description captured by gather_requirements
    code: str  # final fenced ```python``` block extracted by check
    accepted: bool  # set by critique: True once the draft passes review
7 |
8 |
class OutputState(TypedDict):
    # What the graph exposes to callers: just the generated code block.
    code: str
11 |
12 |
class GraphConfig(TypedDict):
    # Per-run model selection for each LLM-backed node (see model._get_model).
    gather_model: Literal['openai', 'anthropic']
    draft_model: Literal['openai', 'anthropic']
    critique_model: Literal['openai', 'anthropic']
17 |
--------------------------------------------------------------------------------
/static/agent_ui.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/hwchase17/langgraph-engineer/585cbbb2dbe0f20567337bed34734d1c7518ba95/static/agent_ui.png
--------------------------------------------------------------------------------