├── .env_template
├── .gitignore
├── README.md
├── agents
│   ├── __init__.py
│   ├── code.py
│   ├── example.py
│   ├── graph.py
│   ├── outline.py
│   ├── refine.py
│   ├── select.py
│   ├── state.py
│   └── utils.py
├── data
│   └── shadcn.csv
├── data_processing
│   └── data_process.ipynb
├── images
│   └── example.png
├── requirements.txt
└── webui.py
/.env_template:
--------------------------------------------------------------------------------
1 | OPENAI_API_KEY=xxx
2 | LANGCHAIN_TRACING_V2=true
3 | LANGCHAIN_ENDPOINT="https://api.smith.langchain.com"
4 | LANGCHAIN_API_KEY=xxx
5 | LANGCHAIN_PROJECT=xxx
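6 |
7 | # OPENAI_API_KEY is required; the LANGCHAIN_* variables are optional and only needed for LangSmith tracing.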
--------------------------------------------------------------------------------
/.gitignore:
--------------------------------------------------------------------------------
1 | __pycache__
2 | components
3 | .env
4 | agent.ipynb
5 | test.ipynb
--------------------------------------------------------------------------------
/README.md:
--------------------------------------------------------------------------------
1 | # Chat to Frontend Using Shadcn Agent
2 |
3 | When working with Shadcn (or any other component library), you always end up reading through a lot of code examples. Why not let GPT read those examples and write the frontend component for you?
4 |
5 | This repo demonstrates a four-step LangGraph pipeline that uses the official shadcn documentation to build a Shadcn Agent,
6 |
7 | achieving fully automated component generation.
8 |
9 | 
10 |
11 | ## Usage
12 |
13 | Remember to copy `.env_template` to `.env` and fill in your API keys.
14 |
15 | `pip install -r requirements.txt`
16 |
17 | `streamlit run webui.py`
18 |
19 | ## Data Processing
20 |
21 | In practice, most of the work is in data processing, i.e. preparing the official shadcn documentation for retrieval. That part lives in `data/`.
22 |
23 | I also explored prototyping with DSPy.
24 |
25 | ## Agent
26 |
27 | The pipeline is built with LangGraph, and LangSmith is recommended for tracing and inspection!
28 |
29 | In my experience the prompts have a huge impact on the results!
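30 |
31 | You can also drive the graph directly from Python instead of through the web UI. A minimal sketch (the query is only illustrative; it assumes `.env` is configured and `data/shadcn.csv` is present):
32 |
33 | ```python
34 | from dotenv import load_dotenv
35 | from agents.graph import FrontendAgent
36 |
37 | load_dotenv()
38 |
39 | agent = FrontendAgent(data_path="data/shadcn.csv")
40 | for step in agent.run("Build a settings page with a profile form and notification switches"):
41 |     print(step["node"])    # select -> outline -> code -> refine -> end
42 |     print(step["output"])
43 | ```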
--------------------------------------------------------------------------------
/agents/__init__.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/jw782cn/chat_to_frontend/edefa48f9044e62d94d8c5bd01e31f81f77f95f8/agents/__init__.py
--------------------------------------------------------------------------------
/agents/code.py:
--------------------------------------------------------------------------------
1 | from agents.state import *
2 |
3 | code_template = """
4 | Generate frontend code based on the shadcn components, query and frontend outline. Choose from the components in the shadcn library.
5 | Follow the frontend outline step by step.
6 | 1. Use tailwind to make the grid and layout.
7 | 2. Use components to build the structure. You don't have to use all the components; choose the ones that fit best.
8 | 3. Refer to the description to import the components.
9 | 4. Always generate the whole page.tsx.
10 | 5. Don't use fake images.
11 | 6. Assume that the components are already imported.
12 | I will tip you $1000 for completing the whole code without skipping any code!!!!
13 |
14 | ---
15 |
16 | Follow the following format.
17 |
18 | Question: a query to build a frontend using components selected from shadcn library
19 |
20 | Components: necessary components in the shadcn library
21 |
22 | Frontend Outline: a frontend outline based on the retrieved components and query, use bullet points to describe the structure
23 |
24 | Reasoning: Let's think step by step in order to ${{produce the frontend_code}}. We ...
25 |
26 | Frontend Code: a frontend code. Only output the code, no verbose!
27 |
28 | ---
29 | {example_code}
30 | ---
31 |
32 | Question: {question}
33 |
34 | Components: {components}
35 |
36 | Frontend Outline: {frontend_outline}
37 |
38 | Reasoning: Let's think step by step in order to"""
39 |
40 |
41 | class CodeAgent:
42 |
43 | def __init__(self, df, model: str = "gpt-4-turbo-preview"):
44 | self.model = model
45 | self.df = df
46 |
47 |     def code_parser(self, result: str):
48 |         result = result.split("Frontend Code:")[-1]
49 |         # keep only the code between ```tsx fences, if present
50 |         result = result.split("```tsx")[1].split("```")[0] if "```tsx" in result else result
51 |         return result
52 |
53 | def retrieve_components(self, state: AgentState):
54 | file_names = state["file_names"]
55 | combined_contexts = get_combined_contexts(self.df, file_names)
56 | components = contexts_to_string(combined_contexts)
57 | return components
58 |
59 | def coding(self, state: AgentState):
60 | question = state["question"]
61 | components = self.retrieve_components(state)
62 | file_names = state["file_names"]
63 | selected_components = ", ".join(file_names)
64 | frontend_outline = state["frontend_outline"]
65 | prompt = PromptTemplate(
66 | template=code_template, input_variables=["question"], partial_variables={"components": components, "frontend_outline": frontend_outline, "example_code": example_code}
67 | )
68 | llm = ChatOpenAI(temperature=0.1, max_tokens=1024, model=self.model)
69 | parser = StrOutputParser()
70 | llm_chain = (
71 | prompt
72 | | llm
73 | | parser
74 | )
75 | result = llm_chain.invoke({"question": question})
76 | result = self.code_parser(result)
77 | state["codes"] = result
78 | return state
79 |
80 | def run(self, state: AgentState):
81 | return self.coding(state)
--------------------------------------------------------------------------------
/agents/example.py:
--------------------------------------------------------------------------------
1 |
2 | example_code = """\
3 | ---
4 |
5 | Example Codes:
6 |
7 | export default function DashboardPage() {
8 |   return (
9 |     <>
10 |       <div className="flex-col md:flex">
11 |         <div className="flex-1 space-y-4 p-8 pt-6">
12 |           <div className="flex items-center justify-between space-y-2">
13 |             <h2 className="text-3xl font-bold tracking-tight">Dashboard</h2>
14 |           </div>
15 |           <Tabs defaultValue="overview" className="space-y-4">
16 |             <TabsList>
17 |               <TabsTrigger value="overview">Overview</TabsTrigger>
18 |               <TabsTrigger value="analytics" disabled>Analytics</TabsTrigger>
19 |               <TabsTrigger value="reports" disabled>Reports</TabsTrigger>
20 |               <TabsTrigger value="notifications" disabled>Notifications</TabsTrigger>
21 |             </TabsList>
22 |             <TabsContent value="overview" className="space-y-4">
23 |               <div className="grid gap-4 md:grid-cols-2 lg:grid-cols-4">
24 |                 <Card>
25 |                   <CardHeader className="flex flex-row items-center justify-between space-y-0 pb-2">
26 |                     <CardTitle className="text-sm font-medium">Total Revenue</CardTitle>
27 |                   </CardHeader>
28 |                   <CardContent>
29 |                     <div className="text-2xl font-bold">$45,231.89</div>
30 |                     <p className="text-xs text-muted-foreground">+20.1% from last month</p>
31 |                   </CardContent>
32 |                 </Card>
33 |                 <Card>
34 |                   <CardHeader className="flex flex-row items-center justify-between space-y-0 pb-2">
35 |                     <CardTitle className="text-sm font-medium">Subscriptions</CardTitle>
36 |                   </CardHeader>
37 |                   <CardContent>
38 |                     <div className="text-2xl font-bold">+2350</div>
39 |                     <p className="text-xs text-muted-foreground">+180.1% from last month</p>
40 |                   </CardContent>
41 |                 </Card>
42 |                 <Card>
43 |                   <CardHeader className="flex flex-row items-center justify-between space-y-0 pb-2">
44 |                     <CardTitle className="text-sm font-medium">Sales</CardTitle>
45 |                   </CardHeader>
46 |                   <CardContent>
47 |                     <div className="text-2xl font-bold">+12,234</div>
48 |                     <p className="text-xs text-muted-foreground">+19% from last month</p>
49 |                   </CardContent>
50 |                 </Card>
51 |                 <Card>
52 |                   <CardHeader className="flex flex-row items-center justify-between space-y-0 pb-2">
53 |                     <CardTitle className="text-sm font-medium">Active Now</CardTitle>
54 |                   </CardHeader>
55 |                   <CardContent>
56 |                     <div className="text-2xl font-bold">+573</div>
57 |                     <p className="text-xs text-muted-foreground">+201 since last hour</p>
58 |                   </CardContent>
59 |                 </Card>
60 |               </div>
61 |               <div className="grid gap-4 md:grid-cols-2 lg:grid-cols-7">
62 |                 <Card className="col-span-4">
63 |                   <CardHeader>
64 |                     <CardTitle>Overview</CardTitle>
65 |                   </CardHeader>
66 |                   <CardContent className="pl-2">
67 |                     <Overview />
68 |                   </CardContent>
69 |                 </Card>
70 |                 <Card className="col-span-3">
71 |                   <CardHeader>
72 |                     <CardTitle>Recent Sales</CardTitle>
73 |                     <CardDescription>You made 265 sales this month.</CardDescription>
74 |                   </CardHeader>
75 |                   <CardContent>
76 |                     <RecentSales />
77 |                   </CardContent>
78 |                 </Card>
79 |               </div>
80 |             </TabsContent>
81 |           </Tabs>
82 |         </div>
83 |       </div>
84 |     </>
85 |   )
86 | }
87 |
88 | ---
89 |
90 | """
--------------------------------------------------------------------------------
/agents/graph.py:
--------------------------------------------------------------------------------
1 | from agents.state import *
2 | from agents.refine import RefineAgent
3 | from agents.code import CodeAgent
4 | from agents.outline import OutlineAgent
5 | from agents.select import SelectAgent
6 | from langgraph.graph import StateGraph, END
7 |
8 |
9 | class FrontendAgent:
10 |     def __init__(self, data_path: str = "../data/shadcn.csv", refine_nums: int = 1):
11 | self.df = pd.read_csv(data_path, index_col=False)
12 | self.components = read_combined_file(data_path)
13 | self.refine_nums = refine_nums
14 | self.set_app()
15 |
16 | def set_app(self):
17 | selectAgent = SelectAgent(components=self.components)
18 | outlineAgent = OutlineAgent(df=self.df)
19 | codeAgent = CodeAgent(df=self.df)
20 | refineAgent = RefineAgent(df=self.df)
21 |
22 | # Define a new graph
23 | workflow = StateGraph(AgentState)
24 |
25 |         # Define the nodes of the four-step pipeline
26 | workflow.add_node("select", selectAgent.run)
27 | workflow.add_node("outline", outlineAgent.run)
28 | workflow.add_node("code", codeAgent.run)
29 | workflow.add_node("refine", refineAgent.run)
30 |
31 | # Define the edges between the nodes
32 | workflow.add_edge("select", "outline")
33 | workflow.add_edge("outline", "code")
34 | workflow.add_edge("code", "refine")
35 | workflow.add_edge("refine", END)
36 |
37 | # Set the entrypoint as `select`
38 | # This means that this node is the first one called
39 | workflow.set_entry_point("select")
40 | self.app = workflow.compile()
41 |
42 | def run(self, question: str):
43 | inputs = {"question": question}
44 |
45 | # streaming output from the graph, yielding the output of each node
46 | for output in self.app.stream(inputs):
47 | # stream() yields dictionaries with output keyed by node name
48 | for key, value in output.items():
49 | # select -> file_names
50 | # outline -> frontend_outline
51 | # code -> codes
52 | # refine -> codes
53 | # different output from different node
54 | if key == "select":
55 | yield {"node": key, "output": value["file_names"]}
56 | elif key == "outline":
57 | yield {"node": key, "output": value["frontend_outline"]}
58 | elif key == "code":
59 | yield {"node": key, "output": value["codes"]}
60 | elif key == "refine":
61 | yield {"node": key, "output": value["codes"]}
62 | yield {"node": "end", "output": "end"}
63 | return
--------------------------------------------------------------------------------
/agents/outline.py:
--------------------------------------------------------------------------------
1 | from agents.state import *
2 |
3 | outline_template = """
4 | You are building a frontend using components selected from shadcn library.
5 | Generate a frontend outline based on provided components and user question.
6 |
7 | Instructions:
8 | - Use the components selected.
9 | - Use bullet points to describe the structure.
10 | - Include detailed instructions for your colleague to follow. Describe what this frontend should ideally look like.
11 | - No need to include any code.
12 |
13 | ---
14 |
15 | Follow the following format.
16 |
17 | Question: a query to build a frontend using components selected from shadcn library
18 |
19 | Components Selected: a list of components that need to be used.
20 |
21 | Components: necessary components in the shadcn library
22 |
23 | Reasoning: Let's think step by step in order to ${{produce the frontend_outline}}. We ...
24 |
25 | Frontend Outline: a frontend outline based on the retrieved components and query, use bullet points to describe the structure
26 |
27 | ---
28 |
29 | Question: {question}
30 |
31 | Components Selected: {selected_components}
32 |
33 | Components: {components}
34 |
35 | Reasoning: Let's think step by step in order to"""
36 |
37 |
38 | class OutlineAgent:
39 |
40 | def __init__(self, df, model: str = "gpt-3.5-turbo-0125"):
41 | self.model = model
42 | self.df = df
43 |
44 | def outline_parser(self, result: str):
45 | # only need the last part of the result
46 | result = result.split("Frontend Outline:")[-1]
47 | return result
48 |
49 | def retrieve_components(self, state: AgentState):
50 | file_names = state["file_names"]
51 | combined_contexts = get_combined_contexts(self.df, file_names)
52 | components = contexts_to_string(combined_contexts)
53 | return components
54 |
55 | def outline(self, state: AgentState):
56 | question = state["question"]
57 | components = self.retrieve_components(state)
58 | file_names = state["file_names"]
59 | selected_components = ", ".join(file_names)
60 | prompt = PromptTemplate(
61 | template=outline_template, input_variables=["question"], partial_variables={"components": components, "selected_components": selected_components}
62 | )
63 | llm = ChatOpenAI(temperature=0.1, max_tokens=1024, model=self.model)
64 | parser = StrOutputParser()
65 | llm_chain = (
66 | prompt
67 | | llm
68 | | parser
69 | )
70 | result = llm_chain.invoke({"question": question})
71 | result = self.outline_parser(result)
72 | state["frontend_outline"] = result
73 | return state
74 |
75 | def run(self, state: AgentState):
76 | return self.outline(state)
--------------------------------------------------------------------------------
/agents/refine.py:
--------------------------------------------------------------------------------
1 | from agents.state import *
2 |
3 | refine_template = """
4 | You are very strict about tsx coding!
5 | Refine a code based on the shadcn components, query and frontend outline.
6 | - Use tailwind to make the grid and layout.
7 | - Refer to the description to import the components.
8 | - Always generate the whole page.tsx.
9 | - Don't use fake images.
10 | - Expand any placeholders in the code.
11 | - Remember this is not a component but a page.tsx.
12 | - Output the whole code.
13 | - Make the code as realistic as possible; add the necessary text and sample data to make it look real.
14 | - Choose black, gray and white as the main colors.
15 | - Please add more details.
16 |
17 | I will tip you $1000 for completing the whole code without skipping any code!!!!
18 |
19 | ---
20 |
21 | Follow the following format.
22 |
23 | Question: a query to build a frontend using components selected from shadcn library
24 |
25 | Components: necessary components in the shadcn library
26 |
27 | Reasoning: Let's think step by step in order to ${{produce the frontend_code}}. We ...
28 |
29 | Frontend Code: a frontend code. Only output the code, no verbose!
30 |
31 | ---
32 |
33 | Question: {question}
34 |
35 | Components Example: {components}
36 |
37 | Reasoning: Let's think step by step in order to"""
38 |
39 |
40 | class RefineAgent:
41 |
42 | def __init__(self, df, model: str = "gpt-4-turbo-preview"):
43 | self.model = model
44 | self.df = df
45 |
46 |     def output_parser(self, result: str):
47 |         result = result.split("Frontend Code:")[-1]
48 |         # keep only the code between ```tsx fences, if present
49 |         result = result.split("```tsx")[1].split("```")[0] if "```tsx" in result else result
50 |         # prepend "use client" if it is missing
51 |         if "use client" not in result:
52 |             result = "\"use client\"\n" + result
53 |         return result
54 |
55 | def retrieve_components(self, state: AgentState):
56 | file_names = state["file_names"]
57 | combined_contexts = get_combined_contexts(self.df, file_names)
58 | components = contexts_to_string(combined_contexts)
59 | return components
60 |
61 | def coding(self, state: AgentState):
62 | question = state["question"]
63 | components = self.retrieve_components(state)
64 | prompt = PromptTemplate(
65 | template=refine_template, input_variables=["question"], partial_variables={"components": components}
66 | )
67 | llm = ChatOpenAI(temperature=0.1, max_tokens=2048, model=self.model)
68 | parser = StrOutputParser()
69 | llm_chain = (
70 | prompt
71 | | llm
72 | | parser
73 | )
74 | result = llm_chain.invoke({"question": question})
75 | result = self.output_parser(result)
76 | state["codes"] = result
77 | return state
78 |
79 | def run(self, state: AgentState):
80 | return self.coding(state)
--------------------------------------------------------------------------------
/agents/select.py:
--------------------------------------------------------------------------------
1 | from agents.state import *
2 |
3 |
4 | select_template = """
5 | You are building a frontend using components selected from shadcn library.
6 | Generate a list of needed components based on the context and query.
7 |
8 | Instructions:
9 | - include as many components as needed.
10 | - include at least 3 components, no more than 10 components.
11 | - only select from provided components.
12 |
13 | ---
14 |
15 | Follow the following format.
16 |
17 | Question: a query to build a frontend using components selected from shadcn library
18 |
19 | Components: a list of components in shadcn library
20 |
21 | Reasoning: Let's think step by step in order to ${{produce the needed_components}}. We ...
22 |
23 | Needed Components: a full list of needed component names, remember to include .tsx, separated by commas; only output the names, no verbosity!
24 |
25 | ---
26 |
27 | Question: {question}
28 |
29 | Components: {components}
30 |
31 | Reasoning: Let's think step by step in order to"""
32 |
33 | class SelectAgent:
34 | """Select the needed components based on the question and components."""
35 |
36 | def __init__(self, components: str, model: str = "gpt-3.5-turbo-0125"):
37 | self.components = components
38 | self.model = model
39 |
40 | def select_parser(self, result: str):
41 | # only need the last part of the result
42 | result = result.split("Needed Components:")[-1]
43 | file_names = [name.strip() for name in result.split(",") if name.strip()]
44 | return file_names
45 |
46 | def select(self, state: AgentState):
47 | question = state["question"]
48 | prompt = PromptTemplate(
49 | template=select_template, input_variables=["question"], partial_variables={"components": self.components}
50 | )
51 | llm = ChatOpenAI(temperature=0.1, max_tokens=1024, model=self.model)
52 | parser = StrOutputParser()
53 | llm_chain = (
54 | prompt
55 | | llm
56 | | parser
57 | )
58 | result = llm_chain.invoke({"question": question})
59 | file_names = self.select_parser(result)
60 | state["file_names"] = file_names
61 | return state
62 |
63 | def run(self, state: AgentState):
64 | return self.select(state)
--------------------------------------------------------------------------------
/agents/state.py:
--------------------------------------------------------------------------------
1 | from typing import TypedDict, List
2 |
3 | from langchain.prompts import PromptTemplate
4 | from langchain_openai import OpenAI, ChatOpenAI
5 | from langchain_core.prompts import ChatPromptTemplate
6 | from langchain_core.output_parsers import StrOutputParser, JsonOutputParser
7 | from langchain.output_parsers.json import SimpleJsonOutputParser
8 | from langchain_core.pydantic_v1 import BaseModel, Field, validator
9 |
10 | from agents.utils import *
11 | from agents.example import example_code
12 |
13 |
14 | class AgentState(TypedDict):
15 |     question: str
16 |     file_names: List[str]
17 |     frontend_outline: str
18 |     codes: str
19 |
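20 |
21 | # Illustrative shape of a fully populated state after the refine step (values are made up):
22 | # {"question": "Build a login page", "file_names": ["card.tsx", "button.tsx", "input.tsx"],
23 | #  "frontend_outline": "- A centered Card with ...", "codes": '"use client"\n...'}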
--------------------------------------------------------------------------------
/agents/utils.py:
--------------------------------------------------------------------------------
1 | import tiktoken
2 | import pandas as pd
3 |
4 | def num_tokens_from_string(string: str, model="gpt-3.5-turbo-0613") -> int:
5 | """Returns the number of tokens in a text string based on the specified model's encoding."""
6 | try:
7 | encoding = tiktoken.encoding_for_model(model) # Attempt to get encoding for the specified model
8 | except KeyError:
9 | print("Warning: model not found. Using cl100k_base encoding.")
10 | encoding = tiktoken.get_encoding("cl100k_base") # Fallback encoding if model's encoding not found
11 |
12 | num_tokens = len(encoding.encode(string)) # Calculate number of tokens based on encoding
13 | return num_tokens
14 |
15 |
16 | def retrieve_component_from_df(df, file_name):
17 | # only return json object
18 | data_entry = df[df['file_name'] == file_name]
19 | # return the first one
20 | # if not exist, return empty json object
21 | if data_entry.empty:
22 | return {}
23 | return data_entry.to_dict(orient='records')[0]
24 |
25 | def get_combined_context_string(data_entry):
26 | # include file_name, description, usage_example, file_content
27 | # use mdx description instead
28 |     context = (f"----- START OF {data_entry['file_name']} -----\n"
29 |                f"{data_entry['mdx_description']}\n"
30 |                f"----- END OF {data_entry['file_name']} -----\n\n")
31 |
32 | return {"context": context, "token": num_tokens_from_string(context)}
33 |
34 | def get_combined_contexts(df, file_names):
35 | # list of file_names -> list of combined context strings, also count token of each context
36 | combined_contexts = {}
37 | for file_name in file_names:
38 | data_entry = retrieve_component_from_df(df, file_name)
39 | if data_entry:
40 | combined_contexts[file_name] = get_combined_context_string(data_entry)
41 | return combined_contexts
42 |
43 | def contexts_to_string(combined_contexts):
44 | return "".join([combined_contexts[file_name]["context"] for file_name in combined_contexts])
45 |
46 | def get_combined_meta_string(data_entry):
47 | return f"----- START OF {data_entry['file_name']} -----\n{data_entry['description']}\n----- END OF {data_entry['file_name']} -----\n\n"
48 |
49 | def read_combined_file(data_path):
50 | # data_path is the csv file path of df
51 | df = pd.read_csv(data_path)
52 | combined_list = [get_combined_meta_string(data_entry) for data_entry in df.to_dict(orient='records')]
53 | # get combined string
54 | combined_string = "".join(combined_list)
55 | return combined_string
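56 |
57 |
58 | # A minimal sketch of how these helpers fit together (assumes data/shadcn.csv provides
59 | # the file_name, description and mdx_description columns used above; the component names
60 | # below are illustrative):
61 | #
62 | #     df = pd.read_csv("data/shadcn.csv")
63 | #     contexts = get_combined_contexts(df, ["button.tsx", "card.tsx"])
64 | #     prompt_context = contexts_to_string(contexts)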
--------------------------------------------------------------------------------
/images/example.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/jw782cn/chat_to_frontend/edefa48f9044e62d94d8c5bd01e31f81f77f95f8/images/example.png
--------------------------------------------------------------------------------
/requirements.txt:
--------------------------------------------------------------------------------
1 | openai
2 | tiktoken
3 | langchain
4 | langgraph
5 | dspy
6 | streamlit
7 | langchain_openai
8 | python-dotenv
--------------------------------------------------------------------------------
/webui.py:
--------------------------------------------------------------------------------
1 | import streamlit as st
2 | from agents.graph import FrontendAgent
3 | from dotenv import load_dotenv
4 | load_dotenv()
5 |
6 | # st.set_page_config must be the first Streamlit command on the page
7 | st.set_page_config(page_title="Chat to frontend with Shadcn", layout="wide")
8 |
9 | # build the agent once and cache it in session state
10 | if st.session_state.get("frontend_agent") is None:
11 |     st.session_state.frontend_agent = FrontendAgent(data_path="data/shadcn.csv")
12 | st.title("Chat to frontend with Shadcn Agent")
13 |
14 | # Input from user
15 | input_text = st.text_area("Enter the frontend component or page you want to create:")
16 |
17 | if st.button("Run Agent"):
18 | results = []
19 | with st.spinner():
20 | for output in st.session_state.frontend_agent.run(input_text):
21 | results.append(output)
22 | if output["node"] == "select":
23 | st.write("Selected components:")
24 | st.code(output["output"])
25 | elif output["node"] == "outline":
26 | st.write("Frontend Outline:")
27 | st.write(output["output"])
28 | elif output["node"] == "code":
29 | st.write("Frontend Code:")
30 | st.code(output["output"], language="tsx")
31 | elif output["node"] == "refine":
32 | st.write("Refined Frontend Code:")
33 | st.code(output["output"], language="tsx")
34 | st.write("Done!")
--------------------------------------------------------------------------------