├── .gitignore
├── LICENSE
├── README.md
├── babyagi.py
├── img
│   ├── conn_error.png
│   └── robot.png
├── pyproject.toml
└── requirements.txt
/.gitignore:
--------------------------------------------------------------------------------
__pycache__/
*.py[cod]
*$py.class

.env
.env.*
env/
.venv
venv/

.vscode/
.idea/

models
llama/
--------------------------------------------------------------------------------
/LICENSE:
--------------------------------------------------------------------------------
MIT License

Copyright (c) 2023 Yohei Nakajima

Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:

The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.

THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
--------------------------------------------------------------------------------
/README.md:
--------------------------------------------------------------------------------
# babyagi-ui

# Introduction
This Python script is a non-Poetry version of [babyagi-streamlit](https://github.com/dory111111/babyagi-streamlit), for those not familiar with the Poetry setup/run workflow. The code base is inspired by BabyAGI; see the Acknowledgments section below.

# Demo
[streamlit-babyagi-2023-04-09-20-04-52.webm](https://user-images.githubusercontent.com/67872688/230803873-b744c9e2-d516-4e5d-9ef2-67f934b9b35c.webm)

# Installation
Install the required packages:
````
pip install -r requirements.txt
````
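
If you prefer an isolated environment, you can create a virtual environment first (a minimal sketch assuming a POSIX shell; the repo's `.gitignore` already ignores `venv/` and `.venv`):
````
python -m venv .venv
source .venv/bin/activate  # on Windows: .venv\Scripts\activate
pip install -r requirements.txt
````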
## Usage

Run Streamlit:
````
python -m streamlit run babyagi.py
````

You can now view your Streamlit app in your browser.

Local URL: http://localhost:8501

To stop the Streamlit server, press Ctrl-C.
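
If port 8501 is already in use, you can pick a different port with Streamlit's `--server.port` flag (8502 below is just an example):
````
python -m streamlit run babyagi.py --server.port 8502
````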

If the server is stopped (accidentally, manually, or otherwise), you may see the following popup error:



Just re-run Streamlit with the following command:
````
python -m streamlit run babyagi.py
````

# Using Poetry for Installation and Usage
If you prefer to install and run with Poetry, follow these instructions.

Install the required packages:
````
poetry install
````
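
`pyproject.toml` pins Python to `>=3.10.10,<3.12`, so if Poetry picks up an unsupported interpreter, point it at a compatible one first (a sketch; `3.11` stands in for whatever compatible version you have installed):
````
poetry env use 3.11
poetry install
````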

## Usage

Run Streamlit:
````
poetry run streamlit run babyagi.py
````

# Acknowledgments

I would like to express my gratitude to the developers whose code I referenced in creating this repo.

Special thanks go to:

[@yoheinakajima](https://github.com/yoheinakajima/babyagi)

[@hinthornw](https://github.com/hwchase17/langchain/pull/2559)

[@dory111111](https://github.com/dory111111/)

---
Roboto Logo Icon by [Icons Mind](https://iconscout.com/contributors/icons-mind) on [IconScout](https://iconscout.com)
--------------------------------------------------------------------------------
/babyagi.py:
--------------------------------------------------------------------------------
from collections import deque
from typing import Dict, List, Optional

from langchain import LLMChain, OpenAI, PromptTemplate
from langchain.embeddings import HuggingFaceEmbeddings
from langchain.llms import BaseLLM
from langchain.vectorstores import FAISS
from langchain.vectorstores.base import VectorStore
from pydantic import BaseModel, Field
import streamlit as st


class TaskCreationChain(LLMChain):
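    """Chain to generate new tasks from the result of the last completed task."""
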
    @classmethod
    def from_llm(cls, llm: BaseLLM, objective: str, verbose: bool = True) -> LLMChain:
        """Build the chain with the objective baked into the prompt."""
        task_creation_template = """
            You are a task creation AI that uses the result of an execution agent
            to create new tasks with the following objective: {objective}.
            The last completed task has the result: {result}.
            This result was based on this task description: {task_description}.
            These are incomplete tasks: {incomplete_tasks}.
            Based on the result, create new tasks to be completed
            by the AI system that do not overlap with incomplete tasks.
            Return the tasks as an array.
        """.strip()
        prompt = PromptTemplate(
            template=task_creation_template,
            partial_variables={"objective": objective},
            input_variables=["result", "task_description", "incomplete_tasks"],
        )
        return cls(prompt=prompt, llm=llm, verbose=verbose)

    def get_next_task(self, result: Dict, task_description: str, task_list: List[str]) -> List[Dict]:
        """Parse the LLM response into a list of new tasks."""
        incomplete_tasks = ", ".join(task_list)
        response = self.run(result=result, task_description=task_description, incomplete_tasks=incomplete_tasks)
        new_tasks = response.split('\n')
        return [{"task_name": task_name} for task_name in new_tasks if task_name.strip()]


class TaskPrioritizationChain(LLMChain):
    """Chain to prioritize tasks."""

    @classmethod
    def from_llm(cls, llm: BaseLLM, objective: str, verbose: bool = True) -> LLMChain:
        """Build the chain with the objective baked into the prompt."""
        task_prioritization_template = """
            You are a task prioritization AI tasked with cleaning the formatting of and reprioritizing
            the following tasks: {task_names}.
            Consider the ultimate objective of your team: {objective}.
            Do not remove any tasks. Return the result as a numbered list, like:
            #. First task
            #. Second task
            Start the task list with number {next_task_id}.
        """.strip()
        prompt = PromptTemplate(
            template=task_prioritization_template,
            partial_variables={"objective": objective},
            input_variables=["task_names", "next_task_id"],
        )
        return cls(prompt=prompt, llm=llm, verbose=verbose)

    def prioritize_tasks(self, this_task_id: int, task_list: List[Dict]) -> List[Dict]:
        """Prioritize tasks."""
        task_names = [t["task_name"] for t in task_list]
        next_task_id = int(this_task_id) + 1
        response = self.run(task_names=task_names, next_task_id=next_task_id)
        new_tasks = response.split('\n')
        prioritized_task_list = []
        for task_string in new_tasks:
            if not task_string.strip():
                continue
            task_parts = task_string.strip().split(".", 1)
            if len(task_parts) == 2:
                task_id = task_parts[0].strip()
                task_name = task_parts[1].strip()
                prioritized_task_list.append({"task_id": task_id, "task_name": task_name})
        return prioritized_task_list


class ExecutionChain(LLMChain):
    """Chain to execute tasks."""

    vectorstore: VectorStore = Field(init=False)

    @classmethod
    def from_llm(cls, llm: BaseLLM, vectorstore: VectorStore, verbose: bool = True) -> LLMChain:
        """Build the chain and attach the vector store used for context lookup."""
        execution_template = """
            You are an AI who performs one task based on the following objective: {objective}.
            Take into account these previously completed tasks: {context}.
            Your task: {task}.
            Response:
        """.strip()
        prompt = PromptTemplate(
            template=execution_template,
            input_variables=["objective", "context", "task"],
        )
        return cls(prompt=prompt, llm=llm, verbose=verbose, vectorstore=vectorstore)

    def _get_top_tasks(self, query: str, k: int) -> List[str]:
        """Get the top k tasks based on the query."""
        results = self.vectorstore.similarity_search_with_score(query, k=k)
        if not results:
            return []
        sorted_results, _ = zip(*sorted(results, key=lambda x: x[1], reverse=True))
        return [str(item.metadata['task']) for item in sorted_results]

    def execute_task(self, objective: str, task: str, k: int = 5) -> str:
        """Execute a task."""
        context = self._get_top_tasks(query=objective, k=k)
        return self.run(objective=objective, context=context, task=task)


class Message:
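    """Render a block of agent output as a Streamlit expander with a robot icon."""
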
    exp: st.expander
    ai_icon = "./img/robot.png"

    def __init__(self, label: str):
        message_area, icon_area = st.columns([10, 1])
        icon_area.image(self.ai_icon, caption="BabyAGI")

        # Expander
        self.exp = message_area.expander(label=label, expanded=True)

    def __enter__(self):
        return self

    def __exit__(self, ex_type, ex_value, trace):
        pass

    def write(self, content):
        self.exp.markdown(content)


class BabyAGI(BaseModel):
    """Controller model for the BabyAGI agent."""

    objective: str = Field(alias="objective")
    task_list: deque = Field(default_factory=deque)
    task_creation_chain: TaskCreationChain = Field(...)
    task_prioritization_chain: TaskPrioritizationChain = Field(...)
    execution_chain: ExecutionChain = Field(...)
    task_id_counter: int = Field(1)

    def add_task(self, task: Dict):
        self.task_list.append(task)

    def print_task_list(self):
        with Message(label="Task List") as m:
            m.write("### Task List")
            for t in self.task_list:
                m.write("- " + str(t["task_id"]) + ": " + t["task_name"])
            m.write("")

    def print_next_task(self, task: Dict):
        with Message(label="Next Task") as m:
            m.write("### Next Task")
            m.write("- " + str(task["task_id"]) + ": " + task["task_name"])
            m.write("")

    def print_task_result(self, result: str):
        with Message(label="Task Result") as m:
            m.write("### Task Result")
            m.write(result)
            m.write("")

    def print_task_ending(self):
        with Message(label="Task Ending") as m:
            m.write("### Task Ending")
            m.write("")

    def run(self, max_iterations: Optional[int] = None):
        """Run the agent."""
        num_iters = 0
        while True:
            if self.task_list:
                self.print_task_list()

                # Step 1: Pull the first task
                task = self.task_list.popleft()
                self.print_next_task(task)

                # Step 2: Execute the task
                result = self.execution_chain.execute_task(
                    self.objective, task["task_name"]
                )
                this_task_id = int(task["task_id"])
                self.print_task_result(result)

                # Step 3: Store the result in the FAISS vector store
                result_id = f"result_{task['task_id']}"
                self.execution_chain.vectorstore.add_texts(
                    texts=[result],
                    metadatas=[{"task": task["task_name"]}],
                    ids=[result_id],
                )

                # Step 4: Create new tasks and re-prioritize the task list
                new_tasks = self.task_creation_chain.get_next_task(
                    result, task["task_name"], [t["task_name"] for t in self.task_list]
                )
                for new_task in new_tasks:
                    self.task_id_counter += 1
                    new_task.update({"task_id": self.task_id_counter})
                    self.add_task(new_task)
                self.task_list = deque(
                    self.task_prioritization_chain.prioritize_tasks(
                        this_task_id, list(self.task_list)
                    )
                )
            num_iters += 1
            if max_iterations is not None and num_iters == max_iterations:
                self.print_task_ending()
                break

    @classmethod
    def from_llm_and_objectives(
        cls,
        llm: BaseLLM,
        vectorstore: VectorStore,
        objective: str,
        first_task: str,
        verbose: bool = False,
    ) -> "BabyAGI":
        """Initialize the BabyAGI Controller."""
        task_creation_chain = TaskCreationChain.from_llm(
            llm, objective, verbose=verbose
        )
        task_prioritization_chain = TaskPrioritizationChain.from_llm(
            llm, objective, verbose=verbose
        )
        execution_chain = ExecutionChain.from_llm(llm, vectorstore, verbose=verbose)
        controller = cls(
            objective=objective,
            task_creation_chain=task_creation_chain,
            task_prioritization_chain=task_prioritization_chain,
            execution_chain=execution_chain,
        )
        controller.add_task({"task_id": 1, "task_name": first_task})
        return controller


def main():
    st.set_page_config(
        initial_sidebar_state="expanded",
        page_title="BabyAGI UI",
        layout="centered",
    )

    with st.sidebar:
        openai_api_key = st.text_input("Your OpenAI API KEY", type="password")
        model_name = st.selectbox("Model name", options=["gpt-3.5-turbo", "gpt-4", "text-davinci-003"])
        temperature = st.slider(
            label="Temperature",
            min_value=0.0,
            max_value=1.0,
            step=0.1,
            value=0.5,
        )

    st.title("BabyAGI UI")
    objective = st.text_input("Input Ultimate goal", "Solve world hunger")
    first_task = st.text_input("Input Where to start", "Develop a task list")
    max_iterations = st.number_input("Max iterations", value=3, min_value=1, step=1)
    button = st.button("Run")

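    # The embedding model downloads from the Hugging Face hub on first use; FAISS is
    # seeded with a placeholder "_" text so the first similarity search has an entry.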
    embedding_model = HuggingFaceEmbeddings()
    vectorstore = FAISS.from_texts(["_"], embedding_model, metadatas=[{"task": first_task}])

    if button:
        try:
            baby_agi = BabyAGI.from_llm_and_objectives(
                llm=OpenAI(openai_api_key=openai_api_key, temperature=temperature, model_name=model_name),
                vectorstore=vectorstore,
                objective=objective,
                first_task=first_task,
                verbose=False,
            )
            baby_agi.run(max_iterations=max_iterations)
        except Exception as e:
            st.error(e)


if __name__ == "__main__":
    main()
--------------------------------------------------------------------------------
/img/conn_error.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/realminchoi/babyagi-ui/5ab0f7832f7e764c40a498bc9107912b3b585947/img/conn_error.png
--------------------------------------------------------------------------------
/img/robot.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/realminchoi/babyagi-ui/5ab0f7832f7e764c40a498bc9107912b3b585947/img/robot.png
--------------------------------------------------------------------------------
/pyproject.toml:
--------------------------------------------------------------------------------
[tool.poetry]
name = "babyagi-streamlit"
version = "1.0.0"
description = ""
authors = ["Dory "]

[tool.poetry.dependencies]
python = ">=3.10.10,<3.12"
openai = "^0.27.0"
langchain = ">=0.0.131"
python-dotenv = "^1.0.0"
faiss-cpu = "^1.7.3"
sentence-transformers = "^2.2.2"
streamlit = "^1.21.0"

[tool.poetry.group.dev.dependencies]
flake8 = "^6.0.0"
black = "^23.1.0"
isort = "^5.12.0"

[build-system]
requires = ["poetry-core>=1.0.0"]
build-backend = "poetry.core.masonry.api"
--------------------------------------------------------------------------------
/requirements.txt:
--------------------------------------------------------------------------------
openai
langchain
python-dotenv
faiss-cpu
sentence-transformers
streamlit
flake8
black
isort
pillow>=10.0.1 # not directly required, pinned by Snyk to avoid a vulnerability
--------------------------------------------------------------------------------