├── .gitignore
├── screenshot.png
├── requirements.txt
├── README.md
├── templates
│   └── index.html
└── app.py
/.gitignore:
--------------------------------------------------------------------------------
1 | **.history**
2 | **.vscode**
--------------------------------------------------------------------------------
/screenshot.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/shpetimhaxhiu/agi-taskgenius-gpt/HEAD/screenshot.png
--------------------------------------------------------------------------------
/requirements.txt:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/shpetimhaxhiu/agi-taskgenius-gpt/HEAD/requirements.txt
--------------------------------------------------------------------------------
/README.md:
--------------------------------------------------------------------------------
1 | # (mini) AGI TaskGenius GPT - AI-Powered Task Manager and Orchestrator
2 |
3 | TaskGenius is a groundbreaking AI-driven task manager and orchestrator that harnesses advanced natural language processing technology to generate, prioritize, and complete tasks based on user objectives. Built with Flask, LangChain, and FAISS, this app offers a seamless experience with OpenAI models. TaskGenius features real-time progress updates and an attractive user interface crafted with TailwindCSS, making it the ultimate solution for managing and automating intricate workflows.
4 |
5 | (mini) AGI TaskGenius GPT, a project wholly inspired by [Yohei Nakajima](https://twitter.com/yoheinakajima)'s pioneering efforts on [BabyAGI](https://github.com/yoheinakajima/babyagi/tree/main), further enhances his inventive groundwork by integrating a Flask and TailwindCSS UI.
6 |
7 | 
8 |
9 | ## Features
10 |
11 | - AI-fueled task generation, prioritization, and execution
12 | - Live progress tracking via Flask-SocketIO
13 | - Effortless integration with OpenAI models
14 | - Adjustable settings for objectives, temperature, verbosity, and maximum iterations
15 | - Visually engaging user interface utilizing TailwindCSS
16 |
17 |
18 | ## Installation
19 |
20 | 1. Clone the repository:
21 |
22 | ```bash
23 | git clone https://github.com/shpetimhaxhiu/agi-taskgenius-gpt.git
24 | ```
25 |
26 | 2. Install the necessary packages:
27 |
28 | ```bash
29 | pip install -r requirements.txt
30 | ```
31 |
32 | 3. Launch the Flask app:
33 |
34 | ```bash
35 | python app.py
36 | ```
37 |
38 | 4. Open your browser and navigate to `http://127.0.0.1:5000/` to access the TaskGenius app.
39 |
40 | ## Usage
41 |
42 | 1. Input your objective in the "Objective" field.
43 | 2. Adjust the temperature, verbosity, and maximum iterations as per your requirements.
44 | 3. Click "Run TaskGenius" to initiate the AI-driven task management process.
45 | 4. Monitor the real-time progress of task generation, prioritization, and execution in the "Result" section.
46 |
--------------------------------------------------------------------------------
/templates/index.html:
--------------------------------------------------------------------------------
1 |
2 |
3 |
4 |
5 |
6 | AGI TaskGenius GPT - AI-Powered Task Manager and Orchestrator
7 |
8 |
9 |
10 |
11 |
12 |
TaskGenius ⚡
13 |
35 |
36 |
37 |
70 |
71 |
--------------------------------------------------------------------------------
/app.py:
--------------------------------------------------------------------------------
1 |
2 | from flask import Flask, render_template, request
3 | from flask_socketio import SocketIO, emit
4 | from collections import deque
5 | from typing import Dict, List, Optional, Any
6 | from langchain import LLMChain, OpenAI, PromptTemplate
7 | from langchain.embeddings import OpenAIEmbeddings
8 | from langchain.llms import BaseLLM
9 | from langchain.vectorstores.base import VectorStore
10 | from pydantic import BaseModel, Field
11 | from langchain.chains.base import Chain
12 | from langchain.vectorstores import FAISS
13 | from langchain.docstore import InMemoryDocstore
14 | import faiss
15 | import os
16 |
# Embedding model used to vectorize task results for similarity search.
embeddings_model = OpenAIEmbeddings()

# Initialize the vectorstore as empty.  (The redundant second `import faiss`
# was removed; it is already imported at the top of the file.)
# 1536 is the dimensionality of OpenAI's text embeddings — TODO confirm it
# matches the embedding model actually configured.
embedding_size = 1536
index = faiss.IndexFlatL2(embedding_size)
vectorstore = FAISS(embeddings_model.embed_query, index, InMemoryDocstore({}), {})
25 |
26 |
27 |
class TaskCreationChain(LLMChain):
    """LLMChain that proposes follow-up tasks from the last execution result."""

    @classmethod
    def from_llm(cls, llm: BaseLLM, verbose: bool = True) -> LLMChain:
        """Build the chain with the task-creation prompt bound to *llm*."""
        template = (
            "You are an task creation AI that uses the result of an execution agent"
            " to create new tasks with the following objective: {objective},"
            " The last completed task has the result: {result}."
            " This result was based on this task description: {task_description}."
            " These are incomplete tasks: {incomplete_tasks}."
            " Based on the result, create new tasks to be completed"
            " by the AI system that do not overlap with incomplete tasks."
            " Return the tasks as an array."
        )
        return cls(
            llm=llm,
            verbose=verbose,
            prompt=PromptTemplate(
                template=template,
                input_variables=[
                    "result",
                    "task_description",
                    "incomplete_tasks",
                    "objective",
                ],
            ),
        )
54 |
class TaskPrioritizationChain(LLMChain):
    """LLMChain that re-ranks and cleans up the outstanding task list."""

    @classmethod
    def from_llm(cls, llm: BaseLLM, verbose: bool = True) -> LLMChain:
        """Build the chain with the prioritization prompt bound to *llm*."""
        template = (
            "You are an task prioritization AI tasked with cleaning the formatting of and reprioritizing"
            " the following tasks: {task_names}."
            " Consider the ultimate objective of your team: {objective}."
            " Do not remove any tasks. Return the result as a numbered list, like:"
            " #. First task"
            " #. Second task"
            " Start the task list with number {next_task_id}."
        )
        return cls(
            llm=llm,
            verbose=verbose,
            prompt=PromptTemplate(
                template=template,
                input_variables=["task_names", "next_task_id", "objective"],
            ),
        )
75 |
76 |
class ExecutionChain(LLMChain):
    """LLMChain that performs a single task toward the objective."""

    @classmethod
    def from_llm(cls, llm: BaseLLM, verbose: bool = True) -> LLMChain:
        """Build the chain with the execution prompt bound to *llm*."""
        template = (
            "You are an AI who performs one task based on the following objective: {objective}."
            " Take into account these previously completed tasks: {context}."
            " Your task: {task}."
            " Response:"
        )
        return cls(
            llm=llm,
            verbose=verbose,
            prompt=PromptTemplate(
                template=template,
                input_variables=["objective", "context", "task"],
            ),
        )
94 |
95 |
def get_next_task(
    task_creation_chain: LLMChain,
    result: Dict,
    task_description: str,
    task_list: List[str],
    objective: str,
) -> List[Dict]:
    """Ask the task-creation chain for follow-up tasks.

    Args:
        task_creation_chain: Chain that turns the last result into new tasks.
        result: Output of the task that just finished.
        task_description: Description of the task that produced ``result``.
        task_list: Names of tasks still pending (sent so the LLM avoids
            creating duplicates).
        objective: The overall objective driving the agent.

    Returns:
        One ``{"task_name": ...}`` dict per non-empty line of the chain's
        response.  Names are stripped of surrounding whitespace — previously
        blank lines were filtered with ``strip()`` but the stored names kept
        their raw whitespace, which leaked into later prompts and display.
    """
    response = task_creation_chain.run(
        result=result,
        task_description=task_description,
        incomplete_tasks=", ".join(task_list),
        objective=objective,
    )
    return [
        {"task_name": line.strip()}
        for line in response.split("\n")
        if line.strip()
    ]
113 |
114 |
def prioritize_tasks(
    task_prioritization_chain: LLMChain,
    this_task_id: int,
    task_list: List[Dict],
    objective: str,
) -> List[Dict]:
    """Re-rank the pending tasks via the prioritization chain.

    The chain is asked to return a numbered list starting at
    ``this_task_id + 1``; lines that do not contain an ``<id>. <name>``
    pair are ignored.
    """
    response = task_prioritization_chain.run(
        task_names=[t["task_name"] for t in task_list],
        next_task_id=int(this_task_id) + 1,
        objective=objective,
    )
    prioritized: List[Dict] = []
    for line in response.split("\n"):
        line = line.strip()
        if not line:
            continue
        # Expect "N. task name"; keep only lines that actually parse.
        task_id, sep, task_name = line.partition(".")
        if sep:
            prioritized.append(
                {"task_id": task_id.strip(), "task_name": task_name.strip()}
            )
    return prioritized
138 |
139 | def _get_top_tasks(vectorstore, query: str, k: int) -> List[str]:
140 | """Get the top k tasks based on the query."""
141 | results = vectorstore.similarity_search_with_score(query, k=k)
142 | if not results:
143 | return []
144 | sorted_results, _ = zip(*sorted(results, key=lambda x: x[1], reverse=True))
145 | return [str(item.metadata["task"]) for item in sorted_results]
146 |
147 |
def execute_task(
    vectorstore, execution_chain: LLMChain, objective: str, task: str, k: int = 5
) -> str:
    """Run *task* through the execution chain.

    Up to *k* previously completed tasks (retrieved from the vectorstore by
    similarity to the objective) are passed to the prompt as context.
    """
    prior_context = _get_top_tasks(vectorstore, query=objective, k=k)
    return execution_chain.run(objective=objective, context=prior_context, task=task)
154 |
class BabyAGI(Chain, BaseModel):
    """Controller model for the BabyAGI agent.

    Drives the loop: pop a task, execute it, store the result in the
    vectorstore, ask the creation chain for follow-up tasks, and re-rank
    the queue — until ``max_iterations`` is reached.
    """

    # Pending tasks, consumed from the left.
    task_list: deque = Field(default_factory=deque)
    task_creation_chain: TaskCreationChain = Field(...)
    task_prioritization_chain: TaskPrioritizationChain = Field(...)
    execution_chain: ExecutionChain = Field(...)
    # Monotonically increasing id assigned to newly created tasks.
    task_id_counter: int = Field(1)
    # NOTE(review): Field(init=False) is a dataclasses argument, not a
    # pydantic one; here the field is effectively just required — confirm.
    vectorstore: VectorStore = Field(init=False)
    # Stop after this many loop iterations (None = run until interrupted).
    max_iterations: Optional[int] = None
    # Transcript of everything printed/emitted during a run.
    # BUG FIX: this was previously a bare class attribute (`results = []`),
    # i.e. ONE list shared by every BabyAGI instance, so each socket run
    # appended to (and re-emitted) the transcript of all earlier runs.
    # Declaring it as a pydantic field with a default factory gives each
    # instance its own list.
    results: List[str] = Field(default_factory=list)

    class Config:
        """Configuration for this pydantic object."""

        # Needed because deque/VectorStore are not pydantic-native types.
        arbitrary_types_allowed = True

    def add_task(self, task: Dict):
        """Append *task* ({"task_id", "task_name"}) to the pending queue."""
        self.task_list.append(task)

    def print_task_list(self):
        """Log and emit the current pending task list."""
        print("\033[95m\033[1m" + "\n*****TASK LIST*****\n" + "\033[0m\033[0m")
        self.results.append(str("*****TASK LIST*****"))
        for t in self.task_list:
            print(str(t["task_id"]) + ": " + t["task_name"])
            self.results.append(str(t["task_id"]) + ": " + t["task_name"])
            emit("result", str(t["task_id"]) + ": " + t["task_name"])

    def print_next_task(self, task: Dict):
        """Log and emit the task about to be executed."""
        print("\033[92m\033[1m" + "\n*****NEXT TASK*****\n" + "\033[0m\033[0m")
        self.results.append(str("*****NEXT TASK*****"))
        print(str(task["task_id"]) + ": " + task["task_name"])
        self.results.append(str(task["task_id"]) + ": " + task["task_name"])
        emit("result", str(task["task_id"]) + ": " + task["task_name"])

    def print_task_result(self, result: str):
        """Log and emit the result of the task that just ran."""
        print("\033[93m\033[1m" + "\n*****TASK RESULT*****\n" + "\033[0m\033[0m")
        self.results.append(str("*****TASK RESULT*****"))
        print(result)
        self.results.append(result)
        emit("result", result)

    @property
    def input_keys(self) -> List[str]:
        return ["objective"]

    @property
    def output_keys(self) -> List[str]:
        return []

    def _call(self, inputs: Dict[str, Any]) -> Dict[str, Any]:
        """Run the agent loop until max_iterations is reached."""
        objective = inputs["objective"]
        first_task = inputs.get("first_task", "Make a todo list")
        self.add_task({"task_id": 1, "task_name": first_task})
        num_iters = 0
        while True:
            if self.task_list:
                self.print_task_list()

                # Step 1: Pull the first task
                task = self.task_list.popleft()
                self.print_next_task(task)

                # Step 2: Execute the task
                result = execute_task(
                    self.vectorstore, self.execution_chain, objective, task["task_name"]
                )
                this_task_id = int(task["task_id"])
                self.print_task_result(result)

                # Step 3: Store the result in the vectorstore so later tasks
                # can retrieve it as context.
                result_id = f"result_{task['task_id']}"
                self.vectorstore.add_texts(
                    texts=[result],
                    metadatas=[{"task": task["task_name"]}],
                    ids=[result_id],
                )

                # Step 4: Create new tasks and reprioritize task list
                new_tasks = get_next_task(
                    self.task_creation_chain,
                    result,
                    task["task_name"],
                    [t["task_name"] for t in self.task_list],
                    objective,
                )
                for new_task in new_tasks:
                    self.task_id_counter += 1
                    new_task.update({"task_id": self.task_id_counter})
                    self.add_task(new_task)
                self.task_list = deque(
                    prioritize_tasks(
                        self.task_prioritization_chain,
                        this_task_id,
                        list(self.task_list),
                        objective,
                    )
                )
            num_iters += 1
            if self.max_iterations is not None and num_iters == self.max_iterations:
                print(
                    "\033[91m\033[1m" + "\n*****TASK ENDING*****\n" + "\033[0m\033[0m"
                )
                self.results.append(str("*****TASK ENDING*****"))
                break
        return {}

    @classmethod
    def from_llm(
        cls, llm: BaseLLM, vectorstore: VectorStore, verbose: bool = False, **kwargs
    ) -> "BabyAGI":
        """Initialize the BabyAGI Controller with its three sub-chains."""
        task_creation_chain = TaskCreationChain.from_llm(llm, verbose=verbose)
        task_prioritization_chain = TaskPrioritizationChain.from_llm(
            llm, verbose=verbose
        )
        execution_chain = ExecutionChain.from_llm(llm, verbose=verbose)
        return cls(
            task_creation_chain=task_creation_chain,
            task_prioritization_chain=task_prioritization_chain,
            execution_chain=execution_chain,
            vectorstore=vectorstore,
            **kwargs,
        )
280 |
# Flask application plus the Socket.IO wrapper used for live progress updates.
app = Flask(__name__)
socketio = SocketIO(app)
283 |
# Serve CSS assets (e.g. TailwindCSS) from the static/css directory.
# BUG FIX: the route was '/css/' with no URL variable, but the view requires
# a `path` argument, so every request to it raised a TypeError. The
# <path:...> converter captures the remainder of the URL, including slashes.
@app.route('/css/<path:path>')
def static_css(path):
    """Serve the static file at static/css/<path>."""
    return app.send_static_file(os.path.join('css', path))
288 |
# Add a route for the index page
@app.route("/", methods=["GET"])
def index():
    """Render the TaskGenius single-page UI."""
    return render_template("index.html")
293 |
@socketio.on("run_babyagi")
def run_babyagi(data):
    """Socket.IO handler: run the BabyAGI loop for the submitted objective.

    Expects *data* with keys "objective", "temperature", "verbose" (the
    string "True"/"False") and "max_iterations".  Per-step progress is
    emitted as "result" events by BabyAGI itself; the full transcript is
    emitted once more when the run finishes.
    """
    objective = data["objective"]
    temperature = float(data["temperature"])
    verbose = data["verbose"] == "True"
    max_iterations = int(data["max_iterations"])

    # Set up and run the BabyAGI agent.  Its return value is always {}
    # (see BabyAGI._call), so it is intentionally not bound.
    llm = OpenAI(temperature=temperature)
    baby_agi = BabyAGI.from_llm(
        llm=llm, vectorstore=vectorstore, verbose=verbose, max_iterations=max_iterations
    )
    baby_agi({"objective": objective})

    emit("result", baby_agi.results)
309 |
if __name__ == "__main__":
    # Run under the Socket.IO development server (debug reloader enabled);
    # plain app.run() would break the WebSocket transport.
    socketio.run(app, debug=True)
--------------------------------------------------------------------------------