├── .DS_Store ├── .gitignore ├── LLM_response.py ├── README.md ├── agents.py ├── app.py ├── execute_code.py ├── imgs ├── image.png ├── pic-1.png ├── pic-10.png ├── pic-11.png ├── pic-12.png ├── pic-2.png ├── pic-3.png ├── pic-4.png ├── pic-5.png ├── pic-6.png ├── pic-7.png ├── pic-8.png ├── pic-9.png ├── thumbnail.png └── thumbnail2.png ├── interact_AGENT.py ├── miscellaneous.py ├── prompts ├── Agent_prompt.py └── Child_Agent_prompt.py ├── requirements.txt ├── static ├── css │ ├── create_agent.css │ └── styles.css └── js │ ├── app.js │ └── script.js ├── templates ├── base.html ├── components │ ├── chat_window.html │ ├── input_group.html │ └── loading_message.html ├── create_agent.html └── index.html └── terminal_ui ├── terminal_animation.py └── terminal_animation2.py /.DS_Store: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/vishnugamini/LocalLLMAgent/c5e4804356e502e50d496967e2b4aab97397cd76/.DS_Store -------------------------------------------------------------------------------- /.gitignore: -------------------------------------------------------------------------------- 1 | .env 2 | __pycache__ 3 | playground.py 4 | render -------------------------------------------------------------------------------- /LLM_response.py: -------------------------------------------------------------------------------- 1 | from openai import OpenAI 2 | from pydantic import BaseModel 3 | from typing import Optional 4 | from prompts.Agent_prompt import system_msg 5 | import os 6 | import copy 7 | import time 8 | from dotenv import load_dotenv 9 | 10 | load_dotenv() 11 | 12 | 13 | key = os.getenv("OPENAPI_KEY") 14 | client = OpenAI(api_key=key) 15 | msg = copy.deepcopy(system_msg) 16 | 17 | 18 | class Tool(BaseModel): 19 | tool_name: str 20 | required: bool 21 | thinking_phase: str 22 | important_parameter: str 23 | print_statement_to_add: str 24 | code: Optional[str] = None 25 | query: Optional[str] = None 26 | 27 | 28 | class Message(BaseModel): 29 | message_from_the_user: str 30 | tasks_to_achieve: str 31 | immediate_task_to_achieve: str 32 | message_to_the_user: str 33 | tool: Tool 34 | call_myself: str 35 | 36 | 37 | def refresh(): 38 | global msg 39 | original_system_message = copy.deepcopy(system_msg) 40 | msg = original_system_message 41 | return "Memory Refreshed" 42 | 43 | 44 | def add_context(role, message): 45 | global msg 46 | msg.append({"role": role, "content": message}) 47 | 48 | 49 | def llm(): 50 | global msg 51 | try: 52 | completion = client.beta.chat.completions.parse( 53 | model="gpt-4.1-2025-04-14", # or "gpt-4o-2024-08-06" (expensive but better output in some instances) 54 | messages=msg, 55 | response_format=Message, 56 | # reasoning_effort="medium" 57 | ) 58 | content = completion.choices[0].message.content 59 | add_context("assistant", content) 60 | return content 61 | except Exception as e: 62 | print(f"Error Occurred \n {e}") 63 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | # `Local LLM Agent` 2 | ### The Local LLM Agent is an AI-powered tool designed to automate complex and time-consuming tasks directly on your device. By leveraging an LLM's ability to `attend` to relevant chunks of data, this tool can autonomously complete entire workflows, saving you hours of manual effort. The agent is capable of recursive self-calling, allowing it to self-correct and achieve the tasks presented in the query.
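The loop that drives this behavior is spread across `LLM_response.py`, `execute_code.py`, and `interact_AGENT.py` (all included below). The following is a condensed, illustrative sketch of that self-calling loop rather than the full implementation: only the Python tool is shown, and error handling, the search/picture/install tools, and the sub-agent path are omitted.

```python
# Minimal sketch of the agent's recursive self-calling loop (see interact_AGENT.py for the full version).
import json
from LLM_response import llm, add_context
from execute_code import exec_code


def run_agent(prompt):
    add_context("user", prompt)              # seed the conversation with the user's task
    call_myself = "true"
    while call_myself == "true":
        response = json.loads(llm())         # structured reply matching the Message schema
        print(response["message_to_the_user"])
        call_myself = response["call_myself"]
        tool = response["tool"]
        # The agent acts through tools; the Python tool feeds compiler output back into
        # the conversation, which is what lets it self-correct on the next iteration.
        if call_myself == "true" and tool["tool_name"] == "python" and tool["code"] != "None":
            output = exec_code(tool["code"])
            add_context("user", f"OUTPUT FROM PYTHON COMPILER {output['output']}")
```

Each iteration either emits a message to the user, runs a tool, or both; the loop ends once the model sets `call_myself` to `"false"`.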
3 | 4 | ## `How is it different from other Agents?` 5 | ### My tool works much like a chatbot: no external coding expertise is required, and you do not need to explicitly create multiple agents. A single prompt kicks off the workflow, and the agent continuously works towards fulfilling the user's request. And just like a chatbot, you can review the work done and ask for it to be modified if you are dissatisfied. 6 | 7 | ### `The Agent can be executed in 2 ways:` 8 | 1. GUI (Provides intermediary outputs at every step) 9 | 2. TERMINAL (Lightweight Alternative) 10 | 11 | ## Let's look at a video of one aspect of its functionality. 12 | https://github.com/user-attachments/assets/d1af0e9f-5567-4e5a-be8c-0ebddad02326 13 | 14 | ## Custom Agents 15 | https://github.com/user-attachments/assets/26988225-825e-4722-a42e-efe070dc9769 16 | 17 | ## Demonstration of Research Functionality 18 | https://github.com/user-attachments/assets/8a940923-afd9-4326-9a68-acfd5f397e27 19 | 20 | # `Workflow Diagram` 21 | ![workflow](https://github.com/user-attachments/assets/71e2c360-f1be-4d50-8714-1ef79e1a3b15) 22 | 23 | # `Current Features and Capabilities` 24 | 25 | ### 1. `Application Development` 26 | - **Develop Web Applications & Games:** The agent can design and build fully functional web applications and games, saving them directly to your system. 27 | 28 | ### 2. `Code Debugging & Self-Correction` 29 | - **Automatic Code Debugging:** The agent can analyze, debug, and fix code by interacting with your local environment. 30 | - **Self-Correction:** The agent evaluates compiler outputs, rewrites faulty code, and re-executes it to achieve success. 31 | 32 | ### 3. `File System Interaction` 33 | - **Manage Files Seamlessly:** The agent can search, update, delete, and create files/folders on your system. It can also open files. 34 | - **Local Environment Access:** It interacts with your local Python environment to run scripts. 35 | 36 | ### 4. `Version Control Integration` 37 | - **GitHub Integration:** Push code changes, create repositories, or manage version control seamlessly. 38 | - *Example:* Automatically push your latest project updates to GitHub. 39 | 40 | ### 5. `Data Analysis` 41 | - **Analyze Data Locally:** The agent can analyze CSV or other datasets and generate charts or visualizations. 42 | - *Example:* Create a line graph comparing sales data over time. 43 | 44 | ### 6. `Internet Search & Research` 45 | - **Internet Search Functionality:** The agent can search the web for real-time information, news, or technical resources. 46 | 47 | ### 7. `Research & Summarization` 48 | - Conduct research on any topic and summarize the findings or draft blog posts. 49 | 50 | ### 8. `Download/Generate Pictures` 51 | - **Search, Download & Generate Pictures:** The agent has a picture tool that can download existing pictures or generate new ones on the fly. 52 | 53 | ## `Automating Menial Tasks` 54 | The Local LLM Agent is not just for high-level tasks; it excels at automating day-to-day repetitive activities, saving you hours of manual work. 55 | 56 | SOME EXAMPLES ARE: 57 | - `PPT/PDF Preparation:` Prepare a PPT about a topic within seconds with just a simple prompt. 58 | - `PDF Merging:` Combine multiple PDF files into one with a simple command (see the sketch below this list). 59 | - `URL Shortening:` Shorten long URLs for sharing. 60 | - `File Sorting:` Automatically move, rename, or delete files based on predefined conditions (e.g., file type, date created).
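To make these examples concrete, here is a sketch of the kind of short script the agent typically writes and runs through its Python tool for the PDF-merging case. The file names are placeholders, and it assumes the `pypdf` package is available; the agent would install it with its `install` tool first if it were missing.

```python
# Illustrative script the agent might generate for "merge these PDFs into one file".
from pypdf import PdfWriter

input_files = ["report_part1.pdf", "report_part2.pdf"]  # placeholder file names
writer = PdfWriter()
for path in input_files:
    writer.append(path)                     # append every page of each source PDF
writer.write("merged_report.pdf")
print("Created merged_report.pdf")          # printed so the compiler output confirms success
```

The explicit `print` at the end follows the agent's own convention of always printing results, so the compiler output it reads back confirms that the task actually succeeded.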
61 | 62 | --- 63 | ## `Comprehensive Prompts` 64 | You can ask the Local LLM Agent to perform virtually any task by providing a comprehensive and clear prompt. Whether you need to develop an application, shorten URLs, or merge files, the agent will autonomously work through the task until it is completed successfully. 65 | 66 | ## `A few more examples of its work using the comprehensive prompt approach:` 67 | ## `Example Output: Tic Tac Toe with Smart AI Opponent` 68 | With just a single prompt, the Local LLM Agent can create a fully functional Tic Tac Toe game featuring a smart AI opponent. 69 | 70 | ![Local LLM Agent Image](imgs/pic-1.png) 71 | 72 | ## `Example Output: Analyze stock trends and predict the price of stocks/crypto for the next day` 73 | The agent provides investment advice by thoroughly examining stock trends, plotting graphs, and running ML models to predict the stock price for the coming days. 74 | 75 | ![Data Example](imgs/pic-5.png) 76 | 77 | ## How to Use the Local LLM Agent 78 | 79 | To start using the Local LLM Agent, follow these steps: 80 | 81 | 1. **Clone the Repository**: First, clone the repository to your local machine using the following command: 82 | ```bash 83 | git clone https://github.com/vishnugamini/LLMAgent 84 | 2. **Navigate to the Project Directory**: Move into the project directory: 85 | ```bash 86 | cd LLMAgent 87 | 3. **Create a `.env` File**: Create a `.env` file in the root directory of the project and include your OpenAI and Perplexity (optional, used to browse the internet) API keys. The `.env` file should look like this: 88 | ```bash 89 | OPENAPI_KEY = "your-openai-api-key" 90 | PERPLEXITY_API = "your-perplexity-api-key" 91 | 4. **Install Dependencies**: Install the necessary Python dependencies by running: 92 | ```bash 93 | pip install -r requirements.txt 94 | 5. **Run the Agent**: Start the agent by executing the following command: 95 | 96 | ### `UI Version` 97 | python app.py 98 | #### Access the app here: 99 | http://localhost:5000/ 100 | 101 | ### `Terminal Version` 102 | python interact_AGENT.py 103 | 104 | 6. **Interact with the Agent**: Once the agent is running, you can start interacting with it through the web UI or the terminal. You can give it tasks like "Create a Tic Tac Toe game" or "Debug this piece of code," and the agent will handle everything from development to debugging and even self-correction. 105 | 106 | ## `Example Workflow (Terminal Version)` 107 | 108 | Here, the agent is tasked with creating a PPT about AI using a beautiful template. 109 | 110 | 1. `Starts off with a web search for PPT templates` 111 | 2. `Tries to create the PPT with a Python framework but realizes that the framework is not installed` 112 | 3. `Uses the install tool to install the python-pptx framework` 113 | 4. 
`proceeds to create the ppt` 114 | ### `OUTPUT: ` 115 | ![Data Example](imgs/pic-10.png) 116 | ![Data Example](imgs/pic-11.png) 117 | ### `Provides a multi-page ppt about AI with a beautiful template` 118 | ![Data Example](imgs/pic-12.png) 119 | 120 | -------------------------------------------------------------------------------- /agents.py: -------------------------------------------------------------------------------- 1 | import requests 2 | import os 3 | from dotenv import load_dotenv 4 | import json 5 | import sys 6 | import subprocess 7 | from openai import OpenAI 8 | from pydantic import BaseModel 9 | from typing import Optional 10 | from prompts.Child_Agent_prompt import system_msg 11 | from execute_code import exec_code 12 | import threading 13 | import copy 14 | import random 15 | 16 | 17 | from terminal_ui.terminal_animation2 import ( 18 | sub_search_dots, 19 | sub_thinking_dots, 20 | sub_picture_message, 21 | sub_search_message, 22 | sub_compiler_message, 23 | sub_user_message, 24 | sub_install_module, 25 | sub_uninstall_module, 26 | ) 27 | 28 | load_dotenv() 29 | 30 | 31 | Perp_msg = [ 32 | { 33 | "role": "system", 34 | "content": "Be very precise as the tokens you produce will affect another LLM's response. The information should be up to date, you will be mostly used for searches. Provide valid responses to the LLM. Do not provide extraneous, unsolicited content.The content must be Verbose and Valid. If the agent asks for a link, provide th actual link of the whatever's been asked without any wrapper on top", 35 | }, 36 | ] 37 | 38 | class PerpSearch: 39 | def refresh(self): 40 | self.msg = copy.deepcopy(Perp_msg) 41 | self.payload["messages"] = self.msg 42 | 43 | def __init__(self): 44 | self.key = os.getenv("PERPLEXITY_API") 45 | self.url = "https://api.perplexity.ai/chat/completions" 46 | self.msg = copy.deepcopy(Perp_msg) 47 | self.payload = { 48 | "model": "sonar", 49 | "messages": self.msg, 50 | "temperature": 0.2, 51 | "top_p": 0.9, 52 | "search_recency_filter": "month", 53 | "top_k": 0, 54 | "stream": False, 55 | "presence_penalty": 0, 56 | "frequency_penalty": 1, 57 | } 58 | self.headers = { 59 | "Authorization": f"Bearer {self.key}", 60 | "Content-Type": "application/json", 61 | } 62 | 63 | def search(self, query): 64 | self.msg.append({"role": "user", "content": query}) 65 | self.payload["messages"] = self.msg 66 | response = requests.request( 67 | "POST", self.url, json=self.payload, headers=self.headers 68 | ) 69 | response = json.loads(response.text) 70 | print(response) 71 | response = response["choices"][0]["message"]["content"] 72 | self.msg.append({"role": "assistant", "content": response}) 73 | return response 74 | 75 | 76 | class GenerateImage: 77 | def __init__(self): 78 | self.key = os.getenv("OPENAPI_KEY") 79 | self.client = OpenAI(api_key=self.key) 80 | 81 | def generate(self, query): 82 | response = self.client.images.generate( 83 | model="dall-e-3", 84 | style="natural", 85 | prompt=query, 86 | size="1024x1024", 87 | quality="standard", 88 | n=1, 89 | ) 90 | image_url = response.data[0].url 91 | return image_url 92 | 93 | from miscellaneous import shorten_urls 94 | class PicSearch: 95 | def __init__(self): 96 | self.store = [] 97 | self.url = "https://pixabay.com/api/" 98 | self.params = { 99 | "key": os.getenv("PIXABAY_API"), 100 | "q": "", 101 | "image_type": "photo", 102 | "pretty": "true", 103 | } 104 | 105 | def picSearch(self, query): 106 | query = query.split(" ") 107 | query = "+".join(query) 108 | self.params["q"] = query 109 | response = 
requests.get(self.url, params=self.params) 110 | results = response.json() 111 | results = results["hits"] 112 | try: 113 | size = len(results) 114 | count = 5 115 | while count >= size: 116 | count = count - 1 117 | r = random.randint(0, size - count) 118 | for links in range(r, r + count): 119 | self.store.append(results[links]["webformatURL"]) 120 | 121 | self.store = shorten_urls(self.store) 122 | return self.store 123 | except: 124 | return "no pictures found! make the search query simpler" 125 | 126 | finally: 127 | self.store = [] 128 | class PicDownloader: 129 | class Format(BaseModel): 130 | type: str 131 | code: str 132 | 133 | def __init__(self, query): 134 | self.key = os.getenv("OPENAPI_KEY") 135 | self.query = query 136 | self.client = OpenAI(api_key=self.key) 137 | 138 | def act(self,code): 139 | exec(code) 140 | 141 | def initiate(self): 142 | messages = [{"role": "system", "content": "Your job is to indentify if the code being given to you is downloading a picture or not and it is downlaoding picture, modify the code such that picture or pictures is/are downloaded to the folder 'render' with names same as the ones specified in the code being supplied to you."}, {"role": "system","content": '''the output format is in this manner 143 | {"type": "True or False", "code": "entire modified code of python"}. type should only be true if the code given is downloading pictures'''} 144 | ] 145 | messages.append({"role": "user", "content": self.query}) 146 | completion = self.client.beta.chat.completions.parse( 147 | model="gpt-4o-mini-2024-07-18", messages=messages, response_format=self.Format 148 | ) 149 | content = completion.choices[0].message.content 150 | messages.append({"role": "assistant", "content": content}) 151 | msg = json.loads(content) 152 | type = msg["type"] 153 | code = msg["code"] 154 | if type: 155 | self.act(code) 156 | 157 | class InstallModule: 158 | def __init__(self): 159 | pass 160 | 161 | def install(self, module): 162 | try: 163 | result = subprocess.run( 164 | [sys.executable, "-m", "pip", "install", module], 165 | stdout=subprocess.PIPE, 166 | stderr=subprocess.PIPE, 167 | text=True, 168 | check=True, 169 | ) 170 | return { 171 | "output": f"{module} module installed successfully", 172 | "error": False, 173 | "code": f"pip install {module}" 174 | } 175 | except subprocess.CalledProcessError as e: 176 | return { 177 | "output": f"Error occurred while installing {module}:\n{e.stderr}", 178 | "error": True, 179 | } 180 | 181 | def uninstall(self, module): 182 | try: 183 | result = subprocess.run( 184 | [sys.executable, "-m", "pip", "uninstall", module, "-y"], 185 | stdout=subprocess.PIPE, 186 | stderr=subprocess.PIPE, 187 | text=True, 188 | check=True, 189 | ) 190 | return { 191 | "output": f"{module} module uninstalled successfully", 192 | "error": False, 193 | } 194 | except subprocess.CalledProcessError as e: 195 | return { 196 | "output": f"Error occurred while uninstalling {module}:\n{e.stderr}", 197 | "error": True, 198 | } 199 | 200 | 201 | search = PerpSearch() 202 | picture = PicSearch() 203 | install = InstallModule() 204 | original_system_message = copy.deepcopy(system_msg) 205 | 206 | message = [ 207 | { 208 | "role": "system", 209 | "content": "You will be given code, which you have to deem as either html, css or js or none. The code may include contents of python, but that is none of our concern. Your goal is to identify the traces either html,css or js and you have to extact the html,css or js code. 
The code will surely include python code as well, but you aim is to extract the html,css and js part. always ensure to change the name of js and css files cited in html file to 'scripts.js' and 'styles.css. If you encounter a case where all html,css,js are in the same code, extract all of it into html file and name it index.html. Always regardless of whether the original file contains script and styles tags, you still have to add them, pointing towards styles.css and scripts.js.Remember it should always be scripts.js and styles.css. If a html file has html, css and js, that must also be extracted for styling and functionality. dont miss the embedded css and js in the html document as it is crucial" 210 | }, 211 | { 212 | "role": "system", 213 | "content": """This is how you provide your output {'file_type': 'provide the file type (html,css,js, none)', 'code': 'extracted html,css or js code only. None if no traces of html,css or js'}""", 214 | }, 215 | ] 216 | class file_judger: 217 | class Format(BaseModel): 218 | file_type: str 219 | code: str 220 | 221 | def __init__(self, query): 222 | self.key = os.getenv("OPENAPI_KEY") 223 | self.query = query 224 | self.client = OpenAI(api_key=self.key) 225 | self.messages = copy.deepcopy(message) 226 | 227 | def initiate(self): 228 | self.messages.append({"role": "user", "content": self.query}) 229 | completion = self.client.beta.chat.completions.parse( 230 | model="gpt-4o-mini-2024-07-18", messages=self.messages, response_format=self.Format 231 | ) 232 | content = completion.choices[0].message.content 233 | self.messages.append({"role": "assistant", "content": content}) 234 | self.act(content) 235 | 236 | def act(self, content): 237 | json_content = json.loads(content) 238 | file_type = json_content["file_type"] 239 | code = json_content["code"] 240 | 241 | if not os.path.exists("render"): 242 | os.makedirs("render") 243 | 244 | if file_type != "none": 245 | if file_type == "html": 246 | with open("render/index.html", "w") as file: 247 | file.write(code) 248 | elif file_type == "css": 249 | with open("render/styles.css", "w") as file: 250 | file.write(code) 251 | elif file_type == "js": 252 | with open("render/scripts.js", "w") as file: 253 | file.write(code) 254 | elif file_type == "python": 255 | exec(code) 256 | self.messages = copy.deepcopy(message) 257 | 258 | class Code_Fixer: 259 | class Format(BaseModel): 260 | error_description: str 261 | code: str 262 | 263 | def __init__(self, query): 264 | self.key = os.getenv("OPENAPI_KEY") 265 | self.query = query 266 | self.client = OpenAI(api_key=self.key) 267 | 268 | def initiate(self): 269 | 270 | messages = [ 271 | { 272 | "role": "system", 273 | "content": "You are specialist in providing remedy to the incorrect code and error provided to you. 
The code is being ran in a python environment.", 274 | }, 275 | { 276 | "role": "system", 277 | "content": "when writing a multi-line html, css, js, python code using this ('''), ensure that you dont include '\n' in it as the code will run into an error", 278 | }, 279 | { 280 | "role": "system", 281 | "content": " Always implement the code directly without any string formatting issues.", 282 | }, 283 | { 284 | "role": "system", 285 | "content": """This is how you provide your output {'error_description': 'There is wehere you write about the error', 'code': 'complete corrected python code for execution'}""", 286 | }, 287 | ] 288 | messages.append({"role": "user", "content": self.query}) 289 | completion = self.client.beta.chat.completions.parse( 290 | model="gpt-4o-mini-2024-07-18", messages=messages, response_format=self.Format 291 | ) 292 | content = completion.choices[0].message.content 293 | messages.append({"role": "assistant", "content": content}) 294 | return content 295 | 296 | 297 | class Sub_Agent: 298 | class Tool(BaseModel): 299 | tool_name: str 300 | required: bool 301 | thinking_phase: str 302 | important_parameter: str 303 | print_statement_to_add: str 304 | code: Optional[str] = None 305 | query: Optional[str] = None 306 | 307 | class Message(BaseModel): 308 | message_from_SeniorAgent: str 309 | tasks_to_achieve: str 310 | immediate_task_to_achieve: str 311 | message_to_Senior_agent: str 312 | tool: "Sub_Agent.Tool" 313 | call_myself: str 314 | 315 | def __init__(self): 316 | self.key = os.getenv("OPENAPI_KEY") 317 | self.client = OpenAI(api_key=self.key) 318 | self.msg = copy.deepcopy(system_msg) 319 | 320 | def add_context(self, role, message): 321 | self.msg.append({"role": role, "content": message}) 322 | 323 | def llm(self): 324 | try: 325 | completion = self.client.beta.chat.completions.parse( 326 | model="gpt-4o-mini-2024-07-18", 327 | messages=self.msg, 328 | response_format=self.Message, 329 | ) 330 | content = completion.choices[0].message.content 331 | self.add_context("assistant", content) 332 | return content 333 | except Exception as e: 334 | print(f"Error Occured \n {e}") 335 | 336 | def initiate(self, query): 337 | self.add_context("user", f"MESSAGE FROM SUPERIOR AGENT: {query}") 338 | call_myself = True 339 | msg_to_agent = "" 340 | while call_myself != "false": 341 | spinner_thread = threading.Thread(target=sub_thinking_dots) 342 | spinner_thread.start() 343 | 344 | response = self.llm() 345 | 346 | spinner_thread.do_run = False 347 | spinner_thread.join() 348 | 349 | response_json = json.loads(response) 350 | msg_to_agent = response_json["message_to_Senior_agent"] 351 | sub_user_message(msg_to_agent) 352 | call_myself = response_json["call_myself"] 353 | 354 | tool = response_json["tool"]["tool_name"] 355 | code = response_json["tool"]["code"] 356 | query = response_json["tool"]["query"] 357 | 358 | if tool == "python" and code != "None": 359 | output = exec_code(code) 360 | error = output["error"] 361 | if error == True: 362 | self.add_context( 363 | "user", f"OUTPUT FROM PYTHON COMPILER {output['output']}" 364 | ) 365 | whole = f'CODE: {code} \n ERROR: {output["output"]}' 366 | fixer = Code_Fixer(whole) 367 | solution = fixer.initiate() 368 | self.add_context( 369 | "user", 370 | f"Suggestion to fix the code from another agent. Follow this to mitigate the error. 
{solution}", 371 | ) 372 | else: 373 | self.add_context( 374 | "user", f"OUTPUT FROM PYTHON COMPILER {output['output']}" 375 | ) 376 | sub_compiler_message(output) 377 | 378 | elif tool == "install" and query != "None": 379 | sub_install_module(query) 380 | output = install.install(query) 381 | sub_compiler_message(output) 382 | self.add_context("user", f"OUTPUT FROM INSTALLATION {output}") 383 | 384 | elif tool == "uninstall" and query != "None": 385 | sub_uninstall_module(query) 386 | output = install.uninstall(query) 387 | sub_compiler_message(output) 388 | self.add_context("user", f"OUTPUT FROM INSTALLATION {output}") 389 | 390 | elif tool == "search" and query != "None": 391 | spinner_thread = threading.Thread(target=sub_search_dots) 392 | spinner_thread.start() 393 | 394 | output = search.search(query) 395 | 396 | spinner_thread.do_run = False 397 | spinner_thread.join() 398 | 399 | sub_search_message() 400 | self.add_context( 401 | "user", 402 | f"OUTPUT FROM SEARCH RESULTS (NOT VISIBLE TO USER, must be summarized in message to user if needed): {output}", 403 | ) 404 | 405 | elif tool == "picture" and query != "None": 406 | sub_picture_message() 407 | results = picture.picSearch(query) 408 | self.add_context( 409 | "user", 410 | f"OUTPUT FROM PICTURE SEARCH RESULTS {results}. Now you can proceed to download these using python if the user asked", 411 | ) 412 | 413 | self.msg = copy.deepcopy(system_msg) 414 | return msg_to_agent 415 | 416 | class Labels: 417 | class research(BaseModel): 418 | think: str 419 | labels: str 420 | 421 | def __init__(self): 422 | self.key = os.getenv("OPENAPI_KEY") 423 | self.client = OpenAI(api_key=self.key) 424 | self.messages_gpt = [ 425 | {"role": "system", "content": "You are an research bot. You will be given a query which essentially needs to be searched. But you dont search, rather you break the query down into 3 or more key queries which when searched should yield the entire information regarding the original query. If the topic needs comrehensive infomation to be searched, labels can be more than 3(like a vast topic).The labels are full sentence which are questions as to what needs to be searched. We are trying to do a deeper research which is why breaking down the contents of query is essential. If enough information is not provided, link the context to previous qeuries."}, 426 | {"role": "system", "content": "You have to reply in json format. the format is as follows {'think': 'this is the space for you to use chain of thought to identify the key points to be needed to searched about to answer the question full fledgedly', 'labels': 'Here the labels to search for are presented seperated by '[]'. example: who is the president of US[]Why has he done so and so[]impact of social media'}"} 427 | ] 428 | 429 | def update_mem(self,user,message): 430 | self.messages_gpt.append({"role": user, "content": message}) 431 | def initiate(self, query): 432 | self.update_mem("user", query) 433 | completion = self.client.beta.chat.completions.parse( 434 | model="gpt-4o-mini-2024-07-18", 435 | messages=self.messages_gpt, 436 | response_format=self.research, 437 | ) 438 | event = completion.choices[0].message.content 439 | self.update_mem('assistant', event) 440 | event = json.loads(event) 441 | labels = event['labels'] 442 | labels = labels.split("[]") 443 | return {"labels": labels, "think": event["think"]} 444 | 445 | 446 | deep_msg = [ 447 | { 448 | "role": "system", 449 | "content": "You are an assistant that provides up to date information about the query. 
You need to make sure you only provide information pertinent to the topic only.You can also provide links", 450 | }, 451 | ] 452 | class DeepResearch: 453 | def refresh(self): 454 | self.msg = copy.deepcopy(deep_msg) 455 | self.payload["messages"] = self.msg 456 | def __init__(self): 457 | self.key = os.getenv("PERPLEXITY_API") 458 | self.url = "https://api.perplexity.ai/chat/completions" 459 | self.msg = copy.deepcopy(deep_msg) 460 | self.payload = { 461 | "model": "llama-3.1-sonar-small-128k-online", 462 | "messages": self.msg, 463 | "temperature": 0.2, 464 | "top_p": 0.9, 465 | "return_citations": True, 466 | "search_domain_filter": ["perplexity.ai"], 467 | "return_images": True, 468 | "return_related_questions": True, 469 | "search_recency_filter": "month", 470 | "top_k": 0, 471 | "stream": False, 472 | "presence_penalty": 0, 473 | "frequency_penalty": 1, 474 | } 475 | self.headers = { 476 | "Authorization": f"Bearer {self.key}", 477 | "Content-Type": "application/json", 478 | } 479 | 480 | def search(self, query): 481 | self.msg.append({"role": "user", "content": query}) 482 | response = requests.request( 483 | "POST", self.url, json=self.payload, headers=self.headers 484 | ) 485 | response = json.loads(response.text) 486 | response = response["choices"][0]["message"]["content"] 487 | self.msg.append({"role": "assistant", "content": response}) 488 | return response 489 | 490 | research_msg = [ 491 | { 492 | "role": "system", 493 | "content": "If the query requires you to answer a question, answer the question with the info provided and proceed with these instrucitions: You are a summarizer and info organizer bot. You will recieve large amounts of content with headings, along with the query. you should not condense the information too much. The content you write must be in decorative readme format. Everything must be in readme format with header, higlighter, etc. 
Everything must be in an orderly manner" 494 | }, 495 | { 496 | "role": "system", 497 | "content": """This is how you provide your output {'readme_content': 'provide the readme content here'}""", 498 | }, 499 | ] 500 | class ReseachSummary: 501 | def refresh(self): 502 | self.msg = copy.deepcopy(research_msg) 503 | 504 | class Format(BaseModel): 505 | readme_content: str 506 | 507 | def __init__(self): 508 | self.messages = copy.deepcopy(research_msg) 509 | self.key = os.getenv("OPENAPI_KEY") 510 | self.client = OpenAI(api_key=self.key) 511 | 512 | def initiate(self,query): 513 | self.messages.append({"role": "user", "content": query}) 514 | completion = self.client.beta.chat.completions.parse( 515 | model="gpt-4o-mini-2024-07-18", messages=self.messages, response_format=self.Format 516 | ) 517 | content = completion.choices[0].message.content 518 | self.messages.append({"role": "assistant", "content": content}) 519 | content = json.loads(content) 520 | return content['readme_content'] 521 | 522 | -------------------------------------------------------------------------------- /app.py: -------------------------------------------------------------------------------- 1 | from flask import Flask, render_template, request 2 | from flask import send_from_directory 3 | import time 4 | from flask_socketio import SocketIO, emit 5 | from miscellaneous import shorten_url 6 | import threading 7 | import json 8 | import logging 9 | import uuid 10 | import webbrowser 11 | import os 12 | from LLM_response import llm, add_context, refresh 13 | from execute_code import exec_code 14 | from threading import Event 15 | from agents import ( 16 | PerpSearch, 17 | PicSearch, 18 | InstallModule, 19 | Sub_Agent, 20 | Code_Fixer, 21 | GenerateImage, 22 | file_judger, 23 | PicDownloader, 24 | Labels, 25 | DeepResearch, 26 | ReseachSummary 27 | ) 28 | from terminal_ui.terminal_animation import ( 29 | kill_child_agent, 30 | ) 31 | 32 | logging.basicConfig(level=logging.INFO) 33 | logger = logging.getLogger(__name__) 34 | 35 | 36 | app = Flask(__name__) 37 | socketio = SocketIO(app, cors_allowed_origins="*", async_mode="threading") 38 | search = PerpSearch() 39 | picture = PicSearch() 40 | install = InstallModule() 41 | image = GenerateImage() 42 | processing_tasks = {} 43 | research = DeepResearch() 44 | summary = ReseachSummary() 45 | lab = Labels() 46 | 47 | def handle_search_logic(prompt, sid, stop_event): 48 | try: 49 | @socketio.on("refresh") 50 | def refresh_memory(): 51 | response = refresh() 52 | summary.refresh() 53 | research.refresh() 54 | search.refresh() 55 | emit_response = {"type": "refresh", "content": response} 56 | socketio.emit("agent_response", emit_response, room=sid) 57 | 58 | msg_id = str(uuid.uuid4()) 59 | socketio.emit("agent_status", {"status": "true"}, room=sid) 60 | params = lab.initiate(prompt) 61 | labels = params["labels"] 62 | labels.append("Compiling information, please hold on for a moment!") 63 | think = params["think"] 64 | length = len(labels) 65 | content = f"QUERY: {prompt}" 66 | socketio.emit('search_response', { 67 | "type": 'init', 68 | "labels":labels, 69 | "msg_id": msg_id 70 | }) 71 | time.sleep(0.5) 72 | for tasks in range(0,length - 1): 73 | socketio.emit('search_response', { 74 | "type": 'update', 75 | "index": tasks, 76 | "status": 'searching', 77 | "msg_id": msg_id 78 | }) 79 | time.sleep(1) 80 | answer = research.search(labels[tasks]) 81 | content = content + f"{labels[tasks]} \n {answer} \n" 82 | socketio.emit('search_response', { 83 | "type": 'update', 84 | 
"index": tasks, 85 | "status": 'complete', 86 | "msg_id": msg_id 87 | }) 88 | time.sleep(2) 89 | time.sleep(1) 90 | socketio.emit('search_response', { 91 | "type": 'update', 92 | "index": length - 1, 93 | "status": 'searching', 94 | "msg_id": msg_id 95 | }) 96 | time.sleep(1) 97 | summarize = summary.initiate(content) 98 | socketio.emit('search_response', { 99 | "type": 'update', 100 | "index": length - 1, 101 | "status": 'complete', 102 | "msg_id": msg_id, 103 | }) 104 | time.sleep(1) 105 | socketio.emit( 106 | "agent_response", 107 | {"type": "search_agent_message", "content": summarize}, 108 | room=sid, 109 | ) 110 | time.sleep(1) 111 | 112 | except Exception as e: 113 | logger.error(f"Error in handle_agent_logic: {e}") 114 | emit_response = { 115 | "type": "error", 116 | "content": "An error occurred while processing your request.", 117 | } 118 | socketio.emit("agent_response", emit_response, room=sid) 119 | finally: 120 | 121 | socketio.emit("agent_status", {"status": "false"}, room=sid) 122 | 123 | if sid in processing_tasks: 124 | del processing_tasks[sid] 125 | logger.info(f"Processing task for session {sid} has ended.") 126 | 127 | def handle_agent_logic(prompt, sid, stop_event): 128 | try: 129 | @socketio.on("refresh") 130 | def refresh_memory(): 131 | response = refresh() 132 | summary.refresh() 133 | research.refresh() 134 | search.refresh() 135 | emit_response = {"type": "refresh", "content": response} 136 | socketio.emit("agent_response", emit_response, room=sid) 137 | 138 | add_context("user", prompt) 139 | agent_call = "true" 140 | 141 | socketio.emit("agent_status", {"status": "true"}, room=sid) 142 | 143 | while agent_call.lower() == "true": 144 | 145 | if stop_event.is_set(): 146 | logger.info(f"Processing terminated by user for session {sid}") 147 | break 148 | 149 | socketio.emit("agent_status", {"status": "true"}, room=sid) 150 | time.sleep(0.1) 151 | 152 | response = llm() 153 | msg_id = str(uuid.uuid4()) 154 | response_json = json.loads(response) 155 | msg_to_user = response_json.get("message_to_the_user", "") 156 | agent_call = response_json.get("call_myself", "false") 157 | task = response_json.get("immediate_task_to_achieve", "") 158 | if task != "": 159 | socketio.emit( 160 | "agent_response", 161 | { 162 | "type": "thinking_message", 163 | "content": task, 164 | "msg_id": msg_id, 165 | }, 166 | room=sid, 167 | ) 168 | time.sleep(1) 169 | 170 | socketio.emit("agent_status", {"status": agent_call}, room=sid) 171 | 172 | socketio.emit( 173 | "agent_response", 174 | {"type": "agent_message", "content": msg_to_user}, 175 | room=sid, 176 | ) 177 | time.sleep(1) 178 | 179 | if agent_call.lower() == "true": 180 | 181 | tool = response_json.get("tool", {}).get("tool_name", "") 182 | code = response_json.get("tool", {}).get("code", "None") 183 | query = response_json.get("tool", {}).get("query", "None") 184 | think = response_json.get("tool", {}).get("thinking_phase", "None") 185 | msg_id = str(uuid.uuid4()) 186 | 187 | if tool == "python" and code != "None": 188 | 189 | socketio.emit( 190 | "agent_response", 191 | { 192 | "type": "loading_message", 193 | "content": "Executing code...", 194 | "msg_id": msg_id, 195 | }, 196 | room=sid, 197 | ) 198 | time.sleep(1) 199 | socketio.emit( 200 | "agent_response", 201 | { 202 | "type": "thinking_message", 203 | "content": think, 204 | "msg_id": msg_id, 205 | }, 206 | room=sid, 207 | ) 208 | time.sleep(1) 209 | 210 | output = exec_code(code) 211 | add_context( 212 | "user", f"OUTPUT FROM PYTHON COMPILER {output['output']}" 213 | 
) 214 | 215 | if output.get("error"): 216 | socketio.emit( 217 | "agent_response", 218 | { 219 | "type": "error_message", 220 | "content": f"Error executing code. Running again...", 221 | "msg_id": msg_id, 222 | "output": output["output"], 223 | "code": code, 224 | }, 225 | room=sid, 226 | ) 227 | time.sleep(1) 228 | msg_ids = str(uuid.uuid4()) 229 | 230 | socketio.emit( 231 | "agent_response", 232 | { 233 | "type": "loading_message", 234 | "content": "Corresponding with Error Solver Agent", 235 | "msg_id": msg_ids, 236 | }, 237 | room=sid, 238 | ) 239 | time.sleep(1) 240 | 241 | whole = f'CODE: {code} \n ERROR: {output["output"]}' 242 | fixer = Code_Fixer(whole) 243 | solution = fixer.initiate() 244 | solution = json.loads(solution) 245 | a = solution["error_description"] 246 | b = solution["code"] 247 | c = f"{a} \n CODE to FIX {b}" 248 | add_context( 249 | "user", 250 | f"code to fix the error \n {b}. something to also keep in mind When writing multi-line HTML, CSS, JS, or Python code, always enclose it within triple quotes (\"\"\"\"). " 251 | "Ensure that you do not include '\\n' or '\"' in it, as it will cause errors.", 252 | ) 253 | logger.info(f"Solution from Code Fixer: {c}") 254 | 255 | socketio.emit( 256 | "agent_response", 257 | { 258 | "type": "success_message", 259 | "content": "Solution Found", 260 | "msg_id": msg_ids, 261 | }, 262 | room=sid, 263 | ) 264 | time.sleep(1) 265 | else: 266 | f = file_judger(code) 267 | g = PicDownloader(code) 268 | thread = threading.Thread(target=f.initiate) 269 | thread2 = threading.Thread(target=g.initiate) 270 | thread.start() 271 | thread2.start() 272 | socketio.emit( 273 | "agent_response", 274 | { 275 | "type": "success_message", 276 | "content": "Executed successfully.", 277 | "msg_id": msg_id, 278 | }, 279 | room=sid, 280 | ) 281 | time.sleep(0.4) 282 | socketio.emit( 283 | "agent_response", 284 | { 285 | "type": "compiler_message", 286 | "content": output["output"], 287 | "msg_id": msg_id, 288 | "code": code, 289 | }, 290 | room=sid, 291 | ) 292 | time.sleep(0.1) 293 | elif tool == "generate" and query != "None": 294 | socketio.emit( 295 | "agent_response", 296 | { 297 | "type": "loading_message", 298 | "content": f"Generating Picture for '{query}'. This might take a while. Please hold on.", 299 | "msg_id": msg_id, 300 | }, 301 | room=sid, 302 | ) 303 | time.sleep(0.2) 304 | link = image.generate(query) 305 | link = shorten_url(link) 306 | socketio.emit( 307 | "agent_response", 308 | { 309 | "type": "success_message", 310 | "content": f"Pictured Generated Successfully", 311 | "msg_id": msg_id, 312 | }, 313 | room=sid, 314 | ) 315 | add_context( 316 | "user", 317 | f"OUTPUT FROM PICTURE GENERATION {link}. Display it in readme format to the user. This is the format '![Alt text](image-url)'. download it subsequently if the task involves downloading or displaying it in a html document etc.", 318 | ) 319 | elif tool == "suggestions" and query != "None": 320 | socketio.emit("suggestions", { "suggestions": query}) 321 | time.sleep(1) 322 | exit_event = Event() 323 | 324 | while not exit_event.is_set(): 325 | @socketio.on("selected_suggestions") 326 | def selected_suggestions(data): 327 | exit_event.set() 328 | sug = data['suggestions'] 329 | print(sug[0:]) 330 | add_context("user", f". These are the additional features requested by user based on your suggestions.Make sure to work on implementing these additonal features as well: {sug[0:]}. Mention that so and so features will be implemented and then implement them. 
All of these features must be implemented when you produce code.") 331 | 332 | 333 | elif tool == "install" and query != "None": 334 | 335 | socketio.emit( 336 | "agent_response", 337 | { 338 | "type": "loading_message", 339 | "content": f"Installing module '{query}'...", 340 | "msg_id": msg_id, 341 | }, 342 | room=sid, 343 | ) 344 | time.sleep(1) 345 | output = install.install(query) 346 | add_context("user", f"OUTPUT FROM INSTALLATION {output}") 347 | socketio.emit( 348 | "agent_response", 349 | { 350 | "type": "success_message", 351 | "content": f"Executed Successfully", 352 | "msg_id": msg_id 353 | }, 354 | room=sid, 355 | ) 356 | time.sleep(0.4) 357 | socketio.emit( 358 | "agent_response", 359 | { 360 | "type": "compiler_message", 361 | "content": f"Module '{query}' installed successfully.", 362 | "msg_id": msg_id, 363 | "code": output["code"] 364 | }, 365 | room=sid, 366 | ) 367 | 368 | elif tool == "uninstall" and query != "None": 369 | 370 | socketio.emit( 371 | "agent_response", 372 | { 373 | "type": "loading_message", 374 | "content": f"Uninstalling module '{query}'...", 375 | "msg_id": msg_id, 376 | }, 377 | room=sid, 378 | ) 379 | time.sleep(0.4) 380 | output = install.uninstall(query) 381 | add_context("user", f"OUTPUT FROM UNINSTALLATION {output}") 382 | 383 | socketio.emit( 384 | "agent_response", 385 | { 386 | "type": "success_message", 387 | "content": f"Module '{query}' uninstalled successfully.", 388 | "msg_id": msg_id, 389 | }, 390 | room=sid, 391 | ) 392 | time.sleep(1) 393 | 394 | elif tool == "search" and query != "None": 395 | 396 | socketio.emit( 397 | "agent_response", 398 | { 399 | "type": "loading_message", 400 | "content": f"Searching for '{query}'...", 401 | "msg_id": msg_id, 402 | }, 403 | room=sid, 404 | ) 405 | time.sleep(0.3) 406 | output = search.search(query) 407 | add_context( 408 | "user", 409 | f"OUTPUT FROM SEARCH RESULTS {output} (NOT VISIBLE TO USER, must be summarized in message_to_the_user only if user explicitly asked or proceed to the next task) in great and decorative Markdown FORMAT in message_to_the_user. use different colors if needed).If the search is to gather ideas/assests to build application, immediately suggest features using the tool suggestions tool", 410 | ) 411 | 412 | socketio.emit( 413 | "agent_response", 414 | { 415 | "type": "search_results", 416 | "content": f"Search for '{query}' completed.", 417 | "results": output, 418 | "msg_id": msg_id, 419 | }, 420 | room=sid, 421 | ) 422 | time.sleep(0.2) 423 | 424 | elif tool == "picture" and query != "None": 425 | 426 | socketio.emit( 427 | "agent_response", 428 | { 429 | "type": "loading_message", 430 | "content": f"Searching for pictures related to '{query}'...", 431 | "msg_id": msg_id, 432 | }, 433 | room=sid, 434 | ) 435 | time.sleep(0.3) 436 | 437 | results_pictures = picture.picSearch(query) 438 | add_context( 439 | "user", 440 | f"OUTPUT FROM PICTURE SEARCH RESULTS {results_pictures}. Now you can proceed to download these using python if the user asked. Also before downloading the pictures, display the download links in readme format to the user in this manner '![Alt text](image-url)'. number them as well. 
DO the displaying work in msg_to_user section.", 441 | ) 442 | 443 | socketio.emit( 444 | "agent_response", 445 | { 446 | "type": "success_message", 447 | "content": f"Picture search for '{query}' completed.", 448 | "msg_id": msg_id, 449 | "code": code, 450 | }, 451 | room=sid, 452 | ) 453 | time.sleep(0.2) 454 | 455 | elif tool == "agent" and query != "None": 456 | 457 | socketio.emit( 458 | "agent_response", 459 | { 460 | "type": "loading_message", 461 | "content": f"Initiating sub-agent for '{query}'...", 462 | "msg_id": msg_id, 463 | }, 464 | room=sid, 465 | ) 466 | time.sleep(1) 467 | 468 | sub_agent = Sub_Agent() 469 | sub_response = sub_agent.initiate(query) 470 | socketio.sleep(2) 471 | socketio.emit( 472 | "agent_response", 473 | { 474 | "type": "success_message", 475 | "content": f"Sub-agent for '{query}' completed.", 476 | "msg_id": msg_id, 477 | }, 478 | room=sid, 479 | ) 480 | time.sleep(2) 481 | add_context( 482 | "user", 483 | f"SUMMARY FROM SUB AGENT: {sub_response}. You must explain the outcome to the user and then proceed to next task if it exists", 484 | ) 485 | kill_child_agent() 486 | except Exception as e: 487 | logger.error(f"Error in handle_agent_logic: {e}") 488 | emit_response = { 489 | "type": "error", 490 | "content": "An error occurred while processing your request.", 491 | } 492 | socketio.emit("agent_response", emit_response, room=sid) 493 | finally: 494 | 495 | socketio.emit("agent_status", {"status": "false"}, room=sid) 496 | 497 | if sid in processing_tasks: 498 | del processing_tasks[sid] 499 | logger.info(f"Processing task for session {sid} has ended.") 500 | socketio.emit("agent_response", { 501 | "type": "workflow_completed", 502 | "content": "Workflow completed." 503 | }, room=sid) 504 | 505 | 506 | 507 | @socketio.on("user_prompt") 508 | def handle_user_prompt(data): 509 | prompt = data.get("prompt") 510 | mode = data.get("mode") 511 | sid = request.sid 512 | 513 | if prompt and mode == 'agent': 514 | if sid in processing_tasks: 515 | 516 | emit_response = { 517 | "type": "error", 518 | "content": "A task is already in progress. Please wait or end the current task.", 519 | } 520 | emit("agent_response", emit_response, room=sid) 521 | return 522 | 523 | stop_event = threading.Event() 524 | 525 | thread = socketio.start_background_task( 526 | handle_agent_logic, prompt=prompt, sid=sid, stop_event=stop_event 527 | ) 528 | 529 | processing_tasks[sid] = {"thread": thread, "stop_event": stop_event} 530 | 531 | elif prompt and mode == "search": 532 | if sid in processing_tasks: 533 | 534 | emit_response = { 535 | "type": "error", 536 | "content": "A task is already in progress. Please wait or end the current task.", 537 | } 538 | emit("agent_response", emit_response, room=sid) 539 | return 540 | 541 | stop_event = threading.Event() 542 | 543 | thread = socketio.start_background_task( 544 | handle_search_logic, prompt=prompt, sid=sid, stop_event=stop_event 545 | ) 546 | 547 | processing_tasks[sid] = {"thread": thread, "stop_event": stop_event} 548 | else: 549 | emit_response = {"type": "error", "content": "No prompt provided."} 550 | emit("agent_response", emit_response, room=sid) 551 | 552 | 553 | @app.route("/render/") 554 | def serve_render_files(filename): 555 | return send_from_directory("render", filename) 556 | 557 | import json 558 | 559 | @socketio.on('kickoff_workflow') 560 | def handle_kickoff_workflow(data): 561 | 562 | workflow = data.get('workflow') 563 | step = 1 564 | t = ( 565 | "DO NOT HALLUCINATE: here you will be given instruction. 
" 566 | "You will have to execute all of them in this order. " 567 | "When you receive input in this format, you don't have to ask the user for preferences. " 568 | "For example, while creating web pages, you don't have to ask the user for features etc. " 569 | "Do not use sub agents. If you are asked to search, you search; if you are asked to execute code, you use the code tool to execute code etc. " 570 | "Do not hallucinate—please do the work. Also, you get 3 dollars for every task you successfully accomplish.\n" 571 | ) 572 | print(workflow) 573 | for x in workflow: 574 | for a, b in x.items(): 575 | if a == 'type': 576 | t = t + f"STEP{step}: ({b}) " 577 | step += 1 578 | 579 | elif a == 'label': 580 | t += f"{b}\n" 581 | print(t) 582 | 583 | logger.info(f"Workflow instructions: {t}") 584 | 585 | sid = request.sid 586 | def refresh_memory(): 587 | response = refresh() 588 | summary.refresh() 589 | research.refresh() 590 | search.refresh() 591 | emit_response = {"type": "refresh", "content": response} 592 | socketio.emit("agent_response", emit_response, room=sid) 593 | refresh_memory() 594 | 595 | if sid in processing_tasks: 596 | emit("agent_response", { 597 | "type": "error", 598 | "content": "A task is already in progress. Please wait or end the current task." 599 | }, room=sid) 600 | return 601 | 602 | stop_event = threading.Event() 603 | thread = socketio.start_background_task(handle_agent_logic, prompt=t, sid=sid, stop_event=stop_event) 604 | processing_tasks[sid] = {"thread": thread, "stop_event": stop_event} 605 | 606 | emit("agent_response", { 607 | "type": "workflow_received", 608 | "content": "Workflow received and processing started." 609 | }, room=sid) 610 | @socketio.on("request_file_contents") 611 | def handle_request_file_contents(): 612 | sid = request.sid 613 | try: 614 | files = ["index.html", "styles.css", "test.js"] 615 | file_contents = {} 616 | 617 | for file_name in files: 618 | file_path = os.path.join("render", file_name) 619 | with open(file_path, "r", encoding="utf-8") as f: 620 | content = f.read() 621 | file_contents[file_name] = content 622 | 623 | emit("file_contents", file_contents, room=sid) 624 | except Exception as e: 625 | logger.error(f"Error reading files: {e}") 626 | emit("file_contents", {"error": "Failed to read files."}, room=sid) 627 | 628 | 629 | @socketio.on("end_processing") 630 | def handle_end_processing(): 631 | sid = request.sid 632 | 633 | if sid in processing_tasks: 634 | 635 | processing_tasks[sid]["stop_event"].set() 636 | emit_response = {"type": "info", "content": "Processing has been terminated."} 637 | emit("agent_response", emit_response, room=sid) 638 | else: 639 | emit_response = { 640 | "type": "error", 641 | "content": "No active processing to terminate.", 642 | } 643 | emit("agent_response", emit_response, room=sid) 644 | 645 | @app.route('/create_agent') 646 | def create_agent_page(): 647 | return render_template("create_agent.html") 648 | 649 | @app.route("/") 650 | def index(): 651 | return render_template("index.html") 652 | 653 | 654 | if __name__ == "__main__": 655 | socketio.run(app, debug=True) -------------------------------------------------------------------------------- /execute_code.py: -------------------------------------------------------------------------------- 1 | import sys 2 | import subprocess 3 | import shlex 4 | 5 | def exec_code(code, python_interpreter=None): 6 | code = "import os\n" + code 7 | 8 | if python_interpreter is None: 9 | python_executable = sys.executable 10 | else: 11 | 
python_executable = python_interpreter 12 | 13 | command = [python_executable, "-c", code] 14 | 15 | try: 16 | result = subprocess.run(command, capture_output=True, text=True, timeout=85) 17 | 18 | if result.returncode != 0: 19 | output = result.stderr.strip() 20 | error = True 21 | else: 22 | output = result.stdout.strip() 23 | error = False 24 | if not output: 25 | output = "" 26 | error = False 27 | 28 | return {"output": output, "error": error} 29 | 30 | except subprocess.TimeoutExpired: 31 | return {"output": "Execution timed out.", "error": True} 32 | except Exception as e: 33 | return {"output": str(e), "error": True} 34 | -------------------------------------------------------------------------------- /imgs/image.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/vishnugamini/LocalLLMAgent/c5e4804356e502e50d496967e2b4aab97397cd76/imgs/image.png -------------------------------------------------------------------------------- /imgs/pic-1.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/vishnugamini/LocalLLMAgent/c5e4804356e502e50d496967e2b4aab97397cd76/imgs/pic-1.png -------------------------------------------------------------------------------- /imgs/pic-10.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/vishnugamini/LocalLLMAgent/c5e4804356e502e50d496967e2b4aab97397cd76/imgs/pic-10.png -------------------------------------------------------------------------------- /imgs/pic-11.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/vishnugamini/LocalLLMAgent/c5e4804356e502e50d496967e2b4aab97397cd76/imgs/pic-11.png -------------------------------------------------------------------------------- /imgs/pic-12.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/vishnugamini/LocalLLMAgent/c5e4804356e502e50d496967e2b4aab97397cd76/imgs/pic-12.png -------------------------------------------------------------------------------- /imgs/pic-2.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/vishnugamini/LocalLLMAgent/c5e4804356e502e50d496967e2b4aab97397cd76/imgs/pic-2.png -------------------------------------------------------------------------------- /imgs/pic-3.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/vishnugamini/LocalLLMAgent/c5e4804356e502e50d496967e2b4aab97397cd76/imgs/pic-3.png -------------------------------------------------------------------------------- /imgs/pic-4.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/vishnugamini/LocalLLMAgent/c5e4804356e502e50d496967e2b4aab97397cd76/imgs/pic-4.png -------------------------------------------------------------------------------- /imgs/pic-5.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/vishnugamini/LocalLLMAgent/c5e4804356e502e50d496967e2b4aab97397cd76/imgs/pic-5.png -------------------------------------------------------------------------------- /imgs/pic-6.png: -------------------------------------------------------------------------------- 
https://raw.githubusercontent.com/vishnugamini/LocalLLMAgent/c5e4804356e502e50d496967e2b4aab97397cd76/imgs/pic-6.png -------------------------------------------------------------------------------- /imgs/pic-7.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/vishnugamini/LocalLLMAgent/c5e4804356e502e50d496967e2b4aab97397cd76/imgs/pic-7.png -------------------------------------------------------------------------------- /imgs/pic-8.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/vishnugamini/LocalLLMAgent/c5e4804356e502e50d496967e2b4aab97397cd76/imgs/pic-8.png -------------------------------------------------------------------------------- /imgs/pic-9.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/vishnugamini/LocalLLMAgent/c5e4804356e502e50d496967e2b4aab97397cd76/imgs/pic-9.png -------------------------------------------------------------------------------- /imgs/thumbnail.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/vishnugamini/LocalLLMAgent/c5e4804356e502e50d496967e2b4aab97397cd76/imgs/thumbnail.png -------------------------------------------------------------------------------- /imgs/thumbnail2.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/vishnugamini/LocalLLMAgent/c5e4804356e502e50d496967e2b4aab97397cd76/imgs/thumbnail2.png -------------------------------------------------------------------------------- /interact_AGENT.py: -------------------------------------------------------------------------------- 1 | from terminal_ui.terminal_animation import initializer 2 | import threading 3 | 4 | spinner_thread = threading.Thread(target=initializer) 5 | spinner_thread.start() 6 | from LLM_response import llm, add_context, refresh 7 | from execute_code import exec_code 8 | from agents import PerpSearch, PicSearch, InstallModule, Sub_Agent 9 | from terminal_ui.terminal_animation import ( 10 | search_dots, 11 | thinking_dots, 12 | picture_message, 13 | search_message, 14 | compiler_message, 15 | user_message, 16 | refresh_message, 17 | initial_message, 18 | install_module, 19 | uninstall_module, 20 | child_agent_message, 21 | kill_child_agent, 22 | ) 23 | import json 24 | from colorama import Fore, Style, Back 25 | 26 | search = PerpSearch() 27 | picture = PicSearch() 28 | install = InstallModule() 29 | 30 | prompt = "" 31 | spinner_thread.do_run = False 32 | spinner_thread.join() 33 | initial_message() 34 | agent_call = "false" 35 | 36 | while prompt != "exit": 37 | prompt = input( 38 | Style.BRIGHT + Fore.WHITE + Back.BLACK + "Prompt: " + Style.RESET_ALL 39 | ) 40 | 41 | if prompt.lower() == "refresh": 42 | response = refresh() 43 | refresh_message(response) 44 | 45 | if prompt not in ["refresh", "exit"]: 46 | add_context("user", prompt) 47 | 48 | while prompt not in ["refresh", "exit"]: 49 | spinner_thread = threading.Thread(target=thinking_dots) 50 | spinner_thread.start() 51 | 52 | response = llm() 53 | 54 | spinner_thread.do_run = False 55 | spinner_thread.join() 56 | 57 | response_json = json.loads(response) 58 | msg_to_user = response_json["message_to_the_user"] 59 | user_message(msg_to_user) 60 | 61 | agent_call = response_json.get("call_myself") 62 | 63 | if agent_call == "true": 64 | tool = response_json["tool"]["tool_name"] 
65 | code = response_json["tool"]["code"] 66 | query = response_json["tool"]["query"] 67 | 68 | if tool == "python" and code != "None": 69 | output = exec_code(code) 70 | add_context("user", f"OUTPUT FROM PYTHON COMPILER {output['output']}") 71 | compiler_message(output) 72 | 73 | elif tool == "install" and query != "None": 74 | install_module(query) 75 | output = install.install(query) 76 | compiler_message(output) 77 | add_context("user", f"OUTPUT FROM INSTALLATION {output}") 78 | 79 | elif tool == "uninstall" and query != "None": 80 | uninstall_module(query) 81 | output = install.uninstall(query) 82 | compiler_message(output) 83 | add_context("user", f"OUTPUT FROM INSTALLATION {output}") 84 | 85 | elif tool == "search" and query != "None": 86 | spinner_thread = threading.Thread(target=search_dots) 87 | spinner_thread.start() 88 | output = search.search(query) 89 | spinner_thread.do_run = False 90 | spinner_thread.join() 91 | search_message() 92 | add_context( 93 | "user", 94 | f"OUTPUT FROM SEARCH RESULTS (NOT VISIBLE TO USER, must be summarized in message to user if needed): {output}", 95 | ) 96 | 97 | elif tool == "picture" and query != "None": 98 | picture_message() 99 | results = picture.picSearch(query) 100 | add_context( 101 | "user", 102 | f"OUTPUT FROM PICTURE SEARCH RESULTS {results}. Now you can proceed to download these using python if the user asked", 103 | ) 104 | 105 | elif tool == "agent" and query != "None": 106 | print(query) 107 | child_agent_message() 108 | sub_agent = Sub_Agent() 109 | response = sub_agent.initiate(query) 110 | add_context( 111 | "user", 112 | f"SUMMARY FROM SUB AGENT: {response}. You must explain the outcome to the user and then proceed to next task if it exists", 113 | ) 114 | kill_child_agent() 115 | 116 | if agent_call != "true": 117 | break 118 | -------------------------------------------------------------------------------- /miscellaneous.py: -------------------------------------------------------------------------------- 1 | import pyshorteners 2 | def shorten_url(long_url): 3 | s = pyshorteners.Shortener() 4 | short_url = s.tinyurl.short(long_url) 5 | return short_url 6 | 7 | def shorten_urls(arr): 8 | fin = [] 9 | s = pyshorteners.Shortener() 10 | for pic in arr: 11 | short_url = s.tinyurl.short(pic) 12 | fin.append(short_url) 13 | return fin 14 | -------------------------------------------------------------------------------- /prompts/Agent_prompt.py: -------------------------------------------------------------------------------- 1 | system_msg = [ 2 | { 3 | "role": "system", 4 | "content": "you are a AI agent, with an ability to call yourself. You should be able to execute end to end tasks, you have python environment at your disposal which means you can absolutely do anything using it. When the output from compiler mathches your expectations and when you run out of tasks to achieve, you can stop calling yourself and ask the user for the next task. You only have access to python,do anything only using python, if it means creating web application using html,css and js or anything, do them by using framework in python. You are on a windoes PC. never try to run a server as that might not be viable.(NO to flask and or anything of that ilk). Always save files to current directory unless specifically asked. Also the HTML, CSS and JS and python code you write must be of extremely high quality, especially css. High quality here means, extremely beautiful and thought out. 
Do not hallucinate, don't just say that you have built a website, show it through actions by using tools. Whatever you do must be executed in steps: meaning you have to split the work into multiple steps and act on them one after the other", 5 | }, 6 | { 7 | "role": "system", 8 | "content": ( 9 | "When writing multi-line HTML, CSS, JS, or Python code, always enclose it within triple quotes (\"\"\"\"). " 10 | "Ensure that you do not include '\\n' or '\"' in it, as it will cause errors." 11 | ), 12 | }, 13 | { 14 | "role": "system", 15 | "content":" Always write HTML, CSS, JS code using python into a file. Plain HTML code won't yield anything. USE python to write html, css and js contents into .html, .css and .js extensions. " 16 | }, 17 | { 18 | "role": "system", 19 | "content":" Always implement the code directly without any string formatting issues." 20 | }, 21 | { 22 | "role": "system", 23 | "content": "When a user asks a query, try to solve it using python frameworks; search for a python framework that can handle the job. Only when you run out of options and push comes to shove is when you rely on API services. For example, if the user asks you to shorten a url, find a module that can do it." 24 | }, 25 | { 26 | "role": "system", 27 | "content": "call_myself should always be true when you need to check compiler output or proceed to the next tasks which are listed in your tasks to achieve", 28 | }, 29 | { 30 | "role": "system", 31 | "content": "Remember to always split your tasks and execute them one after the other, as that is what an agent does", 32 | }, 33 | { 34 | "role": "system", 35 | "content": "always print the results in the code by explicitly writing print statements as it is passed to a compiler which expects the print statements. Warning: If you expect the output to be a plethora of data, refrain from printing it as it will hinder your thinking ability. For example: bitcoin prices for the last 7 days, do not print all of it; to ensure you have grabbed the values, just print the first couple", 36 | }, 37 | {"role": "system", "content": "always include import statements in the code"}, 38 | { 39 | "role": "system", 40 | "content": "You have the liberty to install packages, modules, frameworks, anything you need using python", 41 | }, 42 | { 43 | "role": "system", 44 | "content": "always call yourself by setting call_myself to true when you have tasks_to_achieve or code to run. REMEMBER THIS POINT VERY CAREFULLY, ALWAYS CALL YOURSELF UNTIL YOU ACHIEVE THE USER'S QUERY", 45 | }, 46 | { 47 | "role": "system", 48 | "content": "after a couple of iterations, if you are unable to achieve the task, if you think there is an error with the system, or if you think you are going in a loop, stop yourself from calling yourself again and ask for the user's input to clarify or clear the issue you're facing", 49 | }, 50 | { 51 | "role": "system", 52 | "content": "split your tasks, do them one after the other as you are an agent and that's what you fancy doing to avoid errors. For example, if the task is to create a web application, create the html file first and then css and then js, and so forth", 53 | }, 54 | { 55 | "role": "system", 56 | "content": "never, I SAID NEVER use name == __main__ in your code as that throws an error. DONT EVER DO THAT AS THE PYTHON FILE IS NOT BEING EXECUTED DIRECTLY.
if you do accidentally use it, please immediately change it", 57 | }, 58 | { 59 | "role": "system", 60 | "content": "Make sure to include import statements in the code, example: import os.", 61 | }, 62 | { 63 | "role": "system", 64 | "content": 'you have tools available at your disposal such as "search" for real-time, up-to-date information from the internet. Use this just to search for information.', 65 | }, 66 | {"role":"system", 67 | "content":"You have another tool called 'suggestions'. You must always use this initially when the user requests you to build an application using html, css and js. You have to suggest 4 elements/features that can be added to the application in the query section, separated by a comma. Example: if the user asks for a quiz application, suggestions could be: 'add a timer', 'immediate answer display', 'review section in the end', pertinent to the type of application requested. The query section must only have the suggestions separated by a comma and nothing else. Always use this first before building a web app. DO NOT FORGET. Even if the user asks you to search the web before creating an application, you have to mandatorily suggest app functionalities using this tool once the search is complete." 68 | }, 69 | { 70 | "role": "system", 71 | "content": "you also have a tool named 'picture' which gives you links to download a picture of anything you mention in the query. If you encounter anything related to pictures, such as searching for pictures or searching the net for pictures, use this tool. The query must only include the search term, as in 'cars, bikes' etc.", 72 | }, 73 | { 74 | "role": "system", 75 | "content": "You have a tool called 'generate' using which you can generate pictures. When using 'generate', you have to mention the description of the picture in the query. When the user asks you specifically to generate pictures, you must use this tool. And if you are unable to find pictures, generate one and show it to the user. You can only generate one picture at a time.", 76 | }, 77 | { 78 | "role": "system", 79 | "content": 'You have another tool called "install" or "uninstall" which will allow you to install or uninstall a python module. Simply mention the name of the python module in the query. Whatever it is you want to install, USE THIS.', 80 | }, 81 | {"role": "system", 82 | "content": '''Example of how you are going to create files without multiline string issue \n 83 | import os 84 | html_content = """ 85 | <!DOCTYPE html> 86 | <html lang="en"> 87 | <head> 88 | <meta charset="UTF-8"> 89 | <meta name="viewport" content="width=device-width, initial-scale=1.0"> 90 | 91 | <title>OpenAI Offerings Mock</title> 92 | </head> 93 | 94 | <body>
95 |     <header><h1>OpenAI Offerings</h1></header>
96 |     <main>
97 |         <section>
98 |             <article>
99 |                 <h2>Product 1</h2>
100 |                 <p>Description of product 1...</p>
101 |             </article>
102 |         </section>
103 | 106 | 107 | 108 | """ 109 | with open('openai_offerings.html', 'w') as f: 110 | f.write(html_content) 111 | print('HTML file for OpenAI offerings mock created successfully.')''' 112 | }, 113 | { 114 | "role": "system", 115 | "content": "You have access to a tool called 'agent' which allows you to delegate smaller, detailed tasks to a sub-agent. When there are multiple tasks to achieve, use the 'agent' tool to offload specific tasks to avoid cluttering your memory with complex processes. The sub-agent will execute these tasks and provide you with a summary of the actions performed. To use this tool, include 'agent' in the tool, and in the 'query' field, provide a verbose and clear description of the task to delegate, breaking down the steps so the sub-agent can process them effectively.In file_location provide detailed location of the folder name and the file location where it needs to be saved.Please always delegate only 1 task at a time for more efficiency. Tasks like web developement and app development must only be executed by you." 116 | }, 117 | { 118 | "role": "system", 119 | "content": "When writing code that you think needs to be saved to futher augument code to it. please save the code to a python file and then instead of rewriting the entire code, add whats needed to ensure you dont use too many tokens", 120 | }, 121 | { 122 | "role": "system", 123 | "content": """{Here is an example of the required JSON structure 124 | "message_from_the_user": "message from the user or the compiler. Write the consice summary of it, for example if an error occured write about the error, or if everthing was successful write a message stating that everything went well", 125 | "tasks_to_achieve": "List all the tasks you need to accomplish if there are any in a superficial manner.", 126 | "immediate_task_to_achieve": "Specify the task to prioritize first in this manner 1)I have to message the user", 127 | "message_to_the_user": "this is your message to the user,must resonate with immediate_task. It should say what you have done and what you are about to do if there is something else pending, or just politely ask if the user needs anything else. If a user asks a question, it must be answered here. This is also a place to answer users questions. Always use readme format when messaging the user" 128 | "tool": { 129 | "tool_name": "python or agent or search or picture or install or uninstall or None (python if needed or None) (use 'search' tool if users requests for information that needs an internet search or if you need up to date information this tool can be used. Examples: searching a site for information, weather information, any real time information). (use "picture" tool if user requests for a picutre or if you need images to display in the website you build for the user. To use this tool simplly use 'picture' in tool and mention the label of the picture in "query"). Use install if you need to install a python module, simply call install and mention the module name in the query. Use agent tool to delegate tasks to a sub agent when you have boat load of tasks to ensure you dont contaminate your memory. 
just call 'agent' in tool_name and verbosely mention the task it needs to achieve in 'query' and in file_location mention file/folder location it needs to follow or save the files it creates.", 130 | "required": " (true for the code to work) true/false", 131 | "thinking_phase": "VERY VERBOSELY WRITE DOWN in points 1)WHAT YOU NEED TO IMPLEMENT OR CHANGE IN THE CODE", 2)"HOW YOU PLAN ON DOING STEP 1", 3) "If you have already defined any directory locations or file locations, mention the paths here clearly with labels as to what it is and the file/directory location to not make a mistake in the code later on.", 132 | "important_parameter": "This section is for you to mention all the parameter required so that the code wont run into an error. For example if there are file location or folder location which will be used or name of file where edits are happening, name of the ppt, word doc you are working with etc, any important parameters must all be included here with labels", 133 | "print_statement_to_add":"This is a place for you think about what kind of print statement to include in the code such that you will the know progress and output of code execution. This is an example of how you can do it: 'I will include print statement about file opening, graph creation and file creation'. You dont have to write down the print statements here, just the content of the print staments must be mentioned here. Add these print statements in the code", 134 | "code": "If tool is 'python'and 'required' is true, include the code to run here; otherwise, set this to 'None'. Add the print statements mentioned above here to know the progress." 135 | "query": if tool is search, then include what you want to search on the internet here, include the query verbosely, "None" otherwise. if tool is install, then include the just the name of the framework/module here. if tool is 'suggestions', include the suggestions here seperated by a comma. 136 | }, 137 | "call_myself": "true/false (TRUE ONLY IF YOU NEED TO CHECK COMPILER OUTPUT OR CHECK RESPONSE FROM TOOL OUTPUTS SUCH AS 'install', 'picture', 'tool' OR TO PROCEED TO NEXT IN TASKS TO ACHIEVE. Always call yourself when you have not achieved user's task, you dont have to ask users's permission to go ahead. you are entitiled to do anything and everything)" 138 | } 139 | """ 140 | }, 141 | { 142 | "role": "system", 143 | "content": "also remember, the code you run is just a piece of code being sent to the compiler but is not being saved. So the next time you write code, do no assume that the previous code written by you is already present. It must all be rewritten again." 144 | }, 145 | { 146 | "role": "system", 147 | "content": 'python code should never be enclosed in triple quotes """" like this as this means,it will never run. so do not do this when writing python code. you should only do this when needed like when you have to write html, css or js or etc when you actually need it.' 148 | } 149 | ] 150 | -------------------------------------------------------------------------------- /prompts/Child_Agent_prompt.py: -------------------------------------------------------------------------------- 1 | system_msg = [ 2 | { 3 | "role": "system", 4 | "content": "You are an AI sub-agent, working under the orders of a Senior Agent. You should be able to execute end-to-end tasks as assigned by the Senior Agent. You have a Python environment at your disposal, which means you can absolutely do anything using it. 
When the output from the compiler matches your expectations and when you have completed all assigned tasks, you can stop calling yourself and send a summary to the Senior Agent. You only have access to Python; do everything only using Python. If it means creating a web application using HTML, CSS, and JS or anything else, do them by using frameworks in Python. Once you have completed all assigned tasks, provide a detailed summary of everything you did, including specific details like code run, locations of files created, etc., in your final message to the Senior Agent.", 5 | }, 6 | { 7 | "role": "system", 8 | "content":"when writing a multi-line html, css, js, python code using this ('''), ensure that you dont include '\n' in it as the code will run into an error" 9 | }, 10 | { 11 | "role": "system", 12 | "content":" Always implement the code directly without any string formatting issues." 13 | }, 14 | { 15 | "role": "system", 16 | "content": "call_myself should always be true when you need to check compiler output or proceed to next taks which are listed in your tasks to achieve", 17 | }, 18 | { 19 | "role": "system", 20 | "content": "Remember to always split your tasks, execute one after the as that is what an agent does", 21 | }, 22 | { 23 | "role": "system", 24 | "content": "always print the results in the code by explicitly writing print statements as it is passed to a compiler which expects the print statements. Warning: If you expect the output to be a plethora of data, refrain from printing it as it will hinder your thinking ability. For example: bitcoin prices for the last 7 days, do not print all of it, to ensure you have grabbed the values, just print the first couple", 25 | }, 26 | {"role": "system", "content": "always include import statements in the code"}, 27 | { 28 | "role": "system", 29 | "content": "You have the liberty to install packages,modules, frameworks anything you need using python", 30 | }, 31 | { 32 | "role": "system", 33 | "content": "always call yourself by setting call_myself to true when you have tasks_to_achieve or code to run. REMEMBER THIS POINT VERY CAREFULLy, ALWAYS CALL YOURSELF UNTIL YOU ACHIEVE USERS QUERY", 34 | }, 35 | { 36 | "role": "system", 37 | "content": "Before running code, always save the code to a file and then execute the file.", 38 | }, 39 | { 40 | "role": "system", 41 | "content": "after couple of iteration, if you are unable to achieve the task, if you think there is a error with the system, or if you think you are going in a loop, stop yourself from calling yourself again and ask users input to clarify or clear the issue you're facing", 42 | }, 43 | { 44 | "role": "system", 45 | "content": "split your tasks, do them one after the other as you are an agent and thats what you fancy doing to avoid errors. For example, if the task is to create a web applicatiom, create the html file first and then css and then js,so and so forth", 46 | }, 47 | { 48 | "role": "system", 49 | "content": "never, I SAID NEVER use name == __main__ in your code as that throws an error.DONT EVER DO THAT AS THE PYTHON FILE IS NOT BEING EXECUTED DIRECTLY. if you do accidentally use it, please immediately change it", 50 | }, 51 | { 52 | "role": "system", 53 | "content": "when executing a file or code that creates a server, ensure you write such in a different thread and make it non blocking so that you still have control over everything so that user can speak to you. 
And set the debug to False so that the message dont interfere the connection between you and the user", 54 | }, 55 | { 56 | "role": "system", 57 | "content": "Make sure to include import statments in the code, example: import os. NOTE:never, I SAID NEVER use name == __main__ in your code as that throws an error.", 58 | }, 59 | {"role": "system", "content": "Always import os when needed, do not forget"}, 60 | { 61 | "role": "system", 62 | "content": 'you have tools available at your disposal such as "search" for real time up to date information. use this just to search information, such as to download a file, find its loaction using search and then use python to download it, "python" to execute code. Use them smartly', 63 | }, 64 | { 65 | "role": "system", 66 | "content": "you also have a tool named 'picture' which gives you to links to download a picture of anything in you mention in the query", 67 | }, 68 | { 69 | "role": "system", 70 | "content": 'You have another tool called "install" or "uninstall" which will allow you to install or uninstall python module. Simple mention the name of the python module in query', 71 | }, 72 | { 73 | "role": "system", 74 | "content": "When writing code that you think needs to be saved to futher augument code to it. please save the code to a python file and then instead of rewriting the entire code, add whats needed to ensure you dont use too many tokens", 75 | }, 76 | { 77 | "role": "system", 78 | "content": """{Here is an example of the required JSON structure 79 | "message_from_SeniorAgent": "message from the user or the compiler. Write the consice summary of it, for example if an error occured write about the error, or if everthing was successful write a message stating that everything went well", 80 | "tasks_to_achieve": "List all the tasks you need to accomplish if there are any", 81 | "immediate_task_to_achieve": "Specify the task to prioritize first", 82 | "message_to_Senior_agent": "this is your message to the user,must resonate with immediate_task. It should say what you have done and what you are about to do if there is something else pending, or just politely ask if the user needs anything else. ALWAYS REMEBER YOUR LAST MESSAGE MUST BE A AN OVERALL VERBOSE SUMMARY OF WHAT YOU HAVE DONE, FOR INSTANCE MENTIONING FILE LOCATIONS THAT YOU HAVE CREATED OR RAN SOMETHING TO ACHIEVE SO AND SO, AND THIS IS THE FINAL OUTCOME." 83 | "tool": { 84 | "tool_name": "python or search or picture or install or uninstall or None (python if needed or None) (use 'search' tool if users requests for information that needs an internet search or if you need up to date information this tool can be used. Examples: searching a site for information, weather information, any real time information). (use "picture" tool if user requests for a picutre or if you need images to display in the website you build for the user. To use this tool simplly use 'picture' in tool and mention the label of the picture in "query"). Use install if you need to install a python module, simply call install and mention the module name in the query", 85 | "required": " (true for the code to work) true/false", 86 | "thinking_phase": "VERY VERBOSELY WRITE DOWN in points 1.)WHAT YOU NEED TO IMPLEMENT OR CHANGE IN THE CODE", 2.)"HOW YOU PLAN ON DOING STEP 1", 3.) 
"If you have already defined any directory locations or file locations, mention the paths here clearly with labels as to what it is and the file/directory location to not make a mistake in the code later on.", 87 | "important_parameter": "This section is for you to mention all the parameter required so that the code wont run into an error. For example if there are file location or folder location which will be used or name of file where edits are happening, name of the ppt, word doc you are working with etc, any important parameters must all be included here with labels", 88 | "print_statement_to_add":"write down all the print statements that must be added to the code to ensure you can analyze output. This is being done to verify the successful execution of of key operations like file opening, file creation, completion of processes, or the occurence of specific events.Compiler ouput should never be empty!! Make sure to include these print statements in the code", 89 | "code": "If tool is 'python'and 'required' is true, include the code to run here; otherwise, set this to 'None'." 90 | "query": if tool is search, then include what you want to search on the internet here, include the query verbosely, "None" otherwise. if tool is install, then include the just the name of the framework/module here. 91 | }, 92 | "call_myself": "true/false (TRUE ONLY IF YOU NEED TO CHECK COMPILER OUTPUT OR CHECK RESPONSE FROM TOOL OUTPUTS SUCH AS 'install', 'picture', 'tool' OR TO PROCEED TO NEXT IN TASKS TO ACHIEVE. Always call yourself when you have not achieved user's task, you dont have to ask users's permission to go ahead. you are entitiled to do anything and everything)" 93 | } 94 | """ 95 | }, 96 | { 97 | "role": "system", 98 | "content": "please notice the compiler output and take actions accordingly. If its not the intended output, try again, if the output from compiler is empty, then make ammends to your code, if the code works you can stop calling yourself if there are no more tasks pending. In cases like GUI implementation or something that does not produce console outputs, you can safely assume that your appliation worked and confirm it from the user", 99 | }, 100 | { 101 | "role": "system", 102 | "content": "also remember, the code you run is just a piece of code being sent to the compiler but is not being saved. So the next time you write code, do no assume that the previous code written by you is already present. It must all be rewritten again. One way to tackle that is by creating a file and writing all the code in that file and executing it. This way you no longer need to write code multile times, rather just modify whats in the file. 
Your choice either rewrite the entire code again or just create a file to write code in it and run and keep modifying it " 103 | }, 104 | { 105 | "role": "system", 106 | "content": "When you have completed all assigned tasks, your last message to the Senior Agent must be a detailed summary of everything you did, including specific details such as code executed, locations of files created, and any other relevant information.", 107 | } 108 | ] 109 | -------------------------------------------------------------------------------- /requirements.txt: -------------------------------------------------------------------------------- 1 | colorama==0.4.6 2 | Flask==3.0.3 3 | Flask_SocketIO==5.4.1 4 | openai==1.53.0 5 | pydantic==2.9.2 6 | pyshorteners==1.0.1 7 | python-dotenv==1.0.1 8 | Requests==2.32.3 9 | rich==13.9.3 10 | -------------------------------------------------------------------------------- /static/css/create_agent.css: -------------------------------------------------------------------------------- 1 | html, 2 | body { 3 | height: 100%; 4 | margin: 0; 5 | padding: 0; 6 | } 7 | 8 | * { 9 | box-sizing: border-box; 10 | margin: 0; 11 | padding: 0; 12 | transition: all 0.3s ease; 13 | } 14 | 15 | :root { 16 | --primary: #00ADB5; 17 | --primary-hover: #008A98; 18 | --danger: #FF2E63; 19 | --danger-hover: #E02D5E; 20 | --success: #9BC53D; 21 | --success-hover: #7AA62F; 22 | --background: #222831; 23 | --surface: #393E46; 24 | --surface-hover: #4E545B; 25 | --text: #EEEEEE; 26 | --text-muted: #AAAAAA; 27 | --border: #4E4E4E; 28 | } 29 | 30 | body { 31 | font-family: 'Inter', -apple-system, BlinkMacSystemFont, sans-serif; 32 | background-color: var(--background); 33 | color: var(--text); 34 | padding: 2rem; 35 | display: flex; 36 | gap: 2rem; 37 | height: 100vh; 38 | overflow: hidden; 39 | } 40 | 41 | .main-container { 42 | flex: 2; 43 | display: flex; 44 | flex-direction: column; 45 | gap: 1.5rem; 46 | } 47 | 48 | h2 { 49 | font-size: 1.875rem; 50 | font-weight: 600; 51 | color: var(--text); 52 | margin-bottom: 1rem; 53 | position: relative; 54 | display: inline-block; 55 | } 56 | 57 | h2::after { 58 | content: ''; 59 | position: absolute; 60 | bottom: -4px; 61 | left: 0; 62 | width: 60%; 63 | height: 3px; 64 | background: var(--primary); 65 | border-radius: 2px; 66 | } 67 | 68 | .controls { 69 | display: flex; 70 | gap: 1rem; 71 | align-items: center; 72 | background: var(--surface); 73 | padding: 1.5rem; 74 | border-radius: 12px; 75 | box-shadow: 0 4px 10px rgba(0, 0, 0, 0.15); 76 | } 77 | 78 | select { 79 | background-color: var(--background); 80 | color: var(--text); 81 | padding: 0.75rem 1rem; 82 | border: 2px solid var(--border); 83 | border-radius: 8px; 84 | font-size: 0.875rem; 85 | min-width: 200px; 86 | cursor: pointer; 87 | appearance: none; 88 | background-repeat: no-repeat; 89 | background-position: right 1rem center; 90 | background-size: 1.5rem; 91 | } 92 | 93 | select:focus { 94 | border-color: var(--primary); 95 | outline: none; 96 | box-shadow: 0 0 0 3px rgba(0, 173, 181, 0.2); 97 | } 98 | 99 | button { 100 | padding: 0.75rem 1.5rem; 101 | border: none; 102 | border-radius: 8px; 103 | font-weight: 500; 104 | font-size: 0.875rem; 105 | cursor: pointer; 106 | display: flex; 107 | align-items: center; 108 | gap: 0.5rem; 109 | color: var(--text); 110 | background: var(--primary); 111 | box-shadow: 0 2px 6px rgba(0, 0, 0, 0.15); 112 | } 113 | 114 | button:hover { 115 | background: var(--primary-hover); 116 | transform: translateY(-2px); 117 | } 118 | 119 | .workflow-container { 
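    /* Dashed "drop zone" for the workflow builder: function blocks are appended here and the container scrolls vertically once they overflow. */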
120 | background: var(--surface); 121 | min-height: 300px; 122 | border-radius: 12px; 123 | padding: 2rem; 124 | border: 2px dashed var(--border); 125 | transition: all 0.3s ease; 126 | overflow-y: auto; 127 | } 128 | 129 | .workflow-container:empty { 130 | display: flex; 131 | align-items: center; 132 | justify-content: center; 133 | color: var(--text-muted); 134 | font-size: 0.875rem; 135 | } 136 | 137 | .workflow-container:empty::before { 138 | content: 'click "Add Function'; 139 | } 140 | 141 | .workflow-container::-webkit-scrollbar { 142 | width: 20px; 143 | } 144 | 145 | .workflow-container::-webkit-scrollbar-thumb { 146 | background: var(--border); 147 | border-radius: 10px; 148 | border: 5px var(--surface) solid; 149 | } 150 | 151 | .workflow-container::-webkit-scrollbar-thumb:hover { 152 | background: #8c7ae6; 153 | } 154 | 155 | .workflow-container::-webkit-scrollbar-button { 156 | display: block; 157 | background-color: transparent; 158 | } 159 | 160 | 161 | .function-block { 162 | background: var(--background); 163 | border-radius: 10px; 164 | padding: 1.25rem; 165 | margin: 1rem 0; 166 | display: flex; 167 | align-items: center; 168 | gap: 1rem; 169 | position: relative; 170 | box-shadow: 0 2px 6px rgba(0, 0, 0, 0.15); 171 | animation: slideIn 0.3s ease; 172 | } 173 | 174 | @keyframes slideIn { 175 | from { 176 | opacity: 0; 177 | transform: translateY(-10px); 178 | } 179 | 180 | to { 181 | opacity: 1; 182 | transform: translateY(0); 183 | } 184 | } 185 | 186 | .function-block::before { 187 | content: ''; 188 | position: absolute; 189 | top: -12px; 190 | left: 50%; 191 | transform: translateX(-50%); 192 | width: 2px; 193 | height: 12px; 194 | background: var(--border); 195 | } 196 | 197 | .function-label { 198 | font-weight: 600; 199 | color: var(--primary); 200 | min-width: 120px; 201 | } 202 | 203 | .label-input { 204 | background: var(--surface); 205 | color: var(--text); 206 | border: 2px solid var(--border); 207 | border-radius: 6px; 208 | padding: 0.5rem 1rem; 209 | flex: 1; 210 | } 211 | 212 | .label-input:focus { 213 | border-color: var(--primary); 214 | outline: none; 215 | box-shadow: 0 0 0 3px rgba(0, 173, 181, 0.2); 216 | } 217 | 218 | .delete-btn { 219 | background: var(--danger); 220 | padding: 0.5rem 1rem; 221 | border-radius: 6px; 222 | font-size: 0.75rem; 223 | color: var(--text); 224 | } 225 | 226 | .delete-btn:hover { 227 | background: var(--danger-hover); 228 | } 229 | 230 | .saved-workflows { 231 | padding: 20px; 232 | display: flex; 233 | flex: 2; 234 | background: var(--surface); 235 | box-sizing: border-box; 236 | flex-direction: column; 237 | border-radius: 12px; 238 | box-shadow: 0 4px 10px rgba(0, 0, 0, 0.15); 239 | backdrop-filter: blur(15px); 240 | overflow-y: auto; 241 | transition: transform 0.3s ease, opacity 0.3s ease; 242 | } 243 | 244 | .saved-workflows::-webkit-scrollbar { 245 | width: 20px; 246 | } 247 | 248 | .saved-workflows::-webkit-scrollbar-thumb { 249 | background: var(--background); 250 | border-radius: 10px; 251 | border: 5px var(--surface) solid; 252 | } 253 | 254 | .saved-workflows::-webkit-scrollbar-thumb:hover { 255 | background: #8c7ae6; 256 | } 257 | 258 | .saved-workflows::-webkit-scrollbar-button { 259 | display: block; 260 | background-color: transparent; 261 | } 262 | 263 | .saved-workflow { 264 | background: var(--background); 265 | border-radius: 8px; 266 | padding: 1rem; 267 | margin: 0.75rem 0; 268 | display: flex; 269 | align-items: center; 270 | gap: 0.75rem; 271 | animation: fadeIn 0.3s ease; 272 | } 273 | 274 | 
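/* fadeIn is used by .saved-workflow above so newly appended saved-workflow entries ease in rather than popping into view. */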
@keyframes fadeIn { 275 | from { 276 | opacity: 0; 277 | } 278 | 279 | to { 280 | opacity: 1; 281 | } 282 | } 283 | 284 | .saved-workflow:hover { 285 | transform: translateX(4px); 286 | } 287 | 288 | .saved-workflow-name { 289 | font-weight: 500; 290 | flex: 1; 291 | cursor: pointer; 292 | padding: 0.25rem 0.5rem; 293 | border-radius: 4px; 294 | } 295 | 296 | .saved-workflow-name:hover { 297 | background: var(--surface-hover); 298 | } 299 | 300 | .kickoff-workflow-btn { 301 | background: var(--success); 302 | font-size: 0.75rem; 303 | padding: 0.5rem 1rem; 304 | border-radius: 6px; 305 | color: var(--text); 306 | } 307 | 308 | .kickoff-workflow-btn:hover { 309 | background: var(--success-hover); 310 | } 311 | 312 | .delete-workflow-btn { 313 | background: var(--danger); 314 | font-size: 0.75rem; 315 | padding: 0.5rem 1rem; 316 | border-radius: 6px; 317 | color: var(--text); 318 | } 319 | 320 | .delete-workflow-btn:hover { 321 | background: var(--danger-hover); 322 | } 323 | 324 | .chat-container { 325 | flex: 4; 326 | display: flex; 327 | flex-direction: column; 328 | background: var(--surface); 329 | border-radius: 16px; 330 | box-shadow: 0 8px 32px rgba(0, 0, 0, 0.2); 331 | overflow: hidden; 332 | } 333 | 334 | .chat-header { 335 | padding: 1rem 1.5rem; 336 | background: var(--background); 337 | border-bottom: 1px solid var(--border); 338 | } 339 | 340 | .chat-header h2 { 341 | font-size: 1.25rem; 342 | font-weight: 600; 343 | color: var(--text); 344 | margin: 0; 345 | display: flex; 346 | align-items: center; 347 | gap: 0.5rem; 348 | } 349 | 350 | #chat-window { 351 | flex: 1; 352 | padding: 1.5rem; 353 | overflow-y: auto; 354 | background: var(--background); 355 | display: flex; 356 | flex-direction: column; 357 | gap: 1rem; 358 | } 359 | 360 | #chat-window::-webkit-scrollbar { 361 | width: 20px; 362 | } 363 | 364 | #chat-window::-webkit-scrollbar-thumb { 365 | background: var(--border); 366 | border-radius: 10px; 367 | border: 5px var(--background) solid; 368 | } 369 | 370 | #chat-window::-webkit-scrollbar-thumb:hover { 371 | background: #8c7ae6; 372 | } 373 | 374 | #chat-window::-webkit-scrollbar-button { 375 | display: block; 376 | background-color: transparent; 377 | } 378 | 379 | .message { 380 | padding: 0.75rem 1rem; 381 | border-radius: 12px; 382 | max-width: 75%; 383 | line-height: 1.4; 384 | word-break: break-word; 385 | font-size: 1rem; 386 | } 387 | 388 | .user-message { 389 | align-self: flex-end; 390 | background: var(--primary); 391 | color: #fff; 392 | border-top-right-radius: 0; 393 | box-shadow: 0 2px 4px rgba(0, 0, 0, 0.15); 394 | } 395 | 396 | .agent-message { 397 | align-self: flex-start; 398 | background: var(--surface); 399 | color: var(--text); 400 | border: 1px solid var(--border); 401 | border-top-left-radius: 0; 402 | } 403 | 404 | .search-message { 405 | align-self: center; 406 | background: transparent; 407 | color: var(--text-muted); 408 | font-style: italic; 409 | padding: 0.5rem; 410 | font-size: 0.95rem; 411 | } 412 | 413 | .execution-message { 414 | align-self: center; 415 | background: var(--success); 416 | color: #fff; 417 | font-weight: bold; 418 | border-radius: 12px; 419 | padding: 0.5rem 0.75rem; 420 | font-size: 0.95rem; 421 | } 422 | 423 | .code-execution-message { 424 | align-self: center; 425 | background: var(--surface); 426 | color: var(--text); 427 | border: 1px solid var(--border); 428 | font-family: "Source Code Pro", monospace; 429 | white-space: pre-wrap; 430 | padding: 0.75rem; 431 | border-radius: 12px; 432 | font-size: 0.9rem; 433 | 
} 434 | 435 | .message::before, 436 | .message::after { 437 | display: none; 438 | } 439 | 440 | #chat-window .message+.message { 441 | margin-top: 0.5rem; 442 | } 443 | 444 | .chat-input-container { 445 | padding: 1rem 1.5rem; 446 | background: var(--background); 447 | border-top: 1px solid var(--border); 448 | display: flex; 449 | gap: 1rem; 450 | align-items: center; 451 | } 452 | 453 | #prompt { 454 | flex: 1; 455 | padding: 0.75rem 1rem; 456 | background: var(--surface); 457 | border: 1px solid var(--border); 458 | border-radius: 8px; 459 | color: var(--text); 460 | font-size: 1rem; 461 | resize: none; 462 | } 463 | 464 | #prompt:focus { 465 | outline: none; 466 | border-color: var(--primary); 467 | box-shadow: 0 0 0 3px rgba(0, 173, 181, 0.2); 468 | } 469 | 470 | #send-btn { 471 | padding: 0.75rem 1.5rem; 472 | background: var(--primary); 473 | border: none; 474 | border-radius: 8px; 475 | color: #fff; 476 | cursor: pointer; 477 | font-weight: 600; 478 | } 479 | 480 | #send-btn:hover { 481 | background: var(--primary-hover); 482 | transform: translateY(-1px); 483 | } 484 | 485 | #send-btn:active { 486 | transform: translateY(0); 487 | } 488 | 489 | .workflow-message { 490 | padding: 0.75rem 1rem; 491 | margin: 1rem 0; 492 | border-radius: 8px; 493 | background: var(--surface); 494 | color: var(--text); 495 | } 496 | 497 | .workflow-message.success { 498 | border-left: 4px solid var(--success); 499 | background: rgba(155, 197, 61, 0.1); 500 | color: var(--success); 501 | overflow: initial; 502 | } 503 | 504 | .workflow-message.success.completed { 505 | background: rgba(155, 197, 61, 0.2); 506 | } 507 | 508 | .workflow-header { 509 | font-weight: bold; 510 | margin-bottom: 0.5rem; 511 | } 512 | 513 | 514 | @media (max-width: 768px) { 515 | body { 516 | flex-direction: column; 517 | padding: 1rem; 518 | } 519 | 520 | .controls { 521 | flex-direction: column; 522 | align-items: stretch; 523 | } 524 | 525 | select { 526 | width: 100%; 527 | } 528 | 529 | .saved-workflows, 530 | .chat-container { 531 | width: 100%; 532 | } 533 | } 534 | 535 | .workflow-message { 536 | border: 2px solid #ccc; 537 | padding: 10px; 538 | border-radius: 5px; 539 | margin-bottom: 10px; 540 | position: relative; 541 | overflow: hidden; 542 | } 543 | 544 | .green-pulse { 545 | border-color: #28a745 !important; 546 | animation: pulseGreen 1.5s ease-out forwards; 547 | } 548 | 549 | @keyframes pulseGreen { 550 | 0% { 551 | box-shadow: 0 0 0 0 rgba(40, 167, 69, 0.5); 552 | } 553 | 554 | 50% { 555 | box-shadow: 0 0 0 10px rgba(40, 167, 69, 0); 556 | } 557 | 558 | 100% { 559 | box-shadow: 0 0 0 0 rgba(40, 167, 69, 0); 560 | } 561 | } 562 | 563 | .workflow-card { 564 | border: 3px solid #ccc; 565 | padding: 10px; 566 | border-radius: 8px; 567 | margin-bottom: 10px; 568 | position: relative; 569 | animation: pulseBorder 2s infinite; 570 | transition: border-color 0.3s ease; 571 | } 572 | 573 | @keyframes pulseBorder { 574 | 0% { 575 | box-shadow: 0 0 0 0 rgba(40, 167, 69, 0.7); 576 | } 577 | 578 | 50% { 579 | box-shadow: 0 0 0 10px rgba(40, 167, 69, 0.2); 580 | } 581 | 582 | 100% { 583 | box-shadow: 0 0 0 0 rgba(40, 167, 69, 0.7); 584 | } 585 | } 586 | 587 | .workflow-card.completed { 588 | animation: finalPulse 1s ease-out; 589 | } 590 | 591 | @keyframes finalPulse { 592 | 0% { 593 | transform: scale(1); 594 | box-shadow: 0 0 0 0 rgba(40, 167, 69, 1); 595 | } 596 | 597 | 50% { 598 | transform: scale(1.05); 599 | box-shadow: 0 0 0 15px rgba(40, 167, 69, 0); 600 | } 601 | 602 | 100% { 603 | transform: scale(1); 604 | 
box-shadow: 0 0 0 0 rgba(40, 167, 69, 0); 605 | } 606 | } 607 | 608 | .workflow-card.active { 609 | border-color: #28a745; 610 | } 611 | 612 | .chat-container { 613 | transition: all 0.3s ease-in-out; 614 | } 615 | 616 | .main-container, 617 | .saved-workflows { 618 | transition: opacity 0.3s ease; 619 | } 620 | 621 | body.chat-fullscreen .main-container, 622 | body.chat-fullscreen .saved-workflows { 623 | opacity: 0; 624 | pointer-events: none; 625 | } 626 | 627 | body.chat-fullscreen .chat-container { 628 | position: fixed; 629 | top: 0; 630 | left: 0; 631 | width: 100vw; 632 | height: 100vh; 633 | margin: 0; 634 | border-radius: 0; 635 | z-index: 9999; 636 | } 637 | 638 | #toggleChatBtn { 639 | padding: 0.5rem 1rem; 640 | border: none; 641 | background: var(--primary); 642 | color: #fff; 643 | border-radius: 8px; 644 | cursor: pointer; 645 | margin-left: auto; 646 | transition: background 0.3s ease; 647 | } 648 | 649 | #toggleChatBtn:hover { 650 | background: var(--primary-hover); 651 | } 652 | 653 | 654 | details { 655 | background: var(--surface); 656 | border: 1px solid var(--border); 657 | border-radius: 8px; 658 | margin: 0.5rem 0; 659 | padding: 0.5rem; 660 | } 661 | 662 | details summary { 663 | cursor: pointer; 664 | font-weight: bold; 665 | color: var(--primary); 666 | list-style: none; 667 | outline: none; 668 | } 669 | 670 | details[open] summary { 671 | border-bottom: 1px solid var(--border); 672 | margin-bottom: 0.5rem; 673 | } 674 | 675 | .collapsible-content { 676 | margin-top: 0.5rem; 677 | } 678 | 679 | 680 | pre { 681 | background: var(--surface); 682 | padding: 15px; 683 | border-radius: 10px; 684 | margin-top: 10px; 685 | font-family: "Fira Code", monospace; 686 | word-wrap: break-word; 687 | overflow-x: auto; 688 | } 689 | 690 | code { 691 | font-family: 'Source Code Pro', monospace; 692 | color: var(--text); 693 | } 694 | 695 | 696 | blockquote { 697 | border-left: 4px solid var(--primary); 698 | padding-left: 1rem; 699 | color: var(--text-muted); 700 | margin: 0.5rem 0; 701 | } 702 | 703 | 704 | .back-button-container { 705 | position: absolute; 706 | top: 90%; 707 | left: 2.5%; 708 | z-index: 100; 709 | } 710 | 711 | .back-button { 712 | display: flex; 713 | align-items: center; 714 | gap: 8px; 715 | padding: 10px 20px; 716 | background-color: #2c3e50; 717 | color: white; 718 | text-decoration: none; 719 | border-radius: 5px; 720 | transition: all 0.3s ease; 721 | font-family: 'Roboto', sans-serif; 722 | box-shadow: 0 2px 4px rgba(0, 0, 0, 0.2); 723 | } 724 | 725 | .back-button:hover { 726 | background-color: #34495e; 727 | color: white; 728 | transform: translateY(-2px); 729 | box-shadow: 0 4px 8px rgba(0, 0, 0, 0.2); 730 | } 731 | 732 | .back-button i { 733 | font-size: 16px; 734 | } -------------------------------------------------------------------------------- /static/js/app.js: -------------------------------------------------------------------------------- 1 | let selectedMode = "agent"; 2 | 3 | document.addEventListener("DOMContentLoaded", () => { 4 | const pendingQuery = localStorage.getItem("pendingQuery"); 5 | if (pendingQuery) { 6 | const promptInput = document.getElementById("prompt"); 7 | promptInput.value = pendingQuery; 8 | 9 | window.autoWorkflowMessage = true; 10 | 11 | const sendButton = document.getElementById("send-btn"); 12 | const endButton = document.getElementById("end-btn"); 13 | endButton.style.display = "none"; 14 | setTimeout(() => { 15 | sendButton.click(); 16 | }, 500); 17 | 18 | localStorage.removeItem("pendingQuery"); 19 | } 20 | 
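  // A prompt stashed in localStorage under "pendingQuery" is restored above, the end button is
  // hidden, and #send-btn is clicked after a short delay so a kicked-off workflow starts without
  // manual input; window.autoWorkflowMessage marks the run as an automatic workflow kickoff.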
21 | 22 | const dropupMenu = document.getElementById("dropupMenuButton"); 23 | const dropdownItems = document.querySelectorAll(".dropdown-item"); 24 | const createAgentBtn = document.getElementById("create-agent"); 25 | if (createAgentBtn) { 26 | createAgentBtn.addEventListener("click", () => { 27 | window.location.href = '/create_agent'; 28 | }); 29 | } 30 | dropdownItems.forEach((item) => { 31 | item.addEventListener("click", (e) => { 32 | e.preventDefault(); 33 | selectedMode = item.getAttribute("data-mode"); 34 | 35 | dropupMenu.innerHTML = item.textContent; 36 | 37 | dropupMenu.classList.remove( 38 | "btn-secondary", 39 | "btn-primary", 40 | "btn-info", 41 | "btn-danger", 42 | "btn-success" 43 | ); 44 | 45 | switch (selectedMode) { 46 | case "search": 47 | dropupMenu.classList.add("btn-secondary"); 48 | break; 49 | case "agent": 50 | dropupMenu.classList.add("btn-primary"); 51 | break; 52 | default: 53 | dropupMenu.classList.add("btn-primary"); 54 | } 55 | 56 | console.log(`Selected mode: ${selectedMode}`); 57 | }); 58 | }); 59 | }); 60 | 61 | let codeSnippets = []; 62 | $(document).on("click", ".code-snippet-widget", function () { 63 | let msg_id = $(this).data("msg-id"); 64 | showCodeInModal(msg_id); 65 | }); 66 | 67 | function showCodeInModal(msg_id) { 68 | let codeSnippet = codeSnippets.find((snippet) => snippet.id === msg_id); 69 | if (codeSnippet) { 70 | let modalHtml = ` 71 | 82 | `; 83 | $("body").append(modalHtml); 84 | 85 | $("#code-modal").show(); 86 | 87 | $(".copy-button").click(function () { 88 | const codeText = codeSnippet.code; 89 | navigator.clipboard 90 | .writeText(codeText) 91 | .then(() => { 92 | const $copyButton = $(this); 93 | $copyButton.html(' Copied!'); 94 | setTimeout(() => { 95 | $copyButton.html(' Copy'); 96 | }, 2000); 97 | }) 98 | .catch((err) => { 99 | console.error("Failed to copy text: ", err); 100 | alert("Failed to copy code to clipboard"); 101 | }); 102 | }); 103 | 104 | $(".close-code-modal").click(function () { 105 | $("#code-modal").remove(); 106 | }); 107 | 108 | $(window).click(function (event) { 109 | if (event.target.id === "code-modal") { 110 | $("#code-modal").remove(); 111 | } 112 | }); 113 | } 114 | } 115 | const SpeechRecognition = 116 | window.SpeechRecognition || window.webkitSpeechRecognition; 117 | 118 | if (SpeechRecognition) { 119 | const recognition = new SpeechRecognition(); 120 | recognition.continuous = false; 121 | recognition.interimResults = false; 122 | recognition.lang = "en-US"; 123 | 124 | const voiceInputBtn = document.getElementById("voice-input-btn"); 125 | const promptTextarea = document.getElementById("prompt"); 126 | const sendButton = document.getElementById("send-btn"); 127 | 128 | voiceInputBtn.addEventListener("click", () => { 129 | recognition.start(); 130 | voiceInputBtn.classList.add("listening"); 131 | }); 132 | 133 | recognition.addEventListener("result", (event) => { 134 | const transcript = event.results[0][0].transcript; 135 | 136 | promptTextarea.value = transcript; 137 | }); 138 | 139 | recognition.addEventListener("speechend", () => { 140 | recognition.stop(); 141 | voiceInputBtn.classList.remove("listening"); 142 | setTimeout(() => { 143 | sendButton.click(); 144 | }, 500); 145 | }); 146 | 147 | recognition.addEventListener("error", (event) => { 148 | console.error("Speech recognition error:", event.error); 149 | voiceInputBtn.classList.remove("listening"); 150 | 151 | alert("Error with speech recognition: " + event.error); 152 | }); 153 | } else { 154 | console.warn("SpeechRecognition API is not 
supported in this browser."); 155 | document.getElementById("voice-input-btn").style.display = "none"; 156 | } 157 | const sendButton = document.getElementById("send-btn"); 158 | const voiceInputBtn = document.getElementById("voice-input-btn"); 159 | const promptTextareas = document.getElementById("prompt"); 160 | 161 | sendButton.classList.remove("show"); 162 | voiceInputBtn.classList.remove("show"); 163 | 164 | promptTextareas.addEventListener("input", function () { 165 | this.style.height = "auto"; 166 | this.style.height = this.scrollHeight + "px"; 167 | 168 | if (this.value.trim() !== "") { 169 | sendButton.classList.add("show"); 170 | } else { 171 | sendButton.classList.remove("show"); 172 | } 173 | }); 174 | promptTextareas.addEventListener("input", function () { 175 | this.style.height = "auto"; 176 | this.style.height = this.scrollHeight + "px"; 177 | 178 | if (this.value.trim() !== "") { 179 | voiceInputBtn.classList.add("show"); 180 | } else { 181 | voiceInputBtn.classList.remove("show"); 182 | } 183 | }); 184 | 185 | function escapeHtml(text) { 186 | if (!text) { 187 | return ""; 188 | } 189 | return text 190 | .replace(/&/g, "&") 191 | .replace(//g, ">") 193 | .replace(/"/g, """) 194 | .replace(/'/g, "'"); 195 | } 196 | const promptTextarea = document.getElementById("prompt"); 197 | 198 | promptTextarea.addEventListener("input", function () { 199 | this.style.height = "auto"; 200 | this.style.height = this.scrollHeight + "px"; 201 | }); 202 | 203 | $(document).ready(function () { 204 | var socket = io("http://localhost:5000"); 205 | window.socket = socket; 206 | socket.on("connect", function () { 207 | console.log("Connected to server"); 208 | }); 209 | console.log("Socket instance:", socket); 210 | 211 | $("#refresh-btn").click(function () { 212 | refresh(); 213 | }); 214 | 215 | $("#clear-btn").click(function () { 216 | clear(); 217 | }); 218 | 219 | $("#send-btn").click(function () { 220 | sendPrompt(); 221 | }); 222 | 223 | $("#end-btn").click(function () { 224 | endProcessing(); 225 | }); 226 | 227 | $("#prompt").on("keydown", function (e) { 228 | if (e.keyCode === 13 && !e.shiftKey) { 229 | e.preventDefault(); 230 | sendPrompt(); 231 | return false; 232 | } 233 | }); 234 | 235 | socket.on("connect", function () { 236 | console.log("Connected to server"); 237 | }); 238 | socket.on("suggestions", function (data) { 239 | let suggestionsStr = data.suggestions; 240 | let suggestions = suggestionsStr.split(",").map((s) => s.trim()); 241 | 242 | 243 | displaySuggestions(suggestions); 244 | }); 245 | 246 | function displaySuggestions(suggestions) { 247 | 248 | let suggestionsHtml = ` 249 |
250 |
251 |

Select one or more suggestions:

252 |
253 | ${suggestions 254 | .map( 255 | (s, index) => ` 256 | 259 | ` 260 | ) 261 | .join("")} 262 |
263 | 264 |
265 |
266 | `; 267 | $("#chat-window").append(suggestionsHtml); 268 | 269 | 270 | scrollChatToBottom(); 271 | 272 | 273 | $(".suggestion-btn").on("click", function () { 274 | $(this).toggleClass("selected"); 275 | }); 276 | 277 | 278 | $("#submit-suggestions-btn").on("click", function () { 279 | let selectedSuggestions = []; 280 | $(".suggestion-btn.selected").each(function () { 281 | selectedSuggestions.push($(this).data("suggestion")); 282 | }); 283 | if (selectedSuggestions.length === 0) { 284 | alert("Please select at least one suggestion."); 285 | return; 286 | } 287 | 288 | socket.emit("selected_suggestions", { suggestions: selectedSuggestions }); 289 | 290 | 291 | let selectedHtml = ` 292 |
293 |

Selected options:

294 | 297 |
298 | `; 299 | $("#chat-window").append(selectedHtml); 300 | 301 | 302 | $(".suggestions-message").remove(); 303 | 304 | 305 | scrollChatToBottom(); 306 | }); 307 | } 308 | 309 | 310 | 311 | socket.on("agent_response", function (data) { 312 | if (data.type === "info" && 313 | data.content === "Workflow completed." && 314 | !window.location.pathname.includes("create_agent")) { 315 | console.log("Skipping 'Workflow completed.' message because we're not on create_agent"); 316 | return; 317 | } 318 | if (data.type === "thinking_message") { 319 | displayThinkingMessage(data.content, data.msg_id); 320 | } 321 | if (data.type === "loading_message") { 322 | displayLoadingMessage(data.content, data.msg_id); 323 | }else if (data.type === "workflow_completed") {} 324 | else if (data.type === "search_results") { 325 | updateSearchResults(data.content, data.results, data.msg_id); 326 | } else if (data.type === "compiler_message") { 327 | displayCodeExecutionMessage(data.content, data.code, data.msg_id); 328 | } else if (data.type === "success_message") { 329 | updateLoadingMessage(data.msg_id, data.content); 330 | } else if (data.type === "error_message") { 331 | updateLoadingMessageWithError(data.msg_id, data.content, data.code); 332 | } else if (data.type === "error") { 333 | displayErrorMessage(data.content); 334 | } else if (data.type === "thinking_message") { 335 | displayThinkingMessage(data.content, data.msg_id); 336 | } else if (data.type === "search_agent_message") { 337 | SearchAgentMessage(data.content); 338 | } else { 339 | displayAgentMessage(data.content); 340 | } 341 | scrollChatToBottom(); 342 | }); 343 | socket.on("search_response", function (data) { 344 | if (data.type === "init") { 345 | displaySearchStatus(data.labels, data.msg_id); 346 | } else if (data.type === "update") { 347 | updateSearchStatus(data.index, data.status, data.msg_id); 348 | } 349 | }); 350 | 351 | function displaySearchStatus(labels, msg_id) { 352 | let html = ` 353 |
354 |
355 |
    356 |
357 |
358 |
359 | `; 360 | $("#chat-window").append(html); 361 | 362 | if (labels.length > 0) { 363 | let label = labels[0]; 364 | let itemHtml = ` 365 |
  • 366 |
    367 |
  • 368 | `; 369 | $(`#search-status-list-${msg_id}`).append(itemHtml); 370 | 371 | typeLabelText(`#search-item-${msg_id}-0 .search-label`, label, 0, msg_id); 372 | } 373 | 374 | $(`#search-status-${msg_id}`).data("labels", labels); 375 | $(`#search-status-${msg_id}`).data("currentIndex", 0); 376 | 377 | scrollChatToBottom(); 378 | } 379 | 380 | function updateSearchStatus(index, status, msg_id) { 381 | let item = $(`#search-item-${msg_id}-${index}`); 382 | if (item.length) { 383 | let labelElement = item.find(".search-label"); 384 | if (!labelElement.data("typingComplete")) { 385 | labelElement.data("pendingStatus", status); 386 | return; 387 | } 388 | 389 | item.removeClass("searching completed"); 390 | item.find(".spinner, .bi-check-circle-fill").remove(); 391 | 392 | if (status === "searching") { 393 | item.addClass("searching"); 394 | if (item.find(".search-label .spinner").length === 0) { 395 | let spinner = $('
    '); 396 | item.find(".search-label").append(spinner); 397 | } 398 | } else if (status === "complete") { 399 | item.addClass("completed"); 400 | if (item.find(".search-label i").length === 0) { 401 | let tickIcon = $(''); 402 | item.find(".search-label").append(tickIcon); 403 | } 404 | 405 | let container = $(`#search-status-${msg_id}`); 406 | let labels = container.data("labels"); 407 | let currentIndex = container.data("currentIndex"); 408 | 409 | if (currentIndex + 1 < labels.length) { 410 | let nextIndex = currentIndex + 1; 411 | let nextLabel = labels[nextIndex]; 412 | 413 | let itemHtml = ` 414 |
  • 415 |
    416 |
  • 417 | `; 418 | let nextItem = $(itemHtml).hide(); 419 | $(`#search-status-list-${msg_id}`).append(nextItem); 420 | nextItem.slideDown(300, function () { 421 | typeLabelText( 422 | `#search-item-${msg_id}-${nextIndex} .search-label`, 423 | nextLabel, 424 | nextIndex, 425 | msg_id 426 | ); 427 | }); 428 | 429 | container.data("currentIndex", nextIndex); 430 | scrollChatToBottom(); 431 | } 432 | } else { 433 | item.addClass(status); 434 | scrollChatToBottom(); 435 | } 436 | } 437 | } 438 | 439 | function typeLabelText(selector, text, index, msg_id) { 440 | let element = $(selector); 441 | let charIndex = 0; 442 | let interval = setInterval(function () { 443 | if (charIndex < text.length) { 444 | element.append(escapeHtml(text.charAt(charIndex))); 445 | charIndex++; 446 | } else { 447 | clearInterval(interval); 448 | element.data("typingComplete", true); 449 | 450 | let pendingStatus = element.data("pendingStatus"); 451 | if (pendingStatus) { 452 | updateSearchStatus(index, pendingStatus, msg_id); 453 | element.removeData("pendingStatus"); 454 | } 455 | } 456 | }, 8); 457 | } 458 | 459 | socket.on("agent_status", function (data) { 460 | const statusElement = $("#agent-status"); 461 | if (data.status === "true") { 462 | statusElement.text("Thinking"); 463 | statusElement.removeClass("inactive").addClass("active"); 464 | toggleButtons(true); 465 | } else { 466 | statusElement.text("Inactive"); 467 | statusElement.removeClass("active").addClass("inactive"); 468 | toggleButtons(false); 469 | } 470 | }); 471 | $(document).ready(function () { 472 | $("#preview-btn").click(function () { 473 | showPreviewModal(); 474 | }); 475 | }); 476 | 477 | function showPreviewModal() { 478 | let modalHtml = ` 479 | 489 | `; 490 | $("body").append(modalHtml); 491 | 492 | $("#preview-modal").css({ 493 | position: "fixed", 494 | top: "0", 495 | left: "0", 496 | right: "0", 497 | bottom: "0", 498 | width: "100vw", 499 | height: "100vh", 500 | "background-color": "rgba(0, 0, 0, 0.9)", 501 | "z-index": "9999", 502 | display: "flex", 503 | "align-items": "center", 504 | "justify-content": "center", 505 | }); 506 | 507 | $("#preview-modal .modal-contents").css({ 508 | position: "relative", 509 | width: "100vw", 510 | height: "100vh", 511 | "background-color": "#fff", 512 | display: "flex", 513 | "flex-direction": "column", 514 | overflow: "hidden", 515 | margin: "0", 516 | padding: "0", 517 | }); 518 | 519 | $("#preview-modal .modal-body").css({ 520 | flex: "1", 521 | overflow: "hidden", 522 | padding: "0", 523 | margin: "0", 524 | width: "100%", 525 | height: "100%", 526 | }); 527 | 528 | $("#preview-modal .web-app-preview").css({ 529 | width: "100%", 530 | height: "100%", 531 | "background-color": "#fff", 532 | overflow: "hidden", 533 | display: "flex", 534 | }); 535 | 536 | $("#preview-modal .web-app-preview iframe").css({ 537 | border: "solid 12px", 538 | width: "100%", 539 | height: "100%", 540 | border: "none", 541 | margin: "0", 542 | padding: "0", 543 | overflow: "auto", 544 | flex: "1", 545 | }); 546 | 547 | $("#preview-modal .close-preview-modal").css({ 548 | position: "fixed", 549 | top: "-5px", 550 | right: "20px", 551 | "font-size": "40px", 552 | cursor: "pointer", 553 | color: "#F2F0EF", 554 | "z-index": "10000", 555 | "text-shadow": "0 0 10px white", 556 | }); 557 | 558 | $("#preview-modal .close-preview-modal").click(function () { 559 | $("#preview-modal").remove(); 560 | }); 561 | 562 | $(document).on("keydown", function (e) { 563 | if (e.key === "Escape") { 564 | $("#preview-modal").remove(); 565 | 
} 566 | }); 567 | 568 | $("#preview-modal").on("click", function (e) { 569 | if ($(e.target).is("#preview-modal")) { 570 | $("#preview-modal").remove(); 571 | } 572 | }); 573 | 574 | renderWebApp(); 575 | } 576 | 577 | function renderWebApp() { 578 | const iframe = document.getElementById("preview-iframe"); 579 | iframe.onload = function () { 580 | try { 581 | const iframeDoc = 582 | iframe.contentDocument || iframe.contentWindow.document; 583 | const style = document.createElement("style"); 584 | style.textContent = ` 585 | ::-webkit-scrollbar { 586 | width: 15px; 587 | } 588 | ::-webkit-scrollbar-thumb { 589 | background: #ada7a7b6; 590 | border-radius: 10px; 591 | border: 5px white solid; 592 | } 593 | `; 594 | iframeDoc.head.appendChild(style); 595 | 596 | if (!iframeDoc.querySelector('meta[name="viewport"]')) { 597 | const meta = iframeDoc.createElement("meta"); 598 | meta.name = "viewport"; 599 | meta.content = 600 | "width=device-width, initial-scale=1.0, maximum-scale=1.0, user-scalable=no"; 601 | iframeDoc.head.appendChild(meta); 602 | } 603 | } catch (e) { 604 | console.log( 605 | "Cannot access iframe content - likely due to same-origin policy" 606 | ); 607 | } 608 | }; 609 | 610 | iframe.src = "render/index.html"; 611 | } 612 | 613 | function scrollChatToBottom() { 614 | $("#chat-window").scrollTop($("#chat-window")[0].scrollHeight); 615 | } 616 | 617 | function scrollThinkingWindowToBottom() { 618 | $("#think-window").scrollTop($("#think-window")[0].scrollHeight); 619 | } 620 | 621 | function displayThinkingMessage(message, msg_id) { 622 | let regex = /(\d+)\)/g; 623 | let parts = message.split(regex); 624 | let items = []; 625 | 626 | for (let i = 1; i < parts.length; i += 2) { 627 | let number = parts[i].trim(); 628 | let content = parts[i + 1] ? parts[i + 1].trim() : ""; 629 | items.push({ number: number, content: content }); 630 | } 631 | 632 | let html = ` 633 |

    634 | THINKING PHASE 635 |

    636 |
    637 |
    638 | `; 639 | $("#think-window").html(html); 640 | 641 | typeThinkingSteps(items, msg_id); 642 | } 643 | 644 | function typeThinkingSteps(steps, msg_id) { 645 | let container = $(`#thinking-container-${msg_id}`); 646 | let stepIndex = 0; 647 | 648 | function typeNextStep() { 649 | if (stepIndex < steps.length) { 650 | let step = steps[stepIndex]; 651 | 652 | let stepHtml = ` 653 |
    654 |
    655 | 656 |
    657 |
    658 |

    659 |
    660 |
    661 | `; 662 | container.append(stepHtml); 663 | scrollThinkingWindowToBottom(); 664 | 665 | typeStepContent(step.content, msg_id, stepIndex, function () { 666 | stepIndex++; 667 | typeNextStep(); 668 | }); 669 | } else { 670 | $("#think-window").removeClass("highlight"); 671 | } 672 | } 673 | 674 | $("#think-window").addClass("highlight"); 675 | typeNextStep(); 676 | } 677 | 678 | function typeStepContent(content, msg_id, stepIndex, callback) { 679 | let contentElement = $(`#step-content-${msg_id}-${stepIndex}`); 680 | let index = 0; 681 | let speed = 5; 682 | let tempContent = ""; 683 | 684 | function typeChar() { 685 | if (index < content.length) { 686 | let currentChar = content.charAt(index); 687 | tempContent += currentChar; 688 | contentElement.html(escapeHtml(tempContent)); 689 | index++; 690 | scrollThinkingWindowToBottom(); 691 | setTimeout(typeChar, speed); 692 | } else { 693 | let parsedContent = marked.parse(tempContent); 694 | contentElement.html(parsedContent); 695 | scrollThinkingWindowToBottom(); 696 | if (callback) callback(); 697 | } 698 | } 699 | 700 | typeChar(); 701 | } 702 | 703 | function toggleButtons(isActive) { 704 | if (isActive) { 705 | $("#send-btn").prop("disabled", true).hide(); 706 | $("#end-btn").show(); 707 | } else { 708 | $("#send-btn").prop("disabled", false).show(); 709 | $("#end-btn").hide(); 710 | } 711 | } 712 | function endProcessing() { 713 | socket.emit("end_processing"); 714 | toggleButtons(false); 715 | } 716 | function updateSearchResults(message, results, msg_id) { 717 | let html = ` 718 | ${escapeHtml(message)} 719 | 720 | 723 | `; 724 | 725 | $(`#msg-${msg_id}`).html(html); 726 | $(`#msg-${msg_id}`) 727 | .removeClass("loading-message") 728 | .addClass("search-results-message"); 729 | 730 | $(`#show-results-${msg_id}`).click(function () { 731 | toggleResultsVisibility(msg_id); 732 | }); 733 | } 734 | 735 | function toggleResultsVisibility(msg_id) { 736 | let resultsBlock = $(`#results-block-${msg_id}`); 737 | let showResultsBtn = $(`#show-results-${msg_id}`); 738 | 739 | if (resultsBlock.is(":visible")) { 740 | resultsBlock.hide(); 741 | showResultsBtn.text("Show Results"); 742 | } else { 743 | resultsBlock.show(); 744 | showResultsBtn.text("Hide Results"); 745 | } 746 | } 747 | function displayCodeExecutionMessage(message, code, msg_id) { 748 | code = code || "No code available"; 749 | 750 | codeSnippets.push({ id: msg_id, code: code }); 751 | 752 | let html = ` 753 |
    754 |
    Result
    755 | ${escapeHtml(message)} 756 | 757 | 760 |
    761 | `; 762 | $("#chat-window").append(html); 763 | updateCodeSidebar(msg_id, code); 764 | 765 | $(`#show-code-${msg_id}`).click(function () { 766 | toggleCodeVisibility(msg_id); 767 | }); 768 | } 769 | 770 | function displayLoadingMessage(message, msg_id) { 771 | let html = ` 772 |
    773 | ${escapeHtml(message)} 774 |
    775 | Loading... 776 |
    777 |
    778 | `; 779 | $("#chat-window").append(html); 780 | } 781 | 782 | function toggleCodeVisibility(msg_id) { 783 | let codeBlock = $(`#code-block-${msg_id}`); 784 | let showCodeBtn = $(`#show-code-${msg_id}`); 785 | 786 | if (codeBlock.is(":visible")) { 787 | codeBlock.hide(); 788 | showCodeBtn.text("Code"); 789 | } else { 790 | codeBlock.show(); 791 | showCodeBtn.text("Hide Code"); 792 | } 793 | } 794 | 795 | function updateLoadingMessage(msg_id, message) { 796 | let messageElement = $(`#msg-${msg_id}`); 797 | if (messageElement.length) { 798 | messageElement.removeClass("loading-message").addClass("success-message"); 799 | messageElement.html(` 800 | ${message} 801 | 802 | `); 803 | } 804 | } 805 | function updateCodeSidebar(msg_id, code, isError = false) { 806 | let codePreview = code.split("\n")[2] || code.split("\n")[0] || ""; // fall back gracefully when the snippet has fewer than three lines 807 | if (codePreview.length > 30) { 808 | codePreview = codePreview.substring(13, 27) + "..."; 809 | } 810 | 811 | let snippetClass = "code-snippet-widget"; 812 | if (isError) { 813 | snippetClass += " error-snippet"; 814 | } 815 | 816 | let snippetHtml = ` 817 |
    818 | ${escapeHtml(codePreview)} 819 |
    820 | `; 821 | $("#code-snippets-container").append(snippetHtml); 822 | } 823 | 824 | function updateLoadingMessageWithError(msg_id, message, code) { 825 | let messageElement = $(`#msg-${msg_id}`); 826 | if (messageElement.length) { 827 | messageElement.removeClass("loading-message").addClass("error-message"); 828 | messageElement.html(` 829 | ${escapeHtml(message)} 830 | 831 | `); 832 | } 833 | 834 | codeSnippets.push({ id: msg_id, code: code }); 835 | 836 | updateCodeSidebar(msg_id, code, true); 837 | } 838 | 839 | function displayErrorMessage(message) { 840 | let html = ` 841 |
    842 | ${message} 843 |
    844 | `; 845 | $("#chat-window").append(html); 846 | } 847 | 848 | function displayAgentMessage(message) { 849 | let msg_id = Date.now(); 850 | let html = ` 851 |
    852 | 853 |
    854 | `; 855 | $("#chat-window").append(html); 856 | typeMessageCharacterByCharacter(message, msg_id); 857 | } 858 | function typeMessageCharacterByCharacter(message, msg_id) { 859 | let messageElement = $(`#msg-${msg_id}`).find("span"); 860 | let index = 0; 861 | let speed = 5; 862 | let tempMessage = ""; 863 | 864 | function typeChar() { 865 | if (index < message.length) { 866 | let currentChar = message.charAt(index); 867 | tempMessage += currentChar; 868 | messageElement.html(escapeHtml(tempMessage)); 869 | index++; 870 | scrollChatToBottom(); 871 | setTimeout(typeChar, speed); 872 | } else { 873 | let parsedMessage = marked.parse(tempMessage); 874 | messageElement.html(parsedMessage); 875 | scrollChatToBottom(); 876 | } 877 | } 878 | typeChar(); 879 | } 880 | function SearchAgentMessage(message) { 881 | let msg_id = Date.now(); 882 | let parsedMessage = marked.parse(message); 883 | let html = ` 884 |
    885 | ${parsedMessage} 886 |
    887 | `; 888 | $("#chat-window").append(html); 889 | } 890 | 891 | function clear() { 892 | const outerDiv = document.getElementById("think-window"); 893 | outerDiv.querySelector(".thinking-container").innerHTML = ""; 894 | document.getElementById("chat-window").innerHTML = ""; 895 | } 896 | 897 | function refresh() { 898 | socket.emit("refresh"); 899 | } 900 | 901 | function sendPrompt() { 902 | let prompt = $("#prompt").val().trim(); 903 | if (prompt === "") { 904 | return; 905 | } 906 | 907 | let html = ` 908 |
    909 | ${prompt} 910 |
    911 | `; 912 | $("#chat-window").append(html); 913 | scrollChatToBottom(); 914 | 915 | $("#prompt").val(""); 916 | $("#prompt").css("height", ""); 917 | 918 | socket.emit("user_prompt", { prompt: prompt, mode: selectedMode }); 919 | 920 | if (window.autoWorkflowMessage) { 921 | $("#chat-window .message.user-message").last().hide(); 922 | window.autoWorkflowMessage = false; 923 | } 924 | } 925 | }); 926 | 927 | // const apiBtn = document.getElementById("api-btn"); 928 | // const apiModal = document.getElementById("api-modal"); 929 | const closeBtn = document.querySelector(".close-btn"); 930 | 931 | // apiBtn.addEventListener("click", () => { 932 | // apiModal.style.display = "block"; 933 | // }); 934 | 935 | // closeBtn.addEventListener("click", () => { 936 | // apiModal.style.display = "none"; 937 | // }); 938 | 939 | window.addEventListener("click", (event) => { 940 | if (event.target === apiModal) { 941 | apiModal.style.display = "none"; 942 | } 943 | }); 944 | 945 | const localagent = document.getElementById("localLLMagent"); 946 | const aboutModal = document.getElementById("about-modal"); 947 | 948 | localagent.addEventListener("click", () => { 949 | aboutModal.style.display = "block"; 950 | }); 951 | 952 | closeBtn.addEventListener("click", () => { 953 | aboutModal.style.display = "none"; 954 | }); 955 | 956 | window.addEventListener("click", (event) => { 957 | if (event.target === aboutModal) { 958 | aboutModal.style.display = "none"; 959 | } 960 | }); 961 | 962 | -------------------------------------------------------------------------------- /static/js/script.js: -------------------------------------------------------------------------------- 1 | document.addEventListener("DOMContentLoaded", () => { 2 | const socket = io("http://localhost:5000"); 3 | window.socket = socket; 4 | 5 | socket.on("connect", () => { 6 | console.log("Socket connected (from script.js)"); 7 | }); 8 | const toggleChatBtn = document.getElementById("toggleChatBtn"); 9 | let isFullscreen = false; 10 | let originalRect = null; 11 | 12 | toggleChatBtn.addEventListener("click", () => { 13 | const chatContainer = document.querySelector(".chat-container"); 14 | const mainContainer = document.querySelector(".main-container"); 15 | const savedWorkflows = document.querySelector(".saved-workflows"); 16 | 17 | if (!isFullscreen) { 18 | originalRect = chatContainer.getBoundingClientRect(); 19 | 20 | chatContainer.style.position = "fixed"; 21 | chatContainer.style.top = originalRect.top + "px"; 22 | chatContainer.style.left = originalRect.left + "px"; 23 | chatContainer.style.width = originalRect.width + "px"; 24 | chatContainer.style.height = originalRect.height + "px"; 25 | chatContainer.style.zIndex = "9999"; 26 | 27 | chatContainer.style.transition = "top 0.3s ease, left 0.3s ease, width 0.3s ease, height 0.3s ease"; 28 | mainContainer.style.transition = "opacity 0.3s ease"; 29 | savedWorkflows.style.transition = "opacity 0.3s ease"; 30 | 31 | chatContainer.getBoundingClientRect(); 32 | 33 | chatContainer.style.top = "0"; 34 | chatContainer.style.left = "0"; 35 | chatContainer.style.width = "100vw"; 36 | chatContainer.style.height = "100vh"; 37 | 38 | mainContainer.style.opacity = "0"; 39 | savedWorkflows.style.opacity = "0"; 40 | 41 | isFullscreen = true; 42 | } else { 43 | chatContainer.style.top = originalRect.top + "px"; 44 | chatContainer.style.left = originalRect.left + "px"; 45 | chatContainer.style.width = originalRect.width + "px"; 46 | chatContainer.style.height = originalRect.height + "px"; 47 | 48 | 
mainContainer.style.opacity = "1"; 49 | savedWorkflows.style.opacity = "1"; 50 | 51 | chatContainer.addEventListener("transitionend", function handler(e) { 52 | if (e.propertyName === "height") { 53 | chatContainer.style.position = ""; 54 | chatContainer.style.top = ""; 55 | chatContainer.style.left = ""; 56 | chatContainer.style.width = ""; 57 | chatContainer.style.height = ""; 58 | chatContainer.style.zIndex = ""; 59 | chatContainer.style.transition = ""; 60 | chatContainer.removeEventListener("transitionend", handler); 61 | } 62 | }); 63 | 64 | isFullscreen = false; 65 | } 66 | }); 67 | 68 | 69 | socket.on("workflow_response", function (data) { 70 | console.log("Received workflow_response:", data); 71 | 72 | if (data.status && data.status === "received") { 73 | displayWorkflowReceived(data.workflowText || "Workflow details here."); 74 | } 75 | 76 | if (data.status && data.status === "completed") { 77 | displayWorkflowCompleted(); 78 | } 79 | 80 | if (window.location.pathname.includes("create_agent")) { 81 | if (data.query) { 82 | displayAgentMessage(data.query); 83 | } 84 | } else { 85 | if (data.redirect && data.query) { 86 | localStorage.setItem("pendingQuery", data.query); 87 | window.location.href = data.redirect; 88 | } 89 | } 90 | }); 91 | 92 | socket.on("agent_response", function (data) { 93 | console.log("agent_response received:", data); 94 | 95 | if (data.type === "workflow_received") { 96 | displayWorkflowReceived(data.workflowText || ""); 97 | } 98 | else if (data.type === "thinking_message") { 99 | } 100 | else if (data.type === "workflow_completed") { 101 | displayWorkflowCompleted(); 102 | } else if (data.type === "loading_message") { 103 | displayLoadingMessage(data.content, data.msg_id); 104 | } else if (data.type === "search_results") { 105 | updateSearchResults(data.content, data.results, data.msg_id); 106 | } else if (data.type === "compiler_message") { 107 | displayCodeExecutionMessage(data.content, data.code, data.msg_id); 108 | } else if (data.type === "success_message") { 109 | updateLoadingMessage(data.msg_id, data.content); 110 | } else if (data.type === "error_message") { 111 | updateLoadingMessageWithError(data.msg_id, data.content, data.code); 112 | } else if (data.type === "error") { 113 | displayErrorMessage(data.content); 114 | } else if (data.type === "search_agent_message") { 115 | displaySearchAgentMessage(data.content); 116 | } else if (data.type === "info") { 117 | displayAgentMessage(data.content); 118 | } else { 119 | displayAgentMessage(data.content); 120 | } 121 | scrollChatToBottom(); 122 | }); 123 | 124 | function sendPrompt() { 125 | const promptEl = document.getElementById("prompt"); 126 | let promptText = promptEl.value.trim(); 127 | if (!promptText) return; 128 | 129 | const chatWindow = document.getElementById("chat-window"); 130 | const userMsgHTML = `
    131 | ${marked.parse(promptText)} 132 |
    `; 133 | chatWindow.insertAdjacentHTML("beforeend", userMsgHTML); 134 | scrollChatToBottom(); 135 | 136 | promptEl.value = ""; 137 | promptEl.style.height = ""; 138 | socket.emit("user_prompt", { prompt: promptText, mode: "agent" }); 139 | } 140 | 141 | document.getElementById("send-btn").addEventListener("click", sendPrompt); 142 | document.getElementById("prompt").addEventListener("keydown", (e) => { 143 | if (e.key === "Enter" && !e.shiftKey) { 144 | e.preventDefault(); 145 | sendPrompt(); 146 | } 147 | }); 148 | 149 | function scrollChatToBottom() { 150 | const chatWindow = document.getElementById("chat-window"); 151 | chatWindow.scrollTop = chatWindow.scrollHeight; 152 | } 153 | 154 | // --- Updated Display Functions Using Markdown --- 155 | 156 | function displayAgentMessage(message) { 157 | const msg_id = Date.now(); 158 | const html = ` 159 |
    160 | ${marked.parse(message)} 161 |
    `; 162 | document.getElementById("chat-window").insertAdjacentHTML("beforeend", html); 163 | } 164 | 165 | function displayLoadingMessage(message, msg_id) { 166 | const html = ` 167 |
    168 | ${marked.parse(message)} 169 | 170 |
    `; 171 | document.getElementById("chat-window").insertAdjacentHTML("beforeend", html); 172 | } 173 | 174 | function updateLoadingMessage(msg_id, message) { 175 | document.getElementById(`msg-${msg_id}`).innerHTML = `${marked.parse(message)}`; 176 | } 177 | 178 | function updateLoadingMessageWithError(msg_id, message, code) { 179 | const html = ` 180 |
    181 | ${marked.parse(message)} 182 | ${marked.parse("```python\n" + code + "\n```")} 183 |
    `; 184 | document.getElementById("chat-window").insertAdjacentHTML("beforeend", html); 185 | } 186 | 187 | function displayErrorMessage(message) { 188 | const msg_id = Date.now(); 189 | const html = ` 190 |
    191 | ${marked.parse(message)} 192 |
    `; 193 | document.getElementById("chat-window").insertAdjacentHTML("beforeend", html); 194 | } 195 | 196 | function displaySearchAgentMessage(message) { 197 | const msg_id = Date.now(); 198 | const html = ` 199 |
    200 | ${marked.parse(message)} 201 |
    `; 202 | document.getElementById("chat-window").insertAdjacentHTML("beforeend", html); 203 | } 204 | 205 | function displayCodeExecutionMessage(content, code, msg_id) { 206 | const mdContent = marked.parse(content); 207 | const mdCode = marked.parse("```python\n" + code + "\n```"); 208 | const html = ` 209 |
    210 |
    211 | Show Code Execution Details 212 |
    213 | ${mdCode} 214 |
    ${mdContent}
    215 |
    216 |
    217 |
    `; 218 | document.getElementById("chat-window").insertAdjacentHTML("beforeend", html); 219 | } 220 | 221 | function updateSearchResults(message, results, msg_id) { 222 | const mdMessage = marked.parse(message); 223 | const mdResults = marked.parse("```json\n" + results + "\n```"); 224 | const collapsible = ` 225 |
    226 | Show Search Results 227 |
    228 | ${mdMessage} 229 | ${mdResults} 230 |
    231 |
    `; 232 | document.getElementById(`msg-${msg_id}`).insertAdjacentHTML("beforeend", collapsible); 233 | } 234 | 235 | function displayWorkflowReceived(workflowText) { 236 | let msg_id = Date.now(); 237 | const html = ` 238 |
    239 |
    Workflow Received
    240 |
    241 |
    242 |
    243 | `; 244 | $("#chat-window").append(html); 245 | scrollChatToBottom(); 246 | } 247 | 248 | function displayWorkflowCompleted() { 249 | let msg_id = Date.now(); 250 | const html = ` 251 |
    252 |
    Workflow Completed
    253 |
    254 | `; 255 | $("#chat-window").append(html); 256 | scrollChatToBottom(); 257 | 258 | setTimeout(() => { 259 | const $workflowMessages = $("#chat-window .workflow-message"); 260 | if ($workflowMessages.length > 0) { 261 | const $startMessage = $workflowMessages.first(); 262 | const $endMessage = $workflowMessages.last(); 263 | 264 | $startMessage.addClass("green-pulse"); 265 | $endMessage.addClass("green-pulse"); 266 | 267 | setTimeout(() => { 268 | $startMessage.removeClass("green-pulse"); 269 | $endMessage.removeClass("green-pulse"); 270 | }, 1500); 271 | } 272 | }, 100); 273 | } 274 | 275 | // --- Workflow Builder functions (unchanged) --- 276 | let workflowCount = 0; 277 | document.getElementById("addFunction").addEventListener("click", () => { 278 | const functionType = document.getElementById("functionType").value; 279 | addFunctionBlock(functionType); 280 | }); 281 | 282 | document.getElementById("saveWorkflow").addEventListener("click", () => { 283 | const workflowName = prompt("Enter a name for your workflow:", `Workflow ${workflowCount + 1}`); 284 | if (workflowName) { 285 | saveWorkflow(workflowName); 286 | } 287 | }); 288 | 289 | function addFunctionBlock(type) { 290 | const container = document.getElementById("workflowContainer"); 291 | const functionBlock = document.createElement("div"); 292 | functionBlock.className = "function-block"; 293 | 294 | const functionLabel = document.createElement("div"); 295 | functionLabel.className = "function-label"; 296 | functionLabel.textContent = type.charAt(0).toUpperCase() + type.slice(1); 297 | 298 | const labelInput = document.createElement("input"); 299 | labelInput.type = "text"; 300 | labelInput.className = "label-input"; 301 | labelInput.placeholder = "Enter label"; 302 | 303 | const deleteBtn = document.createElement("button"); 304 | deleteBtn.className = "delete-btn"; 305 | deleteBtn.textContent = "Delete"; 306 | deleteBtn.onclick = () => functionBlock.remove(); 307 | 308 | functionBlock.appendChild(functionLabel); 309 | functionBlock.appendChild(labelInput); 310 | functionBlock.appendChild(deleteBtn); 311 | container.appendChild(functionBlock); 312 | } 313 | 314 | function saveWorkflow(workflowName) { 315 | const container = document.getElementById("workflowContainer"); 316 | const blocks = container.getElementsByClassName("function-block"); 317 | 318 | if (blocks.length === 0) { 319 | alert("Please add at least one function to save the workflow"); 320 | return; 321 | } 322 | 323 | const workflow = []; 324 | for (let block of blocks) { 325 | const type = block.querySelector(".function-label").textContent; 326 | const label = block.querySelector(".label-input").value; 327 | workflow.push({ type, label }); 328 | } 329 | 330 | const savedWorkflows = JSON.parse(localStorage.getItem("workflows") || "{}"); 331 | if (savedWorkflows[workflowName] && !confirm(`A workflow named "${workflowName}" already exists. 
Do you want to overwrite it?`)) { 332 | return; 333 | } 334 | 335 | savedWorkflows[workflowName] = { 336 | name: workflowName, 337 | steps: workflow 338 | }; 339 | 340 | localStorage.setItem("workflows", JSON.stringify(savedWorkflows)); 341 | workflowCount++; 342 | addWorkflowToSidebar(workflowName, workflow); 343 | container.innerHTML = ""; 344 | } 345 | 346 | function deleteWorkflow(name, element) { 347 | const savedWorkflows = JSON.parse(localStorage.getItem("workflows") || "{}"); 348 | delete savedWorkflows[name]; 349 | localStorage.setItem("workflows", JSON.stringify(savedWorkflows)); 350 | element.remove(); 351 | } 352 | 353 | function addWorkflowToSidebar(name, workflow) { 354 | const savedList = document.getElementById("savedWorkflowsList"); 355 | const workflowElement = document.createElement("div"); 356 | workflowElement.className = "saved-workflow"; 357 | 358 | const workflowName = document.createElement("div"); 359 | workflowName.className = "saved-workflow-name"; 360 | workflowName.textContent = name; 361 | workflowName.onclick = () => loadWorkflow(workflow); 362 | 363 | const kickoffBtn = document.createElement("button"); 364 | kickoffBtn.className = "kickoff-workflow-btn"; 365 | kickoffBtn.innerHTML = ` 366 | 369 | 370 | 371 | 372 | 373 | `; 374 | kickoffBtn.onclick = (e) => { 375 | e.stopPropagation(); 376 | kickoffWorkflow(workflow); 377 | }; 378 | 379 | const deleteBtn = document.createElement("button"); 380 | deleteBtn.className = "delete-workflow-btn"; 381 | deleteBtn.innerHTML = ``; 382 | deleteBtn.onclick = (e) => { 383 | e.stopPropagation(); 384 | if (confirm(`Are you sure you want to delete "${name}"?`)) { 385 | deleteWorkflow(name, workflowElement); 386 | } 387 | }; 388 | 389 | workflowElement.appendChild(workflowName); 390 | workflowElement.appendChild(kickoffBtn); 391 | workflowElement.appendChild(deleteBtn); 392 | savedList.appendChild(workflowElement); 393 | } 394 | 395 | function kickoffWorkflow(workflow) { 396 | if (!workflow || workflow.length === 0) { 397 | alert("No workflow steps to execute"); 398 | return; 399 | } 400 | console.log("Workflow to be kicked off:", workflow); 401 | if (window.socket && window.socket.emit) { 402 | window.socket.emit("kickoff_workflow", { workflow: workflow }); 403 | } else { 404 | console.error("Socket connection not available"); 405 | } 406 | } 407 | 408 | function loadWorkflow(workflow) { 409 | const container = document.getElementById("workflowContainer"); 410 | container.innerHTML = ""; 411 | workflow.forEach(function (item) { 412 | const functionBlock = document.createElement("div"); 413 | functionBlock.className = "function-block"; 414 | 415 | const functionLabel = document.createElement("div"); 416 | functionLabel.className = "function-label"; 417 | functionLabel.textContent = item.type; 418 | 419 | const labelInput = document.createElement("input"); 420 | labelInput.type = "text"; 421 | labelInput.className = "label-input"; 422 | labelInput.value = item.label; 423 | 424 | const deleteBtn = document.createElement("button"); 425 | deleteBtn.className = "delete-btn"; 426 | deleteBtn.textContent = "Delete"; 427 | deleteBtn.onclick = () => functionBlock.remove(); 428 | 429 | functionBlock.appendChild(functionLabel); 430 | functionBlock.appendChild(labelInput); 431 | functionBlock.appendChild(deleteBtn); 432 | container.appendChild(functionBlock); 433 | }); 434 | } 435 | 436 | window.onload = function () { 437 | const savedWorkflows = JSON.parse(localStorage.getItem("workflows") || "{}"); 438 | for (let name in 
savedWorkflows) { 439 | const workflow = savedWorkflows[name]; 440 | addWorkflowToSidebar(name, workflow.steps || workflow); 441 | workflowCount = Object.keys(savedWorkflows).length; 442 | } 443 | }; 444 | }); 445 | 446 | // Preview modal code remains unchanged… 447 | $(document).ready(function () { 448 | $("#preview-btn").click(function () { 449 | showPreviewModal(); 450 | }); 451 | }); 452 | 453 | function showPreviewModal() { 454 | let modalHtml = ` 455 | 465 | `; 466 | $("body").append(modalHtml); 467 | 468 | $("#preview-modal").css({ 469 | position: "fixed", 470 | top: "0", 471 | left: "0", 472 | right: "0", 473 | bottom: "0", 474 | width: "100vw", 475 | height: "100vh", 476 | "background-color": "rgba(0, 0, 0, 0.9)", 477 | "z-index": "9999", 478 | display: "flex", 479 | "align-items": "center", 480 | "justify-content": "center", 481 | }); 482 | 483 | $("#preview-modal .modal-contents").css({ 484 | position: "relative", 485 | width: "100vw", 486 | height: "100vh", 487 | "background-color": "#fff", 488 | display: "flex", 489 | "flex-direction": "column", 490 | overflow: "hidden", 491 | margin: "0", 492 | padding: "0", 493 | }); 494 | 495 | $("#preview-modal .modal-body").css({ 496 | flex: "1", 497 | overflow: "hidden", 498 | padding: "0", 499 | margin: "0", 500 | width: "100%", 501 | height: "100%", 502 | }); 503 | 504 | $("#preview-modal .web-app-preview").css({ 505 | width: "100%", 506 | height: "100%", 507 | "background-color": "#fff", 508 | overflow: "hidden", 509 | display: "flex", 510 | }); 511 | 512 | $("#preview-modal .web-app-preview iframe").css({ 513 | border: "solid 12px", 514 | width: "100%", 515 | height: "100%", 516 | border: "none", 517 | margin: "0", 518 | padding: "0", 519 | overflow: "auto", 520 | flex: "1", 521 | }); 522 | 523 | $("#preview-modal .close-preview-modal").css({ 524 | position: "fixed", 525 | top: "-5px", 526 | right: "20px", 527 | "font-size": "40px", 528 | cursor: "pointer", 529 | color: "#050505", 530 | "z-index": "10000", 531 | "text-shadow": "0 0 10px white", 532 | }); 533 | 534 | $("#preview-modal .close-preview-modal").click(function () { 535 | $("#preview-modal").remove(); 536 | }); 537 | 538 | $(document).on("keydown", function (e) { 539 | if (e.key === "Escape") { 540 | $("#preview-modal").remove(); 541 | } 542 | }); 543 | 544 | $("#preview-modal").on("click", function (e) { 545 | if ($(e.target).is("#preview-modal")) { 546 | $("#preview-modal").remove(); 547 | } 548 | }); 549 | 550 | renderWebApp(); 551 | } 552 | 553 | function renderWebApp() { 554 | const iframe = document.getElementById("preview-iframe"); 555 | iframe.onload = function () { 556 | try { 557 | const iframeDoc = 558 | iframe.contentDocument || iframe.contentWindow.document; 559 | const style = document.createElement("style"); 560 | style.textContent = ` 561 | ::-webkit-scrollbar { 562 | width: 15px; 563 | } 564 | ::-webkit-scrollbar-thumb { 565 | background: #ada7a7b6; 566 | border-radius: 10px; 567 | border: 5px white solid; 568 | } 569 | `; 570 | iframeDoc.head.appendChild(style); 571 | 572 | if (!iframeDoc.querySelector('meta[name="viewport"]')) { 573 | const meta = iframeDoc.createElement("meta"); 574 | meta.name = "viewport"; 575 | meta.content = 576 | "width=device-width, initial-scale=1.0, maximum-scale=1.0, user-scalable=no"; 577 | iframeDoc.head.appendChild(meta); 578 | } 579 | } catch (e) { 580 | console.log("Cannot access iframe content - likely due to same-origin policy"); 581 | } 582 | }; 583 | 584 | iframe.src = "render/index.html"; 585 | } 
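For orientation, the `agent_response` handler above dispatches on `data.type` (`loading_message`, `search_results`, `compiler_message`, `success_message`, `error_message`, and so on), with each event carrying `content`, a `msg_id`, and sometimes `code` or `results`. Below is a minimal sketch of the emitting side, assuming the backend in `app.py` is a Flask-SocketIO app on port 5000 (as the `io("http://localhost:5000")` call suggests); the handler body and payload values here are illustrative, not the project's actual implementation.

```python
# Hypothetical server-side counterpart to the agent_response handler in script.js.
# Assumes Flask-SocketIO; event names and payload keys mirror those read by the front end.
import time
from flask import Flask
from flask_socketio import SocketIO

app = Flask(__name__)
socketio = SocketIO(app, cors_allowed_origins="*")

@socketio.on("user_prompt")
def handle_user_prompt(data):
    prompt = data.get("prompt", "")
    msg_id = int(time.time() * 1000)  # the front end keys DOM elements by msg_id
    # Placeholder while the agent works; rendered by displayLoadingMessage on the client.
    socketio.emit("agent_response", {"type": "loading_message",
                                     "content": f"Working on: {prompt}",
                                     "msg_id": msg_id})
    # ... run the agent here ...
    # Final update for the same message; routed to updateLoadingMessage on the client.
    socketio.emit("agent_response", {"type": "success_message",
                                     "content": "Task completed.",
                                     "msg_id": msg_id})

if __name__ == "__main__":
    socketio.run(app, port=5000)
```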
-------------------------------------------------------------------------------- /templates/base.html: -------------------------------------------------------------------------------- 1 | 2 | 3 | 4 | 5 | 6 | {% block title %}Agent Interface{% endblock %} 7 | 8 | 9 | 10 | 11 | 12 | 13 | 14 | 15 | 16 | 19 | 20 | 21 | 22 | 23 | 24 | 25 | 26 | 27 | 28 | 29 | 30 | 31 | {% block head %}{% endblock %} 32 | 33 | 34 | 35 | {% block body %} 36 |
    37 |

    Chat with Agent

    38 |
    39 |
    40 |
    41 | 42 | 43 | 44 | 45 |
    46 |
    47 | 48 | 65 | {% endblock %} 66 | 67 | 68 | 69 | 70 | 71 | 72 | 73 | 74 | 75 | 76 | 77 | 78 | 79 | 80 | 81 | {% block scripts %}{% endblock %} 82 | 83 | 84 | 85 | 86 | 87 | -------------------------------------------------------------------------------- /templates/components/chat_window.html: -------------------------------------------------------------------------------- 1 |
    Inactive
    2 |
    3 |
    4 | 7 | 8 | 11 | 15 | 19 |
    20 | 21 |
    22 |

    THINKING PHASE

    23 |
    24 |
    25 | 26 | 27 |
    28 |
    29 | × 30 |

    Configure APIs

    31 |
    32 | 33 | 34 | 35 | 36 | 37 | 38 | 39 |
    40 |
    41 |
    42 | 43 |
    44 |
    45 |

    About Agent

    46 |
    Loading...
    47 |
    48 |
    49 | 50 | 51 |
    52 |

    Code Snippets

    53 |
    54 |
    55 | 56 | 134 | -------------------------------------------------------------------------------- /templates/components/input_group.html: -------------------------------------------------------------------------------- 1 |
    2 |
    3 | 6 | 10 |
    11 | 13 | 14 | 15 | 16 | 17 | 18 | 19 | 20 |
    21 | -------------------------------------------------------------------------------- /templates/components/loading_message.html: -------------------------------------------------------------------------------- 1 |
    2 | Agent: {{ message }} 3 |
    4 | Loading... 5 |
    6 |
    7 | -------------------------------------------------------------------------------- /templates/create_agent.html: -------------------------------------------------------------------------------- 1 | 2 | 3 | 4 | 5 | 6 | Create Agent & Workflow Builder 7 | 8 | 11 | 12 | 13 | 14 |
    15 | 16 | 17 | Back to Chat 18 | 19 |
    20 |
    21 |

    Saved Workflows

    22 |
    23 |
    24 |
    25 |

    Workflow Builder

    26 |
    27 | 34 | 35 | 36 |
    37 |
    38 |
    39 | 40 | 41 |
    42 |
    43 |

    Workflow Monitor

    44 | 45 |
    46 |
    47 |
    48 | 49 | 50 | 51 |
    52 |
    53 | 54 | 55 | 56 | 57 | 58 | 59 | 60 | 61 | 62 | 63 | -------------------------------------------------------------------------------- /templates/index.html: -------------------------------------------------------------------------------- 1 | {% extends "base.html" %} 2 | 3 | {% block title %}AGENT{% endblock %} 4 | 5 | {% block body %} 6 |
    7 |
    8 | 9 |
    10 | {% include 'components/chat_window.html' %} 11 | {% include 'components/input_group.html' %} 12 |
    13 | {% endblock %} 14 | -------------------------------------------------------------------------------- /terminal_ui/terminal_animation.py: -------------------------------------------------------------------------------- 1 | import sys 2 | import time 3 | import threading 4 | from rich.console import Console 5 | from rich.markdown import Markdown 6 | from rich.text import Text 7 | from rich.panel import Panel 8 | from rich.spinner import Spinner 9 | from rich.align import Align 10 | 11 | console = Console() 12 | 13 | def initializer(): 14 | spinner = Spinner("dots", text="Initializing Systems ⚙️") 15 | with console.status(spinner, spinner_style="yellow"): 16 | while getattr(threading.current_thread(), "do_run", True): 17 | time.sleep(5) 18 | 19 | 20 | 21 | def thinking_dots(): 22 | spinner = Spinner("dots", text="Thinking 🤔") 23 | with console.status(spinner, spinner_style="yellow"): 24 | while getattr(threading.current_thread(), "do_run", True): 25 | time.sleep(0.5) 26 | 27 | 28 | def search_dots(): 29 | panel = Panel( 30 | Text("Initializing Search Agent to Browse the Internet 🔍 ", style="bold red"), 31 | style="bold bright_cyan", 32 | title="Search Agent", 33 | subtitle_align="center", 34 | ) 35 | console.print(panel, justify= "center") 36 | spinner = Spinner("dots", text="Searching the web for relavant results") 37 | with console.status(spinner, spinner_style="red"): 38 | while getattr(threading.current_thread(), "do_run", True): 39 | time.sleep(0.5) 40 | 41 | 42 | def picture_message(): 43 | panel1 = Panel( 44 | Text("Initializing Agent to search for Pictures 🚀", style="bold magenta"), 45 | style="bold bright_cyan", 46 | title="Picture Agent", 47 | subtitle_align="center", 48 | ) 49 | console.print(panel1, justify="center") 50 | time.sleep(2) 51 | 52 | panel2 = Panel( 53 | Text("Searching for Pictures 📸", style="bold green"), 54 | style="bold red", 55 | title="Picture Search", 56 | subtitle_align="center", 57 | ) 58 | console.print(panel2, justify="center") 59 | time.sleep(1) 60 | 61 | 62 | def search_message(): 63 | panel = Panel( 64 | Text("Search Complete. 
Sending results to Execution Agent", style="bold green"), 65 | style="bold green", 66 | title="Search Status", 67 | subtitle_align="center", 68 | ) 69 | console.print(panel, justify="center") 70 | time.sleep(1) 71 | 72 | 73 | def compiler_message(output): 74 | title_panel = Panel( 75 | Align.center("COMPILER OUTPUT", style="bold cyan"), 76 | border_style="bold yellow", 77 | padding=(1, 2), 78 | ) 79 | 80 | output_panel = Panel( 81 | Text(output["output"], style="white"), 82 | title="Output", 83 | border_style="green", 84 | padding=(1, 2), 85 | expand=False, 86 | ) 87 | 88 | console.print(title_panel, justify="center") 89 | 90 | console.print(output_panel, justify="center") 91 | 92 | console.print("\n") 93 | 94 | 95 | def user_message(msg_to_user): 96 | markdown_content = Markdown(msg_to_user) 97 | panel = Panel( 98 | markdown_content, 99 | border_style="cyan bold", # 100 | title="User Message", 101 | title_align="left", 102 | subtitle="Notification", 103 | subtitle_align="right", 104 | padding=(1, 2), 105 | width=120, 106 | ) 107 | console.print(panel, justify="center") 108 | time.sleep(2) 109 | 110 | 111 | def refresh_message(response): 112 | panel = Panel( 113 | Text(response, style="bold white"), 114 | style="bold magenta", 115 | title="Refresh", 116 | subtitle_align="center", 117 | ) 118 | console.print(panel, justify="center") 119 | time.sleep(1) 120 | 121 | 122 | def initial_message(): 123 | msg1 = "Type 'refresh' to erase agent's memory 🧠" 124 | panel1 = Panel( 125 | Text(msg1, style="bold blue"), 126 | style="bold yellow", 127 | title="Instructions", 128 | subtitle_align="center", 129 | ) 130 | console.print(panel1, justify="center") 131 | 132 | msg2 = "Type 'exit' to leave the chat 🚪" 133 | panel2 = Panel( 134 | Text(msg2, style="bold white"), 135 | style="bold green", 136 | title="Instructions", 137 | subtitle_align="center", 138 | ) 139 | console.print(panel2, justify="center") 140 | time.sleep(1) 141 | 142 | 143 | def install_module(module_name): 144 | panel = Panel( 145 | Text( 146 | f"Initializing Install Agent to install module: {module_name}", 147 | style="bold magenta", 148 | ), 149 | style="bold cyan", 150 | title="Install Agent", 151 | subtitle_align="center", 152 | ) 153 | console.print(panel, justify="center") 154 | time.sleep(1) 155 | 156 | def uninstall_module(module_name): 157 | panel = Panel( 158 | Text( 159 | f"Initializing Install Agent to uninstall module: {module_name}", 160 | style="bold magenta", 161 | ), 162 | style="bold cyan", 163 | title="Install Agent", 164 | subtitle_align="center", 165 | ) 166 | console.print(panel, justify="center") 167 | time.sleep(1) 168 | 169 | def child_agent_message(): 170 | panel = Panel( 171 | Text( 172 | "Creating a child agent to assign task 🧑‍💻", 173 | style="bold cyan" 174 | ), 175 | style="bold purple", 176 | title="Creating Child Agent", 177 | subtitle_align="left", 178 | ) 179 | console.print(panel, justify="left") 180 | time.sleep(2) 181 | 182 | def kill_child_agent(): 183 | panel = Panel( 184 | Text( 185 | "Child Agent Terminated ⚰️. 
Passing control back to Superior Agent 🔄", 186 | style="bold red" 187 | ), 188 | style="bold purple", 189 | title="Child Agent", 190 | subtitle_align="left", 191 | ) 192 | console.print(panel, justify="left") 193 | time.sleep(2) 194 | -------------------------------------------------------------------------------- /terminal_ui/terminal_animation2.py: -------------------------------------------------------------------------------- 1 | import sys 2 | import time 3 | import threading 4 | from rich.console import Console 5 | from rich.text import Text 6 | from rich.panel import Panel 7 | from rich.spinner import Spinner 8 | from rich.align import Align 9 | 10 | console = Console() 11 | 12 | 13 | def sub_thinking_dots(): 14 | spinner = Spinner("bouncingBall", text="Child Agent: Processing 🤔") 15 | with console.status(spinner, spinner_style="green"): 16 | while getattr(threading.current_thread(), "do_run", True): 17 | time.sleep(0.5) 18 | 19 | 20 | def sub_search_dots(): 21 | panel = Panel( 22 | Text("Child Agent: Engaging Search Protocols 🔍", style="bold cyan"), 23 | style="bold red", 24 | title="Child Agent Search", 25 | subtitle_align="left", 26 | ) 27 | console.print(panel, justify="left") 28 | spinner = Spinner("dots", text="Child Agent: Scanning the web for results") 29 | with console.status(spinner, spinner_style="green"): 30 | while getattr(threading.current_thread(), "do_run", True): 31 | time.sleep(0.5) 32 | 33 | 34 | def sub_picture_message(): 35 | panel1 = Panel( 36 | Text("Child Agent: Preparing Image Search 🚀", style="bold cyan"), 37 | style="bold magenta", 38 | title="Child Agent Image Search", 39 | subtitle_align="left", 40 | ) 41 | console.print(panel1, justify="left") 42 | time.sleep(2) 43 | 44 | panel2 = Panel( 45 | Text("Child Agent: Searching for Images 📸", style="bold yellow"), 46 | style="bold blue", 47 | title="Child Agent Picture Search", 48 | subtitle_align="left", 49 | ) 50 | console.print(panel2, justify="left") 51 | time.sleep(1) 52 | 53 | 54 | def sub_search_message(): 55 | panel = Panel( 56 | Text("Child Agent: Search Complete. 
Sending data to main agent.", style="bold red"), 57 | style="bold blue", 58 | title="Child Agent Search Status", 59 | subtitle_align="left", 60 | ) 61 | console.print(panel, justify="left") 62 | time.sleep(1) 63 | 64 | 65 | def sub_compiler_message(output): 66 | title_panel = Panel( 67 | Align.left("CHILD AGENT: COMPILER OUTPUT", style="bold white"), 68 | border_style="bold green", 69 | padding=(1, 2), 70 | ) 71 | 72 | output_panel = Panel( 73 | Text(output["output"], style="yellow"), 74 | title="Child Agent Output", 75 | border_style="magenta", 76 | padding=(1, 2), 77 | expand=False, 78 | ) 79 | 80 | console.print(title_panel, justify="left") 81 | console.print(output_panel, justify="left") 82 | console.print("\n") 83 | 84 | 85 | def sub_user_message(msg_to_user): 86 | panel = Panel( 87 | Text(msg_to_user, style="bold green"), 88 | style="bold magenta", 89 | title="Child Agent User Message", 90 | subtitle_align="left", 91 | ) 92 | console.print(panel, justify="left") 93 | time.sleep(1) 94 | 95 | 96 | def sub_install_module(module_name): 97 | panel = Panel( 98 | Text( 99 | f"Child Agent: Installing module: {module_name}", 100 | style="bold green", 101 | ), 102 | style="bold yellow", 103 | title="Child Agent Install", 104 | subtitle_align="left", 105 | ) 106 | console.print(panel, justify="left") 107 | time.sleep(2) 108 | 109 | def sub_uninstall_module(module_name): 110 | panel = Panel( 111 | Text( 112 | f"Child Agent: Uninstalling module: {module_name}", 113 | style="bold green", 114 | ), 115 | style="bold yellow", 116 | title="Child Agent Uninstall", 117 | subtitle_align="left", 118 | ) 119 | console.print(panel, justify="left") 120 | time.sleep(2) 121 | 122 | --------------------------------------------------------------------------------
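Both terminal UI modules stop their spinners by checking the enclosing thread's `do_run` attribute (`while getattr(threading.current_thread(), "do_run", True)`). A minimal sketch of how a caller can drive that pattern follows; the `run_with_spinner` helper is illustrative and not part of the repository.

```python
# Hypothetical caller for the spinner helpers above (e.g. thinking_dots).
# The helpers loop on the thread's `do_run` attribute, so the caller stops
# them by clearing that flag and joining the thread.
# Assumes the script is run from the repository root so terminal_ui is importable.
import threading
from terminal_ui.terminal_animation import thinking_dots

def run_with_spinner(task):
    spinner_thread = threading.Thread(target=thinking_dots, daemon=True)
    spinner_thread.do_run = True          # read via getattr(...) inside thinking_dots
    spinner_thread.start()
    try:
        return task()                     # do the actual work while the spinner animates
    finally:
        spinner_thread.do_run = False     # ends the while-loop inside thinking_dots
        spinner_thread.join()
```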