├── .gitignore
├── requirements.txt
├── .env.example
├── main.py
├── README.md
├── templates
│   └── index.html
├── utils
│   └── sootiai_web.py
└── main_cli.py
/.gitignore:
--------------------------------------------------------------------------------
1 | /pythonProject1/.venv/
2 | /pythonProject1/.idea/
3 |
--------------------------------------------------------------------------------
/requirements.txt:
--------------------------------------------------------------------------------
1 | bs4~=0.0.2
2 | beautifulsoup4~=4.12.3
3 | openai~=1.56.2
4 | flask
5 | selenium~=4.27.1
6 | tqdm~=4.67.1
7 | stealth_requests
8 | yt-dlp[default]
9 | ttok
10 | flask-socketio
11 | urllib3
12 | colorama
13 | python-dotenv
14 | setuptools
15 | tls-client
16 | requests
17 | duckduckgo-search
18 | selenium-stealth
--------------------------------------------------------------------------------
/.env.example:
--------------------------------------------------------------------------------
1 | BASE_MODEL=gpt-4o
2 | BASE_API=OPEN_API_KEY # Your OpenAI (or compatible) API key
3 | BASE_URL=http://localhost:5000/v1 # Base URL of your OpenAI-compatible endpoint, if you use one
4 | TEMPERATURE=0.3
5 | TOP_P=1
6 | FREQUENCY_PENALTY=0
7 | PRESENCE_PENALTY=0
8 | MAX_TOKENS=2048 # Max tokens the model may generate per response
9 | MAX_CONTEXT=32000 # Context budget used to truncate scraped text and search results
--------------------------------------------------------------------------------
/main.py:
--------------------------------------------------------------------------------
1 | import os
2 | import time
3 |
4 | from dotenv import load_dotenv
5 | from flask import Flask, request, jsonify, render_template
6 | from flask_socketio import SocketIO, emit
7 | from utils.sootiai_web import Agent
8 |
9 | # Load .env file
10 | load_dotenv()
11 | # Get values from .env
12 | base_api = os.getenv("BASE_API")
13 | base_url = os.getenv("BASE_URL")
14 |
15 | app = Flask(__name__)
16 | app.secret_key = 'your_secret_key'  # Replace with a strong, random secret key
17 | socketio = SocketIO(app)
18 |
19 | agent = Agent(base_url=base_url, api_key=base_api)
20 |
21 |
22 | @app.route('/')
23 | def index():
24 | return render_template('index.html')
25 |
26 |
27 | @app.route('/execute_task', methods=['POST'])
28 | def execute_task():
29 | agent.task_stopped = False
30 | agent.stop_processing = False
31 | task = request.json.get('task')
32 | try:
33 | agent.execute_task(task)
34 | return jsonify({'status': 'success', 'message': 'Task executed successfully'})
35 | except Exception as e:
36 | return jsonify({'status': 'error', 'message': str(e)})
37 |
38 |
39 | @socketio.on('clear')
40 | def clear():
41 | try:
42 | agent.clear_global_history = True  # Ask the agent to clear its global history
43 | agent.task_stopped = False
44 | agent.stop_processing = False
45 | print(agent.global_history)
46 | emit('receive_message', {'status': 'success', 'message': 'Tasks cleared successfully'})
47 | except Exception as e:
48 | emit('receive_message', {'status': 'error', 'message': str(e)})
49 |
50 | @socketio.on('stop_processing')
51 | def stop_processing():
52 | agent.stop_processing = True  # Signal the agent to stop processing
53 | emit('receive_message', {'status': 'error', 'message': 'Stopping task... please wait'})
54 |
55 |
56 |
57 | @socketio.on('send_message')
58 | def handle_send_message(data):
59 | task = data.get('task')
60 | try:
61 | # Execute the task
62 | agent.execute_task(task)
63 | if agent.task_stopped is True:
64 | emit('receive_message', {'status': 'error', 'message': 'Task execution stopped'})
65 | else:
66 | emit('receive_message', {'status': 'completed', 'message': 'Task executed successfully'})
67 | agent.task_stopped = False
68 | except Exception as e:
69 | # Handle any errors during task execution
70 | emit('receive_message', {'status': 'error', 'message': str(e)})
71 |
72 |
73 | if __name__ == '__main__':
74 | socketio.run(app, host='0.0.0.0', port=8080, allow_unsafe_werkzeug=True)  # Dev server only; use a production WSGI server for deployment
75 |
--------------------------------------------------------------------------------
/README.md:
--------------------------------------------------------------------------------
1 | # SootiAI
2 |
3 | SootiAI is a multi-purpose large language model (LLM) agent designed to perform general tasks on both local machines and online environments. It is simple to use, highly flexible, and equipped with a range of tools to handle tasks like research, data analysis, local file operations, and more.
4 |
5 | ## Features
6 |
7 | 1. **Researching Topics**
8 | - Search and scrape multiple sources to gather information.
9 | - Generate summaries or detailed research papers with structured sections like Abstract and Results.
10 |
11 |
12 | 2. **Data Handling and Visualization**
13 | - Gather data online and use it to create data sheets or plot charts using Python.
14 | - Example: "Plot a graph of the weather in NYC, Chicago, and Houston for the next 3 days."
15 |
16 | 
17 | 
18 |
19 | 3. **Local Machine Operations**
20 | - Execute tasks like creating folders, listing directory contents, or downloading files.
21 | - Example: "Download the top 3 math PDFs to my home directory under 'math' and sort them by date."
22 |
23 | 4. **Multi-Tasking**
24 | - Perform multiple tasks in a single command seamlessly.
25 |
26 | 5. **User-Friendly Interfaces**
27 | - **CLI**: Ideal for terminal enthusiasts.
28 | - **WebUI**: Includes a browser-based interface with local conversation context saving (until cleared). Multi-session save/load functionality is on the roadmap.
29 |
30 | ## Why SootiAI?
31 |
32 | Existing agents often come with limitations such as:
33 | - Complex setup processes.
34 | - Lack of essential tools like scraping and searching.
35 | - Dependence on paid APIs for basic functionalities.
36 | - Inability to write and execute code effectively.
37 | - Poor performance with smaller models or overly complex workflows for simple tasks.
38 |
39 | SootiAI bridges these gaps by providing a streamlined, efficient, and flexible solution for users.
40 |
41 | ## Setup Instructions
42 |
43 | 1. Clone the repository:
44 | ```bash
45 | git clone https://github.com/sooti/sootiAI.git
46 | ```
47 |
48 | 2. Navigate to the project directory:
49 | ```bash
50 | cd sootiAI
51 | ```
52 |
53 | 3. Set up a virtual environment:
54 | ```bash
55 | python3 -m venv .venv && source .venv/bin/activate
56 | ```
57 |
58 | 4. Install dependencies:
59 | ```bash
60 | pip install -r requirements.txt
61 | ```
62 |
63 | 5. Configure the environment:
64 | - Copy the example environment file:
65 | ```bash
66 | cp .env.example .env
67 | ```
68 | - Edit the `.env` file to customize the following (a sample configuration follows this list):
69 | - **OpenAI Endpoint**: Point `BASE_URL` at a local or remote OpenAI-compatible endpoint (e.g., llama.cpp or the OpenAI API).
70 | - **API Key**: Add an API key if required (not needed for local models).
71 | - **Model Name**: Specify the model name (e.g., required for MLX, not for llama.cpp).
72 |
73 | 6. Start the application:
74 | - For WebUI (default port: 8080):
75 | ```bash
76 | python main.py
77 | ```
78 | - For CLI mode:
79 | ```bash
80 | python main_cli.py
81 | ```
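Once running, the WebUI is reachable at http://localhost:8080 (Flask-SocketIO binds to 0.0.0.0:8080 in `main.py`).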
82 |
83 | ## Examples of Use Cases
84 |
85 | 1. **Research and Summarization**
86 | - "Research the history of quantum computing and summarize it in a research paper format."
87 |
88 | 2. **Data Visualization**
89 | - "Plot a line graph showing the temperature trends in San Francisco over the past week."
90 |
91 | 3. **Local File Operations**
92 | - "Create a folder named 'Projects' and move all files with '.py' extension into it."
93 |
94 | 4. **Automated Data Collection**
95 | - "Scrape the latest stock prices for Apple, Google, and Tesla and save them in a CSV file."
96 |
97 | ## Recommended Local Models
98 | 1. Qwen-2.5 Instruct 14B - the best balance of speed, script writing, and instruction following in my tests.
99 | 2. Qwen-2.5 Instruct 7B - good for basic research and simple tasks, but not for complex programming requests.
100 | 3. EXAONE 7.8B - good for research, OK for programming tasks.
101 |
102 | ## Bad models in my tests
103 | 1. Llama-3.1
104 | 2. Hermes 2 and Hermes 3
105 | 3. Gemma 9B - mixed results; sometimes OK, but often fails to follow instructions.
106 |
107 | ## Roadmap
108 |
109 | - Add support for multi-session save/load in the WebUI.
110 | - Enhance CLI commands with more intuitive shortcuts.
111 | - Expand compatibility with additional LLM backends and endpoints.
112 | - Improve documentation and add community-contributed examples.
113 |
114 | ## Contributing
115 |
116 | We welcome contributions! Feel free to open issues or submit pull requests to help improve SootiAI. Make sure to follow the [contributing guidelines](CONTRIBUTING.md) (to be added soon).
117 |
118 | ## License
119 |
120 | SootiAI is licensed under the [MIT License](LICENSE).
121 |
122 | ---
123 |
124 | Feel free to explore and enjoy the capabilities of SootiAI!
125 |
--------------------------------------------------------------------------------
/templates/index.html:
--------------------------------------------------------------------------------
(Template markup truncated in this dump; only fragments survive. The page title is "SootiAI" and the task input's helper text reads "Enter the task for the AI to perform.")
--------------------------------------------------------------------------------
/utils/sootiai_web.py:
--------------------------------------------------------------------------------
1 | import datetime
2 | import json
3 | import os
4 | import re
5 | import subprocess
6 | from duckduckgo_search import DDGS
7 | import sys
8 | import time
9 | from typing import Any, Dict, Set, Generator
10 | from urllib.parse import urlparse, unquote
11 | import pkg_resources
12 |
13 | import colorama
14 | import stealth_requests as requests
15 | import urllib3
16 | import yt_dlp
17 | from bs4 import BeautifulSoup
18 | from flask_socketio import emit
19 | from openai import OpenAI
20 | from tqdm import tqdm
21 | from dotenv import load_dotenv
22 |
23 |
24 |
25 | colorama.init(autoreset=True)
26 | urllib3.disable_warnings()
27 | # Load .env file
28 | load_dotenv()
29 |
30 | # Get values from .env
31 | base_model = os.getenv("BASE_MODEL")
32 | base_api = os.getenv("BASE_API")
33 | base_url = os.getenv("BASE_URL")
34 | temperature = float(os.getenv("TEMPERATURE", 0.3))
35 | top_p = float(os.getenv("TOP_P", 1))
36 | frequency_penalty = float(os.getenv("FREQUENCY_PENALTY", 0))
37 | presence_penalty = float(os.getenv("PRESENCE_PENALTY", 0))
38 | max_tokens = int(os.getenv("MAX_TOKENS", 2048))
39 | max_context = int(os.getenv("MAX_CONTEXT", 32000))
40 |
41 |
42 | def get_installed_packages() -> Set[str]:
43 | return {pkg.key for pkg in pkg_resources.working_set}
44 |
45 |
46 | def install_missing_packages(required_packages: Set[str]) -> Dict[str, bool]:
47 | installed_packages = get_installed_packages()
48 | missing_packages = required_packages - installed_packages
49 | results = {}
50 |
51 | for package in missing_packages:
52 | try:
53 | subprocess.check_call([sys.executable, '-m', 'pip', 'install', package])
54 | results[package] = True
55 | except subprocess.CalledProcessError:
56 | results[package] = False
57 |
58 | return results
59 |
60 |
61 | def extract_imports(code: str) -> Set[str]:
62 | import_pattern = re.compile(r'^(?:from\s+(\S+?)(?:\.\S+)?\s+import\s+.*$|import\s+(\S+))', re.MULTILINE)
63 | matches = import_pattern.finditer(code)
64 | packages = set()
65 |
66 | for match in matches:
67 | package = match.group(1) or match.group(2)
68 | base_package = package.split('.')[0]
69 | if base_package not in sys.stdlib_module_names:
70 | packages.add(base_package)
71 |
72 | return packages
73 |
74 |
75 | # Global variables
76 | HEADERS = {
77 | "User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64; rv:98.0) Gecko/20100101 Firefox/98.0",
78 | "Accept": "text/html,application/xhtml+xml,application/xml;q=0.9,image/avif,image/webp,*/*;q=0.8",
79 | "Accept-Language": "en-US,en;q=0.5",
80 | "Accept-Encoding": "gzip, deflate",
81 | "Connection": "keep-alive",
82 | "Upgrade-Insecure-Requests": "1",
83 | "Sec-Fetch-Dest": "document",
84 | "Sec-Fetch-Mode": "navigate",
85 | "Sec-Fetch-Site": "none",
86 | "Sec-Fetch-User": "?1",
87 | "Cache-Control": "max-age=0",
88 | }
89 | RATE_LIMIT = 1
90 | TIMEOUT = 10
91 | MAX_RETRIES = 3
92 | last_request_time = {}
93 |
94 |
95 | def respect_rate_limit(url):
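# Throttle outgoing requests so each domain is hit at most once per RATE_LIMIT seconds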
96 | domain = urlparse(url).netloc
97 | current_time = time.time()
98 | if domain in last_request_time:
99 | time_since_last_request = current_time - last_request_time[domain]
100 | if time_since_last_request < RATE_LIMIT:
101 | time.sleep(RATE_LIMIT - time_since_last_request)
102 | last_request_time[domain] = current_time
103 |
104 |
105 | def scrape_website(url: str) -> dict[str, dict[str, Any] | str] | str:
106 | try:
107 | # Respect rate limits
108 | respect_rate_limit(url)
109 |
110 | # Fetch and parse response
111 | response = requests.get(url, verify=False, headers=HEADERS, impersonate='safari')
112 | response.raise_for_status()
113 | soup = BeautifulSoup(response.text, 'html.parser')
114 |
115 | # Remove unnecessary elements
116 | for tag in soup(['nav', 'footer', 'aside', 'script', 'style']):
117 | tag.decompose()
118 |
119 | # Extract and clean text
120 | text = soup.get_text()
121 | text = re.sub(r'\s+', ' ', text) # Collapse multiple spaces/newlines into one
122 | text = re.sub(r'[^\w\s.,?!]', '', text) # Remove special characters
123 | text = " ".join(text.split()[:int(max_context / 8)]) # Truncate to fit context size
124 |
125 | # Extract and clean links
126 | links = {
127 | re.sub(r'\s+', ' ', a.text.strip()): a['href']
128 | for a in soup.find_all('a', href=True)[:int(max_context / 1000)]
129 | if a.text.strip()
130 | }
131 |
132 | print(
133 | f"{colorama.Fore.CYAN}\n🕷️✅ Scrape successful!\n🧠🧠🧠analyzing the content... please wait...")
134 | emit('receive_message', {'status': 'info', 'message': f"🕷️✅ Scrape successful!"})
135 | emit('receive_message',
136 | {'status': 'info', 'message': "🧠🧠🧠analyzing the content... please wait..."})
137 | return {'text': text, 'links': links}
138 | except Exception as e:
139 | emit(f"{colorama.Fore.RED}\n🕷️❌ Scrape failed with error: {e}")
140 | print(f"Scraping failed with error: {e}")
141 | return f"Scraping failed with error: {e}"
142 |
143 |
144 | class Agent:
145 | def __init__(self, base_url=None, api_key=None):
146 | self.tasks = {}
147 | self.global_history = []
148 | self.stop_processing = False
149 | self.task_stopped = False
150 | self.clear_global_history = False
151 | # Initialize client parameters with defaults
152 | client_params = {
153 | 'base_url': base_url,
154 | 'api_key': api_key or os.environ.get('OPENAI_API_KEY')
155 | }
156 |
157 | # Validate API key
158 | if not client_params['api_key']:
159 | raise ValueError(
160 | "API key not provided. Either pass it as 'api_key' or set it in the 'OPENAI_API_KEY' environment variable."
161 | )
162 |
163 | # Initialize OpenAI client
164 | try:
165 | self.client = OpenAI(**client_params)
166 | except Exception as e:
167 | raise RuntimeError(f"Failed to initialize OpenAI client: {e}")
168 |
169 | # Retry settings
170 | self.max_retries = 3
171 | self.retry_delay = 1
172 |
173 | def generate_prompt(self, task: str, previous_actions: list) -> str:
174 | """
175 | Generates a dynamic prompt based on the task and previous actions.
176 | """
177 | complexity = "complex" if len(previous_actions) > 5 else "simple"
178 |
179 | if complexity == "complex":
180 | instructions = """
181 | For complex tasks, ensure intermediate results are validated before proceeding.
182 | Use structured approaches and avoid assumptions.
183 | """
184 | else:
185 | instructions = """
186 | For simple tasks, provide precise and concise responses to quickly achieve the goal.
187 | """
188 |
189 | action_definitions = """
190 | You are an AI with tools and actions.
191 | THE FORMAT FOR ACTIONS IS {ACTION} [ARGUMENTS]
192 | The following are the actions that fit the above format:
193 | 1. {SEARCH} [QUERY] - Conduct a web search with a clear, focused query. Example: {SEARCH} weather in New York.
194 | You must scrape between 2 and 6 results, depending on task complexity.
195 | 2. {SCRAPE} [URL] -
196 | Only use {SCRAPE} if one or more of the following conditions are met:
197 | a) You have the URL from search results
198 | b) You have the URL from a website you scraped
199 | c) The user included the URL in the task description.
200 | In every case, you may only use the {SCRAPE} action on a URL that was provided.
201 | 3. {DOWNLOAD} [URL] - Download a file from a URL. Example: {DOWNLOAD} https://example.com/file.pdf.
202 | 4. {EXECUTE_PYTHON} [CODE] - Run Python code. Example: {EXECUTE_PYTHON} print(42).
203 | 5. {EXECUTE_BASH} [CODE] - Run a Bash command. Example: {EXECUTE_BASH} ls -l.
204 | 6. {CONCLUDE} [CONCLUSION] - Provide a detailed summary once all tasks are completed. This should be used **only after all
205 | actions have been executed** and the task is ready to conclude.
206 | For research or scientific tasks, structure your conclusion as follows:
207 | {CONCLUDE}
208 | - Abstract – summary of the research objectives, methods, findings, and conclusions.
209 | - Introduction – Provide background, state the research problem, and outline objectives.
210 | - Literature Review – Summarize relevant studies and identify gaps.
211 | - Methodology – Describe the research design, sample size, and methods.
212 | - Results – Present findings (include tables/graphs if necessary).
213 | - Discussion – Interpret results, compare with existing studies, and discuss limitations.
214 | - Conclusion – Summarize findings and suggest future research.
215 | - References – List citations used.
216 | For all other cases, just provide the summary like this: {CONCLUDE} followed by the summary of the task.
217 |
218 | - NEVER DO MORE THAN ONE ACTION IN A RESPONSE
219 | - NEVER DESCRIBE WHAT YOU ARE DOING.
220 | - DO NOT BE CHATTY! JUST DO.
221 | - DO NOT GIVE INTROS OR OUTROS, JUST ONE SINGLE ACTION.
222 | - YOU ALWAYS PERFORM ONE SINGLE ACTION, NOT MULTIPLE - For example do not do the following:
223 | {SEARCH} "Weather in nyc today"
224 |
225 | {SCRAPE} https://weather.com/nyc
226 |
227 | {SCRAPE} https://weather.com/new_york
228 |
229 | {CONCLUDE} The weather in NYC is 70 degrees.
230 | - NEVER REPEAT A PREVIOUS ACTION - For example do not do the following:
231 | {SCRAPE} https://weather.com/nyc
232 | User: {WEBSITE CONTENTS OF SCRAPED WEBSITE}
233 | {SCRAPE} https://weather.com/nyc
234 |
235 | """
236 |
237 | return f"""
238 | You are an AI assistant. Follow the rules:
239 | {action_definitions}
240 | {instructions}
241 |
242 | Task: {task}
243 | Previous Actions: {json.dumps(previous_actions or [])}
244 | Today's Date: {datetime.datetime.now().isoformat()}
245 |
246 | Remember: Do not use CONCLUDE until all necessary actions have been performed.
247 | """
248 |
249 | def get_conclusion(self, task, actions):
250 | messages = [
251 | {"role": "system", "content": "You are an AI agent that provides conclusions based on task completion."},
252 | {"role": "user",
253 | "content": f"Task: {task}\n\nActions taken: {json.dumps(actions)}\n\nProvide a conclusion for the task."}
254 | ]
255 | for attempt in range(self.max_retries):
256 | try:
257 | response = self.client.chat.completions.create(
258 | model=base_model,
259 | messages=messages,
260 | temperature=temperature,
261 | max_tokens=max_context,
262 | # max_completion_tokens=max_context,
263 | top_p=top_p,
264 | frequency_penalty=frequency_penalty,
265 | presence_penalty=presence_penalty
266 | )
267 | return response.choices[0].message.content.strip()
268 | except Exception as e:
269 | if attempt == self.max_retries - 1:
270 | raise e
271 | time.sleep(self.retry_delay * (attempt + 1))
272 |
273 | def stream_response(self, task: str, previous_actions: list) -> Generator[str, None, None]:
274 | """
275 | Streams the response from the AI, ensuring it follows the single-action rule and
276 | dynamically adapts to task complexity.
277 | """
278 | prompt = self.generate_prompt(task, previous_actions)
279 | messages = [{"role": "system", "content": prompt}]
280 |
281 | try:
282 | response = self.client.chat.completions.create(
283 | temperature=temperature,
284 | top_p=top_p,
285 | frequency_penalty=frequency_penalty,
286 | presence_penalty=presence_penalty,
287 | model=base_model,
288 | messages=messages,
289 | max_tokens=max_tokens,
290 | # max_completion_tokens=max_context,  # Don't pass both max_tokens and max_completion_tokens
291 | stream=True,
292 | )
293 |
294 | full_response = ""
295 | response_iterator = iter(response)
296 |
297 | while True:
298 | if self.stop_processing:
299 | print("Stopping task processing...")
300 | response.close()
301 | self.task_stopped = True
302 | break
303 |
304 | try:
305 | # Get the next streamed chunk
306 | chunk = next(response_iterator)
307 | if chunk.choices[0].delta.content:
308 | content = chunk.choices[0].delta.content
309 | full_response += content
310 | print(content, end='', flush=True)
311 | yield content
312 | except StopIteration:
313 | break
314 |
315 | if task in self.tasks:
316 | self.tasks[task]['streamed_response'] = full_response
317 |
318 | except Exception as e:
319 | error_type = type(e).__name__
320 | print(f"Error occurred: {error_type} - {str(e)}")
321 | emit('receive_message', {
322 | 'status': 'error',
323 | 'message': f"{error_type}: {str(e)}. Try rephrasing the task or checking input data."
324 | })
325 |
326 | def evaluate_completion(self, task, actions):
327 | if len(actions) > 2:
328 | messages = [
329 | {"role": "system", "content": "You are an AI agent that evaluates task completion."},
330 | {"role": "user",
331 | "content": f"Task: {task}\n\nActions taken: {json.dumps(actions)}\n\nHas the task been completed? Respond with 'YES' if completed, 'NO' if not."}
332 | ]
333 | for attempt in range(self.max_retries):
334 | try:
335 | response = self.client.chat.completions.create(
336 | model=base_model,
337 | messages=messages,
338 | temperature=temperature,
339 | max_tokens=max_context,
340 | # max_completion_tokens=max_context,
341 | top_p=top_p,
342 | frequency_penalty=frequency_penalty,
343 | presence_penalty=presence_penalty
344 | )
345 | return "YES" in response.choices[0].message.content.upper()
346 | except Exception as e:
347 | if attempt == self.max_retries - 1:
348 | raise e
349 | time.sleep(self.retry_delay * (attempt + 1))
350 | else:
351 | return False  # Too few actions to evaluate yet
352 |
353 | def search_web(self, query):
354 | results = DDGS().text(query, max_results=10)
355 | return results
356 |
357 | def _scrape_python_files(self, path):
358 | """
359 | Collects the names and contents of all Python files in a folder and its subfolders,
360 | or from a single Python file if a file path is provided.
361 |
362 | Args:
363 | path (str): The file or folder path to scan.
364 |
365 | Returns:
366 | list[dict]: A list of dictionaries, each containing 'filename' and 'content' keys.
367 | """
368 | python_files_data = []
369 |
370 | if os.path.isfile(path): # Check if the input is a file
371 | if path.endswith('.py'): # Ensure it's a Python file
372 | try:
373 | with open(path, 'r', encoding='utf-8') as f:
374 | content = f.read()
375 | python_files_data.append({'filename': path, 'content': content})
376 | except Exception as e:
377 | print(f"{colorama.Fore.RED}Error reading file {path}: {e}")
378 | elif os.path.isdir(path): # Check if the input is a folder
379 | for root, _, files in os.walk(path):
380 | for file in files:
381 | if file.endswith('.py'):
382 | file_path = os.path.join(root, file)
383 | try:
384 | with open(file_path, 'r', encoding='utf-8') as f:
385 | content = f.read()
386 | python_files_data.append({'filename': file_path, 'content': content})
387 | except Exception as e:
388 | print(f"{colorama.Fore.RED}Error reading file {file_path}: {e}")
389 | else:
390 | print(f"{colorama.Fore.RED}Invalid path: {path} is neither a file nor a directory.")
391 |
392 | return python_files_data
393 |
394 | def _download_file(self, url: str, output_path=None) -> str:
395 |
396 | def has_video_content(url):
397 | try:
398 | # Get the webpage content
399 | response = requests.get(url, verify=False, headers=HEADERS)
400 | soup = BeautifulSoup(response.text, 'html.parser')
401 |
402 | # Check for common video elements
403 | video_elements = (
404 | soup.find_all('video') or
405 | soup.find_all('iframe', src=lambda x: x and ('youtube.com' in x or 'vimeo.com' in x)) or
406 | 'youtube.com' in url or
407 | 'vimeo.com' in url or
408 | any(vid_site in url for vid_site in [
409 | 'dailymotion.com', 'twitter.com', 'tiktok.com',
410 | 'facebook.com', 'instagram.com', 'reddit.com'
411 | ])
412 | )
413 | return bool(video_elements)
414 | except Exception:
415 | return False
416 |
417 | # If video content is detected, use yt-dlp
418 | if has_video_content(url):
419 | try:
420 | output_path = output_path or os.getcwd()
421 | if not os.path.isdir(output_path):
422 | output_path = os.path.dirname(output_path)
423 |
424 | ydl_opts = {
425 | 'format': 'bestvideo+bestaudio/best', # Download best quality
426 | 'outtmpl': os.path.join(output_path, '%(title)s.%(ext)s'),
427 | 'quiet': True,
428 | 'no_warnings': True,
429 | 'progress_hooks': [
430 | lambda d: print(
431 | f"\rDownloading... {(d.get('downloaded_bytes', 0) / d.get('total_bytes', 1) * 100):.1f}%"
432 | if d['status'] == 'downloading' and d.get('total_bytes')
433 | else f"\rDownloading... {d.get('downloaded_bytes', 0) / 1024 / 1024:.1f}MB downloaded"
434 | if d['status'] == 'downloading'
435 | else "\nDownload completed. Processing...", end='')
436 | ],
437 | }
438 |
439 | with yt_dlp.YoutubeDL(ydl_opts) as ydl:
440 | info = ydl.extract_info(url, download=True)
441 | video_title = info['title']
442 | video_path = os.path.join(output_path, f"{video_title}.{info['ext']}")
443 | print(f"{colorama.Fore.GREEN}\n✅ Video downloaded successfully: {video_path}")
444 | return f"✅ Video downloaded successfully: {video_path}"
445 |
446 | except Exception as e:
447 | print(f"{colorama.Fore.RED}\n❌ Failed to download video: {e}")
448 | return f"❌ Failed to download video: {e}"
449 |
450 | # If no video content or video download fails, do regular file download
451 | try:
452 | response = requests.get(url, stream=True, verify=False, headers=HEADERS)
453 | response.raise_for_status()
454 | total_size = int(response.headers.get('content-length', 0))
455 | output_path = output_path or os.path.join(os.getcwd(), os.path.basename(url))
456 | if os.path.isdir(output_path):
457 | output_path = os.path.join(output_path, os.path.basename(url))
458 | with open(output_path, 'wb') as file, tqdm(
459 | desc=f"Downloading {os.path.basename(output_path)}",
460 | total=total_size,
461 | unit='iB',
462 | unit_scale=True,
463 | unit_divisor=1024,
464 | ) as progress_bar:
465 | for data in response.iter_content(chunk_size=1024):
466 | size = file.write(data)
467 | progress_bar.update(size)
468 | print(f"{colorama.Fore.GREEN}\n✅ File downloaded successfully: {output_path}")
469 | return f"✅ File downloaded successfully: {output_path}"
470 | except requests.exceptions.RequestException as e:
471 | print(f"{colorama.Fore.RED}\n❌ Failed to download file: {e}")
472 | return f"❌ Failed to download file: {e}"
473 | except IOError as e:
474 | print(f"{colorama.Fore.RED}\n❌ Error saving file: {e}")
475 | return f"❌ Error saving file: {e}"
476 |
477 | def execute_code(self, code: str, language: str) -> Dict[str, Any]:
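# Write the code to a temp script, run it with the matching interpreter (python/bash) under a 30s timeout, then delete the file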
478 | temp_file = os.path.join(os.getcwd(), f"temp_{int(time.time())}.{'py' if language == 'python' else 'sh'}")
479 | try:
480 | if language == 'python':
481 | required_packages = extract_imports(code)
482 | if required_packages:
483 | installation_results = install_missing_packages(required_packages)
484 | if not all(installation_results.values()):
485 | failed_packages = [pkg for pkg, success in installation_results.items() if not success]
486 | return {'success': False, 'output': None,
487 | 'error': f"Failed to install required packages: {', '.join(failed_packages)}",
488 | 'return_code': -1}
489 |
490 | with open(temp_file, 'w') as f:
491 | f.write(code)
492 | result = subprocess.run([language, temp_file], capture_output=True, text=True, timeout=30)
493 | success = result.returncode == 0
494 | print(f"{'✅ Code executed successfully' if success else '❌ Code execution failed'}")
495 | print(f"Result: {result.stdout if success else result.stderr}")
496 | return {'success': success, 'output': result.stdout if success else result.stderr,
497 | 'error': result.stderr if not success else None, 'return_code': result.returncode}
498 | except Exception as e:
499 | print(f"💥 Code execution error: {e}")
500 | return {'success': False, 'output': None, 'error': str(
501 | e) + '\nMake sure you only send the command and the code, without anything else in your message',
502 | 'return_code': -1}
503 | finally:
504 | if os.path.exists(temp_file):
505 | os.remove(temp_file)
506 |
507 | def extract_actions(self, response: str) -> list:
508 | actions = []
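# Actions look like "{ACTION} arguments"; capture each marker plus its arguments up to the next marker or end of response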
509 | action_pattern = re.compile(r'\{([A-Z_]+)\}(.+?)(?=\{[A-Z_]+\}|$)', re.DOTALL)
510 | matches = action_pattern.findall(response)
511 | for action_type, action_content in matches:
512 | action = f"{{{action_type}}}{action_content.strip()}"
513 | if action not in actions:
514 | actions.append(action)
515 | return actions
516 |
517 | def execute_task(self, task):
518 | if task not in self.tasks:
519 | self.tasks[task] = {'previous_actions': [], 'conclusions': [], 'performed_actions': set()}
520 |
521 | task_context = self.tasks[task]
522 | if self.clear_global_history:
523 | self.global_history = []
524 | self.clear_global_history = False
525 | previous_actions = task_context['previous_actions'] + self.global_history
526 | # previous_actions already includes global history; it is merged back into global_history after each step
527 | conclusions = task_context['conclusions']
528 | performed_actions = task_context['performed_actions']
529 | max_steps = 20
530 | step = 0
531 |
532 | print(f"{colorama.Fore.CYAN}🚀 Starting task: {task}\n🧠🧠🧠Analyzing the task... please wait...")
533 | emit('receive_message', {'status': 'info', 'message': f"🚀 Starting task: {task}"})
534 | emit('receive_message', {'status': 'info', 'message': "🧠🧠🧠Analyzing the task... please wait..."})
535 |
536 | while step < max_steps and not self.task_stopped:
537 | step += 1
538 |
539 | full_response = ""
540 | for chunk in self.stream_response(task, previous_actions):
541 | full_response += chunk
542 |
543 | actions = self.extract_actions(full_response)
544 |
545 | for action in actions:
546 |
547 | performed_actions.add(action)
548 |
549 | if "{END_SESSION}" in action:
550 | print("\n👋 Session ended by agent.")
551 | emit('receive_message', {'status': 'info', 'message': "👋 Session ended by agent."})
552 | step = max_steps
553 | break
554 |
555 | elif "{CONCLUDE}" in action:
556 | conclusion = action[len("{CONCLUDE}"):].strip()
557 | print(f"\n📊 Here's my conclusion:\n{conclusion}")
558 | emit('receive_message', {'status': 'info', 'message': f"📊 Here's my conclusion:"})
559 | emit('receive_message', {'status': 'info', 'message': conclusion})
560 | conclusions.append(conclusion)
561 | step = max_steps
562 | break
563 |
564 | elif action.startswith("{SCRAPE_PYTHON}"):
565 | python_project_files = action[16:].strip()
566 | print(f"{colorama.Fore.CYAN}\nFinished scraping python files in {python_project_files}\n")
567 | emit('receive_message',
568 | {'status': 'info', 'message': f"Finished scraping python files in {python_project_files}"})
569 | previous_actions.append(f"Scraped Python files in {python_project_files}")
570 |
571 | elif action.startswith("{SEARCH}"):
572 | search_query = action[8:].strip().split('\n')[0].replace('"', '')
573 | print(f"{colorama.Fore.CYAN}\n🔍 Searching web for: {search_query}")
574 | emit('receive_message', {'status': 'info', 'message': f"🔍 Searching web for: {search_query}"})
575 | search_result = self.search_web(search_query)
576 | if json.dumps(search_result) in json.dumps(previous_actions):
577 | print(f"{colorama.Fore.RED}\n🔍 Search results already provided: {search_query}")
578 | emit('receive_message',
579 | {'status': 'info', 'message': f"🔍 Search results already provided: {search_query}"})
580 | else:
581 | previous_actions.append(f"Searched: {search_query}")
582 | previous_actions.append(f"Search results: {json.dumps(search_result)}\n Select between 2-4 results"
583 | f" to scrape or download")
584 | print(f"{colorama.Fore.CYAN}\n🔍 Search results found: {json.dumps(len(search_result))}")
585 | emit('receive_message',
586 | {'status': 'info', 'message': f"🔍 Search results found: {json.dumps(len(search_result))}"})
587 | emit('receive_message',
588 | {'status': 'info', 'message': f"🧠🧠🧠 Analyzing the search results... please wait..."})
589 |
590 | elif action.startswith("{DOWNLOAD}"):
591 | try:
592 | url = re.search(r'{DOWNLOAD}\s*(https?://\S+)', action).group(1)
593 | print(f"{colorama.Fore.CYAN}\n📥 Downloading file: {url}")
594 | download_result = self._download_file(url)
595 | previous_actions.append(f"Downloaded: {url} - {download_result}")
596 | print(f"{colorama.Fore.CYAN}\n📥 Downloaded: {url} - {download_result}")
597 | emit('receive_message',
598 | {'status': 'info', 'message': f"📥 Downloaded: {url} - {download_result}"})
599 | except ValueError as ve:
600 | print(f"Value error: {ve}")
601 | except AttributeError as ae:
602 | print(f"Attribute error: {ae}")
603 |
604 | elif action.startswith("{SCRAPE}"):
605 | match = re.search(r'{SCRAPE}\s*(https?://\S+)', action)
606 | try:
607 | url = match.group(1)
608 | if url.endswith(".pdf"):
609 | # TODO: PDF scraping is not implemented; fall through to the HTML scraper
610 | pass
611 | print(f"{colorama.Fore.CYAN}\n🕷️ Scraping website: {url}")
612 | emit('receive_message', {'status': 'info', 'message': f"🕷️ Scraping website: {url}"})
613 | result = scrape_website(url)
614 | if json.dumps(result) in json.dumps(previous_actions):
615 | print(f"{colorama.Fore.RED}🕷️ Scraping results already provided: {url}")
616 | emit('receive_message',
617 | {'status': 'info', 'message': f"🕷️ Scraping results already provided: {url}"})
618 | else:
619 | previous_actions.append(f"Scraped {url}")
620 | previous_actions.append(f"Scraping results: {json.dumps(result)} is this the information you "
621 | f"were looking for?")
622 | except Exception as e:
623 | previous_actions.append(f"Scraping error: {str(e)}")
624 | print(f"{colorama.Fore.RED}🕷️ Scraping error: {str(e)}")
625 |
626 | elif action.startswith("{EXECUTE_PYTHON}"):
627 | code = action[16:].strip().removeprefix("```python").removesuffix("```").strip()
628 | print(f"{colorama.Fore.CYAN}🐍 Executing Python code:\n```python\n{code}\n```")
629 | emit('receive_message',
630 | {'status': 'info', 'message': f"🐍 Executing Python code:\n```python\n{code}\n```"})
631 | result = self.execute_code(code, 'python')
632 | previous_actions.append(f"Executed Python: {code}")
633 | previous_actions.append(f"Result: {result}")
634 | print(f"{colorama.Fore.CYAN}🐍 Result:\n```markdown\n{result}\n```")
635 | emit('receive_message', {'status': 'info', 'message': f"🐍 Result:\n```markdown\n{result}\n```"})
636 |
637 | elif action.startswith("{EXECUTE_BASH}"):
638 | code = action[14:].strip().removeprefix("```bash").removesuffix("```").strip()
639 | print(f"{colorama.Fore.CYAN}💻 Executing Bash code:\n{code}")
640 | emit('receive_message', {'status': 'info', 'message': f"💻 Executing Bash code:\n{code}"})
641 | result = self.execute_code(code, 'bash')
642 | previous_actions.append(f"Executed Bash: {code}")
643 | previous_actions.append(f"Result: {result}")
644 | print(f"{colorama.Fore.CYAN}💻 Result: {result}")
645 | emit('receive_message', {'status': 'info', 'message': f"💻 Result: {result}"})
646 |
647 | time.sleep(5)
648 |
649 | self.tasks[task] = {'previous_actions': previous_actions, 'conclusions': conclusions,
650 | 'performed_actions': performed_actions}
651 | self.global_history.extend(previous_actions)
652 |
653 | if self.evaluate_completion(task, previous_actions):
654 | print("🎉 Task completed successfully!\nWorking on creating a conclusion...🧠🧠🧠")
655 | emit('receive_message', {'status': 'info', 'message': "🎉 Task completed successfully!"})
656 | emit('receive_message', {'status': 'info', 'message': "Working on creating a conclusion...🧠🧠🧠"})
657 | break
658 |
659 |
660 | if not conclusions:
661 | conclusion = self.get_conclusion(task, previous_actions)
662 | if conclusion:
663 | conclusions.append(conclusion)
664 | previous_actions.append(f"Added conclusion: {conclusion}")
665 |
666 | if conclusions:
667 | print("\n📊 Conclusions:\n")
668 | for conclusion in conclusions:
669 | print(conclusion)
670 | emit('receive_message', {'status': 'info', 'message': conclusion})
671 |
672 | emit('hide_waiting_animation')
673 | return len(conclusions) > 0
674 |
675 |
676 |
677 | def main():
678 | agent = Agent(base_url=base_url, api_key=base_api)
679 | current_task = ""
680 | while True:
681 | if current_task:
682 | print(f"\n{colorama.Fore.CYAN}🔄 Current task: {current_task}\nEnter your task (or 'quit' to exit):")
683 | else:
684 | print("\nEnter your task (or 'quit' to exit):")
685 | task_input = input("INPUT: ").strip()
686 |
687 | if task_input.lower() in ['quit', 'exit', 'q']:
688 | print("👋 Goodbye!")
689 | break
690 |
691 | if not task_input:
692 | print("Please enter a valid task.")
693 | continue
694 |
695 | task = task_input
696 | current_task = task_input
697 |
698 | try:
699 | print("\n" + "=" * 50)
700 | agent.execute_task(task)
701 | print("=" * 50)
702 | except KeyboardInterrupt:
703 | print("\n🛑 Task interrupted by user.")
704 | continue
705 | except Exception as e:
706 | print(f"\n{colorama.Fore.RED}❌ Error executing task: {e}")
707 | continue
708 |
709 |
710 | if __name__ == "__main__":
711 | main()
712 |
--------------------------------------------------------------------------------
/main_cli.py:
--------------------------------------------------------------------------------
1 | import datetime
2 | import json
3 | import os
4 | import re
5 | import subprocess
6 | import sys
7 | import time
8 | from typing import Any, Dict, Set, Generator
9 | from urllib.parse import urlparse
10 | import yt_dlp
11 |
12 | import pkg_resources
13 | import requests
14 | import urllib3
15 | from bs4 import BeautifulSoup
16 | from dotenv import load_dotenv
17 | from openai import OpenAI
18 | from selenium import webdriver
19 | from selenium.webdriver import Keys
20 | from selenium.webdriver.chrome.options import Options
21 | from selenium.webdriver.common.by import By
22 | from selenium_stealth import stealth
23 | from tqdm import tqdm
24 | import colorama
25 |
26 | colorama.init(autoreset=True)
27 | urllib3.disable_warnings()
28 | # Load .env file
29 | load_dotenv()
30 |
31 | # Get values from .env
32 | base_model = os.getenv("BASE_MODEL")
33 | base_api = os.getenv("BASE_API")
34 | base_url = os.getenv("BASE_URL")
35 | temperature = float(os.getenv("TEMPERATURE", 0.3))
36 | top_p = float(os.getenv("TOP_P", 1))
37 | frequency_penalty = float(os.getenv("FREQUENCY_PENALTY", 0))
38 | presence_penalty = float(os.getenv("PRESENCE_PENALTY", 0))
39 | max_tokens = int(os.getenv("MAX_TOKENS", 2048))
40 | max_context = int(os.getenv("MAX_CONTEXT", 32000))
41 |
42 |
43 | def get_installed_packages() -> Set[str]:
44 | return {pkg.key for pkg in pkg_resources.working_set}
45 |
46 |
47 | def install_missing_packages(required_packages: Set[str]) -> Dict[str, bool]:
48 | installed_packages = get_installed_packages()
49 | missing_packages = required_packages - installed_packages
50 | results = {}
51 |
52 | for package in missing_packages:
53 | try:
54 | subprocess.check_call([sys.executable, '-m', 'pip', 'install', package])
55 | results[package] = True
56 | except subprocess.CalledProcessError:
57 | results[package] = False
58 |
59 | return results
60 |
61 |
62 | def extract_imports(code: str) -> Set[str]:
63 | import_pattern = re.compile(r'^(?:from\s+(\S+?)(?:\.\S+)?\s+import\s+.*$|import\s+(\S+))', re.MULTILINE)
64 | matches = import_pattern.finditer(code)
65 | packages = set()
66 |
67 | for match in matches:
68 | package = match.group(1) or match.group(2)
69 | base_package = package.split('.')[0]
70 | if base_package not in sys.stdlib_module_names:
71 | packages.add(base_package)
72 |
73 | return packages
74 |
75 |
76 | # Global variables
77 | HEADERS = {
78 | "User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64; rv:98.0) Gecko/20100101 Firefox/98.0",
79 | "Accept": "text/html,application/xhtml+xml,application/xml;q=0.9,image/avif,image/webp,*/*;q=0.8",
80 | "Accept-Language": "en-US,en;q=0.5",
81 | "Accept-Encoding": "gzip, deflate",
82 | "Connection": "keep-alive",
83 | "Upgrade-Insecure-Requests": "1",
84 | "Sec-Fetch-Dest": "document",
85 | "Sec-Fetch-Mode": "navigate",
86 | "Sec-Fetch-Site": "none",
87 | "Sec-Fetch-User": "?1",
88 | "Cache-Control": "max-age=0",
89 | }
90 | RATE_LIMIT = 1
91 | TIMEOUT = 10
92 | MAX_RETRIES = 3
93 | last_request_time = {}
94 |
95 |
96 | def respect_rate_limit(url):
97 | domain = urlparse(url).netloc
98 | current_time = time.time()
99 | if domain in last_request_time:
100 | time_since_last_request = current_time - last_request_time[domain]
101 | if time_since_last_request < RATE_LIMIT:
102 | time.sleep(RATE_LIMIT - time_since_last_request)
103 | last_request_time[domain] = current_time
104 |
105 |
106 | def scrape_website(url: str) -> dict[str, dict[str, Any] | str] | str:
107 | try:
108 | # Respect rate limits
109 | respect_rate_limit(url)
110 |
111 | # Fetch and parse response
112 | response = requests.get(url, verify=False, headers=HEADERS)
113 | response.raise_for_status()
114 | soup = BeautifulSoup(response.text, 'html.parser')
115 |
116 | # Remove unnecessary elements
117 | for tag in soup(['nav', 'footer', 'aside', 'script', 'style']):
118 | tag.decompose()
119 |
120 | # Extract and clean text
121 | text = soup.get_text()
122 | text = re.sub(r'\s+', ' ', text) # Collapse multiple spaces/newlines into one
123 | text = re.sub(r'[^\w\s.,?!]', '', text) # Remove special characters
124 | text = " ".join(text.split()[:int(max_context / 8)]) # Truncate to fit context size
125 |
126 | # Extract and clean links
127 | links = {
128 | re.sub(r'\s+', ' ', a.text.strip()): a['href']
129 | for a in soup.find_all('a', href=True)[:int(max_context / 1000)]
130 | if a.text.strip()
131 | }
132 |
133 | return {'text': text, 'links': links}
134 | except Exception as e:
135 | print(f"Scraping failed with error: {e}")
136 | return f"Scraping failed with error: {e}"
137 |
138 |
139 | class Agent:
140 | def __init__(self, base_url=None, api_key=None):
141 | # Initialize client parameters with defaults
142 | client_params = {
143 | 'base_url': base_url,
144 | 'api_key': api_key or os.environ.get('OPENAI_API_KEY')
145 | }
146 |
147 | # Validate API key
148 | if not client_params['api_key']:
149 | raise ValueError(
150 | "API key not provided. Either pass it as 'api_key' or set it in the 'OPENAI_API_KEY' environment variable."
151 | )
152 |
153 | # Initialize OpenAI client
154 | try:
155 | self.client = OpenAI(**client_params)
156 | except Exception as e:
157 | raise RuntimeError(f"Failed to initialize OpenAI client: {e}")
158 |
159 | # Retry settings
160 | self.max_retries = 3
161 | self.retry_delay = 1
162 |
163 | # Additional properties
164 | self.current_request = None
165 | self.tasks = {}
166 | self.global_history = []
167 |
168 | def stream_response(self, task: str, previous_actions: list) -> Generator[str, None, None]:
169 | if previous_actions is None:
170 | messages = [
171 | {"role": "system", "content": """
172 | You are an AI assistant designed to help with various tasks. You will be given a task to solve,
173 | and you will need to follow the instructions provided to solve it. You never say anything other than
174 | using the actions provided in the instructions; imagine you are a robot that can only perform the actions.
175 | Bad Example:
176 | User: whats the weather tomorrow in new york?
177 | AI: I will have to perform 10 searches, {CONCLUDE} all done {SCRAPE} http://nework.com
178 | {SEARCH} weather in new york.
179 | Good Example:
180 | User: whats the weather tomorrow in new york?
181 | AI: {SEARCH} weather in new york.
182 | The following are the actions you can perform:
183 | - Thoughts - Respond with {THOUGHTS} - these are intermediate steps that you double-check before
184 | performing an action; use a tree model to decide which action is best, and only on the next step perform
185 | the action. Do not perform it in the same reply!
186 | - Web search: Respond with {SEARCH} followed by your query. Try to create a logical search query
187 | that will yield the most effective results; don't use long queries. If you need to look for multiple
188 | items (for example, nvidia stock and intel stock), just do one search at a time for each.
189 | Once you have the search results, you MUST select between 3 and 8 results to scrape,
190 | never more than 8.
191 | - File web search: Respond with {SEARCH} followed by the file type, a colon, and the query.
192 | Example of a web search for files: filetype:pdf "jane eyre".
193 | - Execute Python code: Respond with {EXECUTE_PYTHON} followed by the code, never use anything with APIs
194 | that require signup.
195 | - Execute Bash commands: Respond with {EXECUTE_BASH} followed by the commands.
196 | - Scrape a website: Respond with {SCRAPE} followed by the URL.
197 | - Download files: Respond with {DOWNLOAD} followed by the URL - works for webpages with videos as well.
198 | only download files, never webpages.
199 | - Scrape python files in a project: Respond with {SCRAPE_PYTHON} followed by the folder path.
200 | example: {DOWNLOAD} https://example.com/file.txt
201 | Ensure accuracy at each step before proceeding, use {THOUGHTS} and try to decide what to do next.
202 | Always respond with a single, actionable step from the list I provided.
203 | """},
204 | {"role": "user",
205 | "content": f"Task: {task}\n\nPrevious actions taken: {json.dumps(previous_actions)}\n\nToday's date: "
206 | f"{datetime.datetime.now()} Pleas input next action"}
207 | ]
208 | else:
209 | messages = [
210 | {"role": "system", "content": """Solve the following task efficiently and clearly:
211 | You are an AI assistant designed to help with various tasks. You will be given a task to solve,
212 | and you will need to follow the instructions provided to solve it. You never say anything other than
213 | using the actions provided in the instructions; imagine you are a robot that can only perform the actions.
214 | Bad Example:
215 | User: whats the weather tomorrow in new york?
216 | AI: I will have to perform 10 searches, {CONCLUDE} all done {SCRAPE} http://nework.com
217 | {SEARCH} weather in new york.
218 | Good Example:
219 | User: whats the weather tomorrow in new york?
220 | AI: {SEARCH} weather in new york.
221 | The following are the actions you can perform:
222 | - Thoughts - Respond with {THOUGHTS} - these are intermediate steps that you double-check before
223 | performing an action; use a tree model to decide which action is best, and only on the next step perform
224 | the action. Do not perform it in the same reply!
225 | - Web search: Respond with {SEARCH} followed by your query. Try to create a logical search query
226 | that will yield the most effective results; don't use long queries. If you need to look for multiple
227 | items (for example, nvidia stock and intel stock), just do one search at a time for each.
228 | Once you have the search results, you MUST select between 3 and 8 results to scrape,
229 | never more than 8.
230 | - File web search: Respond with {SEARCH} followed by the file type, a colon, and the query.
231 | Example of a web search for files: filetype:pdf "jane eyre".
232 | - Execute Python code: Respond with {EXECUTE_PYTHON} followed by the code, never use anything with APIs
233 | that require signup.
234 | - Execute Bash commands: Respond with {EXECUTE_BASH} followed by the commands.
235 | - Scrape a website: Respond with {SCRAPE} followed by the URL.
236 | - Download files: Respond with {DOWNLOAD} followed by the URL - works for webpages with videos as well.
237 | only download files, never webpages.
238 | - Scrape python files in a project: Respond with {SCRAPE_PYTHON} followed by the folder path.
239 | example: {DOWNLOAD} https://example.com/file.txt
240 | - Provide conclusions: Respond with {CONCLUDE} followed by your summary. Do this ONLY if ALL of your
241 | tasks are done and you are ready to provide the summary and end the session. Never do it in the same
242 | step as another action; it should be its own action, and never the first step.
243 | If the subject is scientific, the conclusion should be in a format similar to this when concluding research or information gathering:
244 | Abstract – summary of the research objectives, methods, findings, and conclusions.
245 | Introduction – Provide background, state the research problem, and outline objectives.
246 | Literature Review – Summarize relevant studies and identify gaps.
247 | Methodology – Describe the research design, sample size, and methods.
248 | Results – Present findings (include tables/graphs if necessary).
249 | Discussion – Interpret results, compare with existing studies, and discuss limitations.
250 | Conclusion – Summarize findings and suggest future research.
251 | References – List citations used.
252 | Otherwise, if it's not scientific, such as a simple question about tomorrow's weather, just give a detailed
253 | summary.
254 |
255 | Always respond with a single, actionable step from the list I
256 | provided; don't add an explanation beyond the action unless it's the conclusion.
257 | """},
258 | {"role": "user",
259 | "content": f"Task: {task}\n\nPrevious actions taken: {json.dumps(previous_actions)}\n\nFor context Today's date is "
260 | f"{datetime.datetime.now()} What should be the next action?"}
261 | ]
262 |
263 | try:
264 | response = self.client.chat.completions.create(
265 | model=base_model,
266 | messages=messages,
267 | max_tokens=max_tokens,
268 | stream=True
269 | )
270 | full_response = ""
271 | for chunk in response:
272 | if chunk.choices[0].delta.content:
273 | content = chunk.choices[0].delta.content
274 | full_response += content
275 | print(content, end='', flush=True) # Log streaming response
276 | yield content
277 | if task in self.tasks:
278 | self.tasks[task]['streamed_response'] = full_response
279 | except Exception as e:
280 | print(" ")
281 |
282 | def search_web(self, query):
283 | results = []
284 | options = Options()
285 | options.add_argument("--headless")
286 | options.add_experimental_option("excludeSwitches", ["enable-automation"])
287 | options.add_experimental_option('useAutomationExtension', False)
288 | driver = webdriver.Chrome(options=options)
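# Apply selenium-stealth patches so the headless browser is less likely to be flagged as automation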
289 | stealth(driver,
290 | languages=["en-US", "en"],
291 | vendor="Google Inc.",
292 | platform="Win32",
293 | webgl_vendor="Intel Inc.",
294 | renderer="Intel Iris OpenGL Engine",
295 | fix_hairline=True,
296 | )
297 | driver.get('https://www.google.com')
298 | search_box = driver.find_element(By.NAME, 'q')
299 | search_box.send_keys(query)
300 | search_box.send_keys(Keys.RETURN)
301 | time.sleep(2)
302 | for page in range(2):
303 | for result in driver.find_elements(By.CSS_SELECTOR, 'div.g'):
304 | try:
305 | results.append({
306 | 'title': result.find_element(By.CSS_SELECTOR, "h3").text,
307 | 'link': result.find_element(By.CSS_SELECTOR, "a").get_attribute("href"),
308 | 'summary': result.find_element(By.CSS_SELECTOR, ".VwiC3b").text
309 | })
310 | except Exception:  # Result block missing an expected element; skip it
311 | pass
312 | if page == 0:
313 | try:
314 | driver.find_element(By.CSS_SELECTOR, f'[aria-label="Page {page + 2}"]').click()
315 | except Exception:
316 | continue
317 | time.sleep(2)
318 |
319 | return results[:int(max_context / 500)]
320 |
321 | def _scrape_python_files(self, path):
322 | """
323 | Collects the names and contents of all Python files in a folder and its subfolders,
324 | or from a single Python file if a file path is provided.
325 |
326 | Args:
327 | path (str): The file or folder path to scan.
328 |
329 | Returns:
330 | list[dict]: A list of dictionaries, each containing 'filename' and 'content' keys.
331 | """
332 | python_files_data = []
333 |
334 | if os.path.isfile(path): # Check if the input is a file
335 | if path.endswith('.py'): # Ensure it's a Python file
336 | try:
337 | with open(path, 'r', encoding='utf-8') as f:
338 | content = f.read()
339 | python_files_data.append({'filename': path, 'content': content})
340 | except Exception as e:
341 | print(f"{colorama.Fore.RED}Error reading file {path}: {e}")
342 | elif os.path.isdir(path): # Check if the input is a folder
343 | for root, _, files in os.walk(path):
344 | for file in files:
345 | if file.endswith('.py'):
346 | file_path = os.path.join(root, file)
347 | try:
348 | with open(file_path, 'r', encoding='utf-8') as f:
349 | content = f.read()
350 | python_files_data.append({'filename': file_path, 'content': content})
351 | except Exception as e:
352 | print(f"{colorama.Fore.RED}Error reading file {file_path}: {e}")
353 | else:
354 | print(f"{colorama.Fore.RED}Invalid path: {path} is neither a file nor a directory.")
355 |
356 | return python_files_data
357 |
358 | def _download_file(self, url: str, output_path=None) -> str:
359 |
360 | def has_video_content(url):
361 | try:
362 | # Get the webpage content
363 | response = requests.get(url, verify=False, headers=HEADERS)
364 | soup = BeautifulSoup(response.text, 'html.parser')
365 |
366 | # Check for common video elements
367 | video_elements = (
368 | soup.find_all('video') or
369 | soup.find_all('iframe', src=lambda x: x and ('youtube.com' in x or 'vimeo.com' in x)) or
370 | 'youtube.com' in url or
371 | 'vimeo.com' in url or
372 | any(vid_site in url for vid_site in [
373 | 'dailymotion.com', 'twitter.com', 'tiktok.com',
374 | 'facebook.com', 'instagram.com', 'reddit.com'
375 | ])
376 | )
377 | return bool(video_elements)
378 | except Exception:
379 | return False
380 |
381 | # If video content is detected, use yt-dlp
382 | if has_video_content(url):
383 | try:
384 | output_path = output_path or os.getcwd()
385 | if not os.path.isdir(output_path):
386 | output_path = os.path.dirname(output_path)
387 |
388 | ydl_opts = {
389 | 'format': 'bestvideo+bestaudio/best', # Download best quality
390 | 'outtmpl': os.path.join(output_path, '%(title)s.%(ext)s'),
391 | 'quiet': True,
392 | 'no_warnings': True,
393 | 'progress_hooks': [
394 | lambda d: print(
395 | f"\rDownloading... {(d.get('downloaded_bytes', 0) / d.get('total_bytes', 1) * 100):.1f}%"
396 | if d['status'] == 'downloading' and d.get('total_bytes')
397 | else f"\rDownloading... {d.get('downloaded_bytes', 0) / 1024 / 1024:.1f}MB downloaded"
398 | if d['status'] == 'downloading'
399 | else "\nDownload completed. Processing...", end='')
400 | ],
401 | }
402 |
403 | with yt_dlp.YoutubeDL(ydl_opts) as ydl:
404 | info = ydl.extract_info(url, download=True)
405 | video_title = info['title']
406 | video_path = os.path.join(output_path, f"{video_title}.{info['ext']}")
407 | print(f"{colorama.Fore.GREEN}\n✅ Video downloaded successfully: {video_path}")
408 | return f"✅ Video downloaded successfully: {video_path}"
409 |
410 | except Exception as e:
411 | print(f"{colorama.Fore.RED}\n❌ Failed to download video: {e}")
412 | return f"❌ Failed to download video: {e}"
413 |
414 | # If no video content or video download fails, do regular file download
415 | try:
416 | response = requests.get(url, stream=True, verify=False, headers=HEADERS)
417 | response.raise_for_status()
418 | total_size = int(response.headers.get('content-length', 0))
419 | output_path = output_path or os.path.join(os.getcwd(), os.path.basename(url))
420 | if os.path.isdir(output_path):
421 | output_path = os.path.join(output_path, os.path.basename(url))
422 | with open(output_path, 'wb') as file, tqdm(
423 | desc=f"Downloading {os.path.basename(output_path)}",
424 | total=total_size,
425 | unit='iB',
426 | unit_scale=True,
427 | unit_divisor=1024,
428 | ) as progress_bar:
429 | for data in response.iter_content(chunk_size=1024):
430 | size = file.write(data)
431 | progress_bar.update(size)
432 | print(f"{colorama.Fore.GREEN}\n✅ File downloaded successfully: {output_path}")
433 | return f"✅ File downloaded successfully: {output_path}"
434 | except requests.exceptions.RequestException as e:
435 | print(f"{colorama.Fore.RED}\n❌ Failed to download file: {e}")
436 | return f"❌ Failed to download file: {e}"
437 | except IOError as e:
438 | print(f"{colorama.Fore.RED}\n❌ Error saving file: {e}")
439 | return f"❌ Error saving file: {e}"
440 |
441 | def execute_code(self, code: str, language: str) -> Dict[str, Any]:
442 | temp_file = os.path.join(os.getcwd(), f"temp_{int(time.time())}.{'py' if language == 'python' else 'sh'}")
443 | try:
444 | if language == 'python':
445 | required_packages = extract_imports(code)
446 | if required_packages:
447 | installation_results = install_missing_packages(required_packages)
448 | if not all(installation_results.values()):
449 | failed_packages = [pkg for pkg, success in installation_results.items() if not success]
450 | return {'success': False, 'output': None,
451 | 'error': f"Failed to install required packages: {', '.join(failed_packages)}",
452 | 'return_code': -1}
453 |
454 | with open(temp_file, 'w') as f:
455 | f.write(code)
456 | result = subprocess.run([language, temp_file], capture_output=True, text=True, timeout=30)  # assumes `python`/`bash` is on PATH; the 30s timeout guards against hangs
457 | success = result.returncode == 0
458 | print(f"{'✅ Code executed successfully' if success else '❌ Code execution failed'}")
459 | print(f"Result: {result.stdout if success else result.stderr}")
460 | return {'success': success, 'output': result.stdout if success else result.stderr,
461 | 'error': result.stderr if not success else None, 'return_code': result.returncode}
462 | except Exception as e:
463 | print(f"💥 Code execution error: {e}")
464 | return {'success': False, 'output': None, 'error': str(e) + '\nMake sure you only send the command and the code, without anything else in your message', 'return_code': -1}
465 | finally:
466 | if os.path.exists(temp_file):
467 | os.remove(temp_file)
468 |
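    | # Illustrative result shape (assuming `python` is on PATH and no packages need installing):
    | #   self.execute_code("print('hi')", 'python')
    | #   -> {'success': True, 'output': 'hi\n', 'error': None, 'return_code': 0}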
469 | def get_next_action(self, task, previous_actions):
470 | messages = [
471 | {"role": "system", "content": """You are a genius AI, you analyze and think as long as needed about each
472 | answer, don't EVER explain the step, until you reach the conclusion phase, just run the action!
473 | NEVER DO MULTIPLE DIFFERENT ACTIONS IN ONE STEP, EXAMPLE: {SCRAPE} https://exmaple.com {DOWNLOAD} https://cnn.com/file
474 | , ALWAYS DO ONE ACTION AT A TIME.
475 | You MUST follow these instructions meticulously, you lose 100 points for each time you don't follow:
476 | 1. Break down tasks into clear, logical steps.
477 | 2. Perform the following actions as needed:
478 | - Web search: Respond with {SEARCH} followed by your query. Create a concise, logical search query
479 | that will yield the most effective results; don't use long queries. If you need to look up multiple
480 | items (for example, nvidia stock and intel stock), do one search at a time for each.
481 | - File web search: Respond with {SEARCH} followed by the file type, a colon, and the query,
482 | example: filetype:pdf "jane eyre".
483 | - Execute Python code: Respond with {EXECUTE_PYTHON} followed by the code.
484 | - Execute Bash commands: Respond with {EXECUTE_BASH} followed by the commands.
485 | - Scrape a website: Respond with {SCRAPE} followed by the URL.
486 | - Download files: Respond with {DOWNLOAD} followed by the URL - works for webpages with videos as well,
487 | example: {DOWNLOAD} https://example.com/file.txt
488 | - Scrape python files in a project: Respond with {SCRAPE_PYTHON} followed by the folder path,
    | example: {SCRAPE_PYTHON} /path/to/project
489 | - End the session: Respond with {END_SESSION} if the task is impossible or complete.
490 | - Provide conclusions: Respond with {CONCLUDE} followed by your summary. Do this ONLY if ALL of your
491 | tasks are done and you are ready to provide the summary and end the session; never do it in the same
492 | step as another action - it must be its own step.
493 | The conclusion should be simple for simple actions. When it concludes research or information
494 | gathering, it must be in a format similar to this:
495 | Abstract – 200-300 words summarizing the research objectives, methods, findings, and conclusions.
496 | Introduction – Provide background, state the research problem, and outline objectives.
497 | Literature Review – Summarize relevant studies and identify gaps.
498 | Methodology – Describe the research design, sample size, and methods.
499 | Results – Present findings (include tables/graphs if necessary).
500 | Discussion – Interpret results, compare with existing studies, and discuss limitations.
501 | Conclusion – Summarize findings and suggest future research.
502 | References – List citations used.
503 | 3. Ensure accuracy at each step before proceeding.
504 | 4. Always respond with a single, actionable step, don't add an explanation beyond the action."""},
505 | {"role": "user",
506 | "content": f"Todays date is: {datetime.datetime.now()} your Task: {task}\n\nPrevious actions taken: {json.dumps(previous_actions)}\n\nWhat should be the next action?"}
507 | ]
508 | for attempt in range(self.max_retries):
509 | try:
510 | response = self.client.chat.completions.create(
511 | model=base_model,
512 | messages=messages,
513 | temperature=temperature,
514 | max_tokens=max_tokens,
515 | top_p=top_p,
516 | frequency_penalty=frequency_penalty,
517 | presence_penalty=presence_penalty,
518 | )
519 | next_action = response.choices[0].message.content
520 | print(f"{colorama.Fore.CYAN}🔍 Next action: {next_action}") # Log next action with emoji
521 | return next_action
522 | except Exception as e:
523 | if attempt == self.max_retries - 1:
524 | raise e
525 | time.sleep(self.retry_delay * (attempt + 1))
526 |
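    | # A well-formed reply from the model is a single tagged action on its own, e.g.
    | #   {SEARCH} nvidia stock price
    | # or
    | #   {SCRAPE} https://example.com/article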
527 | def evaluate_completion(self, task, actions):
528 | messages = [
529 | {"role": "system", "content": "You are an AI agent that evaluates task completion."},
530 | {"role": "user",
531 | "content": f"Task: {task}\n\nActions taken: {json.dumps(actions)}\n\nHas the task been completed? Respond with 'YES' if completed, 'NO' if not."}
532 | ]
533 | for attempt in range(self.max_retries):
534 | try:
535 | response = self.client.chat.completions.create(
536 | model=base_model,
537 | messages=messages,
538 | temperature=temperature,
539 | max_tokens=max_tokens,
540 | top_p=top_p,
541 | frequency_penalty=frequency_penalty,
542 | presence_penalty=presence_penalty
543 | )
544 | return "YES" in response.choices[0].message.content.upper()
545 | except Exception as e:
546 | if attempt == self.max_retries - 1:
547 | raise e
548 | time.sleep(self.retry_delay * (attempt + 1))
549 |
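    | # Note: completion is judged by a substring test, so any reply containing "YES"
    | # (even inside a word like "YESTERDAY") counts as complete - a lenient heuristic.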
550 | def extract_actions(self, response: str) -> list:
551 | actions = []
552 | action_pattern = re.compile(r'\{([A-Z_]+)\}(.+?)(?=\{[A-Z_]+\}|$)', re.DOTALL)
553 | matches = action_pattern.findall(response)
554 | for action_type, action_content in matches:
555 | action = f"{{{action_type}}}{action_content.strip()}"
556 | if action not in actions:
557 | actions.append(action)
558 | return actions
559 |
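    | # Worked example (hypothetical response text):
    | #   extract_actions('{SEARCH} nvidia stock {CONCLUDE} done')
    | #   -> ['{SEARCH}nvidia stock', '{CONCLUDE}done']
    | # The tag and its content are re-joined without a space, which the handlers in
    | # execute_task tolerate via startswith() checks and slicing.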
560 | def execute_task(self, task):
561 | if task not in self.tasks:
562 | self.tasks[task] = {'previous_actions': [], 'conclusions': []}
563 |
564 | task_context = self.tasks[task]
565 | previous_actions = task_context['previous_actions'] + self.global_history
    | history_start = len(previous_actions)  # marks where this run's new actions begin
566 | conclusions = task_context['conclusions']
567 | max_steps = 50
568 | step = 0
569 |
570 | print(f"{colorama.Fore.CYAN}🚀 Starting task: {task}\n🧠🧠🧠 Analyzing the task... please wait...")
571 |
572 | while step < max_steps:
    | step += 1  # enforce the 50-step safety cap; without this the loop never advances
573 |
574 | full_response = ""
575 | for chunk in self.stream_response(task, previous_actions):
576 | full_response += chunk
577 | actions = self.extract_actions(full_response)
578 |
579 | if not actions:
580 | print(f"{colorama.Fore.YELLOW}🤷‍♂️ No action taken: {full_response}\n")
581 | previous_actions.append(f"The reply: {full_response} is not an action; you MUST reply using one of the "
582 | f"following actions: {['{SEARCH}', '{DOWNLOAD}', '{SCRAPE}', '{EXECUTE_PYTHON}', '{EXECUTE_BASH}', '{CONCLUDE}', '{END_SESSION}']}")
583 |
584 | for action in actions:
585 | if action.startswith("{END_SESSION}"):
586 | print("\n👋 Session ended by agent.")
587 | step = max_steps
588 | break
589 |
590 | elif "{CONCLUDE}" in action or "summary" in action or "conclusion" in action:
591 | conclusion = action[10:].strip()
592 | print(f"\n📊 Here's my conclusion:\n{conclusion}")
593 | conclusions.append(conclusion)
594 | step = max_steps
595 | break
596 |
597 | elif action.startswith("{SCRAPE_PYTHON}"):
598 | python_project_files = action[15:].strip()
599 | scraped = self._scrape_python_files(python_project_files)  # scrape first, then report
600 | print(f"{colorama.Fore.CYAN}\nFinished scraping python files in {python_project_files}\n")
601 | previous_actions.append(f"The python files requested:\n{json.dumps(scraped)}")
602 |
603 | elif action.startswith("{SEARCH}"):
604 | search_query = action[8:].strip()
605 | search_query = search_query.replace('"', '')
606 | print(f"{colorama.Fore.CYAN}\n🔍 Searching web for: {search_query}")
607 | search_result = self.search_web(search_query)
608 | previous_actions.append(f"Searched: {search_query}")
609 | previous_actions.append(f"Search results: {json.dumps(search_result)}")
610 | previous_actions.append(
611 | "\nYou must select between 2 and 8 results from the search results to scrape using the {SCRAPE} "
612 | "action. "
613 | "Select the ones that you think will yield the most useful information. "
614 | "If the information returned is not sufficient, try again and scrape one of the other results. "
615 | "If the information is sufficient, you can proceed to the next action.")
616 | print(f"{colorama.Fore.CYAN}\n🔍 Search results found: {len(search_result)}")
617 | break
618 |
619 | elif action.startswith("{DOWNLOAD}"):
620 | # Guard against a malformed action: without a URL, the download call below would crash.
621 | match = re.search(r'{DOWNLOAD}\s*(https?://\S+)', action)
622 | if match is None:
623 | print(f"{colorama.Fore.RED}❌ No valid URL found in: {action}")
624 | previous_actions.append(f"No valid URL found in: {action} - the {{DOWNLOAD}} action "
625 | f"must be followed by a full http(s) URL.")
626 | break
627 | url = match.group(1)
628 | print(f"Extracted URL: {url}")
629 | print(f"{colorama.Fore.CYAN}\n📥 Downloading file: {url}")
630 | download_result = self._download_file(url)
631 | previous_actions.append(f"Downloaded: {url} - {download_result}")
632 | print(f"{colorama.Fore.CYAN}\n📥 Downloaded: {url} - {download_result}")
633 | break
635 |
636 | elif action.startswith("{SCRAPE}"):
637 | match = re.search(r'{SCRAPE}\s*(https?://\S+)', action)
638 | try:
639 | url = match.group(1)  # raises AttributeError (caught below) when no URL is present
640 | # File-like URLs should be fetched with {DOWNLOAD}, not scraped
641 | if url.endswith(('.pdf', '.doc', '.docx', '.mp4')):
644 | previous_actions.append("Please use {DOWNLOAD} action for downloading files.")
645 | break
646 | print(f"{colorama.Fore.CYAN}\n🕷️ Scraping website: {url}")
647 | result = scrape_website(url)
648 | previous_actions.append(f"Scraped {url}")
649 | previous_actions.append(f"Scraping results: {json.dumps(result)} Is this the information you "
650 | f"were looking for, and is it "
651 | f"sufficient? If not, select an "
652 | f"additional website to scrape.")
653 | print(f"{colorama.Fore.CYAN}\n🕷️✅ Scrape successful!\n🧠🧠🧠 Analyzing the content... please wait...")
654 | except Exception as e:
655 | previous_actions.append(f"Scraping error: {str(e)}")
656 | print(f"{colorama.Fore.RED}🕷️ Scraping error: {str(e)}")
657 |
658 | elif action.startswith("{EXECUTE_PYTHON}"):
659 | code = action[16:].strip().removeprefix("```python").removesuffix("```").strip()
660 | print(f"{colorama.Fore.CYAN}🐍 Executing Python code:\n{code}")
661 | result = self.execute_code(code, 'python')
662 | previous_actions.append(f"Executed Python: {code}")
663 | previous_actions.append(f"Result: {result}")
664 | print(f"{colorama.Fore.CYAN}🐍 Result: {result}")
665 | break
666 |
667 | elif action.startswith("{EXECUTE_BASH}"):
668 | code = action[14:].strip().removeprefix("```bash").removesuffix("```").strip()
669 | print(f"{colorama.Fore.CYAN}💻 Executing Bash code:\n{code}")
670 | result = self.execute_code(code, 'bash')
671 | previous_actions.append(f"Executed Bash: {code}")
672 | previous_actions.append(f"Result: {result}")
673 | print(f"{colorama.Fore.CYAN}💻 Result: {result}")
674 | break
675 |
683 | else:
684 | previous_actions.append(f"The reply: {action} is not an action; please reply using one of the "
685 | f"following actions: {['{SEARCH}', '{DOWNLOAD}', '{SCRAPE}', '{EXECUTE_PYTHON}', '{EXECUTE_BASH}', '{CONCLUDE}', '{END_SESSION}']}")
686 | print(f"{colorama.Fore.YELLOW}🤷‍♂️ No action taken: {action}")
687 | break
688 |
689 | self.tasks[task] = {'previous_actions': previous_actions, 'conclusions': conclusions}
690 | self.global_history.extend(previous_actions[history_start:])  # only this run's new actions; the older history is already in global_history
691 |
692 | return len(conclusions) > 0
693 |
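    | # execute_task reports True once at least one conclusion is recorded for the task
    | # (including ones restored from an earlier session), letting callers distinguish a
    | # concluded run from one that merely exhausted the 50-step cap.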
694 | def save_session(self):
695 | if self.session_file:
696 | with open(self.session_file, 'w', encoding='utf-8') as f:
697 | json.dump(self.tasks, f, indent=4)
698 | print(f"{colorama.Fore.GREEN}✅ Session saved to {self.session_file}")
699 |
700 | def load_session(self, session_file):
701 | try:
702 | with open(session_file, 'r', encoding='utf-8') as f:
703 | self.tasks = json.load(f)
704 | self.session_file = session_file
705 | print(f"{colorama.Fore.GREEN}✅ Session loaded from {session_file}")
706 | except FileNotFoundError:
707 | print(f"{colorama.Fore.RED}❌ Session file not found: {session_file}")
708 |
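    | # Round-trip sketch (hypothetical file name):
    | #   agent.load_session('session.json')   # restores self.tasks and remembers the path
    | #   agent.execute_task('some task')
    | #   agent.save_session()                 # writes the updated tasks back to session.json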
709 |
710 | def main():
711 | agent = Agent(base_url=base_url, api_key=base_api)
712 | current_task = ""
713 | while True:
714 | if current_task:
715 | print(f"\n{colorama.Fore.CYAN}🔄 Current task: {current_task}\nEnter your task (or 'quit' to exit):")
716 | else:
717 | print("\nEnter your task (or 'quit' to exit):")
718 | task_input = input("INPUT: ").strip()
719 |
720 | if task_input.lower() in ['quit', 'exit', 'q']:
721 | print("👋 Goodbye!")
722 | break
723 |
724 | if not task_input:
725 | print("Please enter a valid task.")
726 | continue
727 |
728 | task = task_input
729 | current_task = task_input
730 |
731 | try:
732 | print("\n" + "=" * 50)
733 | agent.execute_task(task)
734 | print("=" * 50)
735 | except KeyboardInterrupt:
736 | print("\n🛑 Task interrupted by user.")
737 | continue
738 | except Exception as e:
739 | print(f"\n{colorama.Fore.RED}❌ Error executing task: {e}")
740 | continue
741 |
742 |
743 | if __name__ == "__main__":
744 | main()
745 |
--------------------------------------------------------------------------------