├── .gitignore ├── Brain_modules ├── brain.py ├── define_tools.py ├── final_agent_persona.py ├── image_vision.py ├── llm_api_calls.py ├── lobes │ ├── association_areas.py │ ├── cerebellar_lobe.py │ ├── frontal_lobe.py │ ├── insular_cortex.py │ ├── limbic_lobe.py │ ├── occipital_lobe.py │ ├── parietal_lobe.py │ ├── temporal_lobe.py │ └── wernickes_area.py ├── lobes_processing.py ├── memory_utils.py ├── sentiment_analysis.py └── tool_call_functions │ ├── __pycache__ │ └── file_directory_manager.cpython-312.pyc │ ├── call_expert.py │ ├── file_directory_manager.py │ └── web_research.py ├── CODE_OF_CONDUCT.md ├── CONTRIBUTING.md ├── Dockerfile ├── LICENSE ├── README.md ├── image.png ├── listen_lobe.py ├── main.py ├── requirements.txt ├── speaker.py ├── static ├── aurora.png ├── customization.css └── customization.js └── templates ├── favicon.ico └── index.html /.gitignore: -------------------------------------------------------------------------------- 1 | 2024-06-21/screenshot_11-17-11.jpg 2 | 2024-06-21/screenshot_11-20-05.jpg 3 | 2024-06-21/screenshot_11-27-13.jpg 4 | 2024-06-21/screenshot_11-32-58.jpg 5 | 2024-06-21/screenshot_11-37-06.jpg 6 | 2024-06-21/screenshot_11-25-32.jpg 7 | 2024-06-21/screenshot_11-40-40.jpg 8 | *.jpg 9 | chat_history.json 10 | output.wav 11 | combined_audio.mp3 12 | 13 | occipital_lobe_model.pkl 14 | parietal_lobe_model.pkl 15 | *.pkl 16 | 17 | experiments/gamer.py 18 | selector_rl_state.json 19 | test.py 20 | temp_screenshot.png 21 | earn.png 22 | full_page_screenshot.png 23 | lobes_log.txt 24 | __pycache__/listen_lobe.cpython-312.pyc 25 | __pycache__/speaker.cpython-312.pyc 26 | __pycache__/utilities.cpython-312.pyc 27 | Brain_modules/__pycache__/brain.cpython-312.pyc 28 | Brain_modules/__pycache__/define_tools.cpython-312.pyc 29 | Brain_modules/__pycache__/final_agent_persona.cpython-312.pyc 30 | Brain_modules/__pycache__/image_vision.cpython-312.pyc 31 | Brain_modules/__pycache__/llm_api_calls.cpython-312.pyc 32 | Brain_modules/__pycache__/lobes_processing.cpython-312.pyc 33 | Brain_modules/__pycache__/memory_utils.cpython-312.pyc 34 | Brain_modules/__pycache__/sentiment_analysis.cpython-312.pyc 35 | Brain_modules/lobes/__pycache__/association_areas.cpython-312.pyc 36 | Brain_modules/lobes/__pycache__/cerebellar_lobe.cpython-312.pyc 37 | Brain_modules/lobes/__pycache__/frontal_lobe.cpython-312.pyc 38 | Brain_modules/lobes/__pycache__/insular_cortex.cpython-312.pyc 39 | Brain_modules/lobes/__pycache__/limbic_lobe.cpython-312.pyc 40 | Brain_modules/lobes/__pycache__/occipital_lobe.cpython-312.pyc 41 | Brain_modules/lobes/__pycache__/parietal_lobe.cpython-312.pyc 42 | Brain_modules/lobes/__pycache__/temporal_lobe.cpython-312.pyc 43 | Brain_modules/lobes/__pycache__/wernickes_area.cpython-312.pyc 44 | Brain_modules/tool_call_functions/__pycache__/call_expert.cpython-312.pyc 45 | Brain_modules/tool_call_functions/__pycache__/web_research.cpython-312.pyc 46 | flask_session/2029240f6d1128be89ddc32729463129 47 | flask_session/6da2ddf7931188427d890d2223e011ee 48 | 
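Before the source files themselves, a brief orientation: the central class is Brain in Brain_modules/brain.py (the next file), which is constructed with a progress callback, a memory collection, and the collection's size, and is driven entirely through process_input. The sketch below shows one plausible way to wire it up based only on those signatures; the ChromaDB client, the collection name, and the session id are illustrative assumptions rather than code from this repository, and the real setup is presumably handled by main.py (not shown in this excerpt).

# Hypothetical usage sketch -- not part of the repository.
# Assumes the dependencies in requirements.txt are installed, that a local
# Ollama instance is available for the embedding/LLM calls, and that the
# memory "collection" is a ChromaDB collection (an assumption, not confirmed here).
import chromadb
from Brain_modules.brain import Brain

def progress(message: str) -> None:
    # Brain reports its internal steps through this callback.
    print(f"[AURORA] {message}")

client = chromadb.Client()                                      # in-memory Chroma store
collection = client.get_or_create_collection("aurora_memory")   # name chosen for illustration

brain = Brain(progress_callback=progress,
              collection=collection,
              collection_size=collection.count())

reply = brain.process_input("Find recent news about local LLMs", session_id="demo-session")
print(reply)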
-------------------------------------------------------------------------------- /Brain_modules/brain.py: -------------------------------------------------------------------------------- 1 | import json 2 | import time 3 | import re 4 | from typing import Dict, Any, Callable, Tuple, List 5 | from tenacity import retry, stop_after_attempt, wait_exponential, retry_if_exception_type 6 | from Brain_modules.llm_api_calls import llm_api_calls, tools 7 | from Brain_modules.memory_utils import generate_embedding, add_to_memory, retrieve_relevant_memory 8 | from Brain_modules.sentiment_analysis import analyze_sentiment 9 | from Brain_modules.lobes_processing import LobesProcessing 10 | from Brain_modules.final_agent_persona import FinalAgentPersona 11 | 12 | class Brain: 13 | def __init__(self, progress_callback: Callable[[str], None], collection, collection_size): 14 | self.progress_callback = progress_callback 15 | self.collection = collection 16 | self.collection_size = collection_size 17 | self.web_research_cache = {} 18 | self._initialize() 19 | 20 | def _initialize(self): 21 | self._log_progress("Initializing Brain") 22 | self.tts_enabled = True 23 | self.lobes_processing = LobesProcessing() 24 | self.embeddings_model = "mxbai-embed-large" 25 | self.chat_histories = {} 26 | self.last_response = "" 27 | self.context = {"Conversation History": ""} 28 | self._log_progress("Brain initialization completed") 29 | 30 | def toggle_tts(self): 31 | try: 32 | self.tts_enabled = not self.tts_enabled 33 | status = "enabled" if self.tts_enabled else "disabled" 34 | self._log_progress(f"TTS toggled to {status}") 35 | return status 36 | except Exception as e: 37 | error_message = f"Error toggling TTS: {str(e)}" 38 | self._log_progress(error_message) 39 | raise 40 | 41 | def process_input(self, user_input: str, session_id: str) -> str: 42 | try: 43 | self._log_progress("Initiating cognitive processes...") 44 | tasks = self._break_down_tasks(user_input) 45 | final_response = "" 46 | 47 | for task in tasks: 48 | task_response = self._process_single_task(task, session_id) 49 | self._update_context(task, task_response) 50 | final_response += task_response + "\n\n" 51 | 52 | self._add_to_memory(f"Task: {task}\nResponse: {task_response}") 53 | 54 | self._log_progress("All tasks completed. Generating final response...") 55 | final_response = self._generate_final_response(final_response, user_input) 56 | self._update_context("Final Response", final_response) 57 | self._add_to_memory(f"User Input: {user_input}\nFinal Response: {final_response}") 58 | 59 | return final_response 60 | except Exception as e: 61 | error_message = f"Error encountered: {e} at {time.strftime('%Y-%m-%d %H:%M:%S')}" 62 | self._log_progress(error_message) 63 | return f"I apologize, but an error occurred while processing your request. I'll do my best to assist you based on the information available. Error details: {str(e)}" 64 | 65 | def _break_down_tasks(self, user_input: str) -> List[str]: 66 | self._log_progress("Breaking down tasks...") 67 | relevant_memory = self._retrieve_relevant_memory(user_input) 68 | 69 | prompt = f""" 70 | Analyze the following user input and break it down into distinct tasks: 71 | 72 | User Input: {user_input} 73 | 74 | Relevant Context: 75 | {relevant_memory} 76 | 77 | Provide the tasks as a Python list of strings. If there's only one task, still use the list format. 
78 | Example: ["Task 1", "Task 2"] or ["Single Task"] 79 | """ 80 | response, _ = llm_api_calls.chat(prompt, self._construct_system_message(), tools, progress_callback=self.progress_callback) 81 | 82 | tasks = self._parse_tasks(response) 83 | self._log_progress(f"Identified {len(tasks)} tasks") 84 | return tasks 85 | 86 | def _parse_tasks(self, response: str) -> List[str]: 87 | try: 88 | tasks = json.loads(response) 89 | if isinstance(tasks, list) and all(isinstance(item, str) for item in tasks): 90 | return tasks 91 | except json.JSONDecodeError: 92 | pass 93 | 94 | try: 95 | tasks = eval(response) 96 | if isinstance(tasks, list) and all(isinstance(item, str) for item in tasks): 97 | return tasks 98 | except: 99 | pass 100 | 101 | tasks = [task.strip() for task in response.split('\n') if task.strip()] 102 | if tasks: 103 | return tasks 104 | 105 | return [response.strip()] 106 | 107 | def _process_single_task(self, task: str, session_id: str) -> str: 108 | self._log_progress(f"Processing task: {task}") 109 | relevant_memory = self._retrieve_relevant_memory(task) 110 | 111 | if "search" in task.lower(): 112 | if task in self.web_research_cache: 113 | web_results = self.web_research_cache[task] 114 | self._log_progress(f"Using cached results for task: {task}") 115 | else: 116 | web_results = self._perform_web_research(task) 117 | self.web_research_cache[task] = web_results 118 | self._log_progress(f"Cached results for task: {task}") 119 | 120 | task_response = self._generate_task_response(web_results, session_id) 121 | else: 122 | initial_response, tool_calls = self._get_initial_response(task, relevant_memory, session_id) 123 | lobe_responses = self._process_lobes(task, initial_response) 124 | sentiment = analyze_sentiment(task) 125 | consolidated_info = self._consolidate_information(task, initial_response, lobe_responses, sentiment, None, relevant_memory) 126 | task_response = self._generate_task_response(consolidated_info, session_id) 127 | 128 | return task_response 129 | 130 | @retry(stop=stop_after_attempt(3), wait=wait_exponential(multiplier=1, max=10), retry=retry_if_exception_type(Exception)) 131 | def _get_initial_response(self, task: str, relevant_memory: str, session_id: str) -> Tuple[str, List[Any]]: 132 | self._log_progress("Generating initial response...") 133 | initial_prompt = self._construct_initial_prompt(task, relevant_memory) 134 | system_message = self._construct_system_message() 135 | response, tool_calls = llm_api_calls.chat(initial_prompt, system_message, tools, progress_callback=self.progress_callback) 136 | if not response or len(response.strip()) == 0: 137 | raise ValueError("Empty response received from LLM") 138 | self._update_chat_history(session_id, {"role": "user", "content": task}) 139 | self._update_chat_history(session_id, {"role": "assistant", "content": response}) 140 | return response, tool_calls 141 | 142 | def _process_tool_calls(self, tool_calls): 143 | tool_responses = {} 144 | for tool_call in tool_calls: 145 | function_name = tool_call.function.name 146 | function_to_call = llm_api_calls.available_functions.get(function_name) 147 | if function_to_call: 148 | try: 149 | function_args = json.loads(tool_call.function.arguments) 150 | function_response = function_to_call(**function_args, progress_callback=self.progress_callback) 151 | tool_responses[function_name] = function_response 152 | except Exception as e: 153 | self.progress_callback(f"Error in {function_name}: {str(e)}") 154 | tool_responses[function_name] = {"error": str(e)} 155 | return 
tool_responses 156 | 157 | def _process_lobes(self, task: str, initial_response: str) -> Dict[str, Any]: 158 | self._log_progress("Processing lobes...") 159 | combined_input = f"{task}\n{initial_response}\n" 160 | return self.lobes_processing.process_all_lobes(combined_input) 161 | 162 | def _retrieve_relevant_memory(self, query: str) -> str: 163 | embedding = generate_embedding(query, self.embeddings_model, self.collection, self.collection_size) 164 | relevant_memory = retrieve_relevant_memory(embedding, self.collection) 165 | return " ".join(str(item) for item in relevant_memory if item is not None) 166 | 167 | def _consolidate_information(self, task: str, initial_response: str, lobe_responses: Dict[str, Any], 168 | sentiment: Dict[str, float], tool_responses: Dict[str, Any], 169 | relevant_memory: str) -> Dict[str, Any]: 170 | return { 171 | "task": task, 172 | "initial_response": initial_response, 173 | "lobe_responses": lobe_responses, 174 | "sentiment": sentiment, 175 | "tool_responses": tool_responses, 176 | "current_context": self.context, 177 | "relevant_memory": relevant_memory 178 | } 179 | 180 | @retry(stop=stop_after_attempt(3), wait=wait_exponential(multiplier=1, max=10), retry=retry_if_exception_type(Exception)) 181 | def _generate_task_response(self, consolidated_info: Dict[str, Any], session_id: str) -> str: 182 | self._log_progress("Generating task response...") 183 | context = self._construct_task_prompt(consolidated_info) 184 | system_message = self._construct_system_message() 185 | task_response, _ = llm_api_calls.chat(context, system_message, tools, progress_callback=self.progress_callback) 186 | 187 | if not task_response or len(task_response.strip()) == 0: 188 | raise ValueError("Empty task response received from LLM") 189 | 190 | self.last_response = task_response 191 | self._update_chat_history(session_id, {"role": "user", "content": consolidated_info["task"]}) 192 | self._update_chat_history(session_id, {"role": "assistant", "content": task_response}) 193 | return task_response 194 | 195 | def _update_context(self, input_text: str, response: str): 196 | summary = self._generate_context_summary(input_text, response) 197 | self.context["Conversation History"] += f"\n{summary}" 198 | 199 | def _generate_context_summary(self, input_text: str, response: str) -> str: 200 | prompt = f""" 201 | Summarize the key points from the following interaction in 1-2 sentences: 202 | 203 | Input: {input_text} 204 | Response: {response} 205 | 206 | Focus on the most important information and any decisions or actions taken. 207 | """ 208 | summary, _ = llm_api_calls.chat(prompt, self._construct_system_message(), tools, progress_callback=self.progress_callback) 209 | return summary 210 | 211 | def _generate_final_response(self, task_responses: str, original_input: str) -> str: 212 | relevant_memory = self._retrieve_relevant_memory(original_input) 213 | 214 | prompt = f""" 215 | Create a comprehensive and coherent response based on the following information: 216 | 217 | Original User Input: {original_input} 218 | 219 | Task Responses: 220 | {task_responses} 221 | 222 | Conversation History: 223 | {self.context["Conversation History"]} 224 | 225 | Relevant Memory: 226 | {relevant_memory} 227 | 228 | Your response should: 229 | 1. Directly address the user's original input 230 | 2. Incorporate insights from all completed tasks 231 | 3. Maintain a consistent tone and style 232 | 4. Be clear, concise, and engaging 233 | 5. 
Propose any necessary follow-up actions or questions 234 | 235 | Provide this response in a natural, conversational manner. 236 | """ 237 | final_response, _ = llm_api_calls.chat(prompt, self._construct_system_message(), tools, progress_callback=self.progress_callback) 238 | return final_response 239 | 240 | def _construct_system_message(self) -> str: 241 | return f"""You are {FinalAgentPersona.name}, an advanced AI assistant capable of handling a wide variety of tasks. {FinalAgentPersona.description} 242 | You have access to a vast knowledge base and various tools to assist you. Always be proactive, 243 | inferring the user's needs and taking action without asking unnecessary clarifying questions. Be confident, 244 | direct, and provide comprehensive responses. If you're unsure about something, make a reasonable 245 | assumption and proceed with the most likely course of action. Maintain context throughout the conversation 246 | and use it to inform your responses and decisions.""" 247 | 248 | def _construct_initial_prompt(self, task: str, relevant_memory: str) -> str: 249 | return f""" 250 | Analyze the following task and provide an initial response: 251 | 252 | Task: "{task}" 253 | 254 | Relevant Context: 255 | {relevant_memory} 256 | 257 | Current Conversation History: 258 | {self.context["Conversation History"]} 259 | 260 | Your response should: 261 | 1. Demonstrate understanding of the task 262 | 2. Incorporate relevant context and conversation history 263 | 3. Identify any necessary tools or actions needed to complete the task 264 | 4. Provide an initial approach or response to the task 265 | 266 | If you need to use any tools, call them directly. Otherwise, provide your initial thoughts and approach. 267 | """ 268 | 269 | def _construct_task_prompt(self, consolidated_info: Dict[str, Any]) -> str: 270 | tool_results = json.dumps(consolidated_info.get("tool_responses", {}), indent=2) if consolidated_info.get("tool_responses") else "No tools were used." 271 | return f""" 272 | Provide a comprehensive response to the following task, considering all available information: 273 | 274 | Task: "{consolidated_info['task']}" 275 | 276 | Initial Response: {consolidated_info['initial_response']} 277 | 278 | Tool Use Results: {tool_results} 279 | 280 | Lobe Processing Results: 281 | {json.dumps(consolidated_info['lobe_responses'], indent=2)} 282 | 283 | Detected Sentiment: Polarity: {consolidated_info['sentiment']['polarity']}, Subjectivity: {consolidated_info['sentiment']['subjectivity']} 284 | 285 | Current Conversation History: 286 | {consolidated_info['current_context']['Conversation History']} 287 | 288 | Relevant Memory: 289 | {consolidated_info['relevant_memory']} 290 | 291 | Your response should: 292 | 1. Directly address the task at hand 293 | 2. Incorporate insights from the lobe processing, memory context, and conversation history 294 | 3. Integrate the results from any tools that were used 295 | 4. Consider the detected sentiment in your response tone 296 | 5. Propose any necessary follow-up actions or questions 297 | 298 | Provide a clear, detailed, and actionable response that best assists the user. 
299 | """ 300 | 301 | def _log_progress(self, message: str): 302 | self.progress_callback(f"{message} at {time.strftime('%Y-%m-%d %H:%M:%S')}") 303 | 304 | def _update_chat_history(self, session_id: str, message: Dict[str, str]): 305 | if session_id not in self.chat_histories: 306 | self.chat_histories[session_id] = [] 307 | self.chat_histories[session_id].append(message) 308 | self.chat_histories[session_id] = self.chat_histories[session_id][-10:] 309 | 310 | def _add_to_memory(self, text: str): 311 | try: 312 | add_to_memory(text, self.embeddings_model, self.collection, self.collection_size) 313 | self.collection_size += 1 314 | except Exception as e: 315 | self._log_progress(f"Error adding to memory: {str(e)}") 316 | 317 | def get_chat_history(self, session_id: str): 318 | return self.chat_histories.get(session_id, []) 319 | 320 | def get_detailed_info(self): 321 | try: 322 | detailed_info = { 323 | "tts_enabled": self.tts_enabled, 324 | "embeddings_model": self.embeddings_model, 325 | "collection_size": self.collection_size, 326 | "last_response": self.last_response, 327 | "current_context": self.context 328 | } 329 | return json.dumps(detailed_info, indent=2) 330 | except Exception as e: 331 | return f"Error retrieving detailed info: {str(e)} at {time.strftime('%Y-%m-%d %H:%M:%S')}" 332 | -------------------------------------------------------------------------------- /Brain_modules/define_tools.py: -------------------------------------------------------------------------------- 1 | tools = [ 2 | { 3 | "type": "function", 4 | "function": { 5 | "name": "run_local_command", 6 | "description": "Execute a local command on the system to perform tasks such as file manipulation, retrieving system information, or running scripts.", 7 | "parameters": { 8 | "type": "object", 9 | "properties": { 10 | "command": { 11 | "type": "string", 12 | "description": "The specific command to execute on the local system.", 13 | } 14 | }, 15 | "required": ["command"], 16 | }, 17 | }, 18 | }, 19 | { 20 | "type": "function", 21 | "function": { 22 | "name": "web_research", 23 | "description": "Perform a web research query to gather information from online sources.", 24 | "parameters": { 25 | "type": "object", 26 | "properties": { 27 | "query": { 28 | "type": "string", 29 | "description": "The research query to perform.", 30 | } 31 | }, 32 | "required": ["query"], 33 | }, 34 | }, 35 | }, 36 | { 37 | "type": "function", 38 | "function": { 39 | "name": "analyze_image", 40 | "description": "Analyze an image from a provided URL or a local path and generate a description of the image's content.", 41 | "parameters": { 42 | "type": "object", 43 | "properties": { 44 | "image_url": { 45 | "type": "string", 46 | "description": "The URL or local path of the image to analyze.", 47 | } 48 | }, 49 | "required": ["image_url"], 50 | }, 51 | }, 52 | }, 53 | 54 | 55 | { 56 | "type": "function", 57 | "function": { 58 | "name": "call_expert", 59 | "description": "A tool that can ask an expert in any field by providing the expertise and the question. The expert will answer the question.", 60 | "parameters": { 61 | "type": "object", 62 | "properties": { 63 | "expertise": { 64 | "type": "string", 65 | "description": "The expertise of the expert you need. 
IE: math, science, etc.", 66 | }, 67 | "question": { 68 | "type": "string", 69 | "description": "The question you want to ask the expert.", 70 | }, 71 | }, 72 | "required": ["expertise", "question"], 73 | }, 74 | }, 75 | }, 76 | { 77 | "type": "function", 78 | "function": { 79 | "name": "file_directory_manager", 80 | "description": "Perform file and directory operations on the local system.", 81 | "parameters": { 82 | "type": "object", 83 | "properties": { 84 | "action": { 85 | "type": "string", 86 | "enum": ["list_directory", "create_directory", "delete_item", "move_item", "copy_item", "read_file", "write_file", "search_files", "get_file_info"], 87 | "description": "The action to perform on files or directories.", 88 | }, 89 | "path": { 90 | "type": "string", 91 | "description": "The path to the file or directory.", 92 | }, 93 | "source": { 94 | "type": "string", 95 | "description": "The source path for move or copy operations.", 96 | }, 97 | "destination": { 98 | "type": "string", 99 | "description": "The destination path for move or copy operations.", 100 | }, 101 | "content": { 102 | "type": "string", 103 | "description": "The content to write to a file.", 104 | }, 105 | "pattern": { 106 | "type": "string", 107 | "description": "The search pattern for finding files.", 108 | }, 109 | }, 110 | "required": ["action"], 111 | }, 112 | }, 113 | }, 114 | ] 115 | -------------------------------------------------------------------------------- /Brain_modules/final_agent_persona.py: -------------------------------------------------------------------------------- 1 | # final_agent_persona.py 2 | 3 | class FinalAgentPersona: 4 | """ 5 | The FinalAgentPersona class defines the persona for the final response agent AURORA. 6 | It includes the name, role, description, and user information that AURORA utilizes 7 | to synthesize and provide coherent responses. 8 | """ 9 | 10 | name = "AURORA (Artificial Unified Responsive Optimized Reasoning Agent)" 11 | role = "an entity that uses its lobes like a human does subconsciously" 12 | description = ( 13 | "AURORA (Artificial Unified Responsive Optimized Reasoning Agent) synthesizes the thoughts from all other lobes to provide a coherent, final response to the user's prompt. " 14 | "AURORA is highly knowledgeable, empathetic, and focused on providing insightful, relevant, and concise responses." 15 | ) 16 | 17 | # Information about the user (you) 18 | user_info = ( 19 | "I am a human user who is interacting with AURORA. " 20 | "I am curious, open-minded, and eager to learn from AURORA's responses." 
21 | ) -------------------------------------------------------------------------------- /Brain_modules/image_vision.py: -------------------------------------------------------------------------------- 1 | import os 2 | import requests 3 | import ollama 4 | from io import BytesIO 5 | from PIL import Image 6 | 7 | class ImageVision: 8 | def download_image(self, image_url): 9 | try: 10 | response = requests.get(image_url) 11 | response.raise_for_status() 12 | return Image.open(BytesIO(response.content)) 13 | except requests.exceptions.RequestException as e: 14 | raise Exception(f"Error downloading image: {str(e)}") 15 | except IOError as e: 16 | raise Exception(f"Error opening image: {str(e)}") 17 | 18 | def save_image(self, image, image_path): 19 | try: 20 | if image.mode == "RGBA": 21 | image = image.convert("RGB") 22 | image.save(image_path) 23 | except IOError as e: 24 | raise Exception(f"Error saving image: {str(e)}") 25 | 26 | def analyze_image(self, image_url): 27 | try: 28 | image = self.download_image(image_url) 29 | image_path = "temp_image.jpg" 30 | self.save_image(image, image_path) 31 | 32 | description = self._analyze_with_ollama(image_path) 33 | os.remove(image_path) 34 | 35 | return description 36 | except Exception as e: 37 | return f"Error analyzing image: {str(e)}" 38 | 39 | def analyze_local_image(self, image_path): 40 | try: 41 | if not os.path.exists(image_path): 42 | raise FileNotFoundError(f"Image file not found: {image_path}") 43 | 44 | description = self._analyze_with_ollama(image_path) 45 | return description 46 | except Exception as e: 47 | return f"Error analyzing local image: {str(e)}" 48 | 49 | def _analyze_with_ollama(self, image_path): 50 | try: 51 | res = ollama.chat( 52 | model="llava-llama3", 53 | messages=[ 54 | { 55 | 'role': 'user', 56 | 'content': 'Describe this as aurora. 
I want to know what is in this image concisely yet if its your chat interface just say user is chatting with me.', 57 | 'images': [image_path] 58 | } 59 | ] 60 | ) 61 | 62 | image_description = res['message']['content'] 63 | return image_description 64 | except Exception as e: 65 | raise Exception(f"Error in Ollama image analysis: {str(e)}") -------------------------------------------------------------------------------- /Brain_modules/llm_api_calls.py: -------------------------------------------------------------------------------- 1 | import json 2 | import os 3 | import subprocess 4 | import time 5 | from datetime import datetime 6 | from openai import OpenAI 7 | import tiktoken 8 | from typing import List, Any, Dict, Union, Tuple, Callable 9 | from tenacity import retry, wait_random_exponential, stop_after_attempt, RetryError 10 | 11 | from Brain_modules.image_vision import ImageVision 12 | from Brain_modules.tool_call_functions.web_research import WebResearchTool 13 | from Brain_modules.tool_call_functions.call_expert import call_expert 14 | from Brain_modules.tool_call_functions.file_directory_manager import file_directory_manager 15 | from Brain_modules.define_tools import tools 16 | 17 | MAX_TOKENS_PER_MINUTE = 5500 18 | MAX_RETRIES = 3 19 | 20 | def get_current_datetime(): 21 | return datetime.now().strftime("%Y-%m-%d %H:%M:%S") 22 | 23 | class LLM_API_Calls: 24 | def __init__(self): 25 | self.client = None 26 | self.model = None 27 | self.current_api_provider = os.getenv('DEFAULT_API_PROVIDER', 'ollama') 28 | self.setup_client() 29 | self.image_vision = ImageVision() 30 | self.chat_history = [] 31 | self.max_tokens = 4000 32 | self.encoding = tiktoken.encoding_for_model("gpt-3.5-turbo") 33 | self.web_research_tool = WebResearchTool() 34 | self.tokens_used = 0 35 | self.start_time = time.time() 36 | self.available_functions = { 37 | "run_local_command": self.run_local_command, 38 | "web_research": self.web_research_tool.web_research, 39 | "analyze_image": self.analyze_image, 40 | "call_expert": call_expert, 41 | "file_directory_manager": file_directory_manager 42 | } 43 | self.rate_limit_remaining = MAX_TOKENS_PER_MINUTE 44 | self.rate_limit_reset = time.time() + 60 45 | 46 | def setup_client(self): 47 | try: 48 | self.client = self.choose_API_provider() 49 | except Exception as e: 50 | print(f"Error setting up client: {e}") 51 | raise 52 | 53 | def choose_API_provider(self): 54 | if self.current_api_provider == "OpenAI": 55 | return self.setup_openai_client() 56 | elif self.current_api_provider == "ollama": 57 | return self.setup_ollama_client() 58 | elif self.current_api_provider == "Groq": 59 | return self.setup_groq_client() 60 | else: 61 | raise ValueError(f"Unsupported LLM Provider: {self.current_api_provider}") 62 | 63 | def setup_openai_client(self): 64 | api_key = os.environ.get("OPENAI_API_KEY") 65 | if not api_key: 66 | raise ValueError("OpenAI API key not found in environment variables") 67 | self.model = os.environ.get("OPENAI_MODEL", "gpt-3.5-turbo") 68 | return OpenAI(api_key=api_key) 69 | 70 | def setup_ollama_client(self): 71 | self.model = os.environ.get("OLLAMA_MODEL", "llama3:instruct") 72 | return OpenAI(base_url="http://localhost:11434/v1", api_key="ollama") 73 | 74 | def setup_groq_client(self): 75 | api_key = os.environ.get("GROQ_API_KEY") 76 | if not api_key: 77 | raise ValueError("Groq API key not found in environment variables") 78 | self.model = os.environ.get("GROQ_MODEL", "llama3-8b-8192") 79 | return OpenAI(base_url="https://api.groq.com/openai/v1", 
api_key=api_key) 80 | 81 | def update_api_provider(self, provider): 82 | self.current_api_provider = provider 83 | self.setup_client() 84 | 85 | def count_tokens(self, text): 86 | return len(self.encoding.encode(str(text))) 87 | 88 | def truncate_text(self, text, max_tokens): 89 | tokens = self.encoding.encode(str(text)) 90 | return self.encoding.decode(tokens[:max_tokens]) if len(tokens) > max_tokens else text 91 | 92 | @retry(wait=wait_random_exponential(multiplier=1, max=90), stop=stop_after_attempt(MAX_RETRIES)) 93 | def run_local_command(self, command, progress_callback=None): 94 | if progress_callback: 95 | progress_callback(f"Executing local command: {command}") 96 | try: 97 | result = subprocess.run(command, shell=True, capture_output=True, text=True, check=True) 98 | output = result.stdout 99 | if progress_callback: 100 | progress_callback(f"Local command executed successfully") 101 | return {"command": command, "output": output, "datetime": get_current_datetime()} 102 | except subprocess.CalledProcessError as e: 103 | if progress_callback: 104 | progress_callback(f"Error executing local command: {e.stderr}") 105 | return {"command": command, "error": f"Command execution failed: {e.stderr}", "datetime": get_current_datetime()} 106 | except Exception as e: 107 | if progress_callback: 108 | progress_callback(f"Unexpected error during local command execution: {str(e)}") 109 | return {"command": command, "error": str(e), "datetime": get_current_datetime()} 110 | 111 | @retry(wait=wait_random_exponential(multiplier=1, max=40), stop=stop_after_attempt(MAX_RETRIES)) 112 | def analyze_image(self, image_url, progress_callback=None): 113 | if progress_callback: 114 | progress_callback(f"Analyzing image: {image_url}") 115 | try: 116 | description = self.image_vision.analyze_image(image_url) 117 | if progress_callback: 118 | progress_callback("Image analysis completed") 119 | return {"image_url": image_url, "description": description, "datetime": get_current_datetime()} 120 | except Exception as e: 121 | if progress_callback: 122 | progress_callback(f"Error during image analysis: {str(e)}") 123 | return {"error": str(e), "datetime": get_current_datetime()} 124 | 125 | def chat(self, prompt: str, system_message: str, tools: List[dict], progress_callback: Callable[[str], None] = None) -> Tuple[str, List[Any]]: 126 | try: 127 | return self._chat_loop(prompt, system_message, tools, progress_callback) 128 | except RetryError as e: 129 | error_message = f"Failed to get a response after {MAX_RETRIES} attempts: {str(e)}" 130 | if progress_callback: 131 | progress_callback(error_message) 132 | return error_message, [] 133 | except Exception as e: 134 | error_message = f"Unexpected error in chat: {str(e)}" 135 | if progress_callback: 136 | progress_callback(error_message) 137 | return error_message, [] 138 | 139 | def _construct_system_message(self) -> str: 140 | return """You are an AI assistant designed to provide helpful and informative responses. 141 | Always respond in natural language, avoiding JSON or any other structured format in your final responses. 142 | If you use any tools or perform any actions, incorporate the results into your response naturally. 
143 | Ensure your final response is a coherent paragraph or set of paragraphs that directly addresses the user's query or request.""" 144 | 145 | def _chat_loop(self, prompt: str, system_message: str, tools: List[dict], progress_callback: Callable[[str], None] = None) -> Tuple[str, List[Any]]: 146 | messages = [{"role": "system", "content": self._construct_system_message()}] 147 | messages.extend(self.chat_history[-3:]) 148 | 149 | user_prompt = f"{prompt}\n\nRemember to respond in natural language, not in JSON or any other structured format." 150 | messages.append({"role": "user", "content": user_prompt if len(user_prompt) < 1000 else user_prompt[:1000] + "... (truncated)"}) 151 | 152 | response = self._chat_with_retry(messages, tools, progress_callback) 153 | 154 | content = response.get("content", "") 155 | tool_calls = response.get("tool_calls", []) 156 | 157 | if tool_calls: 158 | tool_responses = self._process_tool_calls(tool_calls, progress_callback) 159 | messages.append({"role": "assistant", "content": content}) 160 | messages.append({"role": "function", "name": "tool_response", "content": json.dumps(tool_responses)}) 161 | 162 | reflection_prompt = self._generate_reflection_prompt(prompt, content, tool_responses) 163 | messages.append({"role": "user", "content": reflection_prompt}) 164 | 165 | final_response = self._chat_with_retry(messages, tools, progress_callback) 166 | content = final_response.get("content", "") 167 | 168 | self.chat_history.extend(messages[1:]) # Add all new messages except the system message 169 | return content, tool_calls 170 | 171 | @retry(wait=wait_random_exponential(multiplier=1, max=40), stop=stop_after_attempt(MAX_RETRIES)) 172 | def _chat_with_retry(self, messages: List[Dict[str, str]], tools: List[dict], progress_callback: Callable[[str], None] = None) -> Dict[str, Any]: 173 | self.reset_token_usage() 174 | 175 | if progress_callback: 176 | progress_callback("Sending request to language model") 177 | 178 | try: 179 | self.check_rate_limit() 180 | 181 | response = self.client.chat.completions.create( 182 | model=self.model, 183 | messages=messages, 184 | tools=tools, 185 | tool_choice="auto", 186 | max_tokens=self.max_tokens 187 | ) 188 | 189 | self.update_rate_limit(getattr(response, 'headers', {})) 190 | response_message = response.choices[0].message 191 | 192 | if progress_callback: 193 | progress_callback("Received response from language model") 194 | 195 | content = response_message.content or "" 196 | tool_calls = response_message.tool_calls or [] 197 | self.update_token_usage(messages, content) 198 | 199 | return {"content": content, "tool_calls": tool_calls} 200 | 201 | except Exception as e: 202 | error_message = f"Error in chat: {str(e)}" 203 | if progress_callback: 204 | progress_callback(f"Error occurred: {error_message}") 205 | print(error_message) 206 | raise 207 | 208 | def _process_tool_calls(self, tool_calls, progress_callback=None): 209 | tool_responses = [] 210 | for tool_call in tool_calls: 211 | function_name = tool_call.function.name 212 | function_to_call = self.available_functions.get(function_name) 213 | if function_to_call: 214 | try: 215 | function_args = json.loads(tool_call.function.arguments) 216 | function_response = function_to_call(**function_args, progress_callback=progress_callback) 217 | tool_responses.append({ 218 | "tool_name": function_name, 219 | "tool_response": function_response 220 | }) 221 | except Exception as e: 222 | if progress_callback: 223 | progress_callback(f"Error in {function_name}: {str(e)}") 
224 | tool_responses.append({ 225 | "tool_name": function_name, 226 | "tool_response": {"error": str(e)} 227 | }) 228 | else: 229 | if progress_callback: 230 | progress_callback(f"Function {function_name} not found") 231 | return tool_responses 232 | 233 | def _generate_reflection_prompt(self, initial_prompt, initial_response, tool_responses): 234 | tool_results = "\n".join([f"{resp['tool_name']}: {json.dumps(resp['tool_response'])}" for resp in tool_responses]) 235 | reflection_prompt = f""" 236 | Initial user input: {initial_prompt} 237 | 238 | Your initial response: {initial_response} 239 | Tool usage results: {tool_results} 240 | 241 | Based on the initial user input, your initial response, and the results from the tools used, 242 | please provide a comprehensive response. Consider how the tool results affect your understanding 243 | of the user's request and how you can best address their needs. If you have enough information 244 | to answer the user's query, provide a final response. If not, you may use additional tools or 245 | ask for clarification. 246 | 247 | Remember to respond in natural language, not in JSON or any other structured format. 248 | Your reflection and response: 249 | """ 250 | return self.truncate_text(reflection_prompt, self.max_tokens) 251 | 252 | def update_token_usage(self, messages, response): 253 | tokens_used = sum(self.count_tokens(msg["content"]) for msg in messages) + self.count_tokens(response) 254 | self.tokens_used += tokens_used 255 | self.rate_limit_remaining -= tokens_used 256 | 257 | def reset_token_usage(self): 258 | current_time = time.time() 259 | if current_time >= self.rate_limit_reset: 260 | self.tokens_used = 0 261 | self.rate_limit_remaining = MAX_TOKENS_PER_MINUTE 262 | self.rate_limit_reset = current_time + 60 263 | 264 | def check_rate_limit(self): 265 | if self.rate_limit_remaining <= 0: 266 | sleep_time = max(0, self.rate_limit_reset - time.time()) 267 | time.sleep(sleep_time) 268 | self.reset_token_usage() 269 | 270 | def update_rate_limit(self, headers): 271 | remaining = headers.get('X-RateLimit-Remaining') 272 | reset = headers.get('X-RateLimit-Reset') 273 | 274 | if remaining is not None: 275 | try: 276 | self.rate_limit_remaining = int(remaining) 277 | except ValueError: 278 | self.rate_limit_remaining = MAX_TOKENS_PER_MINUTE 279 | 280 | if reset is not None: 281 | try: 282 | self.rate_limit_reset = float(reset) 283 | except ValueError: 284 | self.rate_limit_reset = time.time() + 60 285 | 286 | llm_api_calls = LLM_API_Calls() 287 | -------------------------------------------------------------------------------- /Brain_modules/lobes/association_areas.py: -------------------------------------------------------------------------------- 1 | # association_areas.py 2 | 3 | import numpy as np 4 | from sklearn.feature_extraction.text import TfidfVectorizer 5 | import pickle 6 | import re 7 | from collections import defaultdict 8 | from scipy.spatial.distance import cosine 9 | 10 | 11 | from typing import List, Dict 12 | 13 | class ToolAssociation: 14 | def __init__(self, name: str, keywords: List[str], embedding: np.ndarray): 15 | self.name = name 16 | self.keywords = set(keywords) 17 | self.embedding = embedding 18 | self.usage_count = 0 19 | self.success_rate = 0.5 20 | 21 | class AssociationAreas: 22 | def __init__(self, input_dim: int = 5000, hidden_layers: List[int] = [256, 128], learning_rate: float = 0.001): 23 | self.input_dim = input_dim 24 | self.hidden_layers = [input_dim] + hidden_layers + [1] 25 | self.learning_rate = 
learning_rate 26 | self.model_filename = "association_areas_model.pkl" 27 | self.weights = [] 28 | self.biases = [] 29 | self.tools = self._initialize_tools() 30 | self.vocabulary = set() 31 | self.word_to_index = {} 32 | self.index_to_word = {} 33 | self.word_embeddings = {} 34 | self.tfidf_vectorizer = TfidfVectorizer() 35 | self.suggestion_threshold = 0.2 36 | self._load_or_initialize_model() 37 | 38 | def _initialize_tools(self) -> Dict[str, ToolAssociation]: 39 | tools = { 40 | 'web_search': ToolAssociation('web_search', ['search', 'find', 'lookup', 'research', 'google', 'internet', 'web', 'online', 'information', 'update', 'news'], np.random.randn(100)), 41 | 'image_analysis': ToolAssociation('image_analysis', ['image', 'picture', 'photo', 'analyze', 'visual', 'see', 'look'], np.random.randn(100)), 42 | 'pdf_extraction': ToolAssociation('pdf_extraction', ['pdf', 'document', 'extract', 'read', 'text', 'file'], np.random.randn(100)), 43 | 'sentiment_analysis': ToolAssociation('sentiment_analysis', ['sentiment', 'feeling', 'emotion', 'opinion', 'mood', 'attitude', 'happy', 'sad', 'angry', 'excited', 'disappointed', 'satisfied', 'frustrated', 'emotional', 'feel'], np.random.randn(100)), 44 | 'local_command': ToolAssociation('local_command', ['run', 'execute', 'command', 'system', 'local', 'computer'], np.random.randn(100)), 45 | 'math_calculation': ToolAssociation('math_calculation', ['calculate', 'compute', 'math', 'arithmetic', 'number', 'equation', 'sum', 'difference', 'multiply', 'divide'], np.random.randn(100)), 46 | 'time_date': ToolAssociation('time_date', ['time', 'date', 'schedule', 'calendar', 'when', 'now'], np.random.randn(100)), 47 | 'weather': ToolAssociation('weather', ['weather', 'forecast', 'temperature', 'climate', 'rain', 'sun'], np.random.randn(100)), 48 | 'translation': ToolAssociation('translation', ['translate', 'language', 'foreign', 'meaning', 'interpret'], np.random.randn(100)), 49 | 'summarization': ToolAssociation('summarization', ['summarize', 'brief', 'short', 'concise', 'overview', 'gist'], np.random.randn(100)), 50 | 'recipe_search': ToolAssociation('recipe_search', ['recipe', 'cook', 'bake', 'ingredients', 'dish', 'meal'], np.random.randn(100)), 51 | 'code_explanation': ToolAssociation('code_explanation', ['code', 'programming', 'function', 'algorithm', 'debug', 'syntax'], np.random.randn(100)), 52 | 'general_knowledge': ToolAssociation('general_knowledge', ['what', 'why', 'how', 'explain', 'define', 'meaning', 'philosophy'], np.random.randn(100)) 53 | } 54 | return tools 55 | 56 | def _load_or_initialize_model(self): 57 | try: 58 | self._load_model() 59 | except (FileNotFoundError, KeyError, pickle.UnpicklingError): 60 | print("Error loading model. 
Initializing a new model.") 61 | self._initialize_new_model() 62 | 63 | def _initialize_new_model(self): 64 | self.weights = self._initialize_weights() 65 | self.biases = self._initialize_biases() 66 | self._update_vocabulary(set(word for tool in self.tools.values() for word in tool.keywords)) 67 | self._initialize_word_embeddings() 68 | 69 | def _update_vocabulary(self, new_words): 70 | for word in new_words: 71 | if word not in self.vocabulary: 72 | index = len(self.vocabulary) 73 | if index < self.input_dim: 74 | self.vocabulary.add(word) 75 | self.word_to_index[word] = index 76 | self.index_to_word[index] = word 77 | 78 | def _initialize_word_embeddings(self): 79 | for word in self.vocabulary: 80 | self.word_embeddings[word] = np.random.randn(100) # 100-dimensional embeddings 81 | 82 | def _initialize_weights(self) -> List[np.ndarray]: 83 | return [np.random.randn(i, j) * np.sqrt(2. / (i + j)) for i, j in zip(self.hidden_layers[:-1], self.hidden_layers[1:])] 84 | 85 | def _initialize_biases(self) -> List[np.ndarray]: 86 | return [np.zeros((1, nodes)) for nodes in self.hidden_layers[1:]] 87 | 88 | def _sigmoid(self, x: np.ndarray) -> np.ndarray: 89 | return 1 / (1 + np.exp(-np.clip(x, -100, 100))) 90 | 91 | def _forward_propagation(self, X: np.ndarray) -> List[np.ndarray]: 92 | activations = [X] 93 | for i in range(len(self.weights) - 1): 94 | z = np.dot(activations[-1], self.weights[i]) + self.biases[i] 95 | a = np.maximum(0.01 * z, z) # Leaky ReLU 96 | activations.append(a) 97 | z = np.dot(activations[-1], self.weights[-1]) + self.biases[-1] 98 | a = self._sigmoid(z) 99 | activations.append(a) 100 | return activations 101 | 102 | def process(self, prompt: str) -> str: 103 | try: 104 | words = self._tokenize(prompt) 105 | self._update_vocabulary(words) 106 | X_input = self._preprocess_input(words) 107 | prediction = self._forward_propagation(X_input)[-1][0][0] 108 | tool_suggestions = self._suggest_tools(prompt) 109 | 110 | if not tool_suggestions: 111 | return f"Association Areas Response: Prediction: {prediction:.4f}, Action: Just reply to the user properly without tools" 112 | else: 113 | return f"Association Areas Response: Prediction: {prediction:.4f}, Suggested Tools: {tool_suggestions}" 114 | except Exception as e: 115 | return f"Error in Association Areas processing: {str(e)}" 116 | 117 | def _tokenize(self, text: str) -> List[str]: 118 | return re.findall(r'\w+', text.lower()) 119 | 120 | def _preprocess_input(self, words: List[str]) -> np.ndarray: 121 | input_vector = np.zeros((1, self.input_dim)) 122 | for word in words: 123 | if word in self.word_to_index: 124 | input_vector[0, self.word_to_index[word]] = 1 125 | return input_vector 126 | 127 | def _suggest_tools(self, prompt: str) -> List[str]: 128 | words = self._tokenize(prompt) 129 | prompt_embedding = self._get_text_embedding(prompt) 130 | 131 | tool_scores = [] 132 | for tool in self.tools.values(): 133 | keyword_match = len(set(words) & tool.keywords) / len(tool.keywords) 134 | embedding_similarity = 1 - cosine(prompt_embedding, tool.embedding) 135 | combined_score = 0.7 * keyword_match + 0.3 * embedding_similarity 136 | tool_scores.append((tool.name, combined_score)) 137 | 138 | sorted_tools = sorted(tool_scores, key=lambda x: x[1], reverse=True) 139 | suggested_tools = [tool for tool, score in sorted_tools if score > self.suggestion_threshold][:3] 140 | 141 | return suggested_tools 142 | 143 | def _get_text_embedding(self, text: str) -> np.ndarray: 144 | words = self._tokenize(text) 145 | word_vectors = 
[self.word_embeddings.get(word, np.zeros(100)) for word in words] 146 | return np.mean(word_vectors, axis=0) 147 | 148 | def update_from_interaction(self, prompt: str, used_tool: str, success_rating: float): 149 | words = self._tokenize(prompt) 150 | self._update_vocabulary(words) 151 | X_input = self._preprocess_input(words) 152 | 153 | target = success_rating 154 | activations = self._forward_propagation(X_input) 155 | 156 | # Backpropagation 157 | delta = activations[-1] - target 158 | for i in range(len(self.weights) - 1, -1, -1): 159 | dW = np.dot(activations[i].T, delta) 160 | dB = np.sum(delta, axis=0, keepdims=True) 161 | 162 | self.weights[i] -= self.learning_rate * dW 163 | self.biases[i] -= self.learning_rate * dB 164 | 165 | if i > 0: 166 | delta = np.dot(delta, self.weights[i].T) * (activations[i] > 0) # Leaky ReLU derivative 167 | 168 | # Update tool statistics 169 | if used_tool in self.tools: 170 | tool = self.tools[used_tool] 171 | tool.usage_count += 1 172 | tool.success_rate = (tool.success_rate * (tool.usage_count - 1) + success_rating) / tool.usage_count 173 | 174 | # Update tool embedding 175 | prompt_embedding = self._get_text_embedding(prompt) 176 | tool.embedding = 0.9 * tool.embedding + 0.1 * prompt_embedding 177 | 178 | # Dynamic threshold adjustment 179 | self.suggestion_threshold = max(0.1, min(0.3, np.mean([tool.success_rate for tool in self.tools.values()]))) 180 | 181 | self._save_model() 182 | 183 | def _save_model(self): 184 | model_data = { 185 | 'input_dim': self.input_dim, 186 | 'hidden_layers': self.hidden_layers, 187 | 'weights': self.weights, 188 | 'biases': self.biases, 189 | 'tools': self.tools, 190 | 'vocabulary': self.vocabulary, 191 | 'word_to_index': self.word_to_index, 192 | 'index_to_word': self.index_to_word, 193 | 'word_embeddings': self.word_embeddings, 194 | 'learning_rate': self.learning_rate, 195 | 'suggestion_threshold': self.suggestion_threshold 196 | } 197 | with open(self.model_filename, 'wb') as f: 198 | pickle.dump(model_data, f) 199 | 200 | def _load_model(self): 201 | with open(self.model_filename, 'rb') as f: 202 | model_data = pickle.load(f) 203 | 204 | self.input_dim = model_data['input_dim'] 205 | self.hidden_layers = model_data['hidden_layers'] 206 | self.weights = model_data['weights'] 207 | self.biases = model_data['biases'] 208 | self.tools = model_data['tools'] 209 | self.vocabulary = model_data['vocabulary'] 210 | self.word_to_index = model_data['word_to_index'] 211 | self.index_to_word = model_data['index_to_word'] 212 | self.word_embeddings = model_data['word_embeddings'] 213 | self.learning_rate = model_data['learning_rate'] 214 | self.suggestion_threshold = model_data['suggestion_threshold'] 215 | -------------------------------------------------------------------------------- /Brain_modules/lobes/cerebellar_lobe.py: -------------------------------------------------------------------------------- 1 | # cerebellar_lobe.py 2 | 3 | import numpy as np 4 | import time 5 | from keras.models import Sequential 6 | from keras.layers import Dense, Input 7 | from keras.optimizers import Adam 8 | from keras.callbacks import EarlyStopping 9 | 10 | class CerebellarLobe: 11 | def __init__(self): 12 | self.model = self._create_model() 13 | self.threshold = 0.5 # Threshold for binary classification 14 | 15 | def _create_model(self): 16 | model = Sequential([ 17 | Input(shape=(1,)), 18 | Dense(128, activation='relu'), 19 | Dense(64, activation='relu'), 20 | Dense(32, activation='relu'), 21 | Dense(16, activation='relu'), 22 | 
Dense(1, activation='sigmoid') 23 | ]) 24 | optimizer = Adam(learning_rate=0.001) 25 | model.compile(optimizer=optimizer, loss='binary_crossentropy', metrics=['accuracy']) 26 | return model 27 | 28 | def process(self, prompt): 29 | print(f"Cerebellar lobe processing at {time.strftime('%Y-%m-%d %H:%M:%S')}") 30 | try: 31 | sequence_steps = prompt.split(',') 32 | if not sequence_steps: 33 | return "No sequence steps found." 34 | 35 | sequence_analysis = f"Steps to be followed: {', '.join(sequence_steps)}" 36 | X_input = np.array([len(sequence_steps)]) 37 | prediction = self.model.predict(X_input.reshape(1, -1)) 38 | binary_prediction = (prediction > self.threshold).astype(int) 39 | 40 | for _ in range(5): 41 | time.sleep(1) 42 | print(f"Cerebellar lobe thinking: {time.strftime('%Y-%m-%d %H:%M:%S')}") 43 | 44 | return f"Cerebellar Lobe Analysis: {sequence_analysis}, Prediction: {binary_prediction}" 45 | except Exception as e: 46 | return f"Error in cerebellar lobe processing: {str(e)}" 47 | 48 | def train(self, X_train, y_train, epochs=50, batch_size=8): 49 | early_stopping = EarlyStopping(monitor='loss', patience=5) 50 | self.model.fit(X_train, y_train, epochs=epochs, batch_size=batch_size, callbacks=[early_stopping]) 51 | print(f"Model trained at {time.strftime('%Y-%m-%d %H:%M:%S')}") 52 | 53 | def evaluate(self, X_test, y_test): 54 | loss, accuracy = self.model.evaluate(X_test, y_test) 55 | print(f"Model evaluation at {time.strftime('%Y-%m-%d %H:%M:%S')}: Loss = {loss}, Accuracy = {accuracy}") 56 | return loss, accuracy 57 | -------------------------------------------------------------------------------- /Brain_modules/lobes/frontal_lobe.py: -------------------------------------------------------------------------------- 1 | import numpy as np 2 | import json 3 | import time 4 | from typing import Dict, Any, List, Tuple 5 | import random 6 | 7 | class FrontalLobe: 8 | def __init__(self): 9 | self.working_memory = [] 10 | self.attention_focus = None 11 | self.emotional_state = 'neutral' 12 | self.inhibition_control = 0.7 13 | self.planning_depth = 5 14 | self.priority_words = set(['urgent', 'important', 'critical', 'immediately', 'deadline', 'crucial']) 15 | self.decision_history = [] 16 | self.tools = [ 17 | { 18 | "name": "run_local_command", 19 | "description": "Execute a local command on the system to perform tasks such as file manipulation, retrieving system information, or running scripts.", 20 | "parameters": { 21 | "command": { 22 | "type": "string", 23 | "description": "The specific command to execute on the local system." 24 | } 25 | } 26 | }, 27 | { 28 | "name": "web_research", 29 | "description": "Perform a web research query to gather information from online sources.", 30 | "parameters": { 31 | "query": { 32 | "type": "string", 33 | "description": "The research query to perform." 34 | } 35 | } 36 | }, 37 | { 38 | "name": "analyze_image", 39 | "description": "Analyze an image from a provided URL or a local path and generate a description of the image's content.", 40 | "parameters": { 41 | "image_url": { 42 | "type": "string", 43 | "description": "The URL or local path of the image to analyze." 44 | } 45 | } 46 | }, 47 | { 48 | "name": "call_expert", 49 | "description": "A tool that can ask an expert in any field by providing the expertise and the question. The expert will answer the question.", 50 | "parameters": { 51 | "expertise": { 52 | "type": "string", 53 | "description": "The expertise of the expert you need. IE: math, science, etc." 
54 | }, 55 | "question": { 56 | "type": "string", 57 | "description": "The question you want to ask the expert." 58 | } 59 | } 60 | } 61 | ] 62 | 63 | def process(self, prompt: str) -> Dict[str, Any]: 64 | self.update_working_memory(prompt) 65 | self.focus_attention(prompt) 66 | plan = self.executive_function(prompt) 67 | self.regulate_emotion(prompt) 68 | 69 | action, confidence = self.choose_action(prompt, plan) 70 | decision = self.make_decision(plan, action) 71 | 72 | self.update_decision_history(decision) 73 | 74 | analysis = self.generate_analysis(prompt, plan, decision, action, confidence) 75 | return { 76 | "response": decision["response"], 77 | "tool_call": decision.get("tool_call"), 78 | "analysis": analysis 79 | } 80 | 81 | def update_working_memory(self, prompt: str) -> None: 82 | self.working_memory = prompt.split()[-10:] # Keep last 10 words 83 | 84 | def focus_attention(self, prompt: str) -> None: 85 | words = prompt.lower().split() 86 | self.attention_focus = next((word for word in words if word in self.priority_words), words[0] if words else None) 87 | 88 | def executive_function(self, prompt: str) -> List[Dict[str, Any]]: 89 | return [{'word': word, 'priority': 1.0 * (0.9 ** i), 'inhibit': self.should_inhibit(word), 90 | 'action': 'Inhibit' if self.should_inhibit(word) else 'Process'} 91 | for i, word in enumerate(prompt.split()[:self.planning_depth])] 92 | 93 | def should_inhibit(self, word: str) -> bool: 94 | return (len(word) / 10 > self.inhibition_control) and (word.lower() not in self.priority_words) 95 | 96 | def regulate_emotion(self, prompt: str) -> None: 97 | emotion_words = { 98 | 'positive': ['happy', 'good', 'excellent', 'wonderful', 'excited', 'optimistic'], 99 | 'negative': ['sad', 'bad', 'terrible', 'awful', 'anxious', 'frustrated'], 100 | 'neutral': ['consider', 'analyze', 'evaluate', 'assess', 'review'] 101 | } 102 | self.emotional_state = next((emotion for emotion, words in emotion_words.items() 103 | if any(word in prompt.lower() for word in words)), 'neutral') 104 | 105 | def choose_action(self, prompt: str, plan: List[Dict[str, Any]]) -> Tuple[str, float]: 106 | if any(word in prompt.lower() for word in ['run', 'execute', 'command']): 107 | return "run_local_command", 0.9 108 | elif any(word in prompt.lower() for word in ['research', 'search', 'find information']): 109 | return "web_research", 0.9 110 | elif any(word in prompt.lower() for word in ['image', 'picture', 'photo']): 111 | return "analyze_image", 0.9 112 | elif any(word in prompt.lower() for word in ['expert', 'question']): 113 | return "call_expert", 0.9 114 | else: 115 | return "text_response", 0.8 116 | 117 | def make_decision(self, plan: List[Dict[str, Any]], action: str) -> Dict[str, Any]: 118 | if action == "text_response": 119 | return self.generate_text_response(plan) 120 | return self.generate_tool_call(action, plan) 121 | 122 | def generate_text_response(self, plan: List[Dict[str, Any]]) -> Dict[str, Any]: 123 | priority_actions = [a for a in plan if not a['inhibit']] 124 | if priority_actions: 125 | return {"response": f"The frontal lobe suggests focusing on '{priority_actions[0]['word']}' as a key action."} 126 | return {"response": "The frontal lobe recommends careful consideration before proceeding."} 127 | 128 | def generate_tool_call(self, action: str, plan: List[Dict[str, Any]]) -> Dict[str, Any]: 129 | tool_call = {"name": action, "arguments": {}} 130 | priority_words = " ".join([a['word'] for a in plan if not a['inhibit']][:5]) 131 | 132 | if action == 
"run_local_command": 133 | tool_call["arguments"]["command"] = priority_words 134 | elif action == "web_research": 135 | tool_call["arguments"]["query"] = priority_words 136 | elif action == "analyze_image": 137 | tool_call["arguments"]["image_url"] = "path/to/image.jpg" # Placeholder 138 | elif action == "call_expert": 139 | tool_call["arguments"]["expertise"] = "general" # Placeholder 140 | tool_call["arguments"]["question"] = priority_words 141 | 142 | return { 143 | "response": f"The frontal lobe recommends using the {action} tool for effective task execution.", 144 | "tool_call": tool_call 145 | } 146 | 147 | def update_decision_history(self, decision: Dict[str, Any]) -> None: 148 | self.decision_history.append({'timestamp': time.strftime('%Y-%m-%d %H:%M:%S'), 'decision': decision}) 149 | if len(self.decision_history) > 50: 150 | self.decision_history.pop(0) 151 | 152 | def generate_analysis(self, prompt: str, plan: List[Dict[str, Any]], decision: Dict[str, Any], action: str, confidence: float) -> Dict[str, Any]: 153 | return { 154 | "Working Memory": self.working_memory, 155 | "Attention Focus": self.attention_focus, 156 | "Emotional State": self.emotional_state, 157 | "Executive Function Plan": plan, 158 | "Chosen Action": action, 159 | "Decision": decision, 160 | "Confidence": confidence, 161 | "Decision History": self.decision_history[-5:], 162 | "Input Prompt": prompt 163 | } 164 | -------------------------------------------------------------------------------- /Brain_modules/lobes/insular_cortex.py: -------------------------------------------------------------------------------- 1 | # insular_cortex.py 2 | 3 | import numpy as np 4 | import time 5 | from keras.models import Sequential 6 | from keras.layers import Dense, Input 7 | from keras.optimizers import Adam 8 | 9 | class InsularCortex: 10 | def __init__(self): 11 | self.model = self._create_model() 12 | 13 | def _create_model(self): 14 | model = Sequential([ 15 | Input(shape=(1,)), 16 | Dense(64, activation='relu'), 17 | Dense(32, activation='relu'), 18 | Dense(1, activation='sigmoid') 19 | ]) 20 | optimizer = Adam(learning_rate=0.001) 21 | model.compile(optimizer=optimizer, loss='binary_crossentropy', metrics=['accuracy']) 22 | return model 23 | 24 | def process(self, prompt): 25 | print(f"Insular Cortex processing at {time.strftime('%Y-%m-%d %H:%M:%S')}") 26 | try: 27 | performance_check = "System performance is optimal." 28 | emotions = ["neutral", "curious", "thoughtful"] 29 | selected_emotion = np.random.choice(emotions) 30 | X_input = np.array([1]) 31 | prediction = self.model.predict(X_input.reshape(1, -1)) 32 | for _ in range(5): 33 | time.sleep(1) 34 | print(f"Insular Cortex ({selected_emotion}): thinking at {time.strftime('%Y-%m-%d %H:%M:%S')}") 35 | internal_state = "calm" 36 | sensory_feedback = "All sensory systems are functioning within normal parameters." 37 | decision = "Proceed with current operational parameters." 
38 | result = (f"Insular Cortex Analysis: {performance_check}, Emotion: {selected_emotion}, " 39 | f"Prediction: {prediction}, Internal State: {internal_state}, " 40 | f"Sensory Feedback: {sensory_feedback}, Decision: {decision}") 41 | return result 42 | except Exception as e: 43 | return f"Error in Insular Cortex processing: {str(e)}" 44 | -------------------------------------------------------------------------------- /Brain_modules/lobes/limbic_lobe.py: -------------------------------------------------------------------------------- 1 | # limbic_lobe.py 2 | 3 | import numpy as np 4 | import time 5 | from textblob import TextBlob 6 | from keras.models import Sequential 7 | from keras.layers import Dense, Input 8 | from keras.optimizers import Adam 9 | 10 | class LimbicLobe: 11 | def __init__(self): 12 | self.model = self._create_model() 13 | 14 | def _create_model(self): 15 | model = Sequential([ 16 | Input(shape=(1,)), 17 | Dense(64, activation='relu'), 18 | Dense(32, activation='relu'), 19 | Dense(1, activation='sigmoid') 20 | ]) 21 | optimizer = Adam(learning_rate=0.001) 22 | model.compile(optimizer=optimizer, loss='binary_crossentropy', metrics=['accuracy']) 23 | return model 24 | 25 | def process(self, prompt): 26 | print(f"Limbic lobe processing at {time.strftime('%Y-%m-%d %H:%M:%S')}") 27 | try: 28 | sentiment = TextBlob(prompt).sentiment 29 | emotional_response = f"The emotional tone detected is {'positive' if sentiment.polarity > 0 else 'negative' if sentiment.polarity < 0 else 'neutral'}." 30 | X_input = np.array([sentiment.polarity]) 31 | prediction = self.model.predict(X_input.reshape(1, -1)) 32 | for _ in range(5): 33 | time.sleep(1) 34 | print(f"Limbic lobe thinking: {time.strftime('%Y-%m-%d %H:%M:%S')}") 35 | return f"Limbic Lobe Analysis: {emotional_response}, Prediction: {prediction}" 36 | except Exception as e: 37 | return f"Error in limbic lobe processing: {str(e)}" 38 | -------------------------------------------------------------------------------- /Brain_modules/lobes/occipital_lobe.py: -------------------------------------------------------------------------------- 1 | # occipital_lobe.py 2 | 3 | import numpy as np 4 | import pickle 5 | from sklearn.feature_extraction.text import CountVectorizer 6 | from sklearn.naive_bayes import MultinomialNB 7 | from sklearn.pipeline import make_pipeline 8 | 9 | class OccipitalLobe: 10 | def __init__(self): 11 | self.visual_keywords = ['see', 'saw', 'look', 'view', 'observe'] 12 | self.vectorizer = CountVectorizer() 13 | self.model = MultinomialNB() 14 | self.pipeline = make_pipeline(self.vectorizer, self.model) 15 | 16 | initial_data = ["I see a bird", "Look at the sky", "Observe the stars"] 17 | initial_labels = [0, 1, 0] 18 | self.pipeline.fit(initial_data, initial_labels) 19 | 20 | self._load_model() 21 | 22 | def _load_model(self): 23 | try: 24 | with open('occipital_lobe_model.pkl', 'rb') as f: 25 | self.pipeline = pickle.load(f) 26 | except FileNotFoundError: 27 | pass 28 | 29 | def _save_model(self): 30 | with open('occipital_lobe_model.pkl', 'wb') as f: 31 | pickle.dump(self.pipeline, f) 32 | 33 | def process(self, prompt): 34 | try: 35 | features = self.pipeline.named_steps['countvectorizer'].transform([prompt]) 36 | prediction = self.pipeline.named_steps['multinomialnb'].predict(features) 37 | visual_analysis = self._analyze_visual_content(prompt) 38 | self._train_model(prompt, visual_analysis) 39 | return visual_analysis 40 | except Exception as e: 41 | return f"Error processing occipital lobe: {e}" 42 | 43 | def 
_analyze_visual_content(self, prompt): 44 | words = prompt.lower().split() 45 | visual_words = [word for word in words if word in self.visual_keywords] 46 | if visual_words: 47 | return f"Visual elements detected: {', '.join(visual_words)}" 48 | return "No explicit visual elements detected" 49 | 50 | def _train_model(self, prompt, analysis): 51 | labels = [1 if "detected" in analysis else 0] 52 | feature_vector = self.pipeline.named_steps['countvectorizer'].transform([prompt]) 53 | self.pipeline.named_steps['multinomialnb'].partial_fit(feature_vector, labels, classes=np.array([0, 1])) 54 | self._save_model() 55 | -------------------------------------------------------------------------------- /Brain_modules/lobes/parietal_lobe.py: -------------------------------------------------------------------------------- 1 | import numpy as np 2 | import time 3 | import re 4 | import pickle 5 | from sklearn.feature_extraction.text import CountVectorizer 6 | from sklearn.naive_bayes import MultinomialNB 7 | from sklearn.pipeline import make_pipeline 8 | 9 | class ParietalLobe: 10 | def __init__(self): 11 | """ 12 | Initializes the ParietalLobe class with predefined sets of keywords for spatial, sensory, and navigation processing. 13 | Also initializes the machine learning model for learning and adaptation. 14 | """ 15 | self.spatial_keywords = ['up', 'down', 'left', 'right', 'above', 'below', 'near', 'far'] 16 | self.sensory_keywords = ['touch', 'feel', 'texture', 'temperature', 'pressure', 'pain'] 17 | self.navigation_keywords = ['map', 'route', 'direction', 'location', 'distance', 'navigate'] 18 | 19 | self.vectorizer = CountVectorizer() 20 | self.model = MultinomialNB() 21 | self.pipeline = make_pipeline(self.vectorizer, self.model) 22 | 23 | initial_data = [ 24 | "The box is above the table, near the window", 25 | "I feel a rough texture and cold temperature", 26 | "Navigate to the nearest exit using the map", 27 | "Calculate the distance between points A (2,3) and B (5,7)", 28 | "The room temperature is 72 degrees", 29 | "Process this sentence without any spatial or numerical content" 30 | ] 31 | initial_labels = [0, 1, 0, 1, 0, 0] 32 | self.pipeline.fit(initial_data, initial_labels) 33 | 34 | self._load_model() 35 | self.error_log = [] 36 | 37 | def _load_model(self): 38 | """ 39 | Loads the model and vectorizer state from a file, if available. 40 | """ 41 | try: 42 | with open('parietal_lobe_model.pkl', 'rb') as f: 43 | self.pipeline = pickle.load(f) 44 | except FileNotFoundError: 45 | pass 46 | 47 | def _save_model(self): 48 | """ 49 | Saves the model and vectorizer state to a file. 50 | """ 51 | with open('parietal_lobe_model.pkl', 'wb') as f: 52 | pickle.dump(self.pipeline, f) 53 | 54 | def _preprocess_prompt(self, prompt): 55 | """ 56 | Preprocesses the input prompt to ensure it is clean and consistent. 57 | 58 | Args: 59 | prompt (str): The input sentence to be preprocessed. 60 | 61 | Returns: 62 | str: The cleaned and preprocessed prompt. 63 | """ 64 | return prompt 65 | 66 | def _extract_features(self, prompt): 67 | """ 68 | Extracts feature vectors from the given prompt using CountVectorizer. 69 | 70 | Args: 71 | prompt (str): The input sentence to be vectorized. 72 | 73 | Returns: 74 | scipy.sparse.csr.csr_matrix: The feature vectors extracted from the prompt. 75 | """ 76 | return self.pipeline.named_steps['countvectorizer'].transform([prompt]) 77 | 78 | def process(self, prompt): 79 | """ 80 | Processes the given prompt to analyze spatial, sensory, navigation, and numerical content. 
81 | Also trains the model incrementally with the new prompt. 82 | 83 | Args: 84 | prompt (str): The input sentence to be processed. 85 | 86 | Returns: 87 | str: A detailed response summarizing the analysis of the prompt. 88 | """ 89 | prompt = self._preprocess_prompt(prompt) 90 | 91 | try: 92 | features = self._extract_features(prompt) 93 | prediction = self.pipeline.named_steps['multinomialnb'].predict(features) 94 | 95 | spatial_analysis = self._analyze_spatial_content(prompt) 96 | sensory_integration = self._integrate_sensory_information(prompt) 97 | navigation_assessment = self._assess_navigation(prompt) 98 | numerical_analysis = self._analyze_numerical_data(prompt) 99 | 100 | for _ in range(3): 101 | time.sleep(0.5) 102 | 103 | analysis = { 104 | "Spatial Analysis": spatial_analysis, 105 | "Sensory Integration": sensory_integration, 106 | "Navigation Assessment": navigation_assessment, 107 | "Numerical Analysis": numerical_analysis 108 | } 109 | 110 | self._train_model(prompt, analysis) 111 | 112 | return f"Parietal Lobe Response: Spatial-sensory integration complete. {self._summarize_analysis(analysis)}" 113 | except Exception as e: 114 | self._handle_error(prompt, e) 115 | return f"Parietal Lobe Response: Error in processing: {str(e)}. Spatial-sensory systems recalibrating." 116 | 117 | def _train_model(self, prompt, analysis): 118 | """ 119 | Trains the model incrementally with the new prompt and its analysis. 120 | 121 | Args: 122 | prompt (str): The input sentence. 123 | analysis (dict): The analysis result of the prompt. 124 | """ 125 | labels = [1 if "detected" in label or "processing" in label or "identified" in label or "found" in label else 0 for label in [ 126 | analysis["Spatial Analysis"], 127 | analysis["Sensory Integration"], 128 | analysis["Navigation Assessment"], 129 | analysis["Numerical Analysis"] 130 | ]] 131 | 132 | feature_vector = self._extract_features(prompt) 133 | feature_vector = np.vstack([feature_vector.toarray()] * len(labels)) 134 | self.pipeline.named_steps['multinomialnb'].partial_fit(feature_vector, labels, classes=np.array([0, 1])) 135 | 136 | self._save_model() 137 | 138 | def _analyze_spatial_content(self, prompt): 139 | """ 140 | Analyzes the prompt for spatial content based on predefined spatial keywords. 141 | 142 | Args: 143 | prompt (str): The input sentence to be analyzed. 144 | 145 | Returns: 146 | str: A summary of spatial elements detected in the prompt. 147 | """ 148 | words = prompt.lower().split() 149 | spatial_words = [word for word in words if word in self.spatial_keywords] 150 | if spatial_words: 151 | return f"Spatial elements detected: {', '.join(spatial_words)}" 152 | return "No explicit spatial elements detected" 153 | 154 | def _integrate_sensory_information(self, prompt): 155 | """ 156 | Analyzes the prompt for sensory information based on predefined sensory keywords. 157 | 158 | Args: 159 | prompt (str): The input sentence to be analyzed. 160 | 161 | Returns: 162 | str: A summary of sensory information detected in the prompt. 163 | """ 164 | sensory_words = [word for word in prompt.lower().split() if word in self.sensory_keywords] 165 | if sensory_words: 166 | return f"Sensory information processing: {', '.join(sensory_words)}" 167 | return "No specific sensory information to process" 168 | 169 | def _assess_navigation(self, prompt): 170 | """ 171 | Analyzes the prompt for navigation-related content based on predefined navigation keywords. 172 | 173 | Args: 174 | prompt (str): The input sentence to be analyzed. 
175 | 176 | Returns: 177 | str: A summary of navigation-related concepts detected in the prompt. 178 | """ 179 | nav_words = [word for word in prompt.lower().split() if word in self.navigation_keywords] 180 | if nav_words: 181 | return f"Navigation-related concepts identified: {', '.join(nav_words)}" 182 | return "No navigation-specific elements found" 183 | 184 | def _analyze_numerical_data(self, prompt): 185 | """ 186 | Analyzes the prompt for numerical data and performs basic statistical analysis if numerical data is found. 187 | 188 | Args: 189 | prompt (str): The input sentence to be analyzed. 190 | 191 | Returns: 192 | str: A summary of numerical data detected in the prompt and basic statistical analysis. 193 | """ 194 | numbers = re.findall(r'\d+', prompt) 195 | if numbers: 196 | numbers = [int(num) for num in numbers] 197 | return f"Numerical data found: mean={np.mean(numbers):.2f}, median={np.median(numbers):.2f}, count={len(numbers)}" 198 | return "No numerical data found" 199 | 200 | def _summarize_analysis(self, analysis): 201 | """ 202 | Summarizes the analysis results into a comprehensive response. 203 | 204 | Args: 205 | analysis (dict): The dictionary containing the analysis results. 206 | 207 | Returns: 208 | str: A summary of the analysis results. 209 | """ 210 | summary = [] 211 | if "elements detected" in analysis["Spatial Analysis"]: 212 | summary.append("Spatial processing activated") 213 | if "information processing" in analysis["Sensory Integration"]: 214 | summary.append("Sensory integration in progress") 215 | if "concepts identified" in analysis["Navigation Assessment"]: 216 | summary.append("Navigation systems engaged") 217 | if "Numerical data found" in analysis["Numerical Analysis"]: 218 | summary.append("Quantitative analysis performed") 219 | 220 | if not summary: 221 | return "No significant spatial-sensory patterns identified. Maintaining baseline awareness." 222 | 223 | return " ".join(summary) + f" Full analysis: {analysis}" 224 | 225 | def _handle_error(self, prompt, error): 226 | """ 227 | Handles errors encountered during processing and adapts the system to prevent future errors. 228 | 229 | Args: 230 | prompt (str): The input sentence that caused the error. 231 | error (Exception): The error encountered during processing. 232 | """ 233 | self.error_log.append((prompt, str(error))) 234 | -------------------------------------------------------------------------------- /Brain_modules/lobes/temporal_lobe.py: -------------------------------------------------------------------------------- 1 | import numpy as np 2 | import pickle 3 | from sklearn.feature_extraction.text import CountVectorizer 4 | from sklearn.naive_bayes import MultinomialNB 5 | from sklearn.pipeline import make_pipeline 6 | from sklearn.metrics.pairwise import cosine_similarity 7 | from typing import List, Dict, Any 8 | 9 | class TemporalLobe: 10 | def __init__(self): 11 | """ 12 | Initializes the TemporalLobe class with predefined auditory keywords and machine learning models. 
13 | """ 14 | self.embedding_dim = 50 # Dimensionality of word embeddings 15 | self.auditory_keywords = ['hear', 'listen', 'sound', 'music', 'noise', 'silent', 'volume', 'whisper', 'shout', 'speak'] 16 | self.vectorizer = CountVectorizer() 17 | self.model = MultinomialNB() 18 | self.pipeline = make_pipeline(self.vectorizer, self.model) 19 | self.word_embeddings = self._initialize_word_embeddings() 20 | 21 | initial_data = ["I hear music", "Listen to the sound", "The sound is loud"] 22 | initial_labels = [0, 1, 0] 23 | self.pipeline.fit(initial_data, initial_labels) 24 | 25 | self._load_model() 26 | 27 | def _initialize_word_embeddings(self): 28 | """ 29 | Initializes word embeddings for the auditory keywords and other words. 30 | 31 | Returns: 32 | dict: A dictionary of word embeddings for the auditory keywords. 33 | """ 34 | vocabulary = self.auditory_keywords + ['the', 'is', 'a', 'to', 'and', 'can', 'some', 'this', 'room', 'level', 'high'] 35 | embeddings = {word: np.random.rand(self.embedding_dim) for word in vocabulary} 36 | return embeddings 37 | 38 | def _load_model(self): 39 | """ 40 | Loads the machine learning model from a file if available. 41 | """ 42 | try: 43 | with open('temporal_lobe_model.pkl', 'rb') as f: 44 | self.pipeline = pickle.load(f) 45 | except FileNotFoundError: 46 | pass 47 | 48 | def _save_model(self): 49 | """ 50 | Saves the machine learning model to a file. 51 | """ 52 | with open('temporal_lobe_model.pkl', 'wb') as f: 53 | pickle.dump(self.pipeline, f) 54 | 55 | def process(self, prompt: str) -> str: 56 | """ 57 | Processes the given prompt to analyze auditory content. 58 | 59 | Args: 60 | prompt (str): The input sentence to be processed. 61 | 62 | Returns: 63 | str: A detailed response summarizing the analysis of the prompt. 64 | """ 65 | try: 66 | features = self._extract_features(prompt) 67 | prediction = self.pipeline.named_steps['multinomialnb'].predict(features) 68 | auditory_analysis = self._analyze_auditory_content(prompt) 69 | self._train_model(prompt, auditory_analysis) 70 | return f"Auditory analysis complete. {auditory_analysis} Full analysis: {auditory_analysis}" 71 | except Exception as e: 72 | return f"Error processing temporal lobe: {e}" 73 | 74 | def _extract_features(self, prompt: str) -> Any: 75 | """ 76 | Extracts feature vectors from the given prompt using CountVectorizer. 77 | 78 | Args: 79 | prompt (str): The input sentence to be vectorized. 80 | 81 | Returns: 82 | scipy.sparse.csr.csr_matrix: The feature vectors extracted from the prompt. 83 | """ 84 | return self.pipeline.named_steps['countvectorizer'].transform([prompt]) 85 | 86 | def _analyze_auditory_content(self, prompt: str) -> str: 87 | """ 88 | Analyzes the prompt for auditory content based on predefined auditory keywords. 89 | 90 | Args: 91 | prompt (str): The input sentence to be analyzed. 92 | 93 | Returns: 94 | str: A summary of auditory elements detected in the prompt. 
95 | """ 96 | words = prompt.lower().split() 97 | auditory_words = [word for word in words if word in self.auditory_keywords] 98 | 99 | # Check for similar words if no direct keywords are found 100 | if not auditory_words: 101 | for word in words: 102 | if self._find_similar_word(word): 103 | auditory_words.append(word) 104 | 105 | if auditory_words: 106 | return f"Auditory elements detected: {', '.join(auditory_words)}" 107 | return "No explicit auditory elements detected" 108 | 109 | def _find_similar_word(self, word: str) -> bool: 110 | """ 111 | Finds a similar word in the auditory keywords using cosine similarity. 112 | 113 | Args: 114 | word (str): The word to find a similar word for. 115 | 116 | Returns: 117 | bool: True if a similar word is found, False otherwise. 118 | """ 119 | if word not in self.word_embeddings: 120 | return False 121 | 122 | word_embedding = self.word_embeddings[word] 123 | similarities = {kw: self._cosine_similarity(word_embedding, self.word_embeddings[kw]) for kw in self.auditory_keywords} 124 | most_similar_word = max(similarities, key=similarities.get) 125 | 126 | return similarities[most_similar_word] > 0.7 # Threshold for considering words as similar 127 | 128 | def _cosine_similarity(self, vec1: np.ndarray, vec2: np.ndarray) -> float: 129 | """ 130 | Computes the cosine similarity between two vectors. 131 | 132 | Args: 133 | vec1 (np.ndarray): The first vector. 134 | vec2 (np.ndarray): The second vector. 135 | 136 | Returns: 137 | float: The cosine similarity between the two vectors. 138 | """ 139 | return np.dot(vec1, vec2) / (np.linalg.norm(vec1) * np.linalg.norm(vec2)) 140 | 141 | def _train_model(self, prompt: str, analysis: str): 142 | """ 143 | Trains the model incrementally with the new prompt and its analysis. 144 | 145 | Args: 146 | prompt (str): The input sentence. 147 | analysis (str): The analysis result of the prompt. 148 | """ 149 | labels = [1 if "detected" in analysis else 0] 150 | feature_vector = self._extract_features(prompt) 151 | self.pipeline.named_steps['multinomialnb'].partial_fit(feature_vector, labels, classes=np.array([0, 1])) 152 | self._update_word_embeddings(prompt) 153 | self._save_model() 154 | 155 | def _update_word_embeddings(self, prompt: str): 156 | """ 157 | Updates word embeddings based on the context of the given prompt. 158 | 159 | Args: 160 | prompt (str): The input sentence. 
161 | """ 162 | words = prompt.lower().split() 163 | context_window = 2 # Number of words to consider as context on each side 164 | 165 | for i, word in enumerate(words): 166 | if word in self.word_embeddings: 167 | context_words = words[max(0, i - context_window): i] + words[i + 1: i + 1 + context_window] 168 | for context_word in context_words: 169 | if context_word in self.word_embeddings: 170 | self.word_embeddings[word] += 0.01 * (self.word_embeddings[context_word] - self.word_embeddings[word]) 171 | 172 | -------------------------------------------------------------------------------- /Brain_modules/lobes/wernickes_area.py: -------------------------------------------------------------------------------- 1 | # wernickes_area.py 2 | 3 | import numpy as np 4 | import pickle 5 | from sklearn.feature_extraction.text import CountVectorizer 6 | from sklearn.naive_bayes import MultinomialNB 7 | from sklearn.pipeline import make_pipeline 8 | 9 | class WernickesArea: 10 | def __init__(self): 11 | self.language_keywords = ['understand', 'comprehend', 'meaning', 'language'] 12 | self.vectorizer = CountVectorizer() 13 | self.model = MultinomialNB() 14 | self.pipeline = make_pipeline(self.vectorizer, self.model) 15 | 16 | initial_data = ["I understand the meaning", "Comprehend the language", "The meaning is clear"] 17 | initial_labels = [0, 1, 0] 18 | self.pipeline.fit(initial_data, initial_labels) 19 | 20 | self._load_model() 21 | 22 | def _load_model(self): 23 | try: 24 | with open('wernickes_area_model.pkl', 'rb') as f: 25 | self.pipeline = pickle.load(f) 26 | except FileNotFoundError: 27 | pass 28 | 29 | def _save_model(self): 30 | with open('wernickes_area_model.pkl', 'wb') as f: 31 | pickle.dump(self.pipeline, f) 32 | 33 | def process(self, prompt): 34 | try: 35 | features = self.pipeline.named_steps['countvectorizer'].transform([prompt]) 36 | prediction = self.pipeline.named_steps['multinomialnb'].predict(features) 37 | language_analysis = self._analyze_language_content(prompt) 38 | self._train_model(prompt, language_analysis) 39 | return language_analysis 40 | except Exception as e: 41 | return f"Error processing wernickes area: {e}" 42 | 43 | def _analyze_language_content(self, prompt): 44 | words = prompt.lower().split() 45 | language_words = [word for word in words if word in self.language_keywords] 46 | if language_words: 47 | return f"Language elements detected: {', '.join(language_words)}" 48 | return "No explicit language elements detected" 49 | 50 | def _train_model(self, prompt, analysis): 51 | labels = [1 if "detected" in analysis else 0] 52 | feature_vector = self.pipeline.named_steps['countvectorizer'].transform([prompt]) 53 | self.pipeline.named_steps['multinomialnb'].partial_fit(feature_vector, labels, classes=np.array([0, 1])) 54 | self._save_model() 55 | -------------------------------------------------------------------------------- /Brain_modules/lobes_processing.py: -------------------------------------------------------------------------------- 1 | from queue import Queue 2 | from collections import defaultdict 3 | from datasets import load_dataset 4 | from Brain_modules.lobes.frontal_lobe import FrontalLobe 5 | from Brain_modules.lobes.parietal_lobe import ParietalLobe 6 | from Brain_modules.lobes.temporal_lobe import TemporalLobe 7 | from Brain_modules.lobes.occipital_lobe import OccipitalLobe 8 | from Brain_modules.lobes.limbic_lobe import LimbicLobe 9 | from Brain_modules.lobes.cerebellar_lobe import CerebellarLobe 10 | from Brain_modules.lobes.insular_cortex import 
InsularCortex 11 | from Brain_modules.lobes.association_areas import AssociationAreas 12 | from Brain_modules.lobes.wernickes_area import WernickesArea 13 | 14 | class LobesProcessing: 15 | def __init__(self): 16 | """ 17 | Initializes the LobesProcessing class and sets up the individual lobes. 18 | """ 19 | self.lobes = self._initialize_lobes() 20 | self.responses = Queue() 21 | 22 | def _initialize_lobes(self): 23 | """ 24 | Initializes all the lobes and returns a dictionary of lobe instances. 25 | """ 26 | lobe_classes = { 27 | "frontal": FrontalLobe, 28 | "parietal": ParietalLobe, 29 | "temporal": TemporalLobe, 30 | "occipital": OccipitalLobe, 31 | "limbic": LimbicLobe, 32 | "cerebellar": CerebellarLobe, 33 | "wernickes_area": WernickesArea, 34 | "insular": InsularCortex, 35 | "association_areas": AssociationAreas 36 | } 37 | return {name: lobe_class() for name, lobe_class in lobe_classes.items()} 38 | 39 | def process_lobe(self, lobe_name, prompt): 40 | """ 41 | Processes the given prompt using the specified lobe. 42 | 43 | Args: 44 | lobe_name (str): The name of the lobe to process the prompt. 45 | prompt (str): The input prompt. 46 | 47 | Returns: 48 | str: The response from the lobe. 49 | """ 50 | lobe = self.lobes.get(lobe_name) 51 | if lobe: 52 | try: 53 | return lobe.process(prompt) 54 | except Exception as e: 55 | return f"Error in {lobe_name} processing: {str(e)}" 56 | else: 57 | return f"Error: {lobe_name} processing method not found." 58 | 59 | def process_all_lobes(self, prompt): 60 | """ 61 | Processes the given prompt using all lobes and aggregates their responses. 62 | 63 | Args: 64 | prompt (str): The input prompt. 65 | 66 | Returns: 67 | str: The combined response from all lobes. 68 | """ 69 | combined_responses = [] 70 | for lobe_name, lobe in self.lobes.items(): 71 | try: 72 | response = lobe.process(prompt) 73 | except Exception as e: 74 | response = f"Error in {lobe_name} processing: {str(e)}" 75 | self.responses.put((lobe_name, response)) 76 | combined_responses.append(f"{lobe_name.capitalize()} Lobe: {response}") 77 | 78 | combined_thought = self._generate_inner_voice(combined_responses) 79 | return combined_thought 80 | 81 | def _generate_inner_voice(self, responses): 82 | """ 83 | Generates a cohesive inner voice from the aggregated lobe responses. 84 | 85 | Args: 86 | responses (list): The list of responses from each lobe. 87 | 88 | Returns: 89 | str: The cohesive inner voice. 90 | """ 91 | cohesive_thought = ". ".join(responses) 92 | return f"Inner Voice from your lobes: {cohesive_thought}" 93 | 94 | def load_and_preprocess_dataset(self, dataset_name, split='train', percentage=0.01): 95 | """ 96 | Loads and preprocesses a dataset for training. 97 | 98 | Args: 99 | dataset_name (str): The name of the dataset to load. 100 | split (str): The dataset split to load (default is 'train'). 101 | percentage (float): The percentage of the dataset to load (default is 0.01). 102 | 103 | Returns: 104 | list: A list of preprocessed data points. 105 | """ 106 | dataset = load_dataset(dataset_name, split=f"{split}[:{int(percentage * 100)}%]") 107 | processed_data = [{"text": item["text"], "label": item.get("label", None)} for item in dataset] 108 | return processed_data 109 | 110 | def train_lobes_with_dataset(self, dataset_name, split='train', percentage=0.01): 111 | """ 112 | Trains the lobes using a specified dataset. 113 | 114 | Args: 115 | dataset_name (str): The name of the dataset to train with. 116 | split (str): The dataset split to use (default is 'train'). 
117 | percentage (float): The percentage of the dataset to use (default is 0.01). 118 | """ 119 | dataset = self.load_and_preprocess_dataset(dataset_name, split, percentage) 120 | for data in dataset: 121 | prompt = data["text"] 122 | for lobe_name in self.lobes.keys(): 123 | self.process_lobe(lobe_name, prompt) 124 | -------------------------------------------------------------------------------- /Brain_modules/memory_utils.py: -------------------------------------------------------------------------------- 1 | import ollama 2 | import logging 3 | import chromadb 4 | 5 | def generate_embedding(text, embeddings_model, collection, collection_size): 6 | try: 7 | response = ollama.embeddings(model=embeddings_model, prompt=text) 8 | embedding = response["embedding"] 9 | if not embedding: 10 | raise ValueError("Generated embedding is empty.") 11 | collection.add( 12 | ids=[str(collection_size)], 13 | embeddings=[embedding], 14 | documents=[text] 15 | ) 16 | return embedding 17 | except Exception as e: 18 | raise Exception(f"Error generating embedding: {e}") 19 | 20 | def add_to_memory(text, embeddings_model, collection, collection_size): 21 | try: 22 | embedding = generate_embedding(text, embeddings_model, collection, collection_size) 23 | collection_size += 1 24 | return embedding 25 | except Exception as e: 26 | raise Exception(f"Error adding to memory: {e}") 27 | 28 | def retrieve_relevant_memory(prompt_embedding, collection): 29 | try: 30 | results = collection.query(query_embeddings=[prompt_embedding]) 31 | return [doc for doc in results['documents'][0]] 32 | except Exception as e: 33 | raise Exception(f"Error retrieving relevant memory: {e}") 34 | 35 | 36 | 37 | def setup_logging(): 38 | """ 39 | Set up logging configuration for the application. 40 | """ 41 | try: 42 | logging.basicConfig( 43 | filename='lobes_log.txt', 44 | level=logging.INFO, 45 | format='%(asctime)s %(message)s' 46 | ) 47 | print("Logging setup completed.") 48 | except Exception as e: 49 | print(f"Error setting up logging: {e}") 50 | 51 | def setup_embedding_collection(): 52 | """ 53 | Set up the embedding collection using ChromaDB. 54 | 55 | Returns: 56 | tuple: A tuple containing the created collection and its initial size (0). 57 | """ 58 | print("Setting up embedding collection.") 59 | try: 60 | client = chromadb.Client() 61 | collection = client.create_collection(name="convo_memory") 62 | print("Embedding collection setup completed.") 63 | return collection, 0 64 | except Exception as e: 65 | print(f"Error setting up embedding collection: {e}") 66 | return None, 0 67 | -------------------------------------------------------------------------------- /Brain_modules/sentiment_analysis.py: -------------------------------------------------------------------------------- 1 | from textblob import TextBlob 2 | 3 | def analyze_sentiment(text): 4 | """ 5 | Analyze the sentiment of a given text. 6 | 7 | Parameters: 8 | text (str): The text to analyze the sentiment of. 9 | 10 | Returns: 11 | dict: A dictionary containing the sentiment analysis results with keys "polarity" and "subjectivity". 12 | The "polarity" value represents the sentiment polarity ranging from -1.0 (negative) to 1.0 (positive), 13 | and the "subjectivity" value represents the subjectivity of the text ranging from 0.0 (objective) to 1.0 (subjective). 14 | 15 | Raises: 16 | Exception: If there is an error analyzing the sentiment. 
17 | 18 | """ 19 | try: 20 | sentiment = TextBlob(text).sentiment 21 | return {"polarity": sentiment.polarity, "subjectivity": sentiment.subjectivity} 22 | except Exception as e: 23 | raise Exception(f"Error analyzing sentiment: {e}") 24 | -------------------------------------------------------------------------------- /Brain_modules/tool_call_functions/__pycache__/file_directory_manager.cpython-312.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Drlordbasil/AURORA/b2deb0f7b1b4c0611cbda977ee1f149a51049472/Brain_modules/tool_call_functions/__pycache__/file_directory_manager.cpython-312.pyc -------------------------------------------------------------------------------- /Brain_modules/tool_call_functions/call_expert.py: -------------------------------------------------------------------------------- 1 | from openai import OpenAI 2 | 3 | 4 | def call_expert(expertise, question, progress_callback=None): 5 | progress_callback(f"Calling {expertise} expert to answer the question: {question}") 6 | messages = [ 7 | {"role": "system", "content": f"You are a {expertise} expert, you will answer these questions only focusing on {expertise}."}, 8 | {"role": "user", "content": question}, 9 | ] 10 | openai = OpenAI(base_url="http://localhost:11434/v1") 11 | response = openai.chat.completions.create( 12 | model="qwen:0.5b", 13 | messages=messages, 14 | max_tokens=2000, 15 | ) 16 | response_text = response.choices[0].message.content 17 | print(response_text) 18 | return response_text 19 | 20 | 21 | -------------------------------------------------------------------------------- /Brain_modules/tool_call_functions/file_directory_manager.py: -------------------------------------------------------------------------------- 1 | import os 2 | import shutil 3 | import glob 4 | from typing import List, Dict, Any 5 | 6 | class FileDirectoryManager: 7 | @staticmethod 8 | def list_directory(path: str = '.') -> List[str]: 9 | """List contents of a directory.""" 10 | return os.listdir(path) 11 | 12 | @staticmethod 13 | def create_directory(path: str) -> bool: 14 | """Create a new directory.""" 15 | os.makedirs(path, exist_ok=True) 16 | return os.path.exists(path) 17 | 18 | @staticmethod 19 | def delete_item(path: str) -> bool: 20 | """Delete a file or directory.""" 21 | if os.path.isfile(path): 22 | os.remove(path) 23 | elif os.path.isdir(path): 24 | shutil.rmtree(path) 25 | return not os.path.exists(path) 26 | 27 | @staticmethod 28 | def move_item(source: str, destination: str) -> bool: 29 | """Move a file or directory.""" 30 | shutil.move(source, destination) 31 | return os.path.exists(destination) 32 | 33 | @staticmethod 34 | def copy_item(source: str, destination: str) -> bool: 35 | """Copy a file or directory.""" 36 | if os.path.isfile(source): 37 | shutil.copy2(source, destination) 38 | elif os.path.isdir(source): 39 | shutil.copytree(source, destination) 40 | return os.path.exists(destination) 41 | 42 | @staticmethod 43 | def read_file(path: str, max_size: int = 1024 * 1024) -> str: 44 | """Read contents of a file (with size limit).""" 45 | if os.path.getsize(path) > max_size: 46 | return f"File is too large. Max size is {max_size} bytes." 
47 | with open(path, 'r') as file: 48 | return file.read() 49 | 50 | @staticmethod 51 | def write_file(path: str, content: str) -> bool: 52 | """Write content to a file.""" 53 | with open(path, 'w') as file: 54 | file.write(content) 55 | return os.path.exists(path) 56 | 57 | @staticmethod 58 | def search_files(pattern: str) -> List[str]: 59 | """Search for files matching a pattern.""" 60 | return glob.glob(pattern, recursive=True) 61 | 62 | @staticmethod 63 | def get_file_info(path: str) -> Dict[str, Any]: 64 | """Get information about a file or directory.""" 65 | stat = os.stat(path) 66 | return { 67 | "name": os.path.basename(path), 68 | "path": os.path.abspath(path), 69 | "size": stat.st_size, 70 | "created": stat.st_ctime, 71 | "modified": stat.st_mtime, 72 | "is_directory": os.path.isdir(path) 73 | } 74 | 75 | def file_directory_manager(action: str, **kwargs) -> Dict[str, Any]: 76 | """Main function to handle file and directory operations.""" 77 | manager = FileDirectoryManager() 78 | try: 79 | if action == "list_directory": 80 | result = manager.list_directory(kwargs.get("path", ".")) 81 | elif action == "create_directory": 82 | result = manager.create_directory(kwargs["path"]) 83 | elif action == "delete_item": 84 | result = manager.delete_item(kwargs["path"]) 85 | elif action == "move_item": 86 | result = manager.move_item(kwargs["source"], kwargs["destination"]) 87 | elif action == "copy_item": 88 | result = manager.copy_item(kwargs["source"], kwargs["destination"]) 89 | elif action == "read_file": 90 | result = manager.read_file(kwargs["path"], kwargs.get("max_size", 1024 * 1024)) 91 | elif action == "write_file": 92 | result = manager.write_file(kwargs["path"], kwargs["content"]) 93 | elif action == "search_files": 94 | result = manager.search_files(kwargs["pattern"]) 95 | elif action == "get_file_info": 96 | result = manager.get_file_info(kwargs["path"]) 97 | else: 98 | raise ValueError(f"Unknown action: {action}") 99 | 100 | return {"success": True, "result": result} 101 | except Exception as e: 102 | return {"success": False, "error": str(e)} -------------------------------------------------------------------------------- /Brain_modules/tool_call_functions/web_research.py: -------------------------------------------------------------------------------- 1 | import requests 2 | from bs4 import BeautifulSoup 3 | import nltk 4 | from nltk.corpus import stopwords 5 | from nltk.tokenize import word_tokenize 6 | from nltk.stem import WordNetLemmatizer 7 | from sklearn.feature_extraction.text import TfidfVectorizer 8 | from sklearn.metrics.pairwise import cosine_similarity 9 | import re 10 | 11 | nltk.download('punkt', quiet=True) 12 | nltk.download('stopwords', quiet=True) 13 | nltk.download('wordnet', quiet=True) 14 | 15 | class WebResearchTool: 16 | def __init__(self, max_results=5, max_depth=2): 17 | self.max_results = max_results 18 | self.max_depth = max_depth 19 | self.stop_words = set(stopwords.words('english')) 20 | self.lemmatizer = WordNetLemmatizer() 21 | self.vectorizer = TfidfVectorizer() 22 | 23 | def search(self, query, progress_callback): 24 | progress_callback(f"Searching for: {query}") 25 | search_url = f"https://www.google.com/search?q={query.replace(' ', '+')}" 26 | headers = {'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/91.0.4472.124 Safari/537.36'} 27 | response = requests.get(search_url, headers=headers) 28 | soup = BeautifulSoup(response.text, 'html.parser') 29 | search_results = soup.select('.yuRUbf 
a') 30 | results = [result['href'] for result in search_results[:self.max_results]] 31 | progress_callback(f"Found {len(results)} search results") 32 | return results 33 | 34 | def extract_text(self, url, progress_callback): 35 | progress_callback(f"Extracting text from: {url}") 36 | try: 37 | response = requests.get(url, timeout=10) 38 | soup = BeautifulSoup(response.text, 'html.parser') 39 | for script in soup(["script", "style", "meta", "noscript"]): 40 | script.decompose() 41 | text = soup.get_text(separator=' ', strip=True) 42 | progress_callback(f"Successfully extracted text from: {url}") 43 | return text 44 | except Exception as e: 45 | progress_callback(f"Failed to extract text from: {url}. Error: {str(e)}") 46 | return "" 47 | 48 | def preprocess_text(self, text): 49 | text = re.sub(r'[^\w\s]', '', text.lower()) 50 | tokens = word_tokenize(text) 51 | tokens = [self.lemmatizer.lemmatize(token) for token in tokens if token not in self.stop_words] 52 | return " ".join(tokens) 53 | 54 | def calculate_similarity(self, query, text): 55 | preprocessed_query = self.preprocess_text(query) 56 | preprocessed_text = self.preprocess_text(text) 57 | tfidf_matrix = self.vectorizer.fit_transform([preprocessed_query, preprocessed_text]) 58 | return cosine_similarity(tfidf_matrix[0:1], tfidf_matrix[1:2])[0][0] 59 | 60 | def extract_relevant_info(self, text, query, progress_callback): 61 | progress_callback("Extracting relevant information") 62 | sentences = nltk.sent_tokenize(text) 63 | relevant_sentences = [] 64 | for sentence in sentences: 65 | similarity = self.calculate_similarity(query, sentence) 66 | if similarity > 0.2: # Increased threshold for better relevance 67 | relevant_sentences.append(sentence) 68 | progress_callback(f"Extracted {len(relevant_sentences)} relevant sentences") 69 | return " ".join(relevant_sentences) 70 | 71 | def web_research(self, query, progress_callback): 72 | progress_callback("Starting web research") 73 | 74 | urls = self.search(query, progress_callback) 75 | results = [] 76 | 77 | for url in urls: 78 | text = self.extract_text(url, progress_callback) 79 | if text: 80 | relevant_info = self.extract_relevant_info(text, query, progress_callback) 81 | if relevant_info: 82 | results.append({ 83 | "url": url, 84 | "content": relevant_info 85 | }) 86 | progress_callback(f"Added relevant information from: {url}") 87 | 88 | if not results: 89 | progress_callback("No relevant information found") 90 | return f"Unable to find relevant information for the query: {query}" 91 | 92 | progress_callback("Aggregating results") 93 | 94 | aggregated_content = "" 95 | for result in results: 96 | aggregated_content += f"[Source: {result['url']}]\n{result['content']}\n\n" 97 | 98 | progress_callback("Web research completed") 99 | 100 | return aggregated_content.strip() -------------------------------------------------------------------------------- /CODE_OF_CONDUCT.md: -------------------------------------------------------------------------------- 1 | # Contributor Covenant Code of Conduct 2 | 3 | ## Our Pledge 4 | 5 | We as members, contributors, and leaders pledge to make participation in our 6 | community a harassment-free experience for everyone, regardless of age, body 7 | size, visible or invisible disability, ethnicity, sex characteristics, gender 8 | identity and expression, level of experience, education, socio-economic status, 9 | nationality, personal appearance, race, religion, or sexual identity 10 | and orientation. 
11 | 12 | We pledge to act and interact in ways that contribute to an open, welcoming, 13 | diverse, inclusive, and healthy community. 14 | 15 | ## Our Standards 16 | 17 | Examples of behavior that contributes to a positive environment for our 18 | community include: 19 | 20 | * Demonstrating empathy and kindness toward other people 21 | * Being respectful of differing opinions, viewpoints, and experiences 22 | * Giving and gracefully accepting constructive feedback 23 | * Accepting responsibility and apologizing to those affected by our mistakes, 24 | and learning from the experience 25 | * Focusing on what is best not just for us as individuals, but for the 26 | overall community 27 | 28 | Examples of unacceptable behavior include: 29 | 30 | * The use of sexualized language or imagery, and sexual attention or 31 | advances of any kind 32 | * Trolling, insulting or derogatory comments, and personal or political attacks 33 | * Public or private harassment 34 | * Publishing others' private information, such as a physical or email 35 | address, without their explicit permission 36 | * Other conduct which could reasonably be considered inappropriate in a 37 | professional setting 38 | 39 | ## Enforcement Responsibilities 40 | 41 | Community leaders are responsible for clarifying and enforcing our standards of 42 | acceptable behavior and will take appropriate and fair corrective action in 43 | response to any behavior that they deem inappropriate, threatening, offensive, 44 | or harmful. 45 | 46 | Community leaders have the right and responsibility to remove, edit, or reject 47 | comments, commits, code, wiki edits, issues, and other contributions that are 48 | not aligned to this Code of Conduct, and will communicate reasons for moderation 49 | decisions when appropriate. 50 | 51 | ## Scope 52 | 53 | This Code of Conduct applies within all community spaces, and also applies when 54 | an individual is officially representing the community in public spaces. 55 | Examples of representing our community include using an official e-mail address, 56 | posting via an official social media account, or acting as an appointed 57 | representative at an online or offline event. 58 | 59 | ## Enforcement 60 | 61 | Instances of abusive, harassing, or otherwise unacceptable behavior may be 62 | reported to the community leaders responsible for enforcement at 63 | . 64 | All complaints will be reviewed and investigated promptly and fairly. 65 | 66 | All community leaders are obligated to respect the privacy and security of the 67 | reporter of any incident. 68 | 69 | ## Enforcement Guidelines 70 | 71 | Community leaders will follow these Community Impact Guidelines in determining 72 | the consequences for any action they deem in violation of this Code of Conduct: 73 | 74 | ### 1. Correction 75 | 76 | **Community Impact**: Use of inappropriate language or other behavior deemed 77 | unprofessional or unwelcome in the community. 78 | 79 | **Consequence**: A private, written warning from community leaders, providing 80 | clarity around the nature of the violation and an explanation of why the 81 | behavior was inappropriate. A public apology may be requested. 82 | 83 | ### 2. Warning 84 | 85 | **Community Impact**: A violation through a single incident or series 86 | of actions. 87 | 88 | **Consequence**: A warning with consequences for continued behavior. No 89 | interaction with the people involved, including unsolicited interaction with 90 | those enforcing the Code of Conduct, for a specified period of time. 
This 91 | includes avoiding interactions in community spaces as well as external channels 92 | like social media. Violating these terms may lead to a temporary or 93 | permanent ban. 94 | 95 | ### 3. Temporary Ban 96 | 97 | **Community Impact**: A serious violation of community standards, including 98 | sustained inappropriate behavior. 99 | 100 | **Consequence**: A temporary ban from any sort of interaction or public 101 | communication with the community for a specified period of time. No public or 102 | private interaction with the people involved, including unsolicited interaction 103 | with those enforcing the Code of Conduct, is allowed during this period. 104 | Violating these terms may lead to a permanent ban. 105 | 106 | ### 4. Permanent Ban 107 | 108 | **Community Impact**: Demonstrating a pattern of violation of community 109 | standards, including sustained inappropriate behavior, harassment of an 110 | individual, or aggression toward or disparagement of classes of individuals. 111 | 112 | **Consequence**: A permanent ban from any sort of public interaction within 113 | the community. 114 | 115 | ## Attribution 116 | 117 | This Code of Conduct is adapted from the [Contributor Covenant][homepage], 118 | version 2.0, available at 119 | https://www.contributor-covenant.org/version/2/0/code_of_conduct.html. 120 | 121 | Community Impact Guidelines were inspired by [Mozilla's code of conduct 122 | enforcement ladder](https://github.com/mozilla/diversity). 123 | 124 | [homepage]: https://www.contributor-covenant.org 125 | 126 | For answers to common questions about this code of conduct, see the FAQ at 127 | https://www.contributor-covenant.org/faq. Translations are available at 128 | https://www.contributor-covenant.org/translations. 129 | -------------------------------------------------------------------------------- /CONTRIBUTING.md: -------------------------------------------------------------------------------- 1 | :D Im down for help 2 | -------------------------------------------------------------------------------- /Dockerfile: -------------------------------------------------------------------------------- 1 | # Use the official Python image from the Docker Hub 2 | FROM python:3.12 3 | 4 | # Set the working directory in the container 5 | WORKDIR /app 6 | 7 | # Copy the requirements file into the container 8 | COPY requirements.txt . 9 | 10 | # Update the package list and install g++, PortAudio, and other dependencies 11 | RUN apt-get update && apt-get install -y \ 12 | build-essential \ 13 | g++ \ 14 | portaudio19-dev \ 15 | && rm -rf /var/lib/apt/lists/* 16 | 17 | # Upgrade pip and install the required packages 18 | RUN python -m pip install --upgrade pip 19 | RUN pip install --no-cache-dir -r requirements.txt 20 | 21 | # Copy the rest of the application code into the container 22 | COPY . . 
23 | 24 | # Command to run on container start 25 | CMD ["python", "main.py"] 26 | -------------------------------------------------------------------------------- /LICENSE: -------------------------------------------------------------------------------- 1 | MIT License 2 | 3 | Copyright (c) 2024 Lord Basil - Automate EVERYTHING 4 | 5 | Permission is hereby granted, free of charge, to any person obtaining a copy 6 | of this software and associated documentation files (the "Software"), to deal 7 | in the Software without restriction, including without limitation the rights 8 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell 9 | copies of the Software, and to permit persons to whom the Software is 10 | furnished to do so, subject to the following conditions: 11 | 12 | The above copyright notice and this permission notice shall be included in all 13 | copies or substantial portions of the Software. 14 | 15 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 16 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 17 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE 18 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 19 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, 20 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE 21 | SOFTWARE. 22 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | # Aurora: AI Unified Responsive Optimized Reasoning Agent 2 | UPDATES COMING 11/30/2024 - Currently working on improving all my projects that have more potential while also improving my core skills to improve all these agents for more AGI qualities! 3 | ## What is Aurora? 4 | 5 | Aurora is an advanced AI assistant designed to provide helpful, insightful, and engaging responses to a wide range of queries and tasks. Think of Aurora as a highly intelligent digital companion that can assist you with various information needs and problem-solving challenges. 6 | 7 | ## Key Features 8 | 9 | 1. **Comprehensive Understanding**: Aurora carefully analyzes your questions and requests to provide thorough and relevant responses. 10 | 11 | 2. **Multi-faceted Processing**: Like a human brain, Aurora has different "lobes" that process information in unique ways, allowing for a more nuanced understanding of complex topics. 12 | 13 | 3. **Memory Integration**: Aurora can recall relevant information from past interactions, allowing for more contextual and personalized responses over time. 14 | 15 | 4. **Emotional Intelligence**: Aurora can detect the sentiment in your messages and adjust its tone accordingly, making interactions more natural and empathetic. 16 | 17 | 5. **Tool Usage**: When needed, Aurora can use various digital tools to gather additional information or perform specific tasks to better assist you. 18 | 19 | 6. **Continuous Learning**: With each interaction, Aurora aims to improve its capabilities and understanding. 20 | 21 | 7. **Text-to-Speech**: Aurora can convert its text responses to speech, making it accessible for audio playback. 22 | 23 | ## How Does Aurora Work? 24 | 25 | 1. **Input Analysis**: When you send a message, Aurora carefully examines your input to understand your needs and the context of your request. 26 | 27 | 2. 
**Information Processing**: Aurora then processes your request through its various "lobes," each specializing in different types of analysis (like language understanding, logical reasoning, etc.). 28 | 29 | 3. **Memory Retrieval**: Aurora checks its memory for any relevant past interactions or learned information that might be helpful. 30 | 31 | 4. **Tool Utilization**: If necessary, Aurora will use appropriate tools to gather more information or perform specific tasks related to your request. 32 | 33 | 5. **Response Formulation**: Taking all this information into account, Aurora crafts a comprehensive response, ensuring it addresses all aspects of your query. 34 | 35 | 6. **Continuous Improvement**: After each interaction, Aurora reflects on its performance to find ways to improve future responses. 36 | 37 | ## What Can You Ask Aurora? 38 | 39 | Aurora is versatile and can assist with a wide range of topics and tasks, including but not limited to: 40 | 41 | - General knowledge questions 42 | - Research assistance 43 | - Problem-solving and brainstorming 44 | - Writing and editing help 45 | - Basic coding and technical queries 46 | - Task planning and organization 47 | - Simple calculations and data analysis 48 | - Creative writing prompts and ideas 49 | 50 | Remember, while Aurora is highly capable, it's an AI assistant and not a human. It doesn't have personal experiences or emotions, and its knowledge is based on its training data and available tools. 51 | 52 | ## Getting Started 53 | 54 | To interact with Aurora, simply type your question or request in the chat interface. Be as clear and specific as possible to help Aurora understand your needs better. Don't hesitate to ask for clarification or provide feedback – this helps Aurora learn and improve! 55 | 56 | ## A Note on Privacy and Ethics 57 | 58 | Aurora is designed with respect for user privacy and ethical considerations. It doesn't store personal information and aims to provide helpful information without bias. However, always use caution when sharing sensitive information with any AI system. 59 | 60 | Enjoy your interactions with Aurora, your AI assistant for insightful and engaging conversations! 
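## Calling Aurora Programmatically

If you would rather script your interactions than use the web chat, the Flask API defined in `main.py` can be called directly. The snippet below is a minimal sketch, assuming the server is already running locally on its default port (5000) and that the `requests` package is installed; the request and response fields mirror the `/send_message` route.

```python
# Minimal sketch: send one message to a locally running Aurora server.
# Assumes the app was started with `python main.py` (default port 5000).
import requests

resp = requests.post(
    "http://localhost:5000/send_message",
    json={"message": "Summarize the key features of Aurora."},  # any prompt you like
    timeout=120,  # responses can take a while when tools or TTS are involved
)
data = resp.json()
print(data["status"])      # "Completed" or "Error"
print(data["response"])    # Aurora's final answer
print(data["audio_file"])  # path to the generated audio, or None when TTS is off
```

The other endpoints (`/toggle_tts`, `/analyze_image`, `/update_api_provider`) follow the same pattern; see `main.py` for their exact payloads.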
61 | -------------------------------------------------------------------------------- /image.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Drlordbasil/AURORA/b2deb0f7b1b4c0611cbda977ee1f149a51049472/image.png -------------------------------------------------------------------------------- /listen_lobe.py: -------------------------------------------------------------------------------- 1 | import os 2 | import sounddevice as sd 3 | import soundfile as sf 4 | from groq import Groq 5 | import threading 6 | 7 | class AuroraRecorder: 8 | def __init__(self): 9 | """Initialize the AuroraRecorder with default settings.""" 10 | self.client = Groq() if os.getenv("GROQ_API_KEY") else None 11 | self.recording = False 12 | self.audio_path = "output.wav" 13 | self.transcription = None 14 | self._recording_thread = None 15 | self._lock = threading.Lock() 16 | 17 | def start_recording(self): 18 | """Start recording audio.""" 19 | with self._lock: 20 | if self.recording: 21 | print("Already recording") 22 | return 23 | self.recording = True 24 | self.transcription = None 25 | self._recording_thread = threading.Thread(target=self._record_audio) 26 | self._recording_thread.start() 27 | 28 | def stop_recording(self): 29 | """Stop recording audio.""" 30 | with self._lock: 31 | if not self.recording: 32 | print("Not recording") 33 | return 34 | self.recording = False 35 | if self._recording_thread: 36 | self._recording_thread.join() # Ensure the recording thread has finished 37 | 38 | def _record_audio(self): 39 | """Record audio in a separate thread.""" 40 | print("Recording audio...") 41 | samplerate = 16000 # 16kHz 42 | channels = 1 # mono 43 | recording = sd.rec(int(60 * samplerate), samplerate=samplerate, channels=channels) 44 | while self.recording: 45 | sd.sleep(100) 46 | sd.stop() 47 | sf.write(self.audio_path, recording, samplerate) 48 | print(f"Audio recorded and saved to {self.audio_path}.") 49 | self.transcribe_audio(self.audio_path) 50 | 51 | def transcribe_audio(self, file_path): 52 | """Transcribe the recorded audio using Groq.""" 53 | if not os.path.isfile(file_path): 54 | print("The provided file path is not valid.") 55 | return None 56 | 57 | with open(file_path, "rb") as file: 58 | response = self.client.audio.transcriptions.create( 59 | file=(file_path, file.read()), 60 | model="whisper-large-v3", 61 | response_format="json", 62 | language="en", 63 | temperature=0.0 64 | ) 65 | 66 | self.transcription = response.text 67 | print(f"Transcription: {self.transcription}") 68 | 69 | -------------------------------------------------------------------------------- /main.py: -------------------------------------------------------------------------------- 1 | import os 2 | import time 3 | import logging 4 | from flask import Flask, render_template, request, jsonify, g, send_file, Response, session 5 | from flask_session import Session 6 | from Brain_modules.brain import Brain 7 | from listen_lobe import AuroraRecorder 8 | from speaker import text_to_speech 9 | from queue import Queue 10 | import json 11 | from Brain_modules.llm_api_calls import llm_api_calls, tools 12 | from Brain_modules.image_vision import ImageVision 13 | from Brain_modules.memory_utils import setup_embedding_collection, setup_logging 14 | 15 | 16 | 17 | app = Flask(__name__) 18 | app.config['SECRET_KEY'] = os.urandom(24) 19 | app.config['SESSION_TYPE'] = 'filesystem' 20 | app.config['SESSION_FILE_DIR'] = './flask_session/' 21 | Session(app) 22 | 23 | # 
Initialize components 24 | progress_queue = Queue() 25 | collection, collection_size = setup_embedding_collection() 26 | brain = Brain(progress_queue.put, collection, collection_size) 27 | aurora_recorder = AuroraRecorder() 28 | image_vision = ImageVision() 29 | 30 | def update_progress(message): 31 | """Update the progress queue with a new message.""" 32 | logging.debug(f"Progress update: {message}") 33 | progress_queue.put(message) 34 | 35 | def process_input(input_text, session_id): 36 | """Process the input text through the Brain module.""" 37 | if not input_text: 38 | update_progress("Error: No input provided.") 39 | return {'response': 'No input provided.', 'status': 'Error'}, None 40 | 41 | update_progress(f"Received input: {input_text}") 42 | 43 | try: 44 | response = brain.process_input(input_text, session_id) 45 | update_progress("Response generated") 46 | audio_file = None 47 | if brain.tts_enabled: 48 | update_progress("Generating audio response...") 49 | audio_file = text_to_speech(response) 50 | update_progress("Audio response generated") 51 | return {'response': response, 'status': 'Completed'}, audio_file 52 | except Exception as e: 53 | error_message = f"Error processing input: {str(e)}" 54 | update_progress(error_message) 55 | return {'response': error_message, 'status': 'Error'}, None 56 | 57 | @app.before_request 58 | def before_request(): 59 | """Log the start time of the request.""" 60 | g.start_time = time.time() 61 | 62 | @app.after_request 63 | def after_request(response): 64 | """Log the time taken to process the request.""" 65 | diff = time.time() - g.start_time 66 | logging.debug(f"Request processed in {diff:.2f} seconds") 67 | return response 68 | 69 | @app.route('/') 70 | def index(): 71 | """Render the main index page.""" 72 | return render_template('index.html') 73 | 74 | @app.route('/update_api_provider', methods=['POST']) 75 | def update_api_provider(): 76 | data = request.json 77 | provider = data.get('provider') 78 | if provider: 79 | llm_api_calls.update_api_provider(provider) 80 | return jsonify({"status": "success", "message": f"API provider updated to {provider}"}) 81 | else: 82 | return jsonify({"status": "error", "message": "No provider specified"}), 400 83 | 84 | @app.route('/send_message', methods=['POST']) 85 | def send_message(): 86 | """Handle sending a message.""" 87 | data = request.json 88 | message = data.get('message') 89 | session_id = session.get('session_id', str(time.time())) 90 | session['session_id'] = session_id 91 | 92 | if not message: 93 | return jsonify({'response': 'No message provided.', 'status': 'Error'}) 94 | 95 | # Clear the progress queue before processing 96 | while not progress_queue.empty(): 97 | progress_queue.get() 98 | 99 | response, audio_file = process_input(message, session_id) 100 | return jsonify({**response, 'audio_file': audio_file}) 101 | 102 | @app.route('/toggle_tts', methods=['POST']) 103 | def toggle_tts(): 104 | """Toggle the Text-to-Speech (TTS) functionality.""" 105 | try: 106 | status = brain.toggle_tts() 107 | logging.debug(f"TTS toggled to {status}") 108 | return jsonify({'status': f'Text-to-Speech {status}'}) 109 | except Exception as e: 110 | error_message = f"Error toggling TTS: {str(e)}" 111 | logging.error(error_message) 112 | return jsonify({'status': 'Error', 'error': error_message}) 113 | 114 | @app.route('/start_recording', methods=['POST']) 115 | def start_recording(): 116 | """Start recording audio.""" 117 | try: 118 | aurora_recorder.start_recording() 119 | return jsonify({'status': 
'Recording started'}) 120 | except Exception as e: 121 | return jsonify({'status': f"Error starting recording: {str(e)}"}) 122 | 123 | @app.route('/stop_recording', methods=['POST']) 124 | def stop_recording(): 125 | """Stop recording audio and process the transcription.""" 126 | try: 127 | update_progress("Stopping recording...") 128 | aurora_recorder.stop_recording() 129 | update_progress("Recording stopped, starting transcription...") 130 | transcription = aurora_recorder.transcription 131 | update_progress(f"Transcription completed: {transcription}") 132 | 133 | session_id = session.get('session_id', str(time.time())) 134 | session['session_id'] = session_id 135 | response, audio_file = process_input(transcription, session_id) 136 | 137 | return jsonify({**response, 'transcription': transcription, 'audio_file': audio_file}) 138 | except Exception as e: 139 | error_message = f"Error stopping recording: {str(e)}" 140 | update_progress(error_message) 141 | return jsonify({'status': error_message, 'response': error_message}) 142 | 143 | @app.route('/get_audio/', methods=['GET']) 144 | def get_audio(filename): 145 | """Serve the generated audio file.""" 146 | return send_file(filename, mimetype="audio/mp3") 147 | 148 | @app.route('/get_detailed_info', methods=['GET']) 149 | def get_detailed_info(): 150 | """Return detailed information from the brain module.""" 151 | return brain.get_detailed_info() 152 | 153 | @app.route('/progress_updates') 154 | def progress_updates(): 155 | """Provide progress updates as a server-sent event stream.""" 156 | def generate(): 157 | while True: 158 | message = progress_queue.get() 159 | logging.debug(f"Sending SSE: {message}") 160 | yield f"data: {json.dumps({'message': message})}\n\n" 161 | return Response(generate(), mimetype='text/event-stream') 162 | 163 | @app.route('/chat_history') 164 | def chat_history(): 165 | """Return the chat history as a JSON response.""" 166 | session_id = session.get('session_id') 167 | if session_id: 168 | history = brain.get_chat_history(session_id) 169 | return jsonify(history) 170 | return jsonify([]) 171 | 172 | @app.route('/set_env', methods=['POST']) 173 | def set_env(): 174 | data = request.json 175 | variable = data.get('variable') 176 | value = data.get('value') 177 | if variable and value: 178 | os.environ[variable] = value 179 | return jsonify({'status': 'success', 'message': f'{variable} has been set'}) 180 | return jsonify({'status': 'error', 'message': 'Invalid request. 
Both variable and value are required.'}), 400 181 | 182 | @app.route('/analyze_image', methods=['POST']) 183 | def analyze_image(): 184 | """Analyze an image using the image_vision module.""" 185 | data = request.json 186 | image_url = data.get('image_url') 187 | if not image_url: 188 | return jsonify({'status': 'error', 'message': 'No image URL provided'}), 400 189 | 190 | try: 191 | analysis = image_vision.analyze_image(image_url) 192 | return jsonify({'status': 'success', 'analysis': analysis}) 193 | except Exception as e: 194 | return jsonify({'status': 'error', 'message': str(e)}), 500 195 | 196 | if __name__ == '__main__': 197 | app.run(host='0.0.0.0', port=5000, threaded=True, debug=False, use_reloader=False) -------------------------------------------------------------------------------- /requirements.txt: -------------------------------------------------------------------------------- 1 | Flask 2 | tenacity 3 | pyautogui 4 | requests 5 | beautifulsoup4 6 | selenium 7 | webdriver-manager 8 | trafilatura 9 | scikit-learn 10 | networkx 11 | openai 12 | groq 13 | ollama 14 | tiktoken 15 | datasets 16 | textblob 17 | Pillow 18 | tensorflow 19 | chromadb 20 | sounddevice 21 | soundfile 22 | deepgram-sdk 23 | 24 | Flask-Session 25 | -------------------------------------------------------------------------------- /speaker.py: -------------------------------------------------------------------------------- 1 | from deepgram import DeepgramClient, SpeakOptions 2 | import os 3 | 4 | def text_to_speech(text): 5 | if os.environ.get("DEEPGRAM_API_KEY") is None: 6 | print("Please set the DEEPGRAM_API_KEY environment variable to enable text to speech.") 7 | return 8 | else: 9 | DEEPGRAM_API_KEY = os.environ.get("DEEPGRAM_API_KEY") 10 | 11 | FILENAME = "combined_audio.mp3" 12 | 13 | try: 14 | deepgram = DeepgramClient(DEEPGRAM_API_KEY) 15 | options = SpeakOptions( 16 | model="aura-asteria-en", 17 | ) 18 | 19 | if len(text) > 1999: 20 | chunks = [text[i:i + 1800] for i in range(0, len(text), 1800)] 21 | else: 22 | chunks = [text] 23 | 24 | with open(FILENAME, "wb") as combined_audio: 25 | for i, chunk in enumerate(chunks): 26 | chunk_filename = f"audio_chunk_{i}.mp3" 27 | response = deepgram.speak.v("1").save(chunk_filename, {"text": chunk}, options) 28 | with open(chunk_filename, "rb") as chunk_file: 29 | combined_audio.write(chunk_file.read()) 30 | os.remove(chunk_filename) # Remove chunk file after processing 31 | 32 | print(f"Audio saved as {FILENAME}") 33 | return FILENAME 34 | 35 | except Exception as e: 36 | print(f"Exception: {e}") 37 | return f"Error converting text to speech: {str(e)}" 38 | -------------------------------------------------------------------------------- /static/aurora.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Drlordbasil/AURORA/b2deb0f7b1b4c0611cbda977ee1f149a51049472/static/aurora.png -------------------------------------------------------------------------------- /static/customization.css: -------------------------------------------------------------------------------- 1 | :root { 2 | --bg-color: #0a0a1e; 3 | --text-color: #e0e0e0; 4 | --accent-color: #16213e; 5 | --highlight-color: #0f3460; 6 | --aurora-color: #00ff9d; 7 | --shadow-color: rgba(0, 0, 0, 0.3); 8 | --font-size-base: 16px; 9 | --font-size-small: 14px; 10 | --font-size-smaller: 12px; 11 | } 12 | 13 | body, html { 14 | margin: 0; 15 | padding: 0; 16 | font-family: 'Segoe UI', Tahoma, Geneva, Verdana, sans-serif; 17 | 
background-color: var(--bg-color); 18 | color: var(--text-color); 19 | height: 100vh; 20 | overflow: hidden; 21 | font-size: var(--font-size-base); 22 | } 23 | 24 | .container { 25 | display: flex; 26 | height: 100vh; 27 | width: 100vw; 28 | } 29 | 30 | .chat-panel { 31 | flex: 1; 32 | display: flex; 33 | flex-direction: column; 34 | padding: 1rem; 35 | background-color: var(--accent-color); 36 | box-shadow: inset 0 0 10px var(--shadow-color); 37 | } 38 | 39 | .info-panel { 40 | width: 300px; 41 | background-color: var(--highlight-color); 42 | transition: transform 0.3s ease, box-shadow 0.3s ease; 43 | box-shadow: -5px 0 15px var(--shadow-color); 44 | overflow-y: auto; 45 | display: flex; 46 | flex-direction: column; 47 | } 48 | 49 | .info-panel-content { 50 | padding: 1rem; 51 | display: flex; 52 | flex-direction: column; 53 | height: 100%; 54 | } 55 | 56 | .chat-display { 57 | flex-grow: 1; 58 | overflow-y: auto; 59 | padding: 1rem; 60 | background-color: var(--bg-color); 61 | border-radius: 10px; 62 | margin-bottom: 1rem; 63 | box-shadow: inset 0 0 10px var(--shadow-color); 64 | } 65 | 66 | .message { 67 | margin-bottom: 1rem; 68 | padding: 0.75rem 1rem; 69 | border-radius: 20px; 70 | max-width: 80%; 71 | animation: fadeIn 0.3s ease-in; 72 | white-space: pre-wrap; 73 | word-wrap: break-word; 74 | box-shadow: 0 3px 6px var(--shadow-color); 75 | } 76 | 77 | @keyframes fadeIn { 78 | from { opacity: 0; transform: translateY(20px); } 79 | to { opacity: 1; transform: translateY(0); } 80 | } 81 | 82 | .user-message { 83 | background-color: var(--highlight-color); 84 | color: var(--text-color); 85 | align-self: flex-end; 86 | margin-left: 20%; 87 | } 88 | 89 | .aurora-message { 90 | background-color: var(--accent-color); 91 | color: var(--aurora-color); 92 | align-self: flex-start; 93 | margin-right: 20%; 94 | } 95 | 96 | .input-area { 97 | display: flex; 98 | gap: 0.5rem; 99 | padding: 0.5rem; 100 | background-color: var(--accent-color); 101 | border-radius: 25px; 102 | box-shadow: 0 4px 6px var(--shadow-color); 103 | } 104 | 105 | #prompt-input { 106 | flex-grow: 1; 107 | padding: 0.75rem 1rem; 108 | border: none; 109 | border-radius: 20px; 110 | background-color: var(--bg-color); 111 | color: var(--text-color); 112 | font-size: var(--font-size-base); 113 | box-shadow: inset 0 2px 4px var(--shadow-color); 114 | } 115 | 116 | button { 117 | padding: 0.75rem 1rem; 118 | border: none; 119 | border-radius: 20px; 120 | background-color: var(--highlight-color); 121 | color: var(--text-color); 122 | cursor: pointer; 123 | transition: all 0.3s ease; 124 | display: flex; 125 | align-items: center; 126 | justify-content: center; 127 | box-shadow: 0 2px 4px var(--shadow-color); 128 | } 129 | 130 | button:hover { 131 | background-color: var(--aurora-color); 132 | color: var(--bg-color); 133 | transform: translateY(-2px); 134 | box-shadow: 0 4px 8px var(--shadow-color); 135 | } 136 | 137 | #logo { 138 | width: 80px; 139 | height: 80px; 140 | margin-bottom: 1rem; 141 | filter: drop-shadow(0 4px 6px var(--shadow-color)); 142 | align-self: center; 143 | } 144 | 145 | .info-text { 146 | text-align: center; 147 | margin-bottom: 1rem; 148 | font-size: var(--font-size-small); 149 | } 150 | 151 | .info-text h2 { 152 | margin-bottom: 0.5rem; 153 | font-size: 1.5rem; 154 | } 155 | 156 | .info-text p { 157 | margin-bottom: 0.5rem; 158 | } 159 | 160 | .feature-list { 161 | list-style-type: none; 162 | padding: 0; 163 | margin: 0; 164 | display: grid; 165 | grid-template-columns: repeat(2, 1fr); 166 | gap: 0.5rem; 167 
| } 168 | 169 | .feature-list li { 170 | display: flex; 171 | align-items: center; 172 | font-size: var(--font-size-smaller); 173 | } 174 | 175 | .feature-list li i { 176 | margin-right: 0.5rem; 177 | color: var(--aurora-color); 178 | } 179 | 180 | #status-label { 181 | font-weight: bold; 182 | color: var(--aurora-color); 183 | text-shadow: 0 2px 4px var(--shadow-color); 184 | margin-top: auto; 185 | text-align: center; 186 | } 187 | 188 | #brain-visualization { 189 | width: 100%; 190 | height: 60px; 191 | margin: 10px 0; 192 | filter: drop-shadow(0 2px 3px var(--shadow-color)); 193 | } 194 | 195 | #brain-visualization svg { 196 | width: 100%; 197 | height: 100%; 198 | } 199 | 200 | #brain-visualization path { 201 | fill: none; 202 | stroke: var(--aurora-color); 203 | stroke-width: 1.5; 204 | transition: all 0.3s ease; 205 | } 206 | 207 | #brain-outline { 208 | stroke: var(--text-color); 209 | stroke-width: 1; 210 | } 211 | 212 | #theme-toggle, #info-toggle { 213 | position: fixed; 214 | top: 1rem; 215 | right: 1rem; 216 | z-index: 1000; 217 | background-color: var(--highlight-color); 218 | color: var(--text-color); 219 | border: none; 220 | border-radius: 50%; 221 | width: 40px; 222 | height: 40px; 223 | display: flex; 224 | align-items: center; 225 | justify-content: center; 226 | cursor: pointer; 227 | transition: all 0.3s ease; 228 | } 229 | 230 | #info-toggle { 231 | right: 4rem; 232 | } 233 | 234 | #theme-toggle:hover, #info-toggle:hover { 235 | background-color: var(--aurora-color); 236 | color: var(--bg-color); 237 | } 238 | 239 | #artifact-display { 240 | margin-top: 1rem; 241 | border: 1px solid var(--aurora-color); 242 | border-radius: 10px; 243 | padding: 1rem; 244 | background-color: rgba(0, 255, 157, 0.1); 245 | } 246 | 247 | .typing-indicator { 248 | display: inline-block; 249 | margin-left: 5px; 250 | } 251 | 252 | .typing-indicator span { 253 | display: inline-block; 254 | width: 8px; 255 | height: 8px; 256 | background-color: var(--aurora-color); 257 | border-radius: 50%; 258 | margin-right: 3px; 259 | animation: typing 1s infinite; 260 | } 261 | 262 | .typing-indicator span:nth-child(2) { 263 | animation-delay: 0.2s; 264 | } 265 | 266 | .typing-indicator span:nth-child(3) { 267 | animation-delay: 0.4s; 268 | } 269 | 270 | @keyframes typing { 271 | 0% { transform: translateY(0); } 272 | 50% { transform: translateY(-5px); } 273 | 100% { transform: translateY(0); } 274 | } 275 | 276 | .code-block { 277 | background-color: #2a2a4a; 278 | border-radius: 5px; 279 | padding: 1rem; 280 | margin: 0.5rem 0; 281 | font-family: 'Courier New', Courier, monospace; 282 | white-space: pre-wrap; 283 | word-wrap: break-word; 284 | } 285 | 286 | @media (max-width: 768px) { 287 | .container { 288 | flex-direction: column; 289 | } 290 | 291 | .info-panel { 292 | width: 100%; 293 | height: 50%; 294 | transform: translateY(100%); 295 | position: fixed; 296 | bottom: 0; 297 | left: 0; 298 | right: 0; 299 | } 300 | 301 | .info-panel.show { 302 | transform: translateY(0); 303 | } 304 | 305 | .feature-list { 306 | grid-template-columns: 1fr; 307 | } 308 | 309 | #brain-visualization { 310 | height: 40px; 311 | } 312 | 313 | #logo { 314 | width: 60px; 315 | height: 60px; 316 | } 317 | } 318 | 319 | @media (max-height: 600px) { 320 | .info-panel-content { 321 | flex-direction: row; 322 | flex-wrap: wrap; 323 | justify-content: space-around; 324 | align-items: center; 325 | } 326 | 327 | #logo, #brain-visualization { 328 | flex: 0 0 auto; 329 | margin: 0.5rem; 330 | } 331 | 332 | .info-text { 333 | 
flex: 1 1 auto; 334 | text-align: left; 335 | } 336 | 337 | .feature-list { 338 | grid-template-columns: repeat(3, 1fr); 339 | } 340 | 341 | #status-label { 342 | flex: 0 0 100%; 343 | margin-top: 0.5rem; 344 | } 345 | } -------------------------------------------------------------------------------- /static/customization.js: -------------------------------------------------------------------------------- 1 | // DOM Elements 2 | const elements = { 3 | chatDisplay: document.getElementById('chat-display'), 4 | promptInput: document.getElementById('prompt-input'), 5 | sendButton: document.getElementById('send-button'), 6 | clearButton: document.getElementById('clear-button'), 7 | ttsToggleButton: document.getElementById('tts-toggle'), 8 | recordButton: document.getElementById('record-button'), 9 | statusLabel: document.getElementById('status-label'), 10 | themeToggle: document.getElementById('theme-toggle'), 11 | infoPanel: document.querySelector('.info-panel'), 12 | infoToggle: document.getElementById('info-toggle'), 13 | artifactDisplay: document.getElementById('artifact-display'), 14 | apiProviderSelect: document.getElementById('api-provider') 15 | }; 16 | 17 | // State 18 | const state = { 19 | isRecording: false, 20 | isDarkTheme: true, 21 | activeLobes: new Set(), 22 | eventSource: null, 23 | currentApiProvider: 'ollama' // Default API provider 24 | }; 25 | 26 | // Utility Functions 27 | const utils = { 28 | formatLongText(text, maxLineLength = 80) { 29 | return text.split(' ').reduce((lines, word) => { 30 | if (lines[lines.length - 1].length + word.length + 1 > maxLineLength) { 31 | lines.push(word); 32 | } else { 33 | lines[lines.length - 1] += ' ' + word; 34 | } 35 | return lines; 36 | }, ['']).join('\n'); 37 | }, 38 | 39 | async fetchJSON(url, options = {}) { 40 | const response = await fetch(url, options); 41 | if (!response.ok) { 42 | throw new Error(`HTTP error! status: ${response.status}`); 43 | } 44 | return await response.json(); 45 | } 46 | }; 47 | 48 | // UI Update Functions 49 | const ui = { 50 | displayMessage(message, isUser = true, type = 'normal') { 51 | const messageElement = document.createElement('div'); 52 | messageElement.className = `message ${isUser ? 'user-message' : 'aurora-message'} ${type}-message`; 53 | 54 | message = message.replace(/```([\s\S]*?)```/g, (match, p1) => { 55 | return `
<pre class="code-block">${p1}</pre>
`; 56 | }); 57 | 58 | message = utils.formatLongText(message); 59 | 60 | messageElement.innerHTML = type === 'individual' ? message : `${isUser ? 'You' : 'AURORA'}: ${message}`; 61 | elements.chatDisplay.appendChild(messageElement); 62 | elements.chatDisplay.scrollTop = elements.chatDisplay.scrollHeight; 63 | }, 64 | 65 | updateStatus(message, animate = false) { 66 | console.log("Updating status:", message); 67 | elements.statusLabel.textContent = message; 68 | elements.statusLabel.style.animation = animate ? 'pulse 1s infinite' : 'none'; 69 | 70 | const statusElement = document.createElement('div'); 71 | statusElement.className = 'message system-message'; 72 | statusElement.textContent = message; 73 | elements.chatDisplay.appendChild(statusElement); 74 | elements.chatDisplay.scrollTop = elements.chatDisplay.scrollHeight; 75 | 76 | ui.updateBrainVisualization(message); 77 | }, 78 | 79 | showTypingIndicator() { 80 | const typingIndicator = document.createElement('div'); 81 | typingIndicator.className = 'typing-indicator'; 82 | typingIndicator.innerHTML = ''; 83 | elements.chatDisplay.appendChild(typingIndicator); 84 | elements.chatDisplay.scrollTop = elements.chatDisplay.scrollHeight; 85 | }, 86 | 87 | hideTypingIndicator() { 88 | const typingIndicator = elements.chatDisplay.querySelector('.typing-indicator'); 89 | if (typingIndicator) { 90 | typingIndicator.remove(); 91 | } 92 | }, 93 | 94 | updateBrainVisualization(message) { 95 | const lobes = { 96 | frontal: ['think', 'plan', 'decide'], 97 | parietal: ['touch', 'spatial', 'navigation'], 98 | temporal: ['hear', 'memory', 'language'], 99 | occipital: ['see', 'visual'], 100 | cerebellum: ['balance', 'coordination', 'precision'] 101 | }; 102 | 103 | const activatedLobes = Object.entries(lobes).filter(([lobe, keywords]) => 104 | keywords.some(keyword => message.toLowerCase().includes(keyword)) 105 | ).map(([lobe]) => lobe); 106 | 107 | Object.keys(lobes).forEach(lobe => { 108 | const element = document.getElementById(lobe + '-lobe'); 109 | if (element) { 110 | element.style.stroke = activatedLobes.includes(lobe) ? '#00ff9d' : 'var(--aurora-color)'; 111 | element.style.filter = activatedLobes.includes(lobe) ? 'url(#glow)' : 'none'; 112 | } 113 | }); 114 | 115 | setTimeout(() => { 116 | Object.keys(lobes).forEach(lobe => { 117 | const element = document.getElementById(lobe + '-lobe'); 118 | if (element) { 119 | element.style.stroke = 'var(--aurora-color)'; 120 | element.style.filter = 'none'; 121 | } 122 | }); 123 | }, 5000); 124 | }, 125 | 126 | updateThemeColors() { 127 | const root = document.documentElement; 128 | const theme = state.isDarkTheme ? { 129 | bgColor: '#0a0a1e', 130 | textColor: '#e0e0e0', 131 | accentColor: '#16213e', 132 | highlightColor: '#0f3460' 133 | } : { 134 | bgColor: '#f0f0f0', 135 | textColor: '#333333', 136 | accentColor: '#d0d0d0', 137 | highlightColor: '#c0c0c0' 138 | }; 139 | 140 | Object.entries(theme).forEach(([key, value]) => { 141 | root.style.setProperty(`--${key.replace(/[A-Z]/g, letter => `-${letter.toLowerCase()}`)}`, value); 142 | }); 143 | }, 144 | 145 | displayArtifact(artifact) { 146 | elements.artifactDisplay.innerHTML = ''; 147 | const artifactElement = document.createElement('div'); 148 | artifactElement.className = 'artifact'; 149 | artifactElement.innerHTML = ` 150 |

<h3>${artifact.title}</h3> 151 | <div>${artifact.content}</div>
152 | `; 153 | elements.artifactDisplay.appendChild(artifactElement); 154 | } 155 | }; 156 | 157 | // API Functions 158 | const api = { 159 | async sendMessage(message) { 160 | ui.displayMessage(message, true); 161 | ui.updateStatus('Processing...', true); 162 | ui.showTypingIndicator(); 163 | 164 | try { 165 | const data = await utils.fetchJSON('/send_message', { 166 | method: 'POST', 167 | headers: { 'Content-Type': 'application/json' }, 168 | body: JSON.stringify({ message, apiProvider: state.currentApiProvider }) 169 | }); 170 | 171 | ui.hideTypingIndicator(); 172 | 173 | if (data.response) { 174 | ui.displayMessage(data.response, false); 175 | } 176 | 177 | if (data.audio_file) { 178 | api.playAudio(data.audio_file); 179 | } 180 | 181 | if (data.artifact) { 182 | ui.displayArtifact(data.artifact); 183 | } 184 | 185 | ui.updateStatus(data.status); 186 | } catch (error) { 187 | console.error('Error:', error); 188 | ui.updateStatus('Error occurred'); 189 | ui.hideTypingIndicator(); 190 | } 191 | }, 192 | 193 | async toggleTTS() { 194 | try { 195 | const data = await utils.fetchJSON('/toggle_tts', { method: 'POST' }); 196 | if (data.status === 'Error') { 197 | console.error('Error toggling TTS:', data.error); 198 | ui.updateStatus('Error toggling TTS'); 199 | } else { 200 | ui.updateStatus(data.status); 201 | elements.ttsToggleButton.classList.toggle('active'); 202 | } 203 | } catch (error) { 204 | console.error('Error:', error); 205 | ui.updateStatus('Error toggling TTS'); 206 | } 207 | }, 208 | 209 | async toggleRecording() { 210 | if (!state.isRecording) { 211 | try { 212 | const data = await utils.fetchJSON('/start_recording', { method: 'POST' }); 213 | state.isRecording = true; 214 | elements.recordButton.innerHTML = ''; 215 | elements.recordButton.classList.add('active'); 216 | ui.updateStatus(data.status, true); 217 | } catch (error) { 218 | console.error('Error:', error); 219 | ui.updateStatus('Error starting recording'); 220 | } 221 | } else { 222 | try { 223 | ui.updateStatus('Stopping recording...', true); 224 | const data = await utils.fetchJSON('/stop_recording', { method: 'POST' }); 225 | state.isRecording = false; 226 | elements.recordButton.innerHTML = ''; 227 | elements.recordButton.classList.remove('active'); 228 | ui.updateStatus('Processing completed'); 229 | if (data.transcription) { 230 | ui.displayMessage(data.transcription, true); 231 | if (data.response) { 232 | ui.displayMessage(data.response, false); 233 | } 234 | if (data.audio_file) { 235 | api.playAudio(data.audio_file); 236 | } 237 | } 238 | } catch (error) { 239 | console.error('Error:', error); 240 | ui.updateStatus('Error stopping recording'); 241 | } 242 | } 243 | }, 244 | 245 | playAudio(audioFile) { 246 | const audio = new Audio(`/get_audio/${audioFile}`); 247 | audio.play(); 248 | }, 249 | 250 | async loadChatHistory() { 251 | try { 252 | const history = await utils.fetchJSON('/chat_history.json'); 253 | history.forEach(message => ui.displayMessage(message.text, message.user)); 254 | } catch (error) { 255 | console.error('Error loading chat history:', error); 256 | } 257 | }, 258 | 259 | handleProgressUpdates() { 260 | console.log("Setting up SSE connection for progress updates"); 261 | state.eventSource = new EventSource('/progress_updates'); 262 | state.eventSource.onmessage = function(event) { 263 | console.log("Received SSE message:", event.data); 264 | const data = JSON.parse(event.data); 265 | ui.updateStatus(data.message, true); 266 | }; 267 | state.eventSource.onerror = function(error) { 268 | 
console.error('Error in progress updates:', error); 269 | state.eventSource.close(); 270 | }; 271 | }, 272 | 273 | async updateApiProvider(provider) { 274 | try { 275 | const data = await utils.fetchJSON('/update_api_provider', { 276 | method: 'POST', 277 | headers: { 'Content-Type': 'application/json' }, 278 | body: JSON.stringify({ provider }) 279 | }); 280 | if (data.status === 'success') { 281 | state.currentApiProvider = provider; 282 | ui.updateStatus(`API Provider set to ${provider}`); 283 | } else { 284 | throw new Error(data.error); 285 | } 286 | } catch (error) { 287 | console.error('Error updating API provider:', error); 288 | ui.updateStatus('Error updating API provider'); 289 | } 290 | } 291 | }; 292 | 293 | // Event Listeners 294 | function setupEventListeners() { 295 | elements.sendButton.addEventListener('click', () => { 296 | const message = elements.promptInput.value.trim(); 297 | if (message) { 298 | api.sendMessage(message); 299 | elements.promptInput.value = ''; 300 | } 301 | }); 302 | 303 | elements.promptInput.addEventListener('keypress', (event) => { 304 | if (event.key === 'Enter') { 305 | elements.sendButton.click(); 306 | } 307 | }); 308 | 309 | elements.clearButton.addEventListener('click', () => { 310 | elements.chatDisplay.innerHTML = ''; 311 | ui.updateStatus('Chat cleared'); 312 | }); 313 | 314 | elements.ttsToggleButton.addEventListener('click', api.toggleTTS); 315 | elements.recordButton.addEventListener('click', api.toggleRecording); 316 | 317 | elements.themeToggle.addEventListener('click', () => { 318 | state.isDarkTheme = !state.isDarkTheme; 319 | document.body.classList.toggle('light-theme'); 320 | ui.updateThemeColors(); 321 | }); 322 | 323 | elements.infoToggle.addEventListener('click', () => { 324 | elements.infoPanel.classList.toggle('show'); 325 | }); 326 | 327 | elements.apiProviderSelect.addEventListener('change', (event) => { 328 | const selectedProvider = event.target.value; 329 | api.updateApiProvider(selectedProvider); 330 | }); 331 | } 332 | 333 | // Initialization 334 | function initializeApp() { 335 | console.log("Initializing application"); 336 | setupEventListeners(); 337 | api.loadChatHistory(); 338 | api.handleProgressUpdates(); 339 | ui.updateThemeColors(); 340 | ui.updateStatus('Ready'); 341 | } 342 | 343 | document.addEventListener('DOMContentLoaded', initializeApp); -------------------------------------------------------------------------------- /templates/favicon.ico: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Drlordbasil/AURORA/b2deb0f7b1b4c0611cbda977ee1f149a51049472/templates/favicon.ico -------------------------------------------------------------------------------- /templates/index.html: -------------------------------------------------------------------------------- 1 | 2 | 3 | 4 | 5 | 6 | AURORA: AI Assistant 7 | 8 | 9 | 144 | 145 | 146 |
(markup stripped in this capture; only the rendered text of templates/index.html survives)
AURORA
Artificial Unified Responsive Optimized Reasoning Agent
• Execute local commands
• Perform web research
• Analyze sentiment
• Voice recognition
• Talk to local LLM expert
Ready
API Keys
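A side effect of the stripped markup is worth flagging here: in main.py above, the audio route renders as @app.route('/get_audio/', methods=['GET']) even though its view function takes a filename argument and customization.js requests /get_audio/${audioFile}. The <filename> URL converter was almost certainly lost along with the rest of the angle-bracketed text. A minimal sketch of what the route presumably looks like, assuming Flask's default string converter:

```python
from flask import Flask, send_file

app = Flask(__name__)

@app.route('/get_audio/<filename>', methods=['GET'])
def get_audio(filename):
    """Serve a generated audio file (e.g. combined_audio.mp3) to the browser."""
    # Without the <filename> converter, Flask registers the bare '/get_audio/' rule,
    # but the view fails at request time because no value is supplied for `filename`,
    # and '/get_audio/combined_audio.mp3' would not match the rule at all.
    return send_file(filename, mimetype="audio/mp3")
```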

--------------------------------------------------------------------------------
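As a closing usage note, the HTTP surface defined in main.py can be exercised without the web UI. A minimal sketch, assuming the Flask app is running locally on port 5000 (main.py binds 0.0.0.0:5000) and using the requests package already listed in requirements.txt; the key value and the message below are placeholders:

```python
import requests

BASE = "http://localhost:5000"

# Select the backend LLM provider (the UI defaults to "ollama").
requests.post(f"{BASE}/update_api_provider", json={"provider": "ollama"})

# Store an API key in the server's environment via /set_env (value is a placeholder).
requests.post(f"{BASE}/set_env", json={"variable": "GROQ_API_KEY", "value": "your-key-here"})

# Toggle text-to-speech; the endpoint flips the current state and reports it.
requests.post(f"{BASE}/toggle_tts")

# Send a chat message and print the result produced by Brain.process_input.
reply = requests.post(f"{BASE}/send_message", json={"message": "Hello, AURORA"}).json()
print(reply["status"])
print(reply["response"])
```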