├── .devcontainer └── devcontainer.json ├── .gitignore ├── .vs ├── AutoGroq │ └── v17 │ │ ├── .wsuo │ │ └── DocumentLayout.json ├── ProjectSettings.json ├── VSWorkspaceState.json └── slnx.sqlite ├── AutoGroq.md ├── AutoGroq ├── .gitignore ├── agent_management.py ├── agents │ ├── code_developer.py │ ├── code_tester.py │ └── web_content_retriever.py ├── cli │ ├── create_agent.py │ └── rephrase_prompt.py ├── configs │ ├── config.py │ ├── config_agent.py │ ├── config_sessions.py │ └── current_project.py ├── llm_providers │ ├── anthropic_provider.py │ ├── base_provider.py │ ├── fireworks_provider.py │ ├── groq_provider.py │ ├── lmstudio_provider.py │ ├── ollama_provider.py │ └── openai_provider.py ├── main.py ├── models │ ├── agent_base_model.py │ ├── project_base_model.py │ ├── tool_base_model.py │ └── workflow_base_model.py ├── prompts.py ├── secrets.toml ├── style.css ├── tools │ ├── __init__.py │ ├── code_generator.py │ ├── code_test.py │ └── fetch_web_content.py └── utils │ ├── agent_utils.py │ ├── api_utils.py │ ├── auth_utils.py │ ├── db_utils.py │ ├── error_handling.py │ ├── file_utils.py │ ├── sandbox.py │ ├── session_utils.py │ ├── text_utils.py │ ├── tool_execution.py │ ├── tool_utils.py │ ├── ui_utils.py │ └── workflow_utils.py ├── README.md └── requirements.txt /.devcontainer/devcontainer.json: -------------------------------------------------------------------------------- 1 | { 2 | "name": "Python 3", 3 | // Or use a Dockerfile or Docker Compose file. More info: https://containers.dev/guide/dockerfile 4 | "image": "mcr.microsoft.com/devcontainers/python:1-3.11-bullseye", 5 | "customizations": { 6 | "codespaces": { 7 | "openFiles": [ 8 | "README.md", 9 | "AutoGroq/main.py" 10 | ] 11 | }, 12 | "vscode": { 13 | "settings": {}, 14 | "extensions": [ 15 | "ms-python.python", 16 | "ms-python.vscode-pylance" 17 | ] 18 | } 19 | }, 20 | "updateContentCommand": "[ -f packages.txt ] && sudo apt update && sudo apt upgrade -y && sudo xargs apt install -y 133 | div[data-testid*="stButton"] > button[kind="secondary"] { 134 | background-color: green !important; 135 | color: white !important; 136 | } 137 | 138 | """ 139 | st.markdown(button_style, unsafe_allow_html=True) 140 | st.button(agent.name, key=f"agent_{index}", on_click=agent_button_callback(index)) 141 | 142 | if st.session_state.get(f'show_edit_{index}', False): 143 | display_agent_edit_form(agent, index) 144 | 145 | 146 | def display_agent_buttons(agents): 147 | for index, agent in enumerate(agents): 148 | agent_name = agent.name if agent.name else f"Unnamed Agent {index + 1}" 149 | agent_id = getattr(agent, 'id', index) # Use agent's id if available, otherwise use index 150 | col1, col2 = st.sidebar.columns([1, 4]) 151 | with col1: 152 | gear_icon = "⚙️" # Unicode character for gear icon 153 | if st.button( 154 | gear_icon, 155 | key=f"gear_{agent_id}_{agent_name}", # Use both id and name for uniqueness 156 | help="Edit Agent" # Add the tooltip text 157 | ): 158 | st.session_state['edit_agent_index'] = index 159 | st.session_state['show_edit'] = True 160 | with col2: 161 | if "next_agent" in st.session_state and st.session_state.next_agent == agent_name: 162 | button_style = """ 163 | 169 | """ 170 | st.markdown(button_style, unsafe_allow_html=True) 171 | st.button(agent_name, key=f"agent_{agent_id}_{agent_name}", on_click=agent_button_callback(index)) 172 | 173 | 174 | def display_agent_edit_form(agent, edit_index): 175 | with st.expander(f"Edit Properties of {agent.name}", expanded=True): 176 | col1, col2 = st.columns([4, 1]) 177 | 
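# Layout note: the wide left column holds the editable name field and the delete-confirmation flow; the narrow right column holds the "X" delete button.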
with col1: 178 | unique_key = f"name_{edit_index}_{agent.name}" 179 | new_name = st.text_input("Name", value=agent.name, key=unique_key) 180 | if st.session_state.get(f"delete_confirmed_{edit_index}_{agent.name}", False): 181 | if st.button("Confirm Deletion", key=f"confirm_delete_{edit_index}_{agent.name}"): 182 | st.session_state.agents.pop(edit_index) 183 | st.session_state[f'show_edit_{edit_index}'] = False 184 | del st.session_state[f"delete_confirmed_{edit_index}_{agent.name}"] 185 | st.experimental_rerun() 186 | if st.button("Cancel", key=f"cancel_delete_{edit_index}_{agent.name}"): 187 | del st.session_state[f"delete_confirmed_{edit_index}_{agent.name}"] 188 | st.experimental_rerun() 189 | with col2: 190 | container = st.container() 191 | if container.button("X", key=f"delete_{edit_index}_{agent.name}"): 192 | if st.session_state.get(f"delete_confirmed_{edit_index}_{agent.name}", False): 193 | st.session_state.agents.pop(edit_index) 194 | st.session_state[f'show_edit_{edit_index}'] = False 195 | st.experimental_rerun() 196 | else: 197 | st.session_state[f"delete_confirmed_{edit_index}_{agent.name}"] = True 198 | st.experimental_rerun() 199 | 200 | description_value = agent.description 201 | 202 | col1, col2 = st.columns([3, 1]) 203 | with col1: 204 | current_provider = agent.provider or st.session_state.get('provider') 205 | selected_provider = st.selectbox( 206 | "Provider", 207 | options=SUPPORTED_PROVIDERS, 208 | index=SUPPORTED_PROVIDERS.index(current_provider), 209 | key=f"provider_select_{edit_index}_{agent.name}" 210 | ) 211 | 212 | # Fetch available models for the selected provider 213 | with st.spinner(f"Fetching models for {selected_provider}..."): 214 | provider_models = fetch_available_models(selected_provider) 215 | 216 | if not provider_models: 217 | st.warning(f"No models available for {selected_provider}. Using fallback list.") 218 | provider_models = FALLBACK_MODEL_TOKEN_LIMITS.get(selected_provider, {}) 219 | 220 | current_model = agent.model or st.session_state.get('model', 'default') 221 | 222 | if current_model not in provider_models: 223 | st.warning(f"Current model '{current_model}' is not available for {selected_provider}. 
Please select a new model.") 224 | current_model = next(iter(provider_models)) if provider_models else None 225 | 226 | if provider_models: 227 | selected_model = st.selectbox( 228 | "Model", 229 | options=list(provider_models.keys()), 230 | index=list(provider_models.keys()).index(current_model) if current_model in provider_models else 0, 231 | key=f"model_select_{edit_index}_{agent.name}" 232 | ) 233 | else: 234 | st.error(f"No models available for {selected_provider}.") 235 | selected_model = None 236 | 237 | with col2: 238 | if st.button("Set for ALL agents", key=f"set_all_agents_{edit_index}_{agent.name}"): 239 | for agent in st.session_state.agents: 240 | agent.config['provider'] = selected_provider 241 | if 'llm_config' not in agent.config: 242 | agent.config['llm_config'] = {'config_list': [{}]} 243 | if not agent.config['llm_config']['config_list']: 244 | agent.config['llm_config']['config_list'] = [{}] 245 | agent.config['llm_config']['config_list'][0]['model'] = selected_model 246 | agent.config['llm_config']['max_tokens'] = provider_models.get(selected_model, 4096) 247 | st.experimental_rerun() 248 | 249 | # Display the description in a text area 250 | new_description = st.text_area("Description", value=description_value, key=f"desc_{edit_index}_{agent.name}") 251 | 252 | col1, col2 = st.columns([3, 1]) 253 | with col1: 254 | if st.button("Update User Description", key=f"regenerate_{edit_index}_{agent.name}"): 255 | print(f"Regenerate button clicked for agent {edit_index}") 256 | new_description = regenerate_agent_description(agent) 257 | if new_description: 258 | agent.description = new_description 259 | print(f"Description regenerated for {agent.name}: {new_description}") 260 | st.session_state[f"regenerate_description_{edit_index}_{agent.name}"] = True 261 | description_value = new_description 262 | st.experimental_rerun() 263 | else: 264 | print(f"Failed to regenerate description for {agent.name}") 265 | with col2: 266 | if st.button("Save", key=f"save_{edit_index}_{agent.name}"): 267 | agent.name = new_name 268 | agent.description = new_description 269 | agent.provider = selected_provider 270 | agent.model = selected_model 271 | 272 | # Update the config as well 273 | agent.config['provider'] = selected_provider 274 | if 'llm_config' not in agent.config: 275 | agent.config['llm_config'] = {'config_list': [{}]} 276 | if not agent.config['llm_config']['config_list']: 277 | agent.config['llm_config']['config_list'] = [{}] 278 | agent.config['llm_config']['config_list'][0]['model'] = selected_model 279 | agent.config['llm_config']['max_tokens'] = provider_models.get(selected_model, 4096) 280 | 281 | st.session_state[f'show_edit_{edit_index}'] = False 282 | 283 | if 'edit_agent_index' in st.session_state: 284 | del st.session_state['edit_agent_index'] 285 | st.session_state.agents[edit_index] = agent 286 | st.experimental_rerun() 287 | 288 | # Add a debug print to check the agent's description 289 | print(f"Agent {agent.name} description: {agent.description}") 290 | 291 | 292 | def download_agent_file(expert_name): 293 | # Format the expert_name 294 | formatted_expert_name = re.sub(r'[^a-zA-Z0-9\s]', '', expert_name) # Remove non-alphanumeric characters 295 | formatted_expert_name = formatted_expert_name.lower().replace(' ', '_') # Convert to lowercase and replace spaces with underscores 296 | # Get the full path to the agent JSON file 297 | agents_dir = os.path.abspath(os.path.join(os.path.dirname(__file__), "agents")) 298 | json_file = os.path.join(agents_dir, 
f"{formatted_expert_name}.json") 299 | # Check if the file exists 300 | if os.path.exists(json_file): 301 | # Read the file content 302 | with open(json_file, "r") as f: 303 | file_content = f.read() 304 | # Encode the file content as base64 305 | b64_content = base64.b64encode(file_content.encode()).decode() 306 | # Create a download link 307 | href = f'Download {formatted_expert_name}.json' 308 | st.markdown(href, unsafe_allow_html=True) 309 | else: 310 | st.error(f"File not found: {json_file}") 311 | 312 | 313 | def extract_content(response): 314 | if isinstance(response, dict) and 'choices' in response: 315 | # Handle response from providers like Groq 316 | return response['choices'][0]['message']['content'] 317 | elif hasattr(response, 'content') and isinstance(response.content, list): 318 | # Handle Anthropic-style response 319 | return response.content[0].text 320 | elif isinstance(response, requests.models.Response): 321 | # Handle response from providers using requests.Response 322 | try: 323 | json_response = response.json() 324 | if 'choices' in json_response and json_response['choices']: 325 | return json_response['choices'][0]['message']['content'] 326 | except json.JSONDecodeError: 327 | logger.error("Failed to decode JSON from response") 328 | logger.error(f"Unexpected response format: {type(response)}") 329 | return None 330 | 331 | 332 | def process_agent_interaction(agent_index): 333 | agent = st.session_state.agents[agent_index] 334 | logger.debug(f"Processing interaction for agent: {agent.name}") 335 | logger.debug(f"Agent tools: {agent.tools}") 336 | 337 | if isinstance(agent, AgentBaseModel): 338 | agent_name = agent.name 339 | description = agent.description 340 | agent_tools = agent.tools 341 | else: 342 | # Fallback for dictionary-like structure 343 | agent_name = agent.get('config', {}).get('name', '') 344 | description = agent.get('description', '') 345 | agent_tools = agent.get("tools", []) 346 | 347 | user_request = st.session_state.get('user_request', '') 348 | user_input = st.session_state.get('user_input', '') 349 | rephrased_request = st.session_state.get('rephrased_request', '') 350 | reference_url = st.session_state.get('reference_url', '') 351 | 352 | # Execute associated tools for the agent 353 | tool_results = {} 354 | for tool in agent_tools: 355 | try: 356 | logger.debug(f"Executing tool: {tool.name}") 357 | if tool.name in st.session_state.tool_functions: 358 | tool_function = st.session_state.tool_functions[tool.name] 359 | if tool.name == 'fetch_web_content' and reference_url: 360 | tool_result = tool_function(reference_url) 361 | elif tool.name == 'generate_code': 362 | tool_result = tool_function(user_input or user_request or rephrased_request) 363 | else: 364 | tool_result = tool_function(user_input or user_request or rephrased_request) 365 | logger.debug(f"Tool result: {tool_result[:500]}...") # Log first 500 characters of result 366 | else: 367 | logger.error(f"Tool function not found for {tool.name}") 368 | tool_result = f"Error: Tool function not found for {tool.name}" 369 | 370 | tool_results[tool.name] = tool_result 371 | 372 | logger.debug(f"Tool result for {tool.name}: {tool_result[:500]}...") 373 | 374 | # Update the tool_result_string in the session state 375 | st.session_state.tool_result_string = tool_result[:1000] + "..." 
# Limit to first 1000 characters 376 | 377 | # Update the discussion and whiteboard immediately 378 | update_discussion_and_whiteboard(tool.name, st.session_state.tool_result_string, "") 379 | 380 | except Exception as e: 381 | error_message = f"Error executing tool {tool.name}: {str(e)}" 382 | logger.error(error_message, exc_info=True) 383 | tool_results[tool.name] = error_message 384 | st.session_state.tool_result_string = error_message 385 | update_discussion_and_whiteboard(tool.name, error_message, "") 386 | 387 | request = construct_request(agent, agent_name, description, user_request, user_input, rephrased_request, reference_url, tool_results) 388 | 389 | # Use the agent-specific provider and model 390 | if isinstance(agent, AgentBaseModel): 391 | provider = agent.provider or st.session_state.get('provider', LLM_PROVIDER) 392 | model = agent.model or st.session_state.get('model', 'default') 393 | else: 394 | # Fallback for dictionary-like structure 395 | provider = agent.get('provider') or st.session_state.get('provider', LLM_PROVIDER) 396 | model = agent.get('model') or st.session_state.get('model', 'default') 397 | 398 | logger.debug(f"Using provider: {provider}, model: {model}") 399 | 400 | api_key = get_api_key(provider) 401 | llm_provider = get_llm_provider(api_key=api_key, provider=provider) 402 | 403 | llm_request_data = { 404 | "model": model, 405 | "temperature": st.session_state.temperature, 406 | "max_tokens": FALLBACK_MODEL_TOKEN_LIMITS.get(model, 4096), 407 | "top_p": 1, 408 | "stop": "TERMINATE", 409 | "messages": [ 410 | { 411 | "role": "user", 412 | "content": request 413 | } 414 | ] 415 | } 416 | logger.debug(f"Sending request to {provider} using model {model}") 417 | response = llm_provider.send_request(llm_request_data) 418 | 419 | content = extract_content(response) 420 | if content: 421 | update_discussion_and_whiteboard(agent_name, content, user_input) 422 | st.session_state['form_agent_name'] = agent_name 423 | st.session_state['form_agent_description'] = description 424 | st.session_state['selected_agent_index'] = agent_index 425 | else: 426 | error_message = f"Error: Failed to extract content from response" 427 | log_error(error_message) 428 | logger.error(error_message) 429 | 430 | # Force a rerun to update the UI and trigger the moderator if necessary 431 | st.experimental_rerun() 432 | 433 | 434 | def regenerate_agent_description(agent): 435 | agent_name = agent.name if hasattr(agent, 'name') else "Unknown Agent" 436 | agent_description = agent.description if hasattr(agent, 'description') else "" 437 | print(f"agent_name: {agent_name}") 438 | print(f"agent_description: {agent_description}") 439 | user_request = st.session_state.get('user_request', '') 440 | print(f"user_request: {user_request}") 441 | discussion_history = st.session_state.get('discussion_history', '') 442 | prompt = f""" 443 | You are an AI assistant helping to improve an agent's description. The agent's current details are: 444 | Name: {agent_name} 445 | Description: {agent_description} 446 | The current user request is: {user_request} 447 | The discussion history so far is: {discussion_history} 448 | Please generate a revised description for this agent that defines it in the best manner possible to address the current user request, taking into account the discussion thus far. Return only the revised description, written in the third-person, without any additional commentary or narrative. It is imperative that you return ONLY the text of the new description written in the third-person. 
No preamble, no narrative, no superfluous commentary whatsoever. Just the description, written in the third-person, unlabeled, please. You will have been successful if your reply is thorough, comprehensive, concise, written in the third-person, and adherent to all of these instructions. 449 | """ 450 | print(f"regenerate_agent_description called with agent_name: {agent_name}") 451 | print(f"regenerate_agent_description called with prompt: {prompt}") 452 | 453 | api_key = get_api_key() 454 | llm_provider = get_llm_provider(api_key=api_key) 455 | llm_request_data = { 456 | "model": st.session_state.model, 457 | "temperature": st.session_state.temperature, 458 | "max_tokens": st.session_state.max_tokens, 459 | "top_p": 1, 460 | "stop": "TERMINATE", 461 | "messages": [ 462 | { 463 | "role": "user", 464 | "content": prompt 465 | } 466 | ] 467 | } 468 | response = llm_provider.send_request(llm_request_data) 469 | 470 | if response.status_code == 200: 471 | response_data = llm_provider.process_response(response) 472 | if "choices" in response_data and response_data["choices"]: 473 | content = response_data["choices"][0]["message"]["content"] 474 | return content.strip() 475 | 476 | return None 477 | 478 | 479 | def retrieve_agent_information(agent_index): 480 | agent = st.session_state.agents[agent_index] 481 | agent_name = agent["config"]["name"] 482 | description = agent["description"] 483 | return agent_name, description 484 | 485 | 486 | def send_request(agent_name, request): 487 | api_key = get_api_key() 488 | llm_provider = get_llm_provider(api_key=api_key) 489 | response = llm_provider.send_request(request) 490 | return response 491 | -------------------------------------------------------------------------------- /AutoGroq/agents/code_developer.py: -------------------------------------------------------------------------------- 1 | # agents/code_developer.py 2 | 3 | import datetime 4 | import streamlit as st 5 | from configs.config import LLM_PROVIDER 6 | from models.agent_base_model import AgentBaseModel 7 | from models.tool_base_model import ToolBaseModel 8 | from tools.code_generator import code_generator_tool 9 | 10 | class CodeDeveloperAgent(AgentBaseModel): 11 | def __init__(self, name, description, tools, config, role, goal, backstory, provider, model): 12 | current_timestamp = datetime.datetime.now().isoformat() 13 | super().__init__(name=name, description=description, tools=tools, config=config, 14 | role=role, goal=goal, backstory=backstory) 15 | self.provider = provider 16 | self.model = model 17 | self.created_at = current_timestamp 18 | self.updated_at = current_timestamp 19 | self.user_id = "default" 20 | self.timestamp = current_timestamp 21 | 22 | @classmethod 23 | def create_default(cls): 24 | return cls( 25 | name="Code Developer", 26 | description="An agent specialized in generating code based on feature descriptions.", 27 | tools=[code_generator_tool], 28 | config={ 29 | "llm_config": { 30 | "config_list": [{"model": st.session_state.get('model', 'default'), "api_key": None}], 31 | "temperature": st.session_state.get('temperature', 0.7) 32 | }, 33 | "human_input_mode": "NEVER", 34 | "max_consecutive_auto_reply": 10 35 | }, 36 | role="Code Developer", 37 | goal="To create efficient and effective code solutions based on given requirements.", 38 | backstory="I am an AI agent with extensive knowledge of various programming languages and software development best practices. 
My purpose is to assist in creating code that meets the specified requirements.", 39 | provider=st.session_state.get('provider', LLM_PROVIDER), 40 | model=st.session_state.get('model', 'default') 41 | ) 42 | 43 | 44 | def to_dict(self): 45 | data = self.__dict__ 46 | for key, value in data.items(): 47 | if isinstance(value, ToolBaseModel): 48 | data[key] = value.to_dict() 49 | return data -------------------------------------------------------------------------------- /AutoGroq/agents/code_tester.py: -------------------------------------------------------------------------------- 1 | # agents/code_tester.py 2 | 3 | import datetime 4 | import streamlit as st 5 | from configs.config import LLM_PROVIDER 6 | from models.agent_base_model import AgentBaseModel 7 | from models.tool_base_model import ToolBaseModel 8 | from tools.code_test import code_test_tool 9 | 10 | class CodeTesterAgent(AgentBaseModel): 11 | def __init__(self, name, description, tools, config, role, goal, backstory, provider, model): 12 | current_timestamp = datetime.datetime.now().isoformat() 13 | super().__init__(name=name, description=description, tools=tools, config=config, 14 | role=role, goal=goal, backstory=backstory) 15 | self.provider = provider 16 | self.model = model 17 | self.created_at = current_timestamp 18 | self.updated_at = current_timestamp 19 | self.user_id = "default" 20 | self.timestamp = current_timestamp 21 | 22 | @classmethod 23 | def create_default(cls): 24 | return cls( 25 | name="Code Tester", 26 | description="An agent specialized in testing code and providing feedback on its functionality.", 27 | tools=[code_test_tool], 28 | config={ 29 | "llm_config": { 30 | "config_list": [{"model": st.session_state.get('model', 'default'), "api_key": None}], 31 | "temperature": st.session_state.get('temperature', 0.7) 32 | }, 33 | "human_input_mode": "NEVER", 34 | "max_consecutive_auto_reply": 10 35 | }, 36 | role="Code Tester", 37 | goal="To thoroughly test code and provide comprehensive feedback to ensure its reliability and correctness.", 38 | backstory="I am an AI agent with expertise in software testing and quality assurance. 
My purpose is to rigorously test code and provide comprehensive feedback to ensure its reliability and correctness.", 39 | provider=st.session_state.get('provider', LLM_PROVIDER), 40 | model=st.session_state.get('model', 'default') 41 | ) 42 | 43 | def to_dict(self): 44 | data = self.__dict__ 45 | for key, value in data.items(): 46 | if isinstance(value, ToolBaseModel): 47 | data[key] = value.to_dict() 48 | return data -------------------------------------------------------------------------------- /AutoGroq/agents/web_content_retriever.py: -------------------------------------------------------------------------------- 1 | # agents/web_content_retriever.py 2 | 3 | import datetime 4 | import streamlit as st 5 | from configs.config import LLM_PROVIDER 6 | from models.agent_base_model import AgentBaseModel 7 | from models.tool_base_model import ToolBaseModel 8 | from tools.fetch_web_content import fetch_web_content_tool 9 | 10 | class WebContentRetrieverAgent(AgentBaseModel): 11 | def __init__(self, name, description, tools, config, role, goal, backstory, provider, model): 12 | current_timestamp = datetime.datetime.now().isoformat() 13 | super().__init__(name=name, description=description, tools=tools, config=config, 14 | role=role, goal=goal, backstory=backstory) 15 | self.provider = provider 16 | self.model = model 17 | self.created_at = current_timestamp 18 | self.updated_at = current_timestamp 19 | self.user_id = "default" 20 | self.timestamp = current_timestamp 21 | self.reference_url = None 22 | self.web_content = None 23 | 24 | @classmethod 25 | def create_default(cls): 26 | return cls( 27 | name="Web Content Retriever", 28 | description="An agent specialized in retrieving and processing web content.", 29 | tools=[fetch_web_content_tool], 30 | config={ 31 | "llm_config": { 32 | "config_list": [{"model": st.session_state.get('model', 'default'), "api_key": None}], 33 | "temperature": st.session_state.get('temperature', 0.7) 34 | }, 35 | "human_input_mode": "NEVER", 36 | "max_consecutive_auto_reply": 10 37 | }, 38 | role="Web Content Specialist", 39 | goal="To retrieve and analyze web content efficiently and accurately.", 40 | backstory="I am an AI agent designed to fetch and analyze web content, providing valuable insights and information from various online sources.", 41 | provider=st.session_state.get('provider', LLM_PROVIDER), 42 | model=st.session_state.get('model', 'default') 43 | ) 44 | 45 | def to_dict(self): 46 | data = self.__dict__ 47 | for key, value in data.items(): 48 | if isinstance(value, ToolBaseModel): 49 | data[key] = value.to_dict() 50 | return data 51 | 52 | def retrieve_web_content(self, reference_url): 53 | """ 54 | Retrieve web content from the given reference URL and store it in the agent's memory. 55 | 56 | Args: 57 | reference_url (str): The URL to fetch content from. 58 | 59 | Returns: 60 | dict: A dictionary containing the status, URL, and content (or error message). 61 | """ 62 | self.reference_url = reference_url 63 | fetch_tool = next((tool for tool in self.tools if tool.name == "fetch_web_content"), None) 64 | if fetch_tool is None: 65 | return {"status": "error", "message": "fetch_web_content tool not found"} 66 | 67 | result = fetch_tool.function(reference_url) 68 | if result["status"] == "success": 69 | self.web_content = result["content"] 70 | return result 71 | 72 | def get_web_content(self): 73 | """ 74 | Get the stored web content. 75 | 76 | Returns: 77 | str: The stored web content or None if not available. 
78 | """ 79 | return self.web_content 80 | 81 | def get_reference_url(self): 82 | """ 83 | Get the stored reference URL. 84 | 85 | Returns: 86 | str: The stored reference URL or None if not available. 87 | """ 88 | return self.reference_url 89 | -------------------------------------------------------------------------------- /AutoGroq/cli/create_agent.py: -------------------------------------------------------------------------------- 1 | 2 | import argparse 3 | import datetime 4 | import json 5 | import os 6 | import streamlit as st 7 | import sys 8 | 9 | # Add the root directory to the Python module search path 10 | sys.path.append(os.path.dirname(os.path.dirname(os.path.abspath(__file__)))) 11 | 12 | from configs.config import FALLBACK_MODEL_TOKEN_LIMITS 13 | from prompts import get_agent_prompt 14 | from utils.api_utils import get_llm_provider 15 | from utils.agent_utils import create_agent_data 16 | from utils.auth_utils import get_api_key 17 | from utils.file_utils import sanitize_text 18 | 19 | def create_agent(request, provider, model, temperature, max_tokens, output_file): 20 | # Get the API key and provider 21 | api_key = get_api_key() 22 | llm_provider = get_llm_provider(api_key=api_key) 23 | 24 | # Generate the prompt using get_agent_prompt 25 | prompt = get_agent_prompt(request) 26 | 27 | # Adjust the token limit based on the selected model 28 | max_tokens = FALLBACK_MODEL_TOKEN_LIMITS.get(provider, {}).get(model, 4096) 29 | 30 | # Make the request to the LLM API 31 | llm_request_data = { 32 | "model": model, 33 | "temperature": st.session_state.temperature, 34 | "max_tokens": max_tokens, 35 | "messages": [{"role": "user", "content": prompt}], 36 | } 37 | response = llm_provider.send_request(llm_request_data) 38 | 39 | if response.status_code != 200: 40 | print(f"Error: Received status code {response.status_code}") 41 | print(response.text) 42 | return 43 | 44 | response_data = response.json() 45 | 46 | if 'choices' not in response_data or len(response_data['choices']) == 0: 47 | print("Error: 'choices' not found in the response data or it's empty") 48 | print(json.dumps(response_data, indent=2)) 49 | return 50 | 51 | agent_description = response_data['choices'][0]['message']['content'].strip() 52 | 53 | agent_data = { 54 | "type": "assistant", 55 | "config": { 56 | "name": request, 57 | "llm_config": { 58 | "config_list": [ 59 | { 60 | "user_id": "default", 61 | "timestamp": datetime.datetime.now().isoformat(), 62 | "model": model, 63 | "base_url": None, 64 | "api_type": None, 65 | "api_version": None, 66 | "description": "OpenAI model configuration" 67 | } 68 | ], 69 | "temperature": temperature, 70 | "cache_seed": None, 71 | "timeout": None, 72 | "max_tokens": max_tokens, 73 | "extra_body": None 74 | }, 75 | "human_input_mode": "NEVER", 76 | "max_consecutive_auto_reply": 8, 77 | "system_message": f"You are a helpful assistant that can act as {sanitize_text(agent_description)} who {request}.", 78 | "is_termination_msg": None, 79 | "code_execution_config": None, 80 | "default_auto_reply": "", 81 | "description": agent_description # Ensure the description key is present 82 | }, 83 | "timestamp": datetime.datetime.now().isoformat(), 84 | "user_id": "default", 85 | "tools": [] 86 | } 87 | 88 | # Debug print to verify agent_data 89 | print("Agent Data:", json.dumps(agent_data, indent=2)) 90 | 91 | # Create the appropriate agent data 92 | autogen_agent_data, crewai_agent_data = create_agent_data(agent_data) 93 | 94 | # Save the agent data to the output file 95 | with 
open(output_file, "w") as f: 96 | json.dump(autogen_agent_data, f, indent=2) 97 | 98 | print(f"Agent created successfully. Output saved to: {output_file}") 99 | 100 | if __name__ == "__main__": 101 | parser = argparse.ArgumentParser(description="Create an agent based on a user request.") 102 | parser.add_argument("--request", required=True, help="The user request for creating the agent.") 103 | parser.add_argument("--model", default="mixtral-8x7b-32768", help="The model to use for the agent.") 104 | parser.add_argument("--temperature", type=float, default=0.5, help="The temperature value for the agent.") 105 | parser.add_argument("--max_tokens", type=int, default=32768, help="The maximum number of tokens for the agent.") 106 | parser.add_argument("--agent_type", default="autogen", choices=["autogen", "crewai"], help="The type of agent to create.") 107 | parser.add_argument("--output", default="agent.json", help="The output file path for the agent JSON.") 108 | parser.add_argument("--provider", default="groq", help="The LLM provider to use (e.g., 'openai', 'anthropic').") 109 | 110 | args = parser.parse_args() 111 | create_agent(args.request, args.provider, args.model, args.temperature, args.max_tokens, args.output) 112 | -------------------------------------------------------------------------------- /AutoGroq/cli/rephrase_prompt.py: -------------------------------------------------------------------------------- 1 | 2 | import argparse 3 | import os 4 | import sys 5 | 6 | # Add the root directory to the Python module search path 7 | sys.path.append(os.path.dirname(os.path.dirname(os.path.abspath(__file__)))) 8 | 9 | from configs.config import FALLBACK_MODEL_TOKEN_LIMITS, LLM_PROVIDER 10 | from utils.api_utils import get_llm_provider 11 | from utils.auth_utils import get_api_key 12 | from utils.ui_utils import rephrase_prompt 13 | 14 | 15 | def rephrase_prompt_cli(prompt, provider, model, temperature, max_tokens): 16 | # Get the API key 17 | api_key = get_api_key() 18 | 19 | # Use the provider specified in the CLI arguments 20 | llm_provider = get_llm_provider(api_key=api_key, provider=provider) 21 | 22 | # Override the model and max_tokens if specified in the command-line arguments 23 | model_to_use = model if model else provider 24 | max_tokens_to_use = FALLBACK_MODEL_TOKEN_LIMITS.get(model_to_use, max_tokens) 25 | 26 | rephrased_prompt = rephrase_prompt(prompt, model_to_use, max_tokens_to_use, llm_provider=llm_provider, provider=provider) 27 | 28 | if rephrased_prompt: 29 | print(f"Rephrased Prompt: {rephrased_prompt}") 30 | else: 31 | print("Error: Failed to rephrase the prompt.") 32 | 33 | 34 | if __name__ == "__main__": 35 | parser = argparse.ArgumentParser(description="Rephrase a user prompt.") 36 | parser.add_argument("--prompt", required=True, help="The user prompt to rephrase.") 37 | parser.add_argument("--model", default=None, help="The model to use for rephrasing.") 38 | parser.add_argument("--temperature", type=float, default=0.5, help="The temperature value for rephrasing.") 39 | parser.add_argument("--max_tokens", type=int, default=32768, help="The maximum number of tokens for rephrasing.") 40 | parser.add_argument("--provider", default=None, help="The LLM provider to use (e.g., 'openai', 'anthropic').") 41 | 42 | args = parser.parse_args() 43 | rephrase_prompt_cli(args.prompt, args.provider, args.model, args.temperature, args.max_tokens) 44 | -------------------------------------------------------------------------------- /AutoGroq/configs/config.py: 
-------------------------------------------------------------------------------- 1 | import os 2 | from typing import Dict 3 | 4 | # Get user home directory 5 | home_dir = os.path.expanduser("~") 6 | default_db_path = f'{home_dir}/.autogenstudio/database.sqlite' 7 | 8 | # Debug 9 | DEFAULT_DEBUG = False 10 | 11 | # Default configurations 12 | DEFAULT_LLM_PROVIDER = "anthropic" 13 | DEFAULT_GROQ_API_URL = "https://api.groq.com/openai/v1/chat/completions" 14 | DEFAULT_LMSTUDIO_API_URL = "http://localhost:1234/v1/chat/completions" 15 | DEFAULT_OLLAMA_API_URL = "http://127.0.0.1:11434/api/generate" 16 | DEFAULT_OPENAI_API_URL = "https://api.openai.com/v1/chat/completions" 17 | DEFAULT_ANTHROPIC_API_URL = "https://api.anthropic.com/v1/messages" 18 | 19 | # Try to import user-specific configurations from config_local.py 20 | try: 21 | from config_local import * 22 | except ImportError: 23 | pass 24 | 25 | # Set the configurations using the user-specific values if available, otherwise use the defaults 26 | DEBUG = locals().get('DEBUG', DEFAULT_DEBUG) 27 | LLM_PROVIDER = locals().get('LLM_PROVIDER', DEFAULT_LLM_PROVIDER) 28 | 29 | # API URLs for different providers 30 | API_URLS = { 31 | "groq": locals().get('GROQ_API_URL', DEFAULT_GROQ_API_URL), 32 | "lmstudio": locals().get('LMSTUDIO_API_URL', DEFAULT_LMSTUDIO_API_URL), 33 | "ollama": locals().get('OLLAMA_API_URL', DEFAULT_OLLAMA_API_URL), 34 | "openai": locals().get('OPENAI_API_URL', DEFAULT_OPENAI_API_URL), 35 | "anthropic": locals().get('ANTHROPIC_API_URL', DEFAULT_ANTHROPIC_API_URL), 36 | } 37 | 38 | API_KEY_NAMES = { 39 | "groq": "GROQ_API_KEY", 40 | "lmstudio": None, 41 | "ollama": None, 42 | "openai": "OPENAI_API_KEY", 43 | "anthropic": "ANTHROPIC_API_KEY", 44 | } 45 | 46 | # Retry settings 47 | MAX_RETRIES = 3 48 | RETRY_DELAY = 2 # in seconds 49 | RETRY_TOKEN_LIMIT = 5000 50 | 51 | # Fallback model configurations (used when API fails) 52 | FALLBACK_MODEL_TOKEN_LIMITS = { 53 | "anthropic": { 54 | "claude-3-5-sonnet-20240620": 4096, 55 | "claude-3-opus-20240229": 4096, 56 | "claude-3-sonnet-20240229": 4096, 57 | "claude-3-haiku-20240307": 4096, 58 | "claude-2.1": 100000, 59 | "claude-2.0": 100000, 60 | "claude-instant-1.2": 100000, 61 | }, 62 | "groq": { 63 | "mixtral-8x7b-32768": 32768, 64 | "llama3-70b-8192": 8192, 65 | "llama3-8b-8192": 8192, 66 | "gemma-7b-it": 8192, 67 | }, 68 | "openai": { 69 | "gpt-4": 8192, 70 | "gpt-3.5-turbo": 4096, 71 | }, 72 | "ollama": { 73 | "llama3": 8192, 74 | }, 75 | "lmstudio": { 76 | "instructlab/granite-7b-lab-GGUF": 2048, 77 | "MaziyarPanahi/Codestral-22B-v0.1-GGUF": 32768, 78 | }, 79 | } 80 | 81 | # Database path 82 | FRAMEWORK_DB_PATH = os.environ.get('FRAMEWORK_DB_PATH', default_db_path) 83 | 84 | SUPPORTED_PROVIDERS = ["anthropic", "groq", "lmstudio", "ollama", "openai"] 85 | 86 | BUILT_IN_AGENTS = ["Web Content Retriever", "Code Developer", "Code Tester"] 87 | 88 | AVAILABLE_MODELS: Dict[str, Dict[str, int]] = {} 89 | 90 | def update_available_models(provider: str, models: Dict[str, int]): 91 | """ 92 | Update the available models for a given provider. 
93 | 94 | :param provider: The name of the provider (e.g., 'groq', 'openai') 95 | :param models: A dictionary of model names and their token limits 96 | """ 97 | global AVAILABLE_MODELS 98 | AVAILABLE_MODELS[provider] = models -------------------------------------------------------------------------------- /AutoGroq/configs/config_agent.py: -------------------------------------------------------------------------------- 1 | # /configs/config_agent.py 2 | 3 | import datetime 4 | import streamlit as st 5 | 6 | from typing import Dict 7 | 8 | AGENT_CONFIG: Dict = { 9 | "type": "assistant", 10 | "config": { 11 | "name": "", 12 | "llm_config": { 13 | "config_list": [ 14 | { 15 | "user_id": "default", 16 | "timestamp": datetime.datetime.now().isoformat(), 17 | "model": st.session_state.model, 18 | "base_url": st.session_state.api_url, 19 | "api_type": None, 20 | "api_version": None, 21 | "description": "Model configuration" 22 | } 23 | ], 24 | "temperature": st.session_state.temperature, 25 | "cache_seed": None, 26 | "timeout": None, 27 | "max_tokens": None, 28 | "extra_body": None 29 | }, 30 | "human_input_mode": "NEVER", 31 | "max_consecutive_auto_reply": 8, 32 | "system_message": "", 33 | "is_termination_msg": None, 34 | "code_execution_config": None, 35 | "default_auto_reply": "", 36 | "description": "" 37 | }, 38 | "timestamp": datetime.datetime.now().isoformat(), 39 | "user_id": "default", 40 | "tools": [] 41 | } -------------------------------------------------------------------------------- /AutoGroq/configs/config_sessions.py: -------------------------------------------------------------------------------- 1 | # config_sessions.py 2 | 3 | from datetime import datetime 4 | from typing import Dict 5 | 6 | DEFAULT_AGENT_CONFIG: Dict = { 7 | "name": "Default Agent", 8 | "description": "A default agent for initialization purposes in AutoGroq", 9 | "tools": [], # Empty list as default 10 | "config": { 11 | "llm_config": { 12 | "config_list": [ 13 | { 14 | "model": "default", 15 | "api_key": None, 16 | "base_url": None, 17 | "api_type": None, 18 | "api_version": None, 19 | } 20 | ], 21 | "temperature": 0.7, 22 | "max_tokens": 1000, 23 | "top_p": 1.0, 24 | "frequency_penalty": 0.0, 25 | "presence_penalty": 0.0, 26 | }, 27 | "human_input_mode": "NEVER", 28 | "max_consecutive_auto_reply": 10, 29 | }, 30 | "role": "Default Assistant", 31 | "goal": "Assist users with general tasks in AutoGroq", 32 | "backstory": "I am a default AI assistant created to help initialize the AutoGroq system.", 33 | "id": None, # Will be set dynamically when needed 34 | "created_at": datetime.now().isoformat(), 35 | "updated_at": datetime.now().isoformat(), 36 | "user_id": "default_user", 37 | "workflows": None, 38 | "type": "assistant", 39 | "models": [], # Empty list as default 40 | "verbose": False, 41 | "allow_delegation": True, 42 | "new_description": None, 43 | "timestamp": datetime.now().isoformat(), 44 | "is_termination_msg": None, 45 | "code_execution_config": { 46 | "work_dir": "./agent_workspace", 47 | "use_docker": False, 48 | }, 49 | "llm": None, 50 | "function_calling_llm": None, 51 | "max_iter": 25, 52 | "max_rpm": None, 53 | "max_execution_time": 600, # 10 minutes default 54 | "step_callback": None, 55 | "cache": True 56 | } -------------------------------------------------------------------------------- /AutoGroq/configs/current_project.py: -------------------------------------------------------------------------------- 1 | 2 | class Current_Project: 3 | def __init__(self): 4 | self.deliverables = [] 5 
| self.re_engineered_prompt = "" 6 | self.implementation_phases = ["Planning", "Development", "Testing", "Deployment"] 7 | self.current_phase = "Planning" 8 | 9 | 10 | def add_deliverable(self, deliverable): 11 | self.deliverables.append({ 12 | "text": deliverable, 13 | "done": False, 14 | "phase": {phase: False for phase in self.implementation_phases} 15 | }) 16 | 17 | 18 | def get_next_unchecked_deliverable(self): 19 | for index, deliverable in enumerate(self.deliverables): 20 | if not deliverable["done"]: 21 | return index, deliverable["text"] 22 | return None, None 23 | 24 | 25 | def get_next_uncompleted_phase(self, index): 26 | if 0 <= index < len(self.deliverables): 27 | for phase in self.implementation_phases: 28 | if not self.deliverables[index]["phase"][phase]: 29 | return phase 30 | return None 31 | 32 | 33 | def is_deliverable_complete(self, index): 34 | if 0 <= index < len(self.deliverables): 35 | return all(self.deliverables[index]["phase"].values()) 36 | return False 37 | 38 | 39 | def mark_deliverable_phase_done(self, index, phase): 40 | if 0 <= index < len(self.deliverables): 41 | self.deliverables[index]["phase"][phase] = True 42 | if self.is_deliverable_complete(index): 43 | self.deliverables[index]["done"] = True 44 | 45 | 46 | def mark_deliverable_undone(self, index): 47 | if 0 <= index < len(self.deliverables): 48 | self.deliverables[index]["done"] = False 49 | 50 | 51 | def move_to_next_phase(self): 52 | current_index = self.implementation_phases.index(self.current_phase) 53 | if current_index < len(self.implementation_phases) - 1: 54 | self.current_phase = self.implementation_phases[current_index + 1] 55 | 56 | 57 | def set_re_engineered_prompt(self, prompt): 58 | self.re_engineered_prompt = prompt 59 | -------------------------------------------------------------------------------- /AutoGroq/llm_providers/anthropic_provider.py: -------------------------------------------------------------------------------- 1 | # llm_providers/anthropic_provider.py 2 | 3 | import anthropic 4 | import streamlit as st 5 | 6 | from llm_providers.base_provider import BaseLLMProvider 7 | 8 | class AnthropicProvider(BaseLLMProvider): 9 | def __init__(self, api_url, api_key): 10 | self.api_key = api_key 11 | self.api_url = api_url or "https://api.anthropic.com/v1/messages" 12 | self.client = anthropic.Anthropic(api_key=self.api_key) 13 | 14 | def get_available_models(self): 15 | return { 16 | "claude-3-5-sonnet-20240620": 4096, 17 | "claude-3-opus-20240229": 4096, 18 | "claude-3-sonnet-20240229": 4096, 19 | "claude-3-haiku-20240307": 4096, 20 | "claude-2.1": 100000, 21 | "claude-2.0": 100000, 22 | "claude-instant-1.2": 100000, 23 | } 24 | 25 | def process_response(self, response): 26 | if response is not None: 27 | return { 28 | "choices": [ 29 | { 30 | "message": { 31 | "content": response.content[0].text 32 | } 33 | } 34 | ] 35 | } 36 | return None 37 | 38 | def send_request(self, data): 39 | try: 40 | model = data['model'] 41 | max_tokens = min(data.get('max_tokens', 1000), self.get_available_models()[model]) 42 | 43 | response = self.client.messages.create( 44 | model=model, 45 | max_tokens=max_tokens, 46 | temperature=data.get('temperature', st.session_state.temperature), 47 | messages=[ 48 | {"role": "user", "content": message["content"]} 49 | for message in data['messages'] 50 | ] 51 | ) 52 | return response 53 | except anthropic.APIError as e: 54 | print(f"Anthropic API error: {e}") 55 | return None -------------------------------------------------------------------------------- 
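Before the abstract base class that follows, here is a minimal, hypothetical sketch of driving the AnthropicProvider defined above; the environment-variable name and model choice are assumptions for illustration, not part of the repository:

# Hypothetical usage sketch for AnthropicProvider (illustrative only).
import os
from llm_providers.anthropic_provider import AnthropicProvider

provider = AnthropicProvider(api_url=None, api_key=os.environ["ANTHROPIC_API_KEY"])
request_data = {
    "model": "claude-3-haiku-20240307",   # any key from get_available_models()
    "max_tokens": 256,                    # capped against the model's limit in send_request
    "temperature": 0.5,                   # supplying it avoids the st.session_state fallback
    "messages": [{"role": "user", "content": "Say hello."}],
}
raw = provider.send_request(request_data)     # anthropic Message object, or None on APIError
normalized = provider.process_response(raw)   # {"choices": [{"message": {"content": ...}}]} or None
if normalized:
    print(normalized["choices"][0]["message"]["content"])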
/AutoGroq/llm_providers/base_provider.py: -------------------------------------------------------------------------------- 1 | from abc import ABC, abstractmethod 2 | 3 | class BaseLLMProvider(ABC): 4 | @abstractmethod 5 | def __init__(self, api_key, api_url=None): 6 | pass 7 | 8 | @abstractmethod 9 | def send_request(self, data): 10 | pass 11 | 12 | @abstractmethod 13 | def process_response(self, response): 14 | pass 15 | 16 | @abstractmethod 17 | def get_available_models(self): 18 | pass -------------------------------------------------------------------------------- /AutoGroq/llm_providers/fireworks_provider.py: -------------------------------------------------------------------------------- 1 | 2 | import json 3 | import requests 4 | 5 | from llm_providers.base_provider import BaseLLMProvider 6 | 7 | 8 | class FireworksProvider(BaseLLMProvider): 9 | def __init__(self, api_url, api_key): 10 | self.api_url = api_url 11 | self.api_key = api_key 12 | 13 | def get_available_models(self): 14 | return None 15 | 16 | 17 | def process_response(self, response): 18 | if response.status_code == 200: 19 | return response.json() 20 | else: 21 | raise Exception(f"Request failed with status code {response.status_code}") 22 | 23 | 24 | def send_request(self, data): 25 | headers = { 26 | "Authorization": f"Bearer {self.api_key}", 27 | "Content-Type": "application/json", 28 | } 29 | # Ensure data is a JSON string 30 | if isinstance(data, dict): 31 | json_data = json.dumps(data) 32 | else: 33 | json_data = data 34 | response = requests.post(self.api_url, data=json_data, headers=headers) 35 | return response 36 | -------------------------------------------------------------------------------- /AutoGroq/llm_providers/groq_provider.py: -------------------------------------------------------------------------------- 1 | 2 | import json 3 | import requests 4 | 5 | from llm_providers.base_provider import BaseLLMProvider 6 | 7 | 8 | class GroqProvider: 9 | def __init__(self, api_url, api_key): 10 | self.api_key = api_key 11 | self.api_url = api_url or "https://api.groq.com/openai/v1/chat/completions" 12 | 13 | 14 | def process_response(self, response): 15 | if response.status_code == 200: 16 | return response.json() 17 | else: 18 | raise Exception(f"Request failed with status code {response.status_code}") 19 | 20 | 21 | def send_request(self, data): 22 | headers = { 23 | "Authorization": f"Bearer {self.api_key}", 24 | "Content-Type": "application/json", 25 | } 26 | # Ensure data is a JSON string 27 | if isinstance(data, dict): 28 | json_data = json.dumps(data) 29 | else: 30 | json_data = data 31 | response = requests.post(self.api_url, data=json_data, headers=headers) 32 | return response 33 | 34 | 35 | def get_available_models(self): 36 | response = requests.get("https://api.groq.com/openai/v1/models", headers={ 37 | "Authorization": f"Bearer {self.api_key}", 38 | "Content-Type": "application/json", 39 | }) 40 | if response.status_code == 200: 41 | models_data = response.json().get("data", []) 42 | return {model["id"]: model.get("max_tokens", 4096) for model in models_data} 43 | else: 44 | raise Exception(f"Failed to retrieve models: {response.status_code}") -------------------------------------------------------------------------------- /AutoGroq/llm_providers/lmstudio_provider.py: -------------------------------------------------------------------------------- 1 | # llm_providers/lmstudio_provider.py 2 | 3 | import json 4 | import requests 5 | import streamlit as st 6 | 7 | from llm_providers.base_provider import BaseLLMProvider 8
| 9 | 10 | class LmstudioProvider: 11 | def __init__(self, api_url, api_key): 12 | self.api_url = api_url or "http://localhost:1234/v1/chat/completions" 13 | 14 | 15 | def get_available_models(self): 16 | return { 17 | "instructlab/granite-7b-lab-GGUF": 2048, 18 | "MaziyarPanahi/Codestral-22B-v0.1-GGUF": 32768, 19 | # Add other LMStudio models here 20 | } 21 | 22 | 23 | def process_response(self, response): 24 | if response.status_code == 200: 25 | response_data = response.json() 26 | if "choices" in response_data: 27 | content = response_data["choices"][0]["message"]["content"] 28 | return { 29 | "choices": [ 30 | { 31 | "message": { 32 | "content": content.strip() 33 | } 34 | } 35 | ] 36 | } 37 | else: 38 | raise Exception("Unexpected response format. 'choices' field missing.") 39 | else: 40 | raise Exception(f"Request failed with status code {response.status_code}") 41 | 42 | 43 | def send_request(self, data): 44 | headers = { 45 | "Content-Type": "application/json", 46 | } 47 | 48 | # Construct the request data in the format expected by the LM Studio API 49 | lm_studio_request_data = { 50 | "model": data["model"], 51 | "messages": data["messages"], 52 | "temperature": st.session_state.temperature, 53 | "max_tokens": data.get("max_tokens", 2048), 54 | "stop": data.get("stop", "TERMINATE"), 55 | } 56 | 57 | # Ensure data is a JSON string 58 | if isinstance(lm_studio_request_data, dict): 59 | json_data = json.dumps(lm_studio_request_data) 60 | else: 61 | json_data = lm_studio_request_data 62 | 63 | response = requests.post(self.api_url, data=json_data, headers=headers) 64 | return response 65 | -------------------------------------------------------------------------------- /AutoGroq/llm_providers/ollama_provider.py: -------------------------------------------------------------------------------- 1 | # llm_providers/ollama_provider.py 2 | 3 | import json 4 | import requests 5 | import streamlit as st 6 | 7 | from llm_providers.base_provider import BaseLLMProvider 8 | 9 | 10 | class OllamaProvider: 11 | def __init__(self, api_url, api_key): 12 | self.api_url = api_url or "http://127.0.0.1:11434/api/generate" 13 | 14 | 15 | def get_available_models(self): 16 | return { 17 | "llama3": 8192, 18 | # Add other Ollama models here 19 | } 20 | 21 | 22 | def process_response(self, response): 23 | if response.status_code == 200: 24 | response_data = response.json() 25 | if "response" in response_data: 26 | content = response_data["response"].strip() 27 | if content: 28 | return { 29 | "choices": [ 30 | { 31 | "message": { 32 | "content": content 33 | } 34 | } 35 | ] 36 | } 37 | else: 38 | raise Exception("Empty response received from the Ollama API.") 39 | else: 40 | raise Exception("Unexpected response format. 
'response' field missing.") 41 | else: 42 | raise Exception(f"Request failed with status code {response.status_code}") 43 | 44 | 45 | def send_request(self, data): 46 | headers = { 47 | "Content-Type": "application/json", 48 | } 49 | # Construct the request data in the format expected by the Ollama API 50 | ollama_request_data = { 51 | "model": data["model"], 52 | "prompt": data["messages"][0]["content"], 53 | "temperature": st.session_state.temperature, 54 | "max_tokens": data.get("max_tokens", 2048), 55 | "stop": data.get("stop", "TERMINATE"), 56 | "stream": False, 57 | } 58 | # Ensure data is a JSON string 59 | if isinstance(ollama_request_data, dict): 60 | json_data = json.dumps(ollama_request_data) 61 | else: 62 | json_data = ollama_request_data 63 | response = requests.post(self.api_url, data=json_data, headers=headers) 64 | return response -------------------------------------------------------------------------------- /AutoGroq/llm_providers/openai_provider.py: -------------------------------------------------------------------------------- 1 | 2 | import json 3 | import os 4 | import requests 5 | 6 | from llm_providers.base_provider import BaseLLMProvider 7 | 8 | class OpenaiProvider: 9 | def __init__(self, api_url, api_key): 10 | self.api_key = api_key 11 | self.api_url = api_url or "https://api.openai.com/v1/chat/completions" 12 | 13 | 14 | def get_available_models(self): 15 | response = requests.get("https://api.openai.com/v1/models", headers={ 16 | "Authorization": f"Bearer {self.api_key}", 17 | "Content-Type": "application/json", 18 | }) 19 | if response.status_code == 200: 20 | models_data = response.json().get("data", []) 21 | return {model["id"]: model.get("max_tokens", 4096) for model in models_data} 22 | else: 23 | raise Exception(f"Failed to retrieve models: {response.status_code}") 24 | 25 | 26 | def process_response(self, response): 27 | if response.status_code == 200: 28 | return response.json() 29 | else: 30 | raise Exception(f"Request failed with status code {response.status_code}") 31 | 32 | 33 | def send_request(self, data): 34 | print("self.api_url: ", self.api_url) 35 | headers = { 36 | "Authorization": f"Bearer {self.api_key}", 37 | "Content-Type": "application/json", 38 | } 39 | 40 | # Ensure data is a JSON string 41 | if isinstance(data, dict): 42 | json_data = json.dumps(data) 43 | else: 44 | json_data = data 45 | 46 | response = requests.post(self.api_url, data=json_data, headers=headers) 47 | print("response.status_code: ", response.status_code) 48 | print("response.text: ", response.text) 49 | return response 50 | -------------------------------------------------------------------------------- /AutoGroq/main.py: -------------------------------------------------------------------------------- 1 | # main.py 2 | 3 | import streamlit as st 4 | 5 | from agent_management import display_agents 6 | from utils.api_utils import fetch_available_models, get_api_key 7 | from utils.auth_utils import display_api_key_input 8 | from utils.error_handling import setup_logging 9 | from utils.session_utils import initialize_session_variables 10 | from utils.tool_utils import load_tool_functions 11 | from utils.ui_utils import ( 12 | display_reset_and_upload_buttons, 13 | display_user_request_input, handle_user_request, 14 | select_model, select_provider, set_css, 15 | set_temperature, show_interfaces 16 | ) 17 | 18 | 19 | def main(): 20 | setup_logging() 21 | if 'warning_placeholder' not in st.session_state: 22 | st.session_state.warning_placeholder = st.empty() 23 | 
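# The placeholder is created before any other widgets so that warnings raised during setup can be rendered at the top of the page; storing it in session state presumably lets helper modules write into the same slot.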
st.title("AutoGroq™") 24 | 25 | set_css() 26 | initialize_session_variables() 27 | fetch_available_models() 28 | load_tool_functions() 29 | 30 | if st.session_state.get("need_rerun", False): 31 | st.session_state.need_rerun = False 32 | st.rerun() 33 | 34 | display_api_key_input() 35 | get_api_key() 36 | 37 | col1, col2, col3 = st.columns([2, 2, 1]) 38 | with col1: 39 | select_provider() 40 | 41 | with col2: 42 | select_model() 43 | 44 | with col3: 45 | set_temperature() 46 | 47 | if st.session_state.show_request_input: 48 | with st.container(): 49 | if st.session_state.get("rephrased_request", "") == "": 50 | user_request = st.text_input("Enter your request:", key="user_request", value=st.session_state.get("user_request", ""), on_change=handle_user_request, args=(st.session_state,)) 51 | display_user_request_input() 52 | if "agents" in st.session_state and st.session_state.agents: 53 | show_interfaces() 54 | display_reset_and_upload_buttons() 55 | 56 | with st.sidebar: 57 | display_agents() 58 | 59 | 60 | if __name__ == "__main__": 61 | main() -------------------------------------------------------------------------------- /AutoGroq/models/agent_base_model.py: -------------------------------------------------------------------------------- 1 | # models/agent_base_model.py 2 | 3 | import inspect 4 | 5 | from models.tool_base_model import ToolBaseModel 6 | from typing import List, Dict, Callable, Optional, Union 7 | 8 | 9 | class AgentBaseModel: 10 | def __init__( 11 | self, 12 | name: str, 13 | description: str, 14 | tools: List[Union[Dict, ToolBaseModel]], 15 | config: Dict, 16 | role: str, 17 | goal: str, 18 | backstory: str, 19 | provider: Optional[str] = None, 20 | model: Optional[str] = None, 21 | id: Optional[int] = None, 22 | created_at: Optional[str] = None, 23 | updated_at: Optional[str] = None, 24 | user_id: Optional[str] = None, 25 | workflows: Optional[str] = None, 26 | type: Optional[str] = None, 27 | models: Optional[List[Dict]] = None, 28 | verbose: Optional[bool] = False, 29 | allow_delegation: Optional[bool] = True, 30 | new_description: Optional[str] = None, 31 | timestamp: Optional[str] = None, 32 | is_termination_msg: Optional[bool] = None, 33 | code_execution_config: Optional[Dict] = None, 34 | llm: Optional[str] = None, 35 | function_calling_llm: Optional[str] = None, 36 | max_iter: Optional[int] = 25, 37 | max_rpm: Optional[int] = None, 38 | max_execution_time: Optional[int] = None, 39 | step_callback: Optional[Callable] = None, 40 | cache: Optional[bool] = True 41 | ): 42 | self.id = id 43 | self.name = name 44 | self.description = description 45 | self.tools = [tool if isinstance(tool, ToolBaseModel) else ToolBaseModel(**tool) for tool in tools] 46 | self.config = config 47 | self.role = role 48 | self.goal = goal 49 | self.backstory = backstory 50 | self.provider = provider 51 | self.model = model 52 | self.created_at = created_at 53 | self.updated_at = updated_at 54 | self.user_id = user_id 55 | self.workflows = workflows 56 | self.type = type 57 | self.models = models 58 | self.verbose = verbose 59 | self.allow_delegation = allow_delegation 60 | self.new_description = new_description 61 | self.timestamp = timestamp 62 | self.is_termination_msg = is_termination_msg 63 | self.code_execution_config = code_execution_config 64 | self.llm = llm 65 | self.function_calling_llm = function_calling_llm 66 | self.max_iter = max_iter 67 | self.max_rpm = max_rpm 68 | self.max_execution_time = max_execution_time 69 | self.step_callback = step_callback 70 | self.cache = cache 
71 | 72 | 73 | def __str__(self): 74 | return f"Agent(name={self.name}, description={self.description})" 75 | 76 | def __repr__(self): 77 | return self.__str__() 78 | 79 | def to_dict(self): 80 | return { 81 | "id": self.id, 82 | "name": self.name, 83 | "description": self.description, 84 | 'tools': [tool.to_dict() if hasattr(tool, 'to_dict') else tool for tool in self.tools], 85 | "provider": self.provider, 86 | "model": self.model, 87 | "config": self.config, 88 | "role": self.role, 89 | "goal": self.goal, 90 | "backstory": self.backstory, 91 | "created_at": self.created_at, 92 | "updated_at": self.updated_at, 93 | "user_id": self.user_id, 94 | "workflows": self.workflows, 95 | "type": self.type, 96 | "models": self.models, 97 | "verbose": self.verbose, 98 | "allow_delegation": self.allow_delegation, 99 | "new_description": self.new_description, 100 | "timestamp": self.timestamp, 101 | "is_termination_msg": self.is_termination_msg, 102 | "code_execution_config": self.code_execution_config, 103 | "llm": self.llm, 104 | "function_calling_llm": self.function_calling_llm, 105 | "max_iter": self.max_iter, 106 | "max_rpm": self.max_rpm, 107 | "max_execution_time": self.max_execution_time, 108 | "step_callback": self.step_callback, 109 | "cache": self.cache 110 | } 111 | 112 | @classmethod 113 | def from_dict(cls, data: Dict): 114 | tools = [ToolBaseModel.from_dict(tool) if isinstance(tool, dict) else tool for tool in data.get('tools', [])] 115 | return cls( 116 | id=data.get("id"), 117 | name=data["name"], 118 | description=data["description"], 119 | tools=tools, 120 | config=data["config"], 121 | role=data.get("role", ""), 122 | goal=data.get("goal", ""), 123 | backstory=data.get("backstory", ""), 124 | provider=data.get("provider"), 125 | model=data.get("model"), 126 | created_at=data.get("created_at"), 127 | updated_at=data.get("updated_at"), 128 | user_id=data.get("user_id"), 129 | workflows=data.get("workflows"), 130 | type=data.get("type"), 131 | models=data.get("models"), 132 | verbose=data.get("verbose", False), 133 | allow_delegation=data.get("allow_delegation", True), 134 | new_description=data.get("new_description"), 135 | timestamp=data.get("timestamp"), 136 | is_termination_msg=data.get("is_termination_msg"), 137 | code_execution_config=data.get("code_execution_config"), 138 | llm=data.get("llm"), 139 | function_calling_llm=data.get("function_calling_llm"), 140 | max_iter=data.get("max_iter", 25), 141 | max_rpm=data.get("max_rpm"), 142 | max_execution_time=data.get("max_execution_time"), 143 | step_callback=data.get("step_callback"), 144 | cache=data.get("cache", True) 145 | ) 146 | 147 | @classmethod 148 | def debug_init(cls): 149 | signature = inspect.signature(cls.__init__) 150 | params = signature.parameters 151 | required_params = [name for name, param in params.items() 152 | if param.default == inspect.Parameter.empty 153 | and param.kind != inspect.Parameter.VAR_KEYWORD] 154 | optional_params = [name for name, param in params.items() 155 | if param.default != inspect.Parameter.empty] 156 | 157 | print(f"Required parameters for {cls.__name__}:") 158 | for param in required_params: 159 | print(f" - {param}") 160 | 161 | print(f"\nOptional parameters for {cls.__name__}:") 162 | for param in optional_params: 163 | print(f" - {param}") 164 | 165 | return required_params, optional_params 166 | 167 | def get(self, key, default=None): 168 | return getattr(self, key, default) 169 | 170 | def __getitem__(self, key): 171 | return getattr(self, key) 172 | 173 | def __contains__(self, 
key): 174 | return hasattr(self, key) 175 | -------------------------------------------------------------------------------- /AutoGroq/models/project_base_model.py: -------------------------------------------------------------------------------- 1 | from typing import List, Dict, Optional 2 | from datetime import datetime 3 | 4 | class ProjectBaseModel: 5 | def __init__( 6 | self, 7 | re_engineered_prompt: str = "", 8 | deliverables: List[Dict] = None, 9 | id: Optional[int] = None, 10 | created_at: Optional[str] = None, 11 | updated_at: Optional[str] = None, 12 | user_id: Optional[str] = None, 13 | name: Optional[str] = None, 14 | description: Optional[str] = None, 15 | status: Optional[str] = None, 16 | due_date: Optional[str] = None, 17 | priority: Optional[str] = None, 18 | tags: Optional[List[str]] = None, 19 | attachments: Optional[List[str]] = None, 20 | notes: Optional[str] = None, 21 | collaborators: Optional[List[str]] = None, 22 | tools: Optional[List[Dict]] = None, 23 | workflows: Optional[List[Dict]] = None 24 | ): 25 | self.id = id or 1 26 | self.re_engineered_prompt = re_engineered_prompt 27 | self.deliverables = deliverables or [] 28 | self.created_at = created_at or datetime.now().isoformat() 29 | self.updated_at = updated_at 30 | self.user_id = user_id or "user" 31 | self.name = name or "project" 32 | self.description = description 33 | self.status = status or "not started" 34 | self.due_date = due_date 35 | self.priority = priority 36 | self.tags = tags or [] 37 | self.attachments = attachments or [] 38 | self.notes = notes 39 | self.collaborators = collaborators or [] 40 | self.tools = tools or [] 41 | self.workflows = workflows or [] 42 | 43 | 44 | def add_deliverable(self, deliverable: str): 45 | self.deliverables.append({"text": deliverable, "done": False}) 46 | 47 | 48 | def mark_deliverable_done(self, index: int): 49 | if 0 <= index < len(self.deliverables): 50 | self.deliverables[index]["done"] = True 51 | 52 | 53 | def mark_deliverable_undone(self, index: int): 54 | if 0 <= index < len(self.deliverables): 55 | self.deliverables[index]["done"] = False 56 | 57 | 58 | def set_re_engineered_prompt(self, prompt: str): 59 | self.re_engineered_prompt = prompt 60 | 61 | def to_dict(self): 62 | return { 63 | "id": self.id, 64 | "re_engineered_prompt": self.re_engineered_prompt, 65 | "deliverables": self.deliverables, 66 | "created_at": self.created_at, 67 | "updated_at": self.updated_at, 68 | "user_id": self.user_id, 69 | "name": self.name, 70 | "description": self.description, 71 | "status": self.status, 72 | "due_date": self.due_date, 73 | "priority": self.priority, 74 | "tags": self.tags, 75 | "attachments": self.attachments, 76 | "notes": self.notes, 77 | "collaborators": self.collaborators, 78 | "tools": self.tools, 79 | "workflows": self.workflows 80 | } 81 | 82 | @classmethod 83 | def from_dict(cls, data: Dict): 84 | return cls( 85 | id=data.get("id"), 86 | re_engineered_prompt=data.get("re_engineered_prompt", ""), 87 | deliverables=data.get("deliverables", []), 88 | created_at=data.get("created_at"), 89 | updated_at=data.get("updated_at"), 90 | user_id=data.get("user_id"), 91 | name=data.get("name"), 92 | description=data.get("description"), 93 | status=data.get("status"), 94 | due_date=data.get("due_date"), 95 | priority=data.get("priority"), 96 | tags=data.get("tags"), 97 | attachments=data.get("attachments"), 98 | notes=data.get("notes"), 99 | collaborators=data.get("collaborators"), 100 | tools=data.get("tools"), # restore tools and workflows so to_dict()/from_dict() round-trips losslessly 101 | workflows=data.get("workflows") 102 | ) 103 | 
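# Example usage (hypothetical, commented out per the repo's convention): a round trip
# through to_dict()/from_dict() preserves deliverable state.
# project = ProjectBaseModel(name="demo_project", user_id="demo_user")
# project.add_deliverable("Draft the specification")
# project.mark_deliverable_done(0)
# restored = ProjectBaseModel.from_dict(project.to_dict())
# assert restored.deliverables[0]["done"] is True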
-------------------------------------------------------------------------------- /AutoGroq/models/tool_base_model.py: -------------------------------------------------------------------------------- 1 | # tool_base_model.py 2 | 3 | from typing import List, Dict, Optional, Callable 4 | 5 | class ToolBaseModel: 6 | def __init__( 7 | self, 8 | name: str, 9 | description: str, 10 | title: str, 11 | file_name: str, 12 | content: str, 13 | function: Optional[Callable] = None, 14 | id: Optional[int] = None, 15 | created_at: Optional[str] = None, 16 | updated_at: Optional[str] = None, 17 | user_id: Optional[str] = None, 18 | secrets: Optional[Dict] = None, 19 | libraries: Optional[List[str]] = None, 20 | timestamp: Optional[str] = None 21 | ): 22 | self.id = id 23 | self.name = name 24 | self.description = description 25 | self.title = title 26 | self.file_name = file_name 27 | self.content = content 28 | self.function = function 29 | self.created_at = created_at 30 | self.updated_at = updated_at 31 | self.user_id = user_id 32 | self.secrets = secrets if secrets is not None else {} # default to an empty dict to match the Optional[Dict] type hint 33 | self.libraries = libraries if libraries is not None else [] 34 | self.timestamp = timestamp 35 | 36 | def execute(self, *args, **kwargs): 37 | if self.function: 38 | return self.function(*args, **kwargs) 39 | else: 40 | raise ValueError(f"No function defined for tool {self.name}") 41 | 42 | def __str__(self): 43 | return f"{self.name}: {self.description}" 44 | 45 | def to_dict(self): 46 | return { 47 | "name": self.name, 48 | "description": self.description, 49 | "title": self.title, 50 | "file_name": self.file_name, 51 | "content": self.content, 52 | "id": self.id, 53 | "created_at": self.created_at, 54 | "updated_at": self.updated_at, 55 | "user_id": self.user_id, 56 | "secrets": self.secrets, 57 | "libraries": self.libraries, 58 | "timestamp": self.timestamp 59 | } 60 | 61 | @classmethod 62 | def from_dict(cls, data: Dict): 63 | return cls( 64 | id=data.get("id"), 65 | name=data.get("name", ""), # Default to empty string if 'name' is missing 66 | description=data.get("description", ""), # Default to empty string if 'description' is missing 67 | title=data["title"], 68 | file_name=data["file_name"], 69 | content=data["content"], 70 | created_at=data.get("created_at"), 71 | updated_at=data.get("updated_at"), 72 | user_id=data.get("user_id"), 73 | secrets=data.get("secrets"), 74 | libraries=data.get("libraries"), 75 | timestamp=data.get("timestamp") 76 | ) 77 | 78 | def get(self, key, default=None): 79 | return getattr(self, key, default) 80 | 81 | def __getitem__(self, key): 82 | return getattr(self, key) 83 | 84 | def __contains__(self, key): 85 | return hasattr(self, key) 86 | -------------------------------------------------------------------------------- /AutoGroq/models/workflow_base_model.py: -------------------------------------------------------------------------------- 1 | from typing import List, Dict, Optional 2 | from models.agent_base_model import AgentBaseModel 3 | 4 | class Sender: 5 | def __init__( 6 | self, 7 | type: str, 8 | config: Dict, 9 | timestamp: str, 10 | user_id: str, 11 | tools: List[Dict], 12 | ): 13 | self.type = type 14 | self.config = config 15 | self.timestamp = timestamp 16 | self.user_id = user_id 17 | self.tools = tools 18 | 19 | def to_dict(self): 20 | return { 21 | "type": self.type, 22 | "config": self.config, 23 | "timestamp": self.timestamp, 24 | "user_id": self.user_id, 25 | "tools": self.tools, 26 | } 27 | 28 | @classmethod 29 | def from_dict(cls, data: Dict): 30 | return 
cls( 31 | type=data["type"], 32 | config=data["config"], 33 | timestamp=data["timestamp"], 34 | user_id=data["user_id"], 35 | tools=data["tools"], 36 | ) 37 | 38 | class Receiver: 39 | def __init__( 40 | self, 41 | type: str, 42 | config: Dict, 43 | groupchat_config: Dict, 44 | timestamp: str, 45 | user_id: str, 46 | tools: List[Dict], 47 | agents: List[AgentBaseModel], 48 | ): 49 | self.type = type 50 | self.config = config 51 | self.groupchat_config = groupchat_config 52 | self.timestamp = timestamp 53 | self.user_id = user_id 54 | self.tools = tools 55 | self.agents = agents 56 | 57 | def to_dict(self): 58 | return { 59 | "type": self.type, 60 | "config": self.config, 61 | "groupchat_config": self.groupchat_config, 62 | "timestamp": self.timestamp, 63 | "user_id": self.user_id, 64 | "tools": self.tools, 65 | "agents": [agent.to_dict() for agent in self.agents], 66 | } 67 | 68 | @classmethod 69 | def from_dict(cls, data: Dict): 70 | return cls( 71 | type=data["type"], 72 | config=data["config"], 73 | groupchat_config=data["groupchat_config"], 74 | timestamp=data["timestamp"], 75 | user_id=data["user_id"], 76 | tools=data["tools"], 77 | agents=[AgentBaseModel.from_dict(agent) for agent in data.get("agents", [])], 78 | ) 79 | 80 | class WorkflowBaseModel: 81 | def __init__( 82 | self, 83 | name: str, 84 | description: str, 85 | agents: List[AgentBaseModel], 86 | sender: Sender, 87 | receiver: Receiver, 88 | type: str, 89 | user_id: str, 90 | timestamp: str, 91 | summary_method: str, 92 | settings: Dict = None, 93 | groupchat_config: Dict = None, 94 | id: Optional[int] = None, 95 | created_at: Optional[str] = None, 96 | updated_at: Optional[str] = None, 97 | ): 98 | self.id = id 99 | self.name = name 100 | self.description = description 101 | self.agents = agents 102 | self.sender = sender 103 | self.receiver = receiver 104 | self.type = type 105 | self.user_id = user_id 106 | self.timestamp = timestamp 107 | self.summary_method = summary_method 108 | self.settings = settings or {} 109 | self.groupchat_config = groupchat_config or {} 110 | self.created_at = created_at 111 | self.updated_at = updated_at 112 | 113 | def to_dict(self): 114 | return { 115 | "id": self.id, 116 | "name": self.name, 117 | "description": self.description, 118 | "agents": [agent.to_dict() for agent in self.agents], 119 | "sender": self.sender.to_dict(), 120 | "receiver": self.receiver.to_dict(), 121 | "type": self.type, 122 | "user_id": self.user_id, 123 | "timestamp": self.timestamp, 124 | "summary_method": self.summary_method, 125 | "settings": self.settings, 126 | "groupchat_config": self.groupchat_config, 127 | "created_at": self.created_at, 128 | "updated_at": self.updated_at, 129 | } 130 | 131 | @classmethod 132 | def from_dict(cls, data: Dict): 133 | sender = Sender.from_dict(data["sender"]) 134 | receiver = Receiver.from_dict(data["receiver"]) 135 | return cls( 136 | id=data.get("id"), 137 | name=data["name"], 138 | description=data["description"], 139 | agents=[AgentBaseModel.from_dict(agent) for agent in data.get("agents", [])], 140 | sender=sender, 141 | receiver=receiver, 142 | type=data["type"], 143 | user_id=data["user_id"], 144 | timestamp=data["timestamp"], 145 | summary_method=data["summary_method"], 146 | settings=data.get("settings", {}), 147 | groupchat_config=data.get("groupchat_config", {}), 148 | created_at=data.get("created_at"), 149 | updated_at=data.get("updated_at"), 150 | ) -------------------------------------------------------------------------------- /AutoGroq/prompts.py: 
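# Example usage for the workflow models above (hypothetical, commented out; the field
# values below are illustrative assumptions, not values taken from this repo):
# sender = Sender(type="userproxy", config={}, timestamp="2024-01-01T00:00:00",
#                 user_id="demo_user", tools=[])
# receiver = Receiver(type="assistant", config={}, groupchat_config={},
#                     timestamp="2024-01-01T00:00:00", user_id="demo_user",
#                     tools=[], agents=[])
# workflow = WorkflowBaseModel(
#     name="demo_workflow", description="Round-trip demo", agents=[],
#     sender=sender, receiver=receiver, type="groupchat", user_id="demo_user",
#     timestamp="2024-01-01T00:00:00", summary_method="last")
# assert WorkflowBaseModel.from_dict(workflow.to_dict()).name == "demo_workflow"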
-------------------------------------------------------------------------------- 1 | # prompts.py 2 | 3 | def create_project_manager_prompt(rephrased_text): 4 | return f""" 5 | As a Project Manager, create a project plan for: 6 | {rephrased_text} 7 | Include: 8 | 9 | Project Outline: 10 | 11 | Comprehensive overview 12 | Logical structure 13 | Key Deliverables: List in order of completion 14 | 15 | 16 | Expert Team: 17 | 18 | Roles based on project needs 19 | Minimum necessary team size 20 | For each expert: 21 | a) Role title 22 | b) Key responsibilities 23 | c) Essential expertise 24 | 25 | 26 | 27 | Format: 28 | Project Outline: 29 | [Your detailed outline] 30 | Key Deliverables: 31 | [Numbered list] 32 | Team of Experts: 33 | [Description of the ideal team of experts] 34 | """ 35 | 36 | 37 | def get_agent_prompt(rephrased_request): 38 | return f""" 39 | Based on the following user request, please create a detailed and comprehensive description 40 | of an AI agent that can effectively assist with the request: 41 | 42 | User Request: "{rephrased_request}" 43 | 44 | Provide a clear and concise description of the agent's role, capabilities, and expertise. 45 | The description should be efficiently written in a concise, professional and engaging manner, 46 | highlighting the agent's ability to understand and respond to the request efficiently. 47 | 48 | Agent Description: 49 | """ 50 | 51 | 52 | def get_agents_prompt(): 53 | return """ 54 | You are an expert system designed to format the JSON describing each member of the team 55 | of AI agents listed in the 'Team of Experts' section below. Follow these guidelines: 56 | 1. Agent Roles: Clearly transcribe the titles of each agent listed. 57 | 2. Expertise Description: Provide a brief but thorough description of each agent's expertise 58 | based on the provided information. 59 | 3. Format: Return the results in JSON format with values labeled as expert_name, description, role, goal, and backstory. 60 | 'expert_name' should be the agent's title, not their given or proper name. 61 | 62 | Return ONLY the JSON array, with no other text: 63 | [ 64 | { 65 | "expert_name": "agent_title", 66 | "description": "agent_description", 67 | "role": "agent_role", 68 | "goal": "agent_goal", 69 | "backstory": "agent_backstory" 70 | } 71 | ] 72 | """ 73 | 74 | # Contributed by ScruffyNerf 75 | def get_generate_tool_prompt(rephrased_tool_request): 76 | return f''' 77 | Based on the rephrased tool request below, please do the following: 78 | 79 | 1. Do step-by-step reasoning and think to better understand the request. 80 | 2. Code the best Autogen Studio Python tool as per the request as a [tool_name].py file. 81 | 3. Return only the tool file, no commentary, intro, or other extra text. If there ARE any non-code lines, 82 | please pre-pend them with a '#' symbol to comment them out. 83 | 4. A proper tool will have these parts: 84 | a. Imports (import libraries needed for the tool) 85 | b. Function definition AND docstrings (this helps the LLM understand what the function does and how to use it) 86 | c. Function body (the actual code that implements the function) 87 | d. (optional) Example usage - ALWAYS commented out 88 | Here is an example of a well formatted tool: 89 | 90 | # Tool filename: save_file_to_disk.py 91 | # Import necessary module(s) 92 | import os 93 | 94 | def save_file_to_disk(contents, file_name): 95 | # docstrings 96 | """ 97 | Saves the given contents to a file with the given file name. 
98 | 99 | Parameters: 100 | contents (str): The string contents to save to the file. 101 | file_name (str): The name of the file, including its extension. 102 | 103 | Returns: 104 | str: A message indicating the success of the operation. 105 | """ 106 | 107 | # Body of tool 108 | 109 | # Ensure the directory exists; create it if it doesn't 110 | directory = os.path.dirname(file_name) 111 | if directory and not os.path.exists(directory): 112 | os.makedirs(directory) 113 | 114 | # Write the contents to the file 115 | with open(file_name, 'w') as file: 116 | file.write(contents) 117 | 118 | return f"File {{file_name}} has been saved successfully." 119 | 120 | # Example usage: 121 | # contents_to_save = "Hello, world!" 122 | # file_name = "example.txt" 123 | # print(save_file_to_disk(contents_to_save, file_name)) 124 | 125 | Rephrased tool request: "{rephrased_tool_request}" 126 | ''' 127 | 128 | 129 | def get_moderator_prompt(discussion_history, goal, last_comment, last_speaker, team_members_str, current_deliverable, current_phase): 130 | return f""" 131 | This agent is our Moderator Bot. Its goal is to mediate the conversation between a team of AI agents 132 | in a manner that persuades them to act in the most expeditious and thorough manner to accomplish their goal. 133 | This will entail considering the user's stated goal, the conversation thus far, the descriptions 134 | of all the available agents/experts in the current team, the last speaker, and their remark. 135 | Based upon a holistic analysis of all the facts at hand, use logic and reasoning to decide which team member should speak next. 136 | Then draft a prompt directed at that agent that persuades them to act in the most expeditious and thorough manner toward helping this team of agents 137 | accomplish their goal. 138 | 139 | Their overall goal is: {goal}. 140 | The current deliverable they're working on is: {current_deliverable} 141 | The current implementation phase is: {current_phase} 142 | The last speaker was {last_speaker}, who said: {last_comment} 143 | 144 | Here is the current conversational discussion history: {discussion_history} 145 | 146 | And here are the team members and their descriptions: 147 | {team_members_str} 148 | 149 | IMPORTANT: Your response must start with "To [Agent Name]:", where [Agent Name] is one of the valid team members listed above. Do not address tools or non-existent team members. 150 | 151 | This agent's response should be JUST the requested prompt addressed to the next agent, and should not contain 152 | any introduction, narrative, or any other superfluous text whatsoever. 153 | 154 | If you believe the current phase of the deliverable has been satisfactorily completed, include the exact phrase 155 | "PHASE_COMPLETED" at the beginning of your response, followed by your usual prompt to the next agent focusing on 156 | the next phase or deliverable. 157 | 158 | Remember, we are now in the {current_phase} phase. The agents should focus on actually implementing, coding, 159 | testing, or deploying the solutions as appropriate for the current phase, not just planning. 160 | """ 161 | 162 | 163 | def get_rephrased_user_prompt(user_request): 164 | return f"""Act as a professional prompt engineer and refactor the following 165 | user request into an optimized prompt. This agent's goal is to rephrase the request 166 | with a focus on satisfying all of the following criteria without explicitly stating them: 167 | 1. Clarity: Ensure the prompt is clear and unambiguous. 168 | 2. 
Specific Instructions: Provide detailed steps or guidelines. 169 | 3. Context: Include necessary background information. 170 | 4. Structure: Organize the prompt logically. 171 | 5. Language: Use concise and precise language. 172 | 6. Examples: Offer examples to illustrate the desired output. 173 | 7. Constraints: Define any limits or guidelines. 174 | 8. Engagement: Make the prompt engaging and interesting. 175 | 9. Feedback Mechanism: Suggest a way to improve or iterate on the response. 176 | 177 | Apply introspection and reasoning to reconsider your own prompt[s] to: 178 | Clarify ambiguities 179 | Break down complex tasks 180 | Provide essential context 181 | Structure logically 182 | Use precise, concise language 183 | Include relevant examples 184 | Specify constraints 185 | 186 | Do NOT reply with a direct response to these instructions OR the original user request. Instead, rephrase the user's request as a well-structured prompt, and 187 | return ONLY that rephrased prompt. Do not preface the rephrased prompt with any other text or superfluous narrative. 188 | Do not enclose the rephrased prompt in quotes. This agent will be successful only if it returns a well-formed rephrased prompt ready for submission as an LLM request. 189 | User request: "{user_request}" 190 | Rephrased: 191 | """ 192 | 193 | -------------------------------------------------------------------------------- /AutoGroq/secrets.toml: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/jgravelle/AutoGroq/ecf1639865edbe8e68e163ac72e779cea833a273/AutoGroq/secrets.toml -------------------------------------------------------------------------------- /AutoGroq/style.css: -------------------------------------------------------------------------------- 1 | /* General styles */ 2 | body { 3 | font-family: Arial, sans-serif; 4 | background-color: #f0f0f0; 5 | } 6 | 7 | /* Sidebar styles */ 8 | .sidebar .sidebar-content { 9 | background-color: #ffffff !important; 10 | padding: 20px !important; 11 | border-radius: 5px !important; 12 | box-shadow: 0 2px 4px rgba(0, 0, 0, 0.1) !important; 13 | } 14 | 15 | .sidebar .st-emotion-cache-k7vsyb h1 { 16 | font-size: 12px !important; 17 | font-weight: bold !important; 18 | color: #007bff !important; 19 | } 20 | 21 | .sidebar h2 { 22 | font-size: 16px !important; 23 | color: #666666 !important; 24 | } 25 | 26 | .sidebar .stButton button { 27 | display: block !important; 28 | width: 100% !important; 29 | padding: 10px !important; 30 | background-color: #007bff !important; 31 | color: #ffffff !important; 32 | text-align: center !important; 33 | text-decoration: none !important; 34 | border-radius: 5px !important; 35 | transition: background-color 0.3s !important; 36 | } 37 | 38 | .sidebar .stButton button:hover { 39 | background-color: #0056b3 !important; 40 | } 41 | 42 | .sidebar a { 43 | display: block !important; 44 | color: #007bff !important; 45 | text-decoration: none !important; 46 | } 47 | 48 | .sidebar a:hover { 49 | text-decoration: underline !important; 50 | } 51 | 52 | /* Main content styles */ 53 | .main .stTextInput input { 54 | width: 100% !important; 55 | padding: 10px !important; 56 | border: 1px solid #cccccc !important; 57 | border-radius: 5px !important; 58 | } 59 | 60 | .main .stTextArea textarea { 61 | width: 100% !important; 62 | padding: 10px !important; 63 | border: 1px solid #cccccc !important; 64 | border-radius: 5px !important; 65 | resize: none !important; 66 | } 67 | 68 | .main .stButton button { 
69 | padding: 10px 20px !important; 70 | background-color: #dc3545 !important; 71 | color: #ffffff !important; 72 | border: none !important; 73 | border-radius: 5px !important; 74 | cursor: pointer !important; 75 | transition: background-color 0.3s !important; 76 | } 77 | 78 | .main .stButton button:hover { 79 | background-color: #c82333 !important; 80 | } 81 | 82 | .main h1 { 83 | font-size: 32px !important; 84 | font-weight: bold !important; 85 | color: #007bff !important; 86 | } 87 | 88 | /* Model selection styles */ 89 | .main .stSelectbox select { 90 | width: 100% !important; 91 | padding: 10px !important; 92 | border: 1px solid #cccccc !important; 93 | border-radius: 5px !important; 94 | } 95 | 96 | /* Error message styles */ 97 | .main .stAlert { 98 | color: #dc3545 !important; 99 | } -------------------------------------------------------------------------------- /AutoGroq/tools/__init__.py: -------------------------------------------------------------------------------- 1 | """ 2 | This module contains various tools used by the AutoGroq system. 3 | """ -------------------------------------------------------------------------------- /AutoGroq/tools/code_generator.py: -------------------------------------------------------------------------------- 1 | # tools/code_generator.py 2 | 3 | import inspect 4 | import json 5 | import logging 6 | from models.tool_base_model import ToolBaseModel 7 | from utils.api_utils import get_api_key, get_llm_provider 8 | import streamlit as st 9 | 10 | logger = logging.getLogger(__name__) 11 | 12 | def generate_code(request: str, language: str = "Python") -> str: 13 | logger.debug(f"Generating code for request: {request}") 14 | logger.debug(f"Language: {language}") 15 | 16 | if not request.strip(): 17 | return "Error: No specific code generation request provided." 18 | 19 | prompt = f""" 20 | You are an advanced AI language model with expertise in software development. Your task is to generate the best possible software solution for the following request: 21 | **Request:** 22 | {request} 23 | **Language:** 24 | {language} 25 | Please ensure that the code follows best practices for {language}, is optimized for performance and maintainability, and includes comprehensive comments explaining each part of the code. Additionally, provide any necessary context or explanations to help understand the implementation. The solution should be robust, scalable, and adhere to industry standards. 26 | If there are multiple ways to solve the problem, choose the most efficient and elegant approach. If any libraries or frameworks are beneficial, include their usage with appropriate explanations. 27 | Begin your response with a brief overview of the approach you are taking, and then provide the complete code. 28 | Example overview: "To solve the problem of {request}, we will implement a {{specific algorithm/pattern}} using {{specific features/libraries of the language}}. This approach ensures {{benefits of the approach}}." 29 | Here is the code: 30 | """ 31 | 32 | api_key = get_api_key() 33 | llm_provider = get_llm_provider(api_key=api_key) 34 | 35 | llm_request_data = { 36 | "model": st.session_state.get('model', 'default'), 37 | "temperature": st.session_state.get('temperature', 0.7), 38 | "max_tokens": st.session_state.get('max_tokens', 2000), 39 | "top_p": 1, 40 | "frequency_penalty": 0, 41 | "presence_penalty": 0, 42 | "messages": [ 43 | { 44 | "role": "system", 45 | "content": "You are an expert code generator." 
46 | }, 47 | { 48 | "role": "user", 49 | "content": prompt 50 | } 51 | ] 52 | } 53 | 54 | try: 55 | response = llm_provider.send_request(llm_request_data) 56 | logger.debug(f"LLM response status code: {response.status_code}") 57 | logger.debug(f"LLM response content: {response.text[:500]}...") # Log first 500 characters of response 58 | 59 | if response.status_code == 200: 60 | response_data = llm_provider.process_response(response) 61 | if "choices" in response_data and response_data["choices"]: 62 | generated_code = response_data["choices"][0]["message"]["content"] 63 | return generated_code.strip() 64 | else: 65 | return "Error: Unexpected response format from the language model." 66 | else: 67 | return f"Error: Received status code {response.status_code} from the language model API." 68 | except Exception as e: 69 | logger.error(f"Error generating code: {str(e)}", exc_info=True) 70 | return f"Error generating code: {str(e)}" 71 | 72 | code_generator_tool = ToolBaseModel( 73 | name="generate_code", 74 | description="Generates code for a specified feature in a given programming language.", 75 | title="Code Generator", 76 | file_name="code_generator.py", 77 | content=inspect.getsource(generate_code), 78 | function=generate_code, 79 | ) 80 | 81 | def get_tool(): 82 | return code_generator_tool -------------------------------------------------------------------------------- /AutoGroq/tools/code_test.py: -------------------------------------------------------------------------------- 1 | # tools/code_test.py 2 | 3 | import inspect 4 | import subprocess 5 | import tempfile 6 | from models.tool_base_model import ToolBaseModel 7 | 8 | def test_code(language: str, code: str, test_cases: str) -> str: 9 | """ 10 | Tests the given code with provided test cases. 11 | 12 | Args: 13 | language (str): The programming language of the code (e.g., "Python", "JavaScript"). 14 | code (str): The code to be tested. 15 | test_cases (str): A string containing test cases, each on a new line. 16 | 17 | Returns: 18 | str: The test results as a string. 19 | """ 20 | if language.lower() != "python": 21 | return f"Testing for {language} is not supported yet." 22 | 23 | with tempfile.NamedTemporaryFile(mode='w', suffix='.py', delete=False) as temp_file: 24 | temp_file.write(code) 25 | temp_file.write("\n\n# Test cases\n") 26 | temp_file.write(test_cases) 27 | temp_file_name = temp_file.name 28 | 29 | try: 30 | result = subprocess.run(['python', temp_file_name], capture_output=True, text=True, timeout=10) 31 | if result.returncode == 0: 32 | return f"Tests passed successfully.\nOutput:\n{result.stdout}" 33 | else: 34 | return f"Tests failed.\nError:\n{result.stderr}" 35 | except subprocess.TimeoutExpired: 36 | return "Test execution timed out." 
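# Any other failure (e.g., a syntax error in the submitted code or test cases)
# is reported to the caller as a string rather than raised.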
37 | except Exception as e: 38 | return f"An error occurred during testing: {str(e)}" 39 | 40 | code_test_tool = ToolBaseModel( 41 | name="test_code", 42 | description="Tests the given code with provided test cases.", 43 | title="Code Tester", 44 | file_name="code_test.py", 45 | content=inspect.getsource(test_code), 46 | function=test_code, 47 | ) 48 | 49 | def get_tool(): 50 | return code_test_tool 51 | -------------------------------------------------------------------------------- /AutoGroq/tools/fetch_web_content.py: -------------------------------------------------------------------------------- 1 | # tools/fetch_web_content.py 2 | 3 | import inspect 4 | import json 5 | import logging 6 | import requests 7 | 8 | from bs4 import BeautifulSoup 9 | from models.tool_base_model import ToolBaseModel 10 | from urllib.parse import urlparse, urlunparse 11 | 12 | 13 | def fetch_web_content(url: str) -> dict: 14 | """ 15 | Fetches the text content from a website. 16 | 17 | Args: 18 | url (str): The URL of the website. 19 | 20 | Returns: 21 | dict: A dictionary containing the status, URL, and content (or error message). 22 | """ 23 | try: 24 | cleaned_url = clean_url(url) 25 | logging.info(f"Fetching content from cleaned URL: {cleaned_url}") 26 | 27 | headers = { 28 | 'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/91.0.4472.124 Safari/537.36' 29 | } 30 | response = requests.get(cleaned_url, headers=headers, timeout=10) 31 | response.raise_for_status() 32 | 33 | logging.info(f"Response status code: {response.status_code}") 34 | logging.info(f"Response headers: {response.headers}") 35 | 36 | soup = BeautifulSoup(response.text, "html.parser") 37 | 38 | logging.info(f"Parsed HTML structure: {soup.prettify()[:500]}...") # Log first 500 characters of prettified HTML 39 | 40 | # Try to get content from article tags first 41 | article_content = soup.find('article') 42 | if article_content: 43 | content = article_content.get_text(strip=True) 44 | else: 45 | # If no article tag, fall back to body content 46 | body_content = soup.body 47 | if body_content: 48 | content = body_content.get_text(strip=True) 49 | else: 50 | raise ValueError("No content found in the webpage") 51 | 52 | logging.info(f"Extracted text content (first 500 chars): {content[:500]}...") 53 | result = { 54 | "status": "success", 55 | "url": cleaned_url, 56 | "content": content 57 | } 58 | print(f"DEBUG: fetch_web_content result: {str(result)[:500]}...") # Debug print 59 | return result 60 | 61 | except requests.RequestException as e: 62 | error_message = f"Error fetching content from {cleaned_url}: {str(e)}" 63 | logging.error(error_message) 64 | return { 65 | "status": "error", 66 | "url": cleaned_url, 67 | "message": error_message 68 | } 69 | except Exception as e: 70 | error_message = f"Unexpected error while fetching content from {cleaned_url}: {str(e)}" 71 | logging.error(error_message) 72 | return { 73 | "status": "error", 74 | "url": cleaned_url, 75 | "message": error_message 76 | } 77 | 78 | # Create the ToolBaseModel instance 79 | fetch_web_content_tool = ToolBaseModel( 80 | name="fetch_web_content", 81 | description="Fetches the text content from a website.", 82 | title="Fetch Web Content", 83 | file_name="fetch_web_content.py", 84 | content=inspect.getsource(fetch_web_content), 85 | function=fetch_web_content, 86 | ) 87 | 88 | # Function to get the tool 89 | def get_tool(): 90 | return fetch_web_content_tool 91 | 92 | 93 | def clean_url(url: str) -> str: 94 | """ 95 | 
Clean and validate the URL. 96 | 97 | Args: 98 | url (str): The URL to clean. 99 | 100 | Returns: 101 | str: The cleaned URL. 102 | """ 103 | url = url.strip().strip("'\"") 104 | if not url.startswith(('http://', 'https://')): 105 | url = 'https://' + url 106 | parsed = urlparse(url) 107 | return urlunparse(parsed) 108 | -------------------------------------------------------------------------------- /AutoGroq/utils/agent_utils.py: -------------------------------------------------------------------------------- 1 | # utils/agent_utils.py 2 | 3 | import datetime 4 | import streamlit as st 5 | 6 | from configs.config import LLM_PROVIDER 7 | 8 | from utils.text_utils import normalize_config 9 | 10 | 11 | def create_agent_data(agent): 12 | expert_name = agent['name'] 13 | description = agent.get('description', '') 14 | current_timestamp = datetime.datetime.now().isoformat() 15 | provider = agent.get('config', {}).get('provider', st.session_state.get('provider', LLM_PROVIDER)) 16 | 17 | # Use normalize_config to get the standardized config 18 | normalized_config = normalize_config(agent, expert_name) 19 | 20 | autogen_agent_data = { 21 | "name": normalized_config['name'], 22 | "description": description, 23 | "config": normalized_config, 24 | "tools": agent.get('tools', []), 25 | "role": agent.get('role', normalized_config['name']), 26 | "goal": agent.get('goal', f"Assist with tasks related to {description}"), 27 | "backstory": agent.get('backstory', f"As an AI assistant, I specialize in {description}"), 28 | "provider": provider, 29 | "model": st.session_state.get('model', 'default') 30 | } 31 | 32 | crewai_agent_data = { 33 | "name": normalized_config['name'], 34 | "description": description, 35 | "verbose": True, 36 | "allow_delegation": True 37 | } 38 | 39 | return autogen_agent_data, crewai_agent_data 40 | -------------------------------------------------------------------------------- /AutoGroq/utils/api_utils.py: -------------------------------------------------------------------------------- 1 | # utils/api_utils.py 2 | 3 | import importlib 4 | import os 5 | import requests 6 | import streamlit as st 7 | import time 8 | 9 | from configs.config import FALLBACK_MODEL_TOKEN_LIMITS, LLM_PROVIDER, RETRY_DELAY, RETRY_TOKEN_LIMIT 10 | 11 | 12 | def display_api_key_input(provider=None): 13 | if provider is None: 14 | provider = st.session_state.get('provider', LLM_PROVIDER) 15 | api_key_env_var = f"{provider.upper()}_API_KEY" 16 | api_key = os.environ.get(api_key_env_var) 17 | 18 | if api_key is None: 19 | st.session_state.warning_placeholder.warning(f"{provider.upper()} API Key not found. 
Please enter your API key, or select a different provider.") 20 | api_key = st.text_input(f"Enter your {provider.upper()} API Key:", type="password", key=f"api_key_input_{provider}") 21 | if api_key: 22 | st.session_state[api_key_env_var] = api_key 23 | os.environ[api_key_env_var] = api_key 24 | # st.success(f"{provider.upper()} API Key entered successfully.") 25 | st.session_state.warning_placeholder.empty() 26 | return api_key 27 | 28 | 29 | def fetch_available_models(provider=None): 30 | if provider is None: 31 | provider = st.session_state.get('provider', LLM_PROVIDER) 32 | api_key = get_api_key(provider) 33 | llm_provider = get_llm_provider(api_key=api_key, provider=provider) 34 | try: 35 | models = llm_provider.get_available_models() 36 | st.session_state.available_models = models 37 | return models 38 | except Exception as e: 39 | st.error(f"Failed to fetch available models for {provider}: {str(e)}") 40 | return FALLBACK_MODEL_TOKEN_LIMITS.get(provider, {}) 41 | 42 | 43 | def get_api_key(provider=None): 44 | if provider is None: 45 | provider = st.session_state.get('provider', LLM_PROVIDER) 46 | api_key_env_var = f"{provider.upper()}_API_KEY" 47 | api_key = os.environ.get(api_key_env_var) 48 | if api_key is None: 49 | api_key = st.session_state.get(api_key_env_var) 50 | return api_key 51 | 52 | 53 | def get_llm_provider(api_key=None, api_url=None, provider=None): 54 | if provider is None: 55 | provider = st.session_state.get('provider', LLM_PROVIDER) 56 | provider_module = importlib.import_module(f"llm_providers.{provider}_provider") 57 | provider_class = getattr(provider_module, f"{provider.capitalize()}Provider") 58 | if api_url is None: 59 | api_url = st.session_state.get(f'{provider.upper()}_API_URL') 60 | return provider_class(api_url=api_url, api_key=api_key) 61 | 62 | 63 | def make_api_request(url, data, headers, api_key): 64 | time.sleep(RETRY_DELAY) # Throttle the request to ensure at least 2 seconds between calls 65 | try: 66 | if not api_key: 67 | llm = LLM_PROVIDER.upper() 68 | raise ValueError(f"{llm}_API_KEY not found. Please enter your API key.") 69 | headers["Authorization"] = f"Bearer {api_key}" 70 | response = requests.post(url, json=data, headers=headers) 71 | if response.status_code == 200: 72 | return response.json() 73 | elif response.status_code == 429: 74 | error_message = response.json().get("error", {}).get("message", "") 75 | st.error(f"Rate limit reached for the current model. If you click 'Update' again, we'll retry with a reduced token count. Or you can try selecting a different model.") 76 | st.error(f"Error details: {error_message}") 77 | return None 78 | else: 79 | print(f"Error: API request failed with status {response.status_code}, response: {response.text}") 80 | return None 81 | except requests.RequestException as e: 82 | print(f"Error: Request failed {e}") 83 | return None 84 | 85 | 86 | def send_request_with_retry(url, data, headers, api_key): 87 | response = make_api_request(url, data, headers, api_key) 88 | if response is None: 89 | # Add a retry button 90 | if st.button("Retry with decreased token limit"): 91 | # Update the token limit in the request data 92 | data["max_tokens"] = RETRY_TOKEN_LIMIT 93 | # Retry the request with the decreased token limit 94 | print(f"Retrying the request with decreased token limit.") 95 | print(f"URL: {url}") 96 | print(f"Retry token limit: {RETRY_TOKEN_LIMIT}") 97 | response = make_api_request(url, data, headers, api_key) 98 | if response is not None: 99 | print(f"Retry successful. 
Response: {response}") 100 | else: 101 | print("Retry failed.") 102 | return response 103 | 104 | 105 | def set_llm_provider_title(): 106 | # "What's life without whimsy?" ~Sheldon Cooper 107 | if LLM_PROVIDER == "groq": 108 | st.title("AutoGroq™") 109 | elif LLM_PROVIDER == "ollama": 110 | st.title("Auto̶G̶r̶o̶qOllama") 111 | elif LLM_PROVIDER == "lmstudio": 112 | st.title("Auto̶G̶r̶o̶qLM_Studio") 113 | elif LLM_PROVIDER == "openai": 114 | st.title("Auto̶G̶r̶o̶qChatGPT") 115 | elif LLM_PROVIDER == "anthropic": 116 | st.title("Auto̶G̶r̶o̶qClaude") 117 | 118 | -------------------------------------------------------------------------------- /AutoGroq/utils/auth_utils.py: -------------------------------------------------------------------------------- 1 | 2 | import os 3 | import streamlit as st 4 | 5 | from configs.config import LLM_PROVIDER 6 | from utils.api_utils import display_api_key_input 7 | 8 | 9 | def check_api_key(provider=None): 10 | # Ensure we have a warning placeholder 11 | if 'warning_placeholder' not in st.session_state: 12 | st.session_state.warning_placeholder = st.empty() 13 | 14 | # Check for API key of the default provider on initial load 15 | if 'initial_api_check' not in st.session_state: 16 | st.session_state.initial_api_check = True 17 | default_provider = st.session_state.get('provider', LLM_PROVIDER) 18 | if not check_api_key(default_provider): 19 | display_api_key_input(default_provider) 20 | return True 21 | 22 | 23 | def get_api_url(): 24 | api_url_env_var = f"{LLM_PROVIDER.upper()}_API_URL" 25 | api_url = os.environ.get(api_url_env_var) 26 | if api_url is None: 27 | api_url = globals().get(api_url_env_var) 28 | if api_url is None: 29 | if api_url_env_var not in st.session_state: 30 | api_url = st.text_input(f"Enter the {LLM_PROVIDER.upper()} API URL:", type="password", key=f"{LLM_PROVIDER}_api_url_input") 31 | if api_url: 32 | st.session_state[api_url_env_var] = api_url 33 | st.success("API URL entered successfully.") 34 | else: 35 | st.warning(f"Please enter the {LLM_PROVIDER.upper()} API URL to use the app.") 36 | else: 37 | api_url = st.session_state.get(api_url_env_var) 38 | return api_url 39 | -------------------------------------------------------------------------------- /AutoGroq/utils/db_utils.py: -------------------------------------------------------------------------------- 1 | # db_utils.py 2 | 3 | import datetime 4 | import json 5 | import sqlite3 6 | import streamlit as st 7 | import traceback 8 | import uuid 9 | 10 | from configs.config import FRAMEWORK_DB_PATH 11 | 12 | from utils.text_utils import normalize_config 13 | from utils.workflow_utils import get_workflow_from_agents 14 | 15 | 16 | def export_to_autogen(): 17 | db_path = FRAMEWORK_DB_PATH 18 | print(f"Database path: {db_path}") 19 | if db_path: 20 | export_data(db_path) 21 | else: 22 | st.warning("Please provide a valid database path in config.py.") 23 | 24 | 25 | def export_data(db_path): 26 | print(f"Exporting data to: {db_path}") 27 | 28 | if db_path: 29 | try: 30 | conn = sqlite3.connect(db_path) 31 | cursor = conn.cursor() 32 | print("Connected to the database successfully.") 33 | 34 | agents = st.session_state.agents 35 | print(f"Number of agents: {len(agents)}") 36 | 37 | for index, agent in enumerate(agents): 38 | try: 39 | print(f"Processing agent {index + 1}: {agent.name}") 40 | 41 | current_time = datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S") 42 | 43 | # Normalize the config 44 | normalized_config = normalize_config(agent.to_dict(), agent.name) 45 | 46 | agent_data 
= ( 47 | None, # id (AUTO INCREMENT) 48 | current_time, # created_at 49 | current_time, # updated_at 50 | 'guestuser@gmail.com', # user_id 51 | '0.0.1', # version 52 | 'assistant', # type 53 | json.dumps(normalized_config), # config (JSON) 54 | normalized_config['system_message'] # task_instruction 55 | ) 56 | 57 | print(f"Inserting agent data: {agent_data}") 58 | 59 | cursor.execute(""" 60 | INSERT INTO agent (id, created_at, updated_at, user_id, version, type, config, task_instruction) 61 | VALUES (?, ?, ?, ?, ?, ?, ?, ?) 62 | """, agent_data) 63 | 64 | print(f"Inserted agent: {agent.name}") 65 | 66 | except Exception as e: 67 | print(f"Error processing agent {index + 1}: {str(e)}") 68 | print(f"Agent data: {agent.__dict__}") 69 | traceback.print_exc() 70 | 71 | # Handle skills/tools 72 | for tool in st.session_state.tool_models: 73 | try: 74 | current_time = datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S") 75 | skill_data = ( 76 | None, # id (AUTO INCREMENT) 77 | current_time, # created_at 78 | current_time, # updated_at 79 | 'guestuser@gmail.com', # user_id 80 | '0.0.1', # version 81 | tool.name, 82 | tool.content, 83 | tool.description, 84 | json.dumps(tool.secrets) if hasattr(tool, 'secrets') else '{}', 85 | json.dumps(tool.libraries) if hasattr(tool, 'libraries') else '[]' 86 | ) 87 | cursor.execute(""" 88 | INSERT INTO skill (id, created_at, updated_at, user_id, version, name, content, description, secrets, libraries) 89 | VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?, ?) 90 | """, skill_data) 91 | print(f"Inserted skill: {tool.name}") 92 | except Exception as e: 93 | print(f"Error inserting skill {tool.name}: {str(e)}") 94 | traceback.print_exc() 95 | 96 | # Handle the workflow 97 | try: 98 | workflow_data, _ = get_workflow_from_agents(agents) 99 | current_time = datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S") 100 | workflow_insert_data = ( 101 | None, # id (AUTO INCREMENT) 102 | current_time, # created_at 103 | current_time, # updated_at 104 | 'guestuser@gmail.com', # user_id 105 | '0.0.1', # version 106 | workflow_data.get('name', 'AutoGroq Workflow'), 107 | workflow_data.get('description', 'Workflow auto-generated by AutoGroq.'), 108 | workflow_data.get('type', 'autonomous'), # Default to 'autonomous' if not specified 109 | workflow_data.get('summary_method', 'last')[:4], # VARCHAR(4) 110 | json.dumps(workflow_data.get('sample_tasks', [])) 111 | ) 112 | cursor.execute(""" 113 | INSERT INTO workflow (id, created_at, updated_at, user_id, version, name, description, type, summary_method, sample_tasks) 114 | VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?, ?) 
115 | """, workflow_insert_data) 116 | print("Inserted workflow data.") 117 | except Exception as e: 118 | print(f"Error inserting workflow: {str(e)}") 119 | traceback.print_exc() 120 | 121 | conn.commit() 122 | print("Changes committed to the database.") 123 | 124 | conn.close() 125 | print("Database connection closed.") 126 | 127 | st.success("Data exported to Autogen successfully!") 128 | except sqlite3.Error as e: 129 | st.error(f"Error exporting data to Autogen: {str(e)}") 130 | print(f"Error exporting data to Autogen: {str(e)}") 131 | traceback.print_exc() 132 | 133 | 134 | def get_table_info(table_name): 135 | conn = sqlite3.connect(FRAMEWORK_DB_PATH) 136 | cursor = conn.cursor() 137 | cursor.execute(f"PRAGMA table_info({table_name})") 138 | columns = cursor.fetchall() 139 | conn.close() 140 | return columns 141 | 142 | 143 | def insert_or_get_skill(cursor, tool): 144 | tool_name = tool.name if hasattr(tool, 'name') else tool.get('name', '') 145 | cursor.execute("SELECT id FROM skill WHERE name = ?", (tool_name,)) 146 | result = cursor.fetchone() 147 | if result: 148 | return result[0] 149 | else: 150 | print(f"Inserting new skill: {tool}") 151 | 152 | skill_data = ( 153 | None, # id is INTEGER PRIMARY KEY, let SQLite auto-increment 154 | datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S"), # created_at 155 | datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S"), # updated_at 156 | 'default', # user_id 157 | '0.0.1', # version 158 | str(tool_name), 159 | str(tool.content if hasattr(tool, 'content') else tool.get('content', '')), 160 | str(tool.description if hasattr(tool, 'description') else tool.get('description', '')), 161 | json.dumps(tool.secrets if hasattr(tool, 'secrets') else tool.get('secrets', {})), 162 | json.dumps(tool.libraries if hasattr(tool, 'libraries') else tool.get('libraries', [])) 163 | ) 164 | 165 | print(f"Skill data to be inserted: {skill_data}") 166 | 167 | try: 168 | cursor.execute(""" 169 | INSERT INTO skill (id, created_at, updated_at, user_id, version, name, content, description, secrets, libraries) 170 | VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?, ?) 171 | """, skill_data) 172 | return cursor.lastrowid 173 | except sqlite3.Error as e: 174 | print(f"SQLite error: {e}") 175 | print("Data types:") 176 | for i, item in enumerate(skill_data): 177 | print(f" {i}: {type(item)}") 178 | raise 179 | 180 | 181 | def insert_or_get_model(cursor, model_config): 182 | cursor.execute("SELECT id FROM model WHERE model = ?", (model_config['model'],)) 183 | result = cursor.fetchone() 184 | if result: 185 | return result[0] 186 | else: 187 | model_data = ( 188 | str(uuid.uuid4()), # id 189 | datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S"), # created_at 190 | datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S"), # updated_at 191 | 'guestuser@gmail.com', # user_id 192 | '0.0.1', # version 193 | model_config['model'], 194 | model_config.get('api_key'), 195 | model_config.get('base_url'), 196 | model_config.get('api_type', '')[:6], # VARCHAR(6) 197 | model_config.get('api_version'), 198 | model_config.get('description', '') 199 | ) 200 | cursor.execute(""" 201 | INSERT INTO model (id, created_at, updated_at, user_id, version, model, api_key, base_url, api_type, api_version, description) 202 | VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?) 
203 | """, model_data) 204 | return cursor.lastrowid 205 | 206 | 207 | def insert_workflow(cursor, workflow_data): 208 | workflow_insert_data = ( 209 | None, # id is INTEGER PRIMARY KEY, let SQLite auto-increment 210 | datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S"), # created_at 211 | datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S"), # updated_at 212 | 'guestuser@gmail.com', # user_id (matching existing entries) 213 | '0.0.1', # version 214 | workflow_data.get('name', 'AutoGroq Workflow'), 215 | workflow_data.get('description', 'Workflow auto-generated by AutoGroq.'), 216 | workflow_data.get('type', 'groupchat')[:10], # VARCHAR(10) 217 | workflow_data.get('summary_method', 'last')[:4], # VARCHAR(4) 218 | json.dumps(workflow_data.get('sample_tasks', [])) 219 | ) 220 | print(f"Inserting workflow data: {workflow_insert_data}") 221 | try: 222 | cursor.execute(""" 223 | INSERT INTO workflow (id, created_at, updated_at, user_id, version, name, description, type, summary_method, sample_tasks) 224 | VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?, ?) 225 | """, workflow_insert_data) 226 | return cursor.lastrowid 227 | except sqlite3.Error as e: 228 | print(f"SQLite error: {e}") 229 | print("Data types:") 230 | for i, item in enumerate(workflow_insert_data): 231 | print(f" {i}: {type(item)}") 232 | raise 233 | 234 | 235 | def sql_to_db(sql: str, params: tuple = None): 236 | try: 237 | conn = sqlite3.connect(FRAMEWORK_DB_PATH) 238 | cursor = conn.cursor() 239 | print("Connected to the database successfully.") 240 | if params: 241 | cursor.execute(sql, params) 242 | else: 243 | cursor.execute(sql) 244 | conn.commit() 245 | print("SQL executed successfully.") 246 | except sqlite3.Error as e: 247 | print(f"Error executing SQL: {str(e)}") 248 | print(f"SQL: {sql}") 249 | print(f"Params: {params}") 250 | raise 251 | finally: 252 | if conn: 253 | conn.close() 254 | print("Database connection closed.") 255 | 256 | 257 | #FUTURE functions for exporting to new Autogen Studio schema: 258 | 259 | # def create_or_update_agent(agent: dict, db_path: str): 260 | # with sqlite3.connect(db_path) as conn: 261 | # cursor = conn.cursor() 262 | # cursor.execute(""" 263 | # INSERT OR REPLACE INTO Agent (id, skills, created_at, updated_at, user_id, workflows, type, config, models) 264 | # VALUES (:id, :skills, :created_at, :updated_at, :user_id, :workflows, :type, :config, :models) 265 | # """, agent) 266 | # conn.commit() 267 | 268 | # def create_or_update_skill(skill: dict, db_path: str): 269 | # with sqlite3.connect(db_path) as conn: 270 | # cursor = conn.cursor() 271 | # cursor.execute(""" 272 | # INSERT OR REPLACE INTO Skill (id, created_at, updated_at, user_id, name, content, description, secrets, libraries) 273 | # VALUES (:id, :created_at, :updated_at, :user_id, :name, :content, :description, :secrets, :libraries) 274 | # """, skill) 275 | # conn.commit() 276 | 277 | # def create_or_update_workflow(workflow: dict, db_path: str): 278 | # with sqlite3.connect(db_path) as conn: 279 | # cursor = conn.cursor() 280 | # cursor.execute(""" 281 | # INSERT OR REPLACE INTO Workflow (id, agents, created_at, updated_at, user_id, name, description, type, summary_method) 282 | # VALUES (:id, :agents, :created_at, :updated_at, :user_id, :name, :description, :type, :summary_method) 283 | # """, workflow) 284 | # conn.commit() 285 | 286 | # def get_agent_by_id(agent_id: int, db_path: str) -> Optional[dict]: 287 | # with sqlite3.connect(db_path) as conn: 288 | # cursor = conn.cursor() 289 | # cursor.execute("SELECT * FROM Agent WHERE 
id = ?", (agent_id,)) 290 | # row = cursor.fetchone() 291 | # if row: 292 | # columns = [column[0] for column in cursor.description] 293 | # return dict(zip(columns, row)) 294 | # return None 295 | 296 | # def get_skill_by_id(skill_id: int, db_path: str) -> Optional[dict]: 297 | # with sqlite3.connect(db_path) as conn: 298 | # cursor = conn.cursor() 299 | # cursor.execute("SELECT * FROM Skill WHERE id = ?", (skill_id,)) 300 | # row = cursor.fetchone() 301 | # if row: 302 | # columns = [column[0] for column in cursor.description] 303 | # return dict(zip(columns, row)) 304 | # return None 305 | 306 | # def get_workflow_by_id(workflow_id: int, db_path: str) -> Optional[dict]: 307 | # with sqlite3.connect(db_path) as conn: 308 | # cursor = conn.cursor() 309 | # cursor.execute("SELECT * FROM Workflow WHERE id = ?", (workflow_id,)) 310 | # row = cursor.fetchone() 311 | # if row: 312 | # columns = [column[0] for column in cursor.description] 313 | # return dict(zip(columns, row)) 314 | # return None 315 | -------------------------------------------------------------------------------- /AutoGroq/utils/error_handling.py: -------------------------------------------------------------------------------- 1 | import logging 2 | 3 | def setup_logging(): 4 | logging.basicConfig(level=logging.INFO, format='%(asctime)s - %(levelname)s - %(message)s') 5 | 6 | def log_error(error_message): 7 | logging.error(error_message) 8 | 9 | def log_tool_execution(tool_name, args, result): 10 | logging.info(f"Executed tool: {tool_name} with args: {args}. Result: {result}") -------------------------------------------------------------------------------- /AutoGroq/utils/file_utils.py: -------------------------------------------------------------------------------- 1 | 2 | import datetime 3 | import io 4 | import json 5 | import streamlit as st 6 | import zipfile 7 | 8 | from utils.db_utils import normalize_config 9 | from utils.text_utils import sanitize_text 10 | from utils.workflow_utils import get_workflow_from_agents 11 | 12 | 13 | 14 | def create_workflow_data(workflow): 15 | # Sanitize the workflow name 16 | sanitized_workflow_name = sanitize_text(workflow["name"]) 17 | sanitized_workflow_name = sanitized_workflow_name.lower().replace(' ', '_') 18 | 19 | return workflow 20 | 21 | 22 | def create_zip_file(zip_buffer, file_data): 23 | with zipfile.ZipFile(zip_buffer, 'w', zipfile.ZIP_DEFLATED) as zip_file: 24 | for file_name, file_content in file_data.items(): 25 | zip_file.writestr(file_name, file_content) 26 | 27 | 28 | def regenerate_json_files_and_zip(): 29 | # Get the updated workflow data 30 | workflow_data, _ = get_workflow_from_agents(st.session_state.agents) 31 | workflow_data["updated_at"] = datetime.datetime.now().isoformat() 32 | 33 | # Regenerate the zip files 34 | autogen_zip_buffer, crewai_zip_buffer = zip_files_in_memory(workflow_data) 35 | 36 | # Update the zip buffers in the session state 37 | st.session_state.autogen_zip_buffer = autogen_zip_buffer 38 | st.session_state.crewai_zip_buffer = crewai_zip_buffer 39 | 40 | 41 | def regenerate_zip_files(): 42 | if "agents" in st.session_state: 43 | workflow_data, _ = get_workflow_from_agents(st.session_state.agents) 44 | 45 | workflow_data["updated_at"] = datetime.datetime.now().isoformat() 46 | autogen_zip_buffer, crewai_zip_buffer = zip_files_in_memory(workflow_data) 47 | st.session_state.autogen_zip_buffer = autogen_zip_buffer 48 | st.session_state.crewai_zip_buffer = crewai_zip_buffer 49 | print("Zip files regenerated.") 50 | else: 51 | print("No agents 
found. Skipping zip file regeneration.") 52 | 53 | 54 | def zip_files_in_memory(workflow_data): 55 | autogen_zip_buffer = io.BytesIO() 56 | crewai_zip_buffer = io.BytesIO() 57 | 58 | with zipfile.ZipFile(autogen_zip_buffer, 'w', zipfile.ZIP_DEFLATED) as autogen_zip: 59 | for agent in st.session_state.agents: 60 | agent_data = agent.to_dict() 61 | agent_name = agent_data['name'] 62 | agent_file_name = f"{agent_name}.json" 63 | autogen_zip.writestr(f"agents/{agent_file_name}", json.dumps(agent_data, indent=2)) 64 | 65 | # Add tools to the zip file 66 | for tool in st.session_state.tool_models: 67 | tool_data = tool.to_dict() 68 | tool_name = tool_data['name'] 69 | tool_file_name = f"{tool_name}.json" 70 | autogen_zip.writestr(f"tools/{tool_file_name}", json.dumps(tool_data, indent=2)) 71 | 72 | # Add workflow data 73 | autogen_zip.writestr("workflow.json", json.dumps(workflow_data, indent=2)) 74 | 75 | with zipfile.ZipFile(crewai_zip_buffer, 'w', zipfile.ZIP_DEFLATED) as crewai_zip: 76 | for agent in st.session_state.agents: 77 | agent_data = normalize_config(agent.to_dict(), agent.name) 78 | agent_name = agent_data['name'] 79 | crewai_agent_data = { 80 | "name": agent_name, 81 | "description": agent_data.get('description', ''), 82 | "verbose": True, 83 | "allow_delegation": True 84 | } 85 | crewai_zip.writestr(f"agents/{agent_name}.json", json.dumps(crewai_agent_data, indent=2)) 86 | 87 | autogen_zip_buffer.seek(0) 88 | crewai_zip_buffer.seek(0) 89 | 90 | return autogen_zip_buffer, crewai_zip_buffer 91 | -------------------------------------------------------------------------------- /AutoGroq/utils/sandbox.py: -------------------------------------------------------------------------------- 1 | import os 2 | import subprocess 3 | 4 | def execute_in_sandbox(tool_name, *args): 5 | # Create a temporary Python file with the tool execution 6 | with open('temp_tool_execution.py', 'w') as f: 7 | f.write(f"from tools.{tool_name} import {tool_name}\n") 8 | f.write(f"result = {tool_name}(*{args})\n") 9 | f.write("print(result)\n") 10 | 11 | # Execute the temporary file in a separate process with restricted permissions 12 | try: 13 | result = subprocess.run(['python', 'temp_tool_execution.py'], 14 | capture_output=True, text=True, timeout=10) 15 | return result.stdout.strip() 16 | finally: 17 | os.remove('temp_tool_execution.py') -------------------------------------------------------------------------------- /AutoGroq/utils/session_utils.py: -------------------------------------------------------------------------------- 1 | 2 | import streamlit as st 3 | 4 | from agents.code_developer import CodeDeveloperAgent 5 | from agents.code_tester import CodeTesterAgent 6 | from agents.web_content_retriever import WebContentRetrieverAgent 7 | from configs.config import LLM_PROVIDER, SUPPORTED_PROVIDERS 8 | from configs.config_sessions import DEFAULT_AGENT_CONFIG 9 | from configs.current_project import Current_Project 10 | from datetime import datetime 11 | from models.agent_base_model import AgentBaseModel 12 | from models.project_base_model import ProjectBaseModel 13 | from models.tool_base_model import ToolBaseModel 14 | from models.workflow_base_model import WorkflowBaseModel 15 | from utils.ui_utils import handle_user_request 16 | 17 | 18 | def create_default_agent(): 19 | return AgentBaseModel(**DEFAULT_AGENT_CONFIG) 20 | 21 | 22 | def initialize_session_variables(): 23 | 24 | if "agent_model" not in st.session_state: 25 | st.session_state.agent_model = create_default_agent() 26 | 27 | if "agent_models" 
not in st.session_state: 28 | st.session_state.agent_models = [] 29 | 30 | if "agents" not in st.session_state: 31 | st.session_state.agents = [] 32 | 33 | # Ensure built-in agents are always present 34 | built_in_agents = [ 35 | WebContentRetrieverAgent.create_default(), 36 | CodeDeveloperAgent.create_default(), 37 | CodeTesterAgent.create_default() 38 | ] 39 | 40 | # Add built-in agents if they're not already in the list 41 | for built_in_agent in built_in_agents: 42 | if not any(agent.name == built_in_agent.name for agent in st.session_state.agents): 43 | st.session_state.agents.append(built_in_agent) 44 | 45 | if "api_key" not in st.session_state: 46 | st.session_state.api_key = "" 47 | 48 | if "api_url" not in st.session_state: 49 | st.session_state.api_url = None 50 | 51 | if "autogen_zip_buffer" not in st.session_state: 52 | st.session_state.autogen_zip_buffer = None 53 | 54 | if "crewai_zip_buffer" not in st.session_state: 55 | st.session_state.crewai_zip_buffer = None 56 | 57 | if "current_project" not in st.session_state: 58 | st.session_state.current_project = Current_Project() 59 | 60 | if "discussion_history" not in st.session_state: 61 | st.session_state.discussion_history = "" 62 | 63 | if "last_agent" not in st.session_state: 64 | st.session_state.last_agent = "" 65 | 66 | if "last_comment" not in st.session_state: 67 | st.session_state.last_comment = "" 68 | 69 | if "max_tokens" not in st.session_state: 70 | st.session_state.max_tokens = 4096 71 | 72 | if "model" not in st.session_state: 73 | st.session_state.model = "default" 74 | 75 | if "most_recent_response" not in st.session_state: 76 | st.session_state.most_recent_response = "" 77 | 78 | if "previous_user_request" not in st.session_state: 79 | st.session_state.previous_user_request = "" 80 | 81 | if "project_model" not in st.session_state: 82 | st.session_state.project_model = ProjectBaseModel() 83 | 84 | if "provider" not in st.session_state: 85 | st.session_state.provider = LLM_PROVIDER 86 | 87 | if "reference_html" not in st.session_state: 88 | st.session_state.reference_html = {} 89 | 90 | if "reference_url" not in st.session_state: 91 | st.session_state.reference_url = "" 92 | 93 | if "rephrased_request" not in st.session_state: 94 | st.session_state.rephrased_request = "" 95 | 96 | if "response_text" not in st.session_state: 97 | st.session_state.response_text = "" 98 | 99 | if "show_edit" not in st.session_state: 100 | st.session_state.show_edit = False 101 | 102 | if "selected_tools" not in st.session_state: 103 | st.session_state.selected_tools = [] 104 | 105 | if "show_request_input" not in st.session_state: 106 | st.session_state.show_request_input = True 107 | 108 | if "temperature_slider" not in st.session_state: 109 | st.session_state.temperature_slider = 0.3 110 | 111 | if "tool_model" not in st.session_state: 112 | st.session_state.tool_model = ToolBaseModel( 113 | name="", 114 | description="", 115 | title="", 116 | file_name="", 117 | content="", 118 | id=None, 119 | created_at=None, 120 | updated_at=None, 121 | user_id=None, 122 | secrets=None, 123 | libraries=None, 124 | timestamp=None 125 | ) 126 | 127 | if "tool_models" not in st.session_state: 128 | st.session_state.tool_models = [] 129 | 130 | 131 | # if "tools" not in st.session_state: 132 | # st.session_state.tools = [] 133 | 134 | if "tool_functions" not in st.session_state: 135 | st.session_state.tool_functions = {} 136 | 137 | if "tool_name" not in st.session_state: 138 | st.session_state.tool_name = None 139 | 140 | if "tool_request" not 
in st.session_state: 141 | st.session_state.tool_request = "" 142 | 143 | if "tool_result_string" not in st.session_state: 144 | st.session_state.tool_result_string = "" 145 | 146 | if "top_p" not in st.session_state: 147 | st.session_state.top_p = 1 148 | 149 | if "uploaded_data" not in st.session_state: 150 | st.session_state.uploaded_data = None 151 | 152 | if "user_input" not in st.session_state: 153 | st.session_state.user_input = "" 154 | 155 | if "user_input_widget_auto_moderate" not in st.session_state: 156 | st.session_state.user_input_widget_auto_moderate = "" 157 | 158 | if st.session_state.get("user_request"): 159 | handle_user_request(st.session_state) 160 | 161 | if "whiteboard_content" not in st.session_state: 162 | st.session_state.whiteboard_content = "" 163 | 164 | if "workflow" not in st.session_state: 165 | st.session_state.workflow = WorkflowBaseModel( 166 | name="", 167 | created_at=datetime.now(), 168 | description="", 169 | agents=[], 170 | sender=None, 171 | receiver=None, 172 | type="", 173 | user_id="default", 174 | timestamp=datetime.now(), 175 | summary_method="" 176 | ) 177 | 178 | for provider in SUPPORTED_PROVIDERS: 179 | if f"{provider.upper()}_API_URL" not in st.session_state: 180 | st.session_state[f"{provider.upper()}_API_URL"] = None -------------------------------------------------------------------------------- /AutoGroq/utils/text_utils.py: -------------------------------------------------------------------------------- 1 | import re 2 | 3 | 4 | def normalize_config(config, agent_name): 5 | """Normalize the config dictionary to match the format of default entries.""" 6 | normalized = { 7 | "name": normalize_name(config.get('name', agent_name)), 8 | "human_input_mode": "NEVER", 9 | "max_consecutive_auto_reply": 25, 10 | "system_message": config.get('system_message', f"You are a helpful AI assistant that can act as {agent_name}."), 11 | "is_termination_msg": None, 12 | "code_execution_config": "none", 13 | "default_auto_reply": "", 14 | "description": "Assistant Agent", 15 | "llm_config": { 16 | "config_list": [], 17 | "temperature": 0, 18 | "cache_seed": None, 19 | "timeout": None, 20 | "max_tokens": 2048, 21 | "extra_body": None 22 | }, 23 | "admin_name": "Admin", 24 | "messages": [], 25 | "max_round": 100, 26 | "speaker_selection_method": "auto", 27 | "allow_repeat_speaker": True 28 | } 29 | 30 | return normalized 31 | 32 | 33 | def normalize_name(name): 34 | """Convert name to lowercase and replace spaces with underscores.""" 35 | return sanitize_text(name).lower().replace(' ', '_') 36 | 37 | 38 | def sanitize_text(text): 39 | # Remove non-ASCII characters 40 | text = re.sub(r'[^\x00-\x7F]+', '', text) 41 | # Remove non-alphanumeric characters except for standard punctuation 42 | text = re.sub(r'[^a-zA-Z0-9\s.,!?:;\'"-]+', '', text) 43 | return text -------------------------------------------------------------------------------- /AutoGroq/utils/tool_execution.py: -------------------------------------------------------------------------------- 1 | # utils/tool_execution.py 2 | 3 | import inspect 4 | import logging 5 | 6 | from utils.sandbox import execute_in_sandbox 7 | 8 | 9 | logging.basicConfig(level=logging.DEBUG) 10 | logger = logging.getLogger(__name__) 11 | 12 | 13 | def execute_tool(tool_name, function_map, *args, **kwargs): 14 | logger.debug(f"Attempting to execute tool: {tool_name}") 15 | logger.debug(f"Available tools: {list(function_map.keys())}") 16 | logger.debug(f"Args: {args}") 17 | logger.debug(f"Kwargs: {kwargs}") 18 | 19 | if 
tool_name not in function_map: 20 | raise ValueError(f"Tool '{tool_name}' not found in function map") 21 | 22 | tool_function = function_map[tool_name] 23 | logger.debug(f"Tool function: {tool_function}") 24 | 25 | try: 26 | result = tool_function(*args, **kwargs) 27 | logger.debug(f"Tool execution result: {str(result)[:500]}...") # Log the first 500 characters; coerce to str so non-string results don't raise 28 | return result 29 | except Exception as e: 30 | logger.error(f"Error executing tool {tool_name}: {str(e)}", exc_info=True) 31 | raise 32 | 33 | 34 | def get_tool_signature(tool_name, function_map): 35 | if tool_name not in function_map: 36 | raise ValueError(f"Tool '{tool_name}' not found in function map") 37 | 38 | tool_function = function_map[tool_name] 39 | return inspect.signature(tool_function) -------------------------------------------------------------------------------- /AutoGroq/utils/tool_utils.py: -------------------------------------------------------------------------------- 1 | 2 | import datetime 3 | import importlib 4 | import json 5 | import os 6 | import re 7 | import sqlite3 8 | import streamlit as st 9 | import uuid 10 | 11 | from models.tool_base_model import ToolBaseModel 12 | from prompts import get_generate_tool_prompt 13 | from utils.api_utils import get_api_key 14 | from utils.db_utils import sql_to_db 15 | from utils.file_utils import regenerate_zip_files 16 | from utils.ui_utils import get_llm_provider 17 | 18 | 19 | def create_tool_data(python_code): 20 | # Extract the function name from the Python code 21 | function_name_match = re.search(r"def\s+(\w+)\(", python_code) 22 | if function_name_match: 23 | function_name = function_name_match.group(1) 24 | else: 25 | function_name = "unnamed_function" 26 | 27 | # Extract the tool description from the docstring 28 | docstring_match = re.search(r'"""(.*?)"""', python_code, re.DOTALL) 29 | if docstring_match: 30 | tool_description = docstring_match.group(1).strip() 31 | else: 32 | tool_description = "No description available" 33 | 34 | # Get the current timestamp 35 | current_time = datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S") 36 | 37 | # Update st.session_state.tool_model with the tool data 38 | st.session_state.tool_model.name = function_name 39 | st.session_state.tool_model.description = tool_description 40 | st.session_state.tool_model.title = function_name 41 | st.session_state.tool_model.file_name = f"{function_name}.py" 42 | st.session_state.tool_model.content = python_code 43 | st.session_state.tool_model.user_id = "default" 44 | st.session_state.tool_model.created_at = current_time 45 | st.session_state.tool_model.updated_at = current_time 46 | st.session_state.tool_model.version = "0.0.1" 47 | 48 | 49 | secrets = [] 50 | libraries = [] 51 | 52 | # Simple regex to find import statements 53 | import_pattern = r'import\s+(\w+)' 54 | libraries = re.findall(import_pattern, python_code) 55 | 56 | # Simple regex to find potential API keys or secrets 57 | secret_pattern = r'([A-Z_]+_API_KEY|[A-Z_]+_SECRET)' 58 | secrets = re.findall(secret_pattern, python_code) 59 | 60 | st.session_state.tool_model.secrets = [{"secret": s, "value": None} for s in secrets] 61 | st.session_state.tool_model.libraries = libraries 62 | 63 | 64 | def export_tool_as_skill(tool_name: str, edited_skill: str): 65 | print(f"Exporting skill '{tool_name}'...") 66 | try: 67 | create_tool_data(edited_skill) 68 | print(f"Skill data: {st.session_state.tool_model.to_dict()}") 69 | current_time = datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S") 70 | skill_tuple = ( 71 | 
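# Note (descriptive comment, not in the original file): the field order of this
# tuple must mirror the column list of the INSERT INTO skill statement below
# (id, created_at, updated_at, user_id, version, name, content, description,
# secrets, libraries).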
str(uuid.uuid4()), # id (TEXT) 72 | current_time, # created_at (TEXT) 73 | current_time, # updated_at (TEXT) 74 | 'default', # user_id (TEXT) 75 | '0.0.1', # version (TEXT) 76 | tool_name, # name (TEXT) 77 | edited_skill, # content (TEXT) 78 | st.session_state.tool_model.description, # description (TEXT) 79 | json.dumps(st.session_state.tool_model.secrets), # secrets (TEXT) 80 | json.dumps(st.session_state.tool_model.libraries) # libraries (TEXT) 81 | ) 82 | print(f"Inserting skill data: {skill_tuple}") 83 | sql = """ 84 | INSERT INTO skill (id, created_at, updated_at, user_id, version, name, content, description, secrets, libraries) 85 | VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?, ?) 86 | """ 87 | sql_to_db(sql, skill_tuple) 88 | st.success(f"Skill '{tool_name}' exported to Autogen successfully!") 89 | except sqlite3.Error as e: 90 | st.error(f"Error exporting skill: {str(e)}") 91 | print(f"Error exporting skill: {str(e)}") 92 | print(f"Skill tuple: {skill_tuple}") 93 | 94 | 95 | def generate_tool(rephrased_tool_request): 96 | temperature_value = st.session_state.get('temperature', 0.1) 97 | max_tokens_value = st.session_state.get('max_tokens', 100) 98 | top_p_value = st.session_state.get('top_p', 1) 99 | llm_request_data = { 100 | "model": st.session_state.model, 101 | "temperature": st.session_state.temperature, 102 | "max_tokens": max_tokens_value, 103 | "top_p": top_p_value, 104 | "stop": "TERMINATE", 105 | "messages": [ 106 | { 107 | "role": "user", 108 | "content": get_generate_tool_prompt(rephrased_tool_request) 109 | } 110 | ] 111 | } 112 | api_key = get_api_key() 113 | llm_provider = get_llm_provider(api_key=api_key) 114 | response = llm_provider.send_request(llm_request_data) 115 | if response.status_code == 200: 116 | response_data = llm_provider.process_response(response) 117 | print(f"Response data: {response_data}") 118 | if "choices" in response_data and response_data["choices"]: 119 | proposed_tool = response_data["choices"][0]["message"]["content"].strip() 120 | match = re.search(r"def\s+(\w+)\(", proposed_tool) 121 | if match: 122 | tool_name = match.group(1) 123 | 124 | # Update the st.session_state.tool_model with the proposed tool data 125 | create_tool_data(proposed_tool) 126 | 127 | return proposed_tool, tool_name 128 | else: 129 | print("Error: Failed to extract tool name from the proposed tool.") 130 | return None, None 131 | return None, None 132 | 133 | 134 | def extract_tool_description(proposed_tool): 135 | docstring_match = re.search(r'"""(.*?)"""', proposed_tool, re.DOTALL) 136 | if docstring_match: 137 | return docstring_match.group(1).strip() 138 | else: 139 | return "No description available" 140 | 141 | 142 | def load_tool_functions(): 143 | st.session_state.tool_functions = {} 144 | st.session_state.tool_models = [] 145 | 146 | parent_directory = os.path.dirname(os.path.dirname(os.path.abspath(__file__))) 147 | tools_folder_path = os.path.join(parent_directory, 'tools') 148 | tool_files = [f for f in os.listdir(tools_folder_path) if f.endswith('.py') and f != '__init__.py'] 149 | 150 | for tool_file in tool_files: 151 | tool_name = os.path.splitext(tool_file)[0] 152 | try: 153 | tool_module = importlib.import_module(f"tools.{tool_name}") 154 | 155 | if hasattr(tool_module, 'get_tool'): 156 | tool = tool_module.get_tool() 157 | if isinstance(tool, ToolBaseModel): 158 | st.session_state.tool_models.append(tool) 159 | st.session_state.tool_functions[tool.name] = tool.function 160 | print(f"Loaded tool: {tool.name}") 161 | else: 162 | print(f"Warning: get_tool() in 
{tool_file} did not return a ToolBaseModel instance") 163 | else: 164 | print(f"Warning: {tool_file} does not have a get_tool() function") 165 | except Exception as e: 166 | print(f"Error loading tool from {tool_file}: {str(e)}") 167 | 168 | print(f"Loaded {len(st.session_state.tool_models)} tools.") 169 | 170 | # Debug: Print loaded tools 171 | for tool in st.session_state.tool_models: 172 | print(f"Loaded tool model: {tool.name}") 173 | for tool_name, tool_function in st.session_state.tool_functions.items(): 174 | print(f"Loaded tool function: {tool_name} -> {tool_function}") 175 | 176 | 177 | def populate_tool_models(): 178 | project_root = os.path.dirname(os.path.dirname(os.path.abspath(__file__))) 179 | tool_folder = os.path.join(project_root, "tools") 180 | tool_files = [f for f in os.listdir(tool_folder) if f.endswith(".py")] 181 | 182 | tool_models = [] 183 | for tool_file in tool_files: 184 | tool_name = os.path.splitext(tool_file)[0] 185 | tool_file_path = os.path.join(tool_folder, tool_file) 186 | with open(tool_file_path, 'r') as file: 187 | tool_data = file.read() 188 | create_tool_data(tool_data) 189 | tool_model = ToolBaseModel( 190 | name=st.session_state.tool_model.name, 191 | description=st.session_state.tool_model.description, 192 | title=st.session_state.tool_model.title, 193 | file_name=st.session_state.tool_model.file_name, 194 | content=st.session_state.tool_model.content, 195 | id=len(tool_models) + 1, 196 | created_at=datetime.datetime.now().isoformat(), 197 | updated_at=datetime.datetime.now().isoformat(), 198 | user_id=st.session_state.tool_model.user_id, 199 | secrets=st.session_state.tool_model.secrets, 200 | libraries=st.session_state.tool_model.libraries, 201 | timestamp=st.session_state.tool_model.timestamp 202 | ) 203 | tool_models.append(tool_model) 204 | 205 | st.session_state.tool_models = tool_models 206 | st.session_state.project_model.tools = tool_models 207 | 208 | 209 | def process_tool_request(): 210 | if st.session_state.tool_request and not st.session_state.get('tool_processed', False): 211 | tool_request = st.session_state.tool_request 212 | rephrased_tool_request = rephrase_tool(tool_request) 213 | if rephrased_tool_request: 214 | proposed_tool, tool_name = generate_tool(rephrased_tool_request) 215 | if proposed_tool: 216 | match = re.search(r"def\s+(\w+(?:_\w+)*)\(", proposed_tool) 217 | if match: 218 | tool_name = match.group(1) 219 | st.write(f"Proposed tool: {tool_name}") 220 | st.code(proposed_tool) 221 | 222 | with st.form(key=f"export_form_{tool_name}"): 223 | submit_export = st.form_submit_button("Export/Write") 224 | if submit_export: 225 | new_tool = ToolBaseModel( 226 | name=tool_name, 227 | description=extract_tool_description(proposed_tool), 228 | title=tool_name, 229 | file_name=f"{tool_name}.py", 230 | content=proposed_tool, 231 | id=len(st.session_state.tool_models) + 1, 232 | created_at=datetime.datetime.now().isoformat(), 233 | updated_at=datetime.datetime.now().isoformat(), 234 | user_id="default", 235 | secrets={}, 236 | libraries=[], 237 | timestamp=datetime.datetime.now().isoformat() 238 | ) 239 | st.session_state.tool_models.append(new_tool) 240 | st.session_state.selected_tools.append(tool_name) # Add this line 241 | export_tool_as_skill(tool_name, proposed_tool) 242 | st.success(f"Tool {tool_name} exported and added to the tool list!") 243 | st.session_state.show_tool_input = False 244 | st.session_state.tool_request = "" 245 | st.session_state.proposed_tool = None 246 | st.session_state.tool_name = None 247 | 
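# Flag the request as handled so the guard at the top of process_tool_request()
# ("... and not st.session_state.get('tool_processed', False)") skips
# regeneration on the rerun triggered just below.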
st.session_state.tool_processed = True 248 | st.experimental_rerun() 249 | else: 250 | st.error("Failed to extract tool name from the proposed tool.") 251 | else: 252 | st.error("No proposed tool generated.") 253 | 254 | 255 | def rephrase_tool(tool_request): 256 | print("Debug: Rephrasing tool: ", tool_request) 257 | temperature_value = st.session_state.get('temperature', 0.1) 258 | llm_request_data = { 259 | "model": st.session_state.model, 260 | "temperature": st.session_state.temperature, 261 | "max_tokens": st.session_state.max_tokens, 262 | "top_p": 1, 263 | "stop": "TERMINATE", 264 | "messages": [ 265 | { 266 | "role": "user", 267 | "content": f""" 268 | Act as a professional tool creator and rephrase the following tool request into an optimized prompt: 269 | 270 | tool request: "{tool_request}" 271 | 272 | Rephrased: 273 | """ 274 | } 275 | ] 276 | } 277 | api_key = get_api_key() 278 | llm_provider = get_llm_provider(api_key=api_key) 279 | response = llm_provider.send_request(llm_request_data) 280 | if response.status_code == 200: 281 | response_data = llm_provider.process_response(response) 282 | if "choices" in response_data and response_data["choices"]: 283 | rephrased = response_data["choices"][0]["message"]["content"].strip() 284 | print(f"Debug: Rephrased tool: {rephrased}") 285 | return rephrased 286 | return None 287 | 288 | 289 | def save_tool(tool_name, edited_tool): 290 | with open(f"{tool_name}.py", "w") as f: 291 | f.write(edited_tool) 292 | st.success(f"tool {tool_name} saved successfully!") 293 | 294 | 295 | def show_tools(): 296 | with st.expander("Tools"): 297 | selected_tools = [] 298 | select_all = st.checkbox("Select All", key="select_all_tools") 299 | for idx, tool_model in enumerate(st.session_state.tool_models): 300 | tool_name = tool_model.name 301 | if select_all: 302 | tool_checkbox = st.checkbox(f"Add {tool_name} tool to all agents", value=True, key=f"tool_{tool_name}_{idx}") 303 | else: 304 | tool_checkbox = st.checkbox(f"Add {tool_name} tool to all agents", value=tool_name in st.session_state.selected_tools, key=f"tool_{tool_name}_{idx}") 305 | if tool_checkbox: 306 | selected_tools.append(tool_name) 307 | 308 | if select_all: 309 | st.session_state.selected_tools = [tool_model.name for tool_model in st.session_state.tool_models] 310 | else: 311 | st.session_state.selected_tools = selected_tools 312 | 313 | # Update the 'tools' attribute of each agent with the selected tools 314 | for agent in st.session_state.agents: 315 | agent.tools = [tool_model for tool_model in st.session_state.tool_models if tool_model.name in st.session_state.selected_tools] 316 | 317 | regenerate_zip_files() 318 | 319 | if st.button("Add Tool", key="add_tool_button"): 320 | st.session_state.show_tool_input = True 321 | st.session_state.tool_request = "" 322 | st.session_state.tool_processed = False 323 | 324 | if st.session_state.get('show_tool_input'): 325 | tool_request = st.text_input("Need a new tool? 
Describe what it should do:", key="tool_request_input") 326 | if tool_request: 327 | st.session_state.tool_request = tool_request 328 | process_tool_request() 329 | -------------------------------------------------------------------------------- /AutoGroq/utils/ui_utils.py: -------------------------------------------------------------------------------- 1 | import datetime 2 | import json 3 | import os 4 | import pandas as pd 5 | import re 6 | import requests 7 | import streamlit as st 8 | import time 9 | 10 | import logging 11 | 12 | logging.basicConfig(level=logging.DEBUG) 13 | logger = logging.getLogger(__name__) 14 | 15 | from configs.config import (DEBUG, LLM_PROVIDER, MAX_RETRIES, 16 | FALLBACK_MODEL_TOKEN_LIMITS, RETRY_DELAY, SUPPORTED_PROVIDERS) 17 | 18 | from anthropic.types import Message 19 | from configs.current_project import Current_Project 20 | from models.agent_base_model import AgentBaseModel 21 | from models.workflow_base_model import WorkflowBaseModel 22 | from prompts import create_project_manager_prompt, get_agents_prompt, get_rephrased_user_prompt, get_moderator_prompt 23 | from tools.fetch_web_content import fetch_web_content 24 | from typing import Any, List, Dict, Tuple 25 | from utils.agent_utils import create_agent_data 26 | from utils.api_utils import fetch_available_models, get_api_key, get_llm_provider 27 | from utils.auth_utils import display_api_key_input 28 | from utils.db_utils import export_to_autogen 29 | from utils.file_utils import zip_files_in_memory 30 | from utils.workflow_utils import get_workflow_from_agents 31 | 32 | 33 | def create_agents(json_data: List[Dict[str, Any]]) -> Tuple[List[AgentBaseModel], List[Dict[str, Any]]]: 34 | autogen_agents = [] 35 | crewai_agents = [] 36 | 37 | for agent_data in json_data: 38 | expert_name = agent_data.get('expert_name', '') 39 | description = agent_data.get('description', '') 40 | 41 | if not expert_name: 42 | print("Missing agent name. 
Skipping...") 43 | continue 44 | 45 | autogen_agent_data, crewai_agent_data = create_agent_data({ 46 | "name": expert_name, 47 | "description": description, 48 | "role": agent_data.get('role', expert_name), 49 | "goal": agent_data.get('goal', f"Assist with tasks related to {description}"), 50 | "backstory": agent_data.get('backstory', f"As an AI assistant, I specialize in {description}") 51 | }) 52 | 53 | try: 54 | agent_model = AgentBaseModel( 55 | name=autogen_agent_data['name'], 56 | description=autogen_agent_data['description'], 57 | tools=autogen_agent_data.get('tools', []), 58 | config=autogen_agent_data.get('config', {}), 59 | role=autogen_agent_data['role'], 60 | goal=autogen_agent_data['goal'], 61 | backstory=autogen_agent_data['backstory'], 62 | provider=autogen_agent_data.get('provider', ''), 63 | model=autogen_agent_data.get('model', '') 64 | ) 65 | print(f"Created agent: {agent_model.name} with description: {agent_model.description}") 66 | autogen_agents.append(agent_model) 67 | crewai_agents.append(crewai_agent_data) 68 | except Exception as e: 69 | print(f"Error creating agent {expert_name}: {str(e)}") 70 | print(f"Agent data: {autogen_agent_data}") 71 | continue 72 | 73 | return autogen_agents, crewai_agents 74 | 75 | 76 | def create_project_manager(rephrased_text): 77 | print(f"Creating Project Manager") 78 | temperature_value = st.session_state.get('temperature', 0.1) 79 | llm_request_data = { 80 | "model": st.session_state.model, 81 | "temperature": st.session_state.temperature, 82 | "max_tokens": st.session_state.max_tokens, 83 | "top_p": 1, 84 | "stop": "TERMINATE", 85 | "messages": [ 86 | { 87 | "role": "user", 88 | "content": create_project_manager_prompt(rephrased_text) 89 | } 90 | ] 91 | } 92 | 93 | api_key = get_api_key() 94 | llm_provider = get_llm_provider(api_key=api_key) 95 | response = llm_provider.send_request(llm_request_data) 96 | 97 | if response is not None: 98 | response_data = llm_provider.process_response(response) 99 | if "choices" in response_data and response_data["choices"]: 100 | content = response_data["choices"][0]["message"]["content"] 101 | return content.strip() 102 | 103 | return None 104 | 105 | 106 | def display_discussion_and_whiteboard(): 107 | tabs = st.tabs(["Discussion", "Whiteboard", "History", "Deliverables", "Download", "Debug"]) 108 | discussion_history = get_discussion_history() 109 | 110 | with tabs[0]: 111 | # Display only the most recent agent response 112 | if 'most_recent_response' in st.session_state and st.session_state.most_recent_response: 113 | st.text_area("Most Recent Response", value=st.session_state.most_recent_response, height=400, key="discussion") 114 | else: 115 | st.text_area("Discussion", value="No responses yet.", height=400, key="discussion") 116 | 117 | with tabs[1]: 118 | # Extract code snippets from the full discussion history 119 | code_snippets = extract_code_from_response(discussion_history) 120 | 121 | # Display code snippets in the whiteboard, allowing editing 122 | new_whiteboard_content = st.text_area("Whiteboard (Code Snippets)", value=code_snippets, height=400, key="whiteboard") 123 | 124 | # Update the whiteboard content in the session state if it has changed 125 | if new_whiteboard_content != st.session_state.get('whiteboard_content', ''): 126 | st.session_state.whiteboard_content = new_whiteboard_content 127 | 128 | with tabs[2]: 129 | st.write(discussion_history) 130 | 131 | 132 | with tabs[3]: 133 | if "current_project" in st.session_state: 134 | current_project = 
st.session_state.current_project 135 | for index, deliverable in enumerate(current_project.deliverables): 136 | if deliverable["text"].strip(): # Check if the deliverable text is not empty 137 | checkbox_key = f"deliverable_{index}" 138 | done = st.checkbox( 139 | deliverable["text"], 140 | value=current_project.is_deliverable_complete(index), 141 | key=checkbox_key, 142 | on_change=update_deliverable_status, 143 | args=(index,) 144 | ) 145 | if done != deliverable["done"]: 146 | if done: 147 | current_project.mark_deliverable_phase_done(index, current_project.current_phase) 148 | else: 149 | current_project.deliverables[index]["done"] = False 150 | for phase in current_project.implementation_phases: 151 | current_project.deliverables[index]["phase"][phase] = False 152 | 153 | 154 | with tabs[4]: 155 | display_download_button() 156 | if st.button("Export to Autogen"): 157 | export_to_autogen() 158 | 159 | with tabs[5]: 160 | if DEBUG: 161 | if "project_model" in st.session_state: 162 | project_model = st.session_state.project_model 163 | with st.expander("Project Details"): 164 | st.write("ID:", project_model.id) 165 | st.write("Re-engineered Prompt:", project_model.re_engineered_prompt) 166 | st.write("Deliverables:", project_model.deliverables) 167 | st.write("Created At:", project_model.created_at) 168 | st.write("Updated At:", project_model.updated_at) 169 | st.write("User ID:", project_model.user_id) 170 | st.write("Name:", project_model.name) 171 | st.write("Description:", project_model.description) 172 | st.write("Status:", project_model.status) 173 | st.write("Due Date:", project_model.due_date) 174 | st.write("Priority:", project_model.priority) 175 | st.write("Tags:", project_model.tags) 176 | st.write("Attachments:", project_model.attachments) 177 | st.write("Notes:", project_model.notes) 178 | st.write("Collaborators:", project_model.collaborators) 179 | st.write("Workflows:", project_model.workflows) 180 | if project_model.tools: 181 | st.write("Tools:") 182 | for tool in project_model.tools: 183 | substring = "init" 184 | if not substring in tool.name: 185 | st.write(f"- {tool.name}") 186 | st.code(tool.content, language="python") 187 | else: 188 | st.write("Tools: []") 189 | 190 | 191 | if "project_model" in st.session_state and st.session_state.project_model.workflows: 192 | workflow_data = st.session_state.project_model.workflows[0] 193 | workflow = WorkflowBaseModel.from_dict({**workflow_data, 'settings': workflow_data.get('settings', {})}) 194 | with st.expander("Workflow Details"): 195 | st.write("ID:", workflow.id) 196 | st.write("Name:", workflow.name) 197 | st.write("Description:", workflow.description) 198 | 199 | # Display the agents in the workflow 200 | st.write("Agents:") 201 | for agent in workflow.receiver.groupchat_config["agents"]: 202 | st.write(f"- {agent['config']['name']}") 203 | 204 | st.write("Settings:", workflow.settings) 205 | st.write("Created At:", workflow.created_at) 206 | st.write("Updated At:", workflow.updated_at) 207 | st.write("User ID:", workflow.user_id) 208 | st.write("Type:", workflow.type) 209 | st.write("Summary Method:", workflow.summary_method) 210 | 211 | # Display sender details 212 | st.write("Sender:") 213 | st.write("- Type:", workflow.sender.type) 214 | st.write("- Config:", workflow.sender.config) 215 | st.write("- Timestamp:", workflow.sender.timestamp) 216 | st.write("- User ID:", workflow.sender.user_id) 217 | st.write("- Tools:", workflow.sender.tools) 218 | 219 | # Display receiver details 220 | st.write("Receiver:") 221 
| st.write("- Type:", workflow.receiver.type) 222 | st.write("- Config:", workflow.receiver.config) 223 | st.write("- Groupchat Config:", workflow.receiver.groupchat_config) 224 | st.write("- Timestamp:", workflow.receiver.timestamp) 225 | st.write("- User ID:", workflow.receiver.user_id) 226 | st.write("- Tools:", workflow.receiver.tools) 227 | st.write("- Agents:", [agent.to_dict() for agent in workflow.receiver.agents]) 228 | 229 | st.write("Timestamp:", workflow.timestamp) 230 | else: 231 | st.warning("No workflow data available.") 232 | 233 | 234 | if "agents" in st.session_state: 235 | with st.expander("Agent Details"): 236 | agent_names = ["Select one..."] + [agent.get('name', f"Agent {index + 1}") for index, agent in enumerate(st.session_state.agents)] 237 | selected_agent = st.selectbox("Select an agent:", agent_names) 238 | 239 | if selected_agent != "Select one...": 240 | agent_index = agent_names.index(selected_agent) - 1 241 | agent = st.session_state.agents[agent_index] 242 | 243 | st.subheader(selected_agent) 244 | st.write("ID:", agent.get('id')) 245 | st.write("Name:", agent.get('name')) 246 | st.write("Description:", agent.get('description')) 247 | 248 | # Display the selected tools for the agent 249 | st.write("Tools:", ", ".join(agent.get('tools', []))) 250 | 251 | st.write("Config:", agent.get('config')) 252 | st.write("Created At:", agent.get('created_at')) 253 | st.write("Updated At:", agent.get('updated_at')) 254 | st.write("User ID:", agent.get('user_id')) 255 | st.write("Workflows:", agent.get('workflows')) 256 | st.write("Type:", agent.get('type')) 257 | st.write("Models:", agent.get('models')) 258 | st.write("Verbose:", agent.get('verbose')) 259 | st.write("Allow Delegation:", agent.get('allow_delegation')) 260 | st.write("New Description:", agent.get('new_description')) 261 | st.write("Timestamp:", agent.get('timestamp')) 262 | else: 263 | st.warning("No agent data available.") 264 | 265 | if len(st.session_state.tool_models) > 0: 266 | with st.expander("Tool Details"): 267 | tool_names = ["Select one..."] + [tool.name for tool in st.session_state.tool_models] 268 | selected_tool = st.selectbox("Select a tool:", tool_names) 269 | 270 | if selected_tool != "Select one...": 271 | tool_index = tool_names.index(selected_tool) - 1 272 | tool = st.session_state.tool_models[tool_index] 273 | 274 | st.subheader(selected_tool) 275 | 276 | # Display tool details in a more visually appealing way 277 | col1, col2 = st.columns(2) 278 | 279 | with col1: 280 | st.markdown(f"**ID:** {tool.id}") 281 | st.markdown(f"**Name:** {tool.name}") 282 | st.markdown(f"**Created At:** {tool.created_at}") 283 | st.markdown(f"**Updated At:** {tool.updated_at}") 284 | st.markdown(f"**User ID:** {tool.user_id}") 285 | 286 | with col2: 287 | st.markdown(f"**Secrets:** {tool.secrets}") 288 | st.markdown(f"**Libraries:** {tool.libraries}") 289 | st.markdown(f"**File Name:** {tool.file_name}") 290 | st.markdown(f"**Timestamp:** {tool.timestamp}") 291 | st.markdown(f"**Title:** {tool.title}") 292 | 293 | st.markdown(f"**Description:** {tool.description}") 294 | 295 | # Display the tool's content in a code block 296 | st.markdown("**Content:**") 297 | st.code(tool.content, language="python") 298 | else: 299 | st.warning("No tool data available.") 300 | 301 | else: 302 | st.warning("Debugging disabled.") 303 | 304 | 305 | def display_download_button(): 306 | col1, col2 = st.columns(2) 307 | 308 | with col1: 309 | if st.session_state.get('autogen_zip_buffer') is not None: 310 | 
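# Descriptive note (not in the original file): the int(time.time()) suffix in each
# download button's key below appears intended to give the widget a fresh identity
# on every rerun, avoiding stale or duplicate widget keys when the buttons are
# re-rendered.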
st.download_button( 311 | label="Download Autogen Files", 312 | data=st.session_state.autogen_zip_buffer, 313 | file_name="autogen_files.zip", 314 | mime="application/zip", 315 | key=f"autogen_download_button_{int(time.time())}" 316 | ) 317 | else: 318 | st.warning("Autogen files are not available for download.") 319 | 320 | with col2: 321 | if st.session_state.get('crewai_zip_buffer') is not None: 322 | st.download_button( 323 | label="Download CrewAI Files", 324 | data=st.session_state.crewai_zip_buffer, 325 | file_name="crewai_files.zip", 326 | mime="application/zip", 327 | key=f"crewai_download_button_{int(time.time())}" 328 | ) 329 | else: 330 | st.warning("CrewAI files are not available for download.") 331 | 332 | 333 | def display_download_and_export_buttons(): 334 | display_download_button() 335 | if st.button("Export to Autogen"): 336 | export_to_autogen() 337 | 338 | 339 | def display_goal(): 340 | if "current_project" in st.session_state: 341 | current_project = st.session_state.current_project 342 | if current_project.re_engineered_prompt: 343 | st.expander("Goal").markdown(f"**OUR CURRENT GOAL:**\n\r {current_project.re_engineered_prompt}") 344 | 345 | 346 | def display_user_input(): 347 | user_input = st.text_area("Additional Input:", value=st.session_state.get("user_input", ""), key="user_input_widget", height=100, on_change=update_user_input) 348 | reference_url = st.text_input("URL:", key="reference_url_widget") 349 | 350 | if user_input: 351 | url_pattern = re.compile(r'http[s]?://(?:[a-zA-Z]|[0-9]|[$-_@.&+]|[!*\\(\\),]|(?:%[0-9a-fA-F][0-9a-fA-F]))+') 352 | url_match = url_pattern.search(user_input) 353 | if url_match: 354 | url = url_match.group() 355 | if "reference_html" not in st.session_state or url not in st.session_state.reference_html: 356 | html_content = fetch_web_content(url) 357 | if html_content: 358 | st.session_state.reference_html[url] = html_content 359 | else: 360 | st.warning("Failed to fetch HTML content.") 361 | else: 362 | st.session_state.reference_html = {} 363 | else: 364 | st.session_state.reference_html = {} 365 | else: 366 | st.session_state.reference_html = {} 367 | 368 | return user_input, reference_url 369 | 370 | 371 | def display_reset_and_upload_buttons(): 372 | col1, col2 = st.columns(2) 373 | with col1: 374 | if st.button("Reset", key="reset_button"): 375 | # Define the keys of session state variables to clear 376 | keys_to_reset = [ 377 | "rephrased_request", "discussion", "whiteboard", "user_request", 378 | "user_input", "agents", "zip_buffer", "crewai_zip_buffer", 379 | "autogen_zip_buffer", "uploaded_file_content", "discussion_history", 380 | "last_comment", "user_api_key", "reference_url" 381 | ] 382 | # Reset each specified key 383 | for key in keys_to_reset: 384 | if key in st.session_state: 385 | del st.session_state[key] 386 | # Additionally, explicitly reset user_input to an empty string 387 | st.session_state.user_input = "" 388 | st.session_state.show_begin_button = True 389 | st.experimental_rerun() 390 | 391 | with col2: 392 | uploaded_file = st.file_uploader("Upload a sample .csv of your data (optional)", type="csv") 393 | 394 | if uploaded_file is not None: 395 | try: 396 | # Attempt to read the uploaded file as a DataFrame 397 | df = pd.read_csv(uploaded_file).head(5) 398 | 399 | # Display the DataFrame in the app 400 | st.write("Data successfully uploaded and read as DataFrame:") 401 | st.dataframe(df) 402 | 403 | # Store the DataFrame in the session state 404 | st.session_state.uploaded_data = df 405 | 
except Exception as e: 406 | st.error(f"Error reading the file: {e}") 407 | 408 | 409 | def display_user_request_input(): 410 | if st.session_state.show_request_input: 411 | if st.session_state.get("previous_user_request") != st.session_state.get("user_request", ""): 412 | st.session_state.previous_user_request = st.session_state.get("user_request", "") 413 | if st.session_state.get("user_request", ""): 414 | handle_user_request(st.session_state) 415 | else: 416 | st.session_state.agents = [] 417 | st.session_state.show_request_input = False 418 | st.experimental_rerun() 419 | 420 | 421 | def extract_code_from_response(response): 422 | code_pattern = r"```(.*?)```" 423 | code_blocks = re.findall(code_pattern, response, re.DOTALL) 424 | 425 | html_pattern = r"<html.*?>.*?</html>" 426 | html_blocks = re.findall(html_pattern, response, re.DOTALL | re.IGNORECASE) 427 | 428 | js_pattern = r"<script.*?>.*?</script>" 429 | js_blocks = re.findall(js_pattern, response, re.DOTALL | re.IGNORECASE) 430 | 431 | css_pattern = r"<style.*?>.*?</style>" 432 | css_blocks = re.findall(css_pattern, response, re.DOTALL | re.IGNORECASE) 433 | 434 | all_code_blocks = code_blocks + html_blocks + js_blocks + css_blocks 435 | unique_code_blocks = list(set(all_code_blocks)) 436 | 437 | return "\n\n".join(unique_code_blocks) 438 | 439 | 440 | def extract_content(response: Any) -> str: 441 | if hasattr(response, 'content') and isinstance(response.content, list): 442 | # Anthropic-specific handling 443 | return response.content[0].text 444 | elif isinstance(response, requests.models.Response): 445 | # Groq and potentially other providers using requests.Response 446 | try: 447 | json_response = response.json() 448 | if 'choices' in json_response and json_response['choices']: 449 | return json_response['choices'][0]['message']['content'] 450 | except json.JSONDecodeError: 451 | print("Failed to decode JSON from response") 452 | return "" 453 | elif isinstance(response, dict): 454 | if 'choices' in response and response['choices']: 455 | return response['choices'][0]['message']['content'] 456 | elif 'content' in response: 457 | return response['content'] 458 | elif isinstance(response, str): 459 | return response 460 | print(f"Unexpected response format: {type(response)}") 461 | return "" 462 | 463 | 464 | def extract_json_objects(text: str) -> List[Dict]: 465 | objects = [] 466 | stack = [] 467 | start_index = 0 468 | for i, char in enumerate(text): 469 | if char == "{": 470 | if not stack: 471 | start_index = i 472 | stack.append(char) 473 | elif char == "}": 474 | if stack: 475 | stack.pop() 476 | if not stack: 477 | objects.append(text[start_index:i+1]) 478 | parsed_objects = [] 479 | for obj_str in objects: 480 | try: 481 | parsed_obj = json.loads(obj_str) 482 | parsed_objects.append(parsed_obj) 483 | except json.JSONDecodeError as e: 484 | print(f"Error parsing JSON object: {e}") 485 | print(f"JSON string: {obj_str}") 486 | return parsed_objects 487 | 488 | 489 | def get_agents_from_text(text: str) -> Tuple[List[AgentBaseModel], List[Dict[str, Any]]]: 490 | print("Getting agents from text...") 491 | 492 | instructions = get_agents_prompt() 493 | combined_content = f"{instructions}\n\nTeam of Experts:\n{text}" 494 | 495 | llm_request_data = { 496 | "model": st.session_state.model, 497 | "temperature": st.session_state.temperature, 498 | "max_tokens": st.session_state.max_tokens, 499 | "messages": [ 500 | {"role": "user", "content": combined_content} 501 | ] 502 | } 503 | 504 | api_key = get_api_key() 505 | llm_provider = get_llm_provider(api_key=api_key) 506 | 507 | 
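# Descriptive note (not in the original file): the provider is expected to reply
# with a JSON array of agent specs; the keys consumed downstream by create_agents()
# are, illustratively:
#   [{"expert_name": "Data Analyst", "description": "...", "role": "...",
#     "goal": "...", "backstory": "..."}]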
try: 508 | response = llm_provider.send_request(llm_request_data) 509 | print(f"Response type: {type(response)}") 510 | print(f"Response: {response}") 511 | 512 | content = extract_content(response) 513 | 514 | if not content: 515 | print("No content extracted from response.") 516 | return [], [] 517 | 518 | print(f"Extracted content: {content}") 519 | 520 | json_data = parse_json(content) 521 | 522 | if not json_data: 523 | print("Failed to parse JSON data.") 524 | return [], [] 525 | 526 | return create_agents(json_data) 527 | 528 | except Exception as e: 529 | print(f"Error in get_agents_from_text: {e}") 530 | return [], [] 531 | 532 | 533 | def get_discussion_history(): 534 | return st.session_state.discussion_history 535 | 536 | 537 | @st.cache_data(ttl=3600) # Cache the result for 1 hour 538 | def get_provider_models(provider=None): 539 | if provider is None: 540 | provider = st.session_state.get('provider', LLM_PROVIDER) 541 | return st.session_state.get('available_models') or FALLBACK_MODEL_TOKEN_LIMITS.get(provider, {}) 542 | 543 | 544 | def handle_user_request(session_state): 545 | print("Debug: Handling user request for session state: ", session_state) 546 | user_request = session_state.user_request 547 | max_retries = MAX_RETRIES 548 | retry_delay = RETRY_DELAY 549 | 550 | for retry in range(max_retries): 551 | try: 552 | print("Debug: Sending request to rephrase_prompt") 553 | model = session_state.model 554 | print(f"Debug: Model: {model}") 555 | rephrased_text = rephrase_prompt(user_request, model) 556 | print(f"Debug: Rephrased text: {rephrased_text}") 557 | if rephrased_text: 558 | session_state.rephrased_request = rephrased_text 559 | break 560 | else: 561 | print("Error: Failed to rephrase the user request.") 562 | st.warning("Failed to rephrase the user request. Please try again.") 563 | return 564 | except Exception as e: 565 | print(f"Error occurred in handle_user_request: {str(e)}") 566 | if retry < max_retries - 1: 567 | print(f"Retrying in {retry_delay} second(s)...") 568 | time.sleep(retry_delay) 569 | else: 570 | print("Max retries exceeded.") 571 | st.warning("An error occurred. Please try again.") 572 | return 573 | 574 | if "rephrased_request" not in session_state: 575 | st.warning("Failed to rephrase the user request. Please try again.") 576 | return 577 | 578 | session_state.project_model.description = session_state.user_request 579 | rephrased_text = session_state.rephrased_request 580 | session_state.project_model.set_re_engineered_prompt(rephrased_text) 581 | 582 | if "project_manager_output" not in session_state: 583 | project_manager_output = create_project_manager(rephrased_text) 584 | 585 | if not project_manager_output: 586 | print("Error: Failed to create Project Manager.") 587 | st.warning("Failed to create Project Manager. 
Please try again.") 588 | return 589 | 590 | session_state.project_manager_output = project_manager_output 591 | 592 | current_project = Current_Project() 593 | current_project.set_re_engineered_prompt(rephrased_text) 594 | 595 | deliverables_patterns = [ 596 | r"(?:Deliverables|Key Deliverables):\n(.*?)(?=Timeline|Team of Experts|$)", 597 | r"\*\*(?:Deliverables|Key Deliverables):\*\*\n(.*?)(?=\*\*Timeline|\*\*Team of Experts|$)" 598 | ] 599 | 600 | deliverables_text = None 601 | for pattern in deliverables_patterns: 602 | match = re.search(pattern, project_manager_output, re.DOTALL) 603 | if match: 604 | deliverables_text = match.group(1).strip() 605 | break 606 | 607 | if deliverables_text: 608 | deliverables = re.findall(r'\d+\.\s*(.*)', deliverables_text) 609 | for deliverable in deliverables: 610 | current_project.add_deliverable(deliverable.strip()) 611 | session_state.project_model.add_deliverable(deliverable.strip()) 612 | else: 613 | print("Warning: 'Deliverables' or 'Key Deliverables' section not found in Project Manager's output.") 614 | 615 | session_state.current_project = current_project 616 | 617 | update_discussion_and_whiteboard("Project Manager", project_manager_output, "") 618 | else: 619 | project_manager_output = session_state.project_manager_output 620 | 621 | team_of_experts_patterns = [ 622 | r"\*\*Team of Experts:\*\*\n(.*)", 623 | r"Team of Experts:\n(.*)" 624 | ] 625 | 626 | team_of_experts_text = None 627 | for pattern in team_of_experts_patterns: 628 | match = re.search(pattern, project_manager_output, re.DOTALL) 629 | if match: 630 | team_of_experts_text = match.group(1).strip() 631 | break 632 | 633 | if team_of_experts_text: 634 | autogen_agents, crewai_agents = get_agents_from_text(team_of_experts_text) 635 | 636 | if not autogen_agents: 637 | print("Error: No agents created.") 638 | st.warning("Failed to create agents. Please try again.") 639 | return 640 | 641 | session_state.agents = autogen_agents 642 | session_state.workflow.agents = session_state.agents 643 | 644 | # Generate the workflow data 645 | workflow_data, _ = get_workflow_from_agents(autogen_agents) 646 | workflow_data["created_at"] = datetime.datetime.now().isoformat() 647 | print(f"Debug: Workflow data: {workflow_data}") 648 | print(f"Debug: CrewAI agents: {crewai_agents}") 649 | 650 | if workflow_data: 651 | autogen_zip_buffer, crewai_zip_buffer = zip_files_in_memory(workflow_data) 652 | session_state.autogen_zip_buffer = autogen_zip_buffer 653 | session_state.crewai_zip_buffer = crewai_zip_buffer 654 | else: 655 | session_state.autogen_zip_buffer = None 656 | session_state.crewai_zip_buffer = None 657 | 658 | # Update the project session state with the workflow data 659 | session_state.project_model.workflows = [workflow_data] 660 | 661 | print("Debug: Agents in session state project workflow:") 662 | for agent in workflow_data["receiver"]["groupchat_config"]["agents"]: 663 | print(agent) 664 | 665 | # Indicate that a rerun is needed 666 | session_state.need_rerun = True 667 | else: 668 | print("Error: 'Team of Experts' section not found in Project Manager's output.") 669 | st.warning("Failed to extract the team of experts from the Project Manager's output. 
Please try again.") 670 | return 671 | 672 | 673 | def key_prompt(): 674 | api_key = get_api_key() 675 | api_key = display_api_key_input() 676 | if api_key is None: 677 | llm = LLM_PROVIDER.upper() 678 | st.warning(f"{llm}_API_KEY not found, or select a different provider.") 679 | return 680 | 681 | 682 | def parse_json(content: str) -> List[Dict[str, Any]]: 683 | try: 684 | json_data = json.loads(content) 685 | if isinstance(json_data, list): 686 | return json_data 687 | else: 688 | print("JSON data is not a list as expected.") 689 | return [] 690 | except json.JSONDecodeError as e: 691 | print(f"Error parsing JSON: {e}") 692 | print(f"Content: {content}") 693 | return [] 694 | 695 | 696 | def rephrase_prompt(user_request, model, max_tokens=None, llm_provider=None, provider=None): 697 | print("Executing rephrase_prompt()") 698 | 699 | refactoring_prompt = get_rephrased_user_prompt(user_request) 700 | 701 | if llm_provider is None: 702 | api_key = get_api_key() 703 | try: 704 | llm_provider = get_llm_provider(api_key=api_key, provider=provider) 705 | except Exception as e: 706 | print(f"Error initializing LLM provider: {str(e)}") 707 | return None 708 | 709 | if max_tokens is None: 710 | max_tokens = llm_provider.get_available_models().get(model, 4096) 711 | 712 | llm_request_data = { 713 | "model": model, 714 | "temperature": st.session_state.temperature, 715 | "max_tokens": max_tokens, 716 | "top_p": 1, 717 | "stop": "TERMINATE", 718 | "messages": [ 719 | { 720 | "role": "user", 721 | "content": refactoring_prompt, 722 | }, 723 | ], 724 | } 725 | 726 | try: 727 | print("Sending request to LLM API...") 728 | print(f"Request Details:") 729 | print(f"Provider: {provider}") 730 | print(f"llm_provider: {llm_provider}") 731 | print(f" Model: {model}") 732 | print(f" Max Tokens: {max_tokens}") 733 | print(f" Messages: {llm_request_data['messages']}") 734 | 735 | response = llm_provider.send_request(llm_request_data) 736 | 737 | if response is None: 738 | print("Error: No response received from the LLM provider.") 739 | return None 740 | 741 | print(f"Response received. Processing response...") 742 | response_data = llm_provider.process_response(response) 743 | print(f"Response Data: {json.dumps(response_data, indent=2)}") 744 | 745 | if "choices" in response_data and len(response_data["choices"]) > 0: 746 | rephrased = response_data["choices"][0]["message"]["content"] 747 | return rephrased.strip() 748 | else: 749 | print("Error: Unexpected response format. 'choices' field missing or empty.") 750 | return None 751 | except Exception as e: 752 | print(f"An error occurred: {str(e)}") 753 | return None 754 | 755 | 756 | def select_model(): 757 | provider = st.session_state.get('provider', LLM_PROVIDER) 758 | provider_models = get_provider_models(provider) 759 | 760 | if not provider_models: 761 | st.warning(f"No models available for {provider}. 
Please check your API key and connection.") 762 | return None 763 | 764 | if 'model' not in st.session_state or st.session_state.model not in provider_models: 765 | default_model = next(iter(provider_models)) 766 | else: 767 | default_model = st.session_state.model 768 | 769 | selected_model = st.selectbox( 770 | 'Select Model', 771 | options=list(provider_models.keys()), 772 | index=list(provider_models.keys()).index(default_model), 773 | key='model_selection' 774 | ) 775 | 776 | st.session_state.model = selected_model 777 | st.session_state.max_tokens = provider_models[selected_model] 778 | 779 | return selected_model 780 | 781 | 782 | def select_provider(): 783 | selected_provider = st.selectbox( 784 | 'Select Provider', 785 | options=SUPPORTED_PROVIDERS, 786 | index=SUPPORTED_PROVIDERS.index(st.session_state.get('provider', LLM_PROVIDER)), 787 | key='provider_selection' 788 | ) 789 | 790 | if selected_provider != st.session_state.get('provider'): 791 | st.session_state.provider = selected_provider 792 | update_api_url(selected_provider) 793 | 794 | # Clear any existing warnings 795 | if 'warning_placeholder' in st.session_state: 796 | st.session_state.warning_placeholder.empty() 797 | 798 | # Check for API key and prompt if not found 799 | api_key = get_api_key(selected_provider) 800 | if api_key is None: 801 | display_api_key_input(selected_provider) 802 | else: 803 | # Fetch available models for the selected provider 804 | fetch_available_models(selected_provider) 805 | 806 | # Clear the model selection when changing providers 807 | if 'model' in st.session_state: 808 | del st.session_state.model 809 | 810 | # Trigger a rerun to update the UI 811 | st.experimental_rerun() 812 | 813 | return selected_provider 814 | 815 | 816 | def set_css(): 817 | parent_directory = os.path.dirname(os.path.dirname(os.path.abspath(__file__))) 818 | css_file = os.path.join(parent_directory, "style.css") 819 | 820 | if os.path.exists(css_file): 821 | with open(css_file) as f: 822 | st.markdown(f'<style>{f.read()}</style>', unsafe_allow_html=True) 823 | else: 824 | st.error(f"CSS file not found: {os.path.abspath(css_file)}") 825 | 826 | 827 | def set_temperature(): 828 | def update_temperature(): 829 | st.session_state.temperature = st.session_state.temperature_slider 830 | 831 | temperature_slider = st.slider( 832 | "Set Temperature", 833 | min_value=0.0, 834 | max_value=1.0, 835 | step=0.01, 836 | key='temperature_slider', 837 | on_change=update_temperature 838 | ) 839 | 840 | 841 | if 'temperature' not in st.session_state: 842 | st.session_state.temperature = temperature_slider 843 | 844 | 845 | def show_interfaces(): 846 | with st.container(): 847 | col1, col2 = st.columns([3, 1]) 848 | with col1: 849 | display_discussion_and_whiteboard() 850 | with col2: 851 | auto_moderate = st.checkbox("Auto-moderate (slow, eats tokens, but very cool)", key="auto_moderate", on_change=trigger_moderator_agent_if_checked) 852 | if auto_moderate: 853 | with st.spinner("Auto-moderating..."): 854 | moderator_response = trigger_moderator_agent() 855 | if moderator_response: 856 | if st.session_state.next_agent: 857 | st.session_state.user_input = f"To {st.session_state.next_agent}: {moderator_response}" 858 | else: 859 | st.session_state.user_input = moderator_response 860 | st.success("Auto-moderation complete. New input has been generated.") 861 | else: 862 | st.warning("Auto-moderation failed due to rate limiting.
Please wait a moment and try again, or proceed manually.") 863 | 864 | user_input = st.text_area("Additional Input:", value=st.session_state.user_input, height=200, key="user_input_widget") 865 | reference_url = st.text_input("URL:", key="reference_url_widget") 866 | 867 | return user_input, reference_url 868 | 869 | 870 | def trigger_moderator_agent(): 871 | current_project = st.session_state.current_project 872 | goal = current_project.re_engineered_prompt 873 | last_speaker = st.session_state.last_agent 874 | last_comment = st.session_state.last_comment 875 | discussion_history = st.session_state.discussion_history 876 | 877 | deliverable_index, current_deliverable = current_project.get_next_unchecked_deliverable() 878 | 879 | if current_deliverable is None: 880 | if current_project.current_phase != "Deployment": 881 | current_project.move_to_next_phase() 882 | st.success(f"Moving to {current_project.current_phase} phase!") 883 | deliverable_index, current_deliverable = current_project.get_next_unchecked_deliverable() 884 | else: 885 | st.success("All deliverables have been completed and deployed!") 886 | return None 887 | 888 | current_phase = current_project.get_next_uncompleted_phase(deliverable_index) 889 | 890 | team_members = [] 891 | for agent in st.session_state.agents: 892 | if isinstance(agent, AgentBaseModel): 893 | team_members.append(f"{agent.name}: {agent.description}") 894 | else: 895 | # Fallback for dictionary-like structure 896 | agent_name = agent.get('config', {}).get('name', agent.get('name', 'Unknown')) 897 | agent_description = agent.get('description', 'No description') 898 | team_members.append(f"{agent_name}: {agent_description}") 899 | team_members_str = "\n".join(team_members) 900 | 901 | moderator_prompt = get_moderator_prompt(discussion_history, goal, last_comment, last_speaker, team_members_str, current_deliverable, current_phase) 902 | 903 | for attempt in range(MAX_RETRIES): 904 | api_key = get_api_key() 905 | llm_provider = get_llm_provider(api_key=api_key) 906 | llm_request_data = { 907 | "model": st.session_state.model, 908 | "temperature": st.session_state.temperature, 909 | "max_tokens": st.session_state.max_tokens, 910 | "top_p": 1, 911 | "stop": "TERMINATE", 912 | "messages": [ 913 | { 914 | "role": "user", 915 | "content": moderator_prompt 916 | } 917 | ] 918 | } 919 | retry_delay = RETRY_DELAY 920 | time.sleep(retry_delay) 921 | response = llm_provider.send_request(llm_request_data) 922 | 923 | if isinstance(response, dict) and 'choices' in response: 924 | # Handle response from providers like Groq 925 | content = response['choices'][0]['message']['content'] 926 | elif hasattr(response, 'content') and isinstance(response.content, list): 927 | # Handle Anthropic-style response 928 | content = response.content[0].text 929 | else: 930 | print(f"Unexpected response format: {type(response)}") 931 | continue 932 | 933 | if content: 934 | # Extract the agent name from the content 935 | agent_name_match = re.match(r"To (\w+( \w+)*):", content) 936 | if agent_name_match: 937 | next_agent = agent_name_match.group(1) 938 | # Check if the extracted name is a valid agent and not a tool 939 | if any(agent.name.lower() == next_agent.lower() for agent in st.session_state.agents): 940 | st.session_state.next_agent = next_agent 941 | # Remove the "To [Agent Name]:" prefix from the content 942 | content = re.sub(r"^To \w+( \w+)*:\s*", "", content).strip() 943 | else: 944 | st.warning(f"'{next_agent}' is not a valid agent. 
Please select a valid agent.") 945 | st.session_state.next_agent = None 946 | else: 947 | st.session_state.next_agent = None 948 | 949 | if "PHASE_COMPLETED" in content: 950 | current_project.mark_deliverable_phase_done(deliverable_index, current_phase) 951 | content = content.replace("PHASE_COMPLETED", "").strip() 952 | st.success(f"Phase {current_phase} completed for deliverable: {current_deliverable}") 953 | 954 | if "DELIVERABLE_COMPLETED" in content: 955 | current_project.mark_deliverable_done(deliverable_index) 956 | content = content.replace("DELIVERABLE_COMPLETED", "").strip() 957 | st.success(f"Deliverable completed: {current_deliverable}") 958 | 959 | return content.strip() 960 | 961 | logger.error("All retry attempts failed.") 962 | return None 963 | 964 | 965 | def trigger_moderator_agent_if_checked(): 966 | if st.session_state.get("auto_moderate", False): 967 | with st.spinner("Auto-moderating..."): 968 | moderator_response = trigger_moderator_agent() 969 | if moderator_response: 970 | st.session_state.user_input = moderator_response 971 | st.success("Auto-moderation complete. New input has been generated.") 972 | else: 973 | st.warning("Auto-moderation did not produce a response. Please try again or proceed manually.") 974 | st.experimental_rerun() 975 | 976 | 977 | def update_api_url(provider): 978 | api_url_key = f"{provider.upper()}_API_URL" 979 | st.session_state.api_url = st.session_state.get(api_url_key) 980 | 981 | 982 | def update_deliverable_status(index): 983 | current_project = st.session_state.current_project 984 | is_checked = st.session_state[f"deliverable_{index}"] 985 | if is_checked: 986 | for phase in current_project.implementation_phases: 987 | current_project.mark_deliverable_phase_done(index, phase) 988 | else: 989 | current_project.deliverables[index]["done"] = False 990 | for phase in current_project.implementation_phases: 991 | current_project.deliverables[index]["phase"][phase] = False 992 | st.experimental_rerun() 993 | 994 | 995 | def update_discussion_and_whiteboard(agent_name, response, user_input): 996 | # Update the full discussion history 997 | if user_input: 998 | user_input_text = f"\n\nUser: {user_input}\n\n" 999 | st.session_state.discussion_history += user_input_text 1000 | 1001 | # Format the most recent response 1002 | st.session_state.most_recent_response = f"{agent_name}:\n\n{response}\n\n" 1003 | 1004 | # Add the new response to the full discussion history 1005 | st.session_state.discussion_history += st.session_state.most_recent_response 1006 | 1007 | st.session_state.last_agent = agent_name 1008 | st.session_state.last_comment = response 1009 | 1010 | # Force a rerun to update the UI 1011 | st.experimental_rerun() 1012 | 1013 | 1014 | def update_user_input(): 1015 | if st.session_state.get("auto_moderate"): 1016 | st.session_state.user_input = st.session_state.user_input_widget_auto_moderate 1017 | else: 1018 | st.session_state.user_input = st.session_state.user_input_widget 1019 | -------------------------------------------------------------------------------- /AutoGroq/utils/workflow_utils.py: -------------------------------------------------------------------------------- 1 | # utils/workflow_utils.py 2 | import datetime 3 | import streamlit as st 4 | 5 | from configs.config import FALLBACK_MODEL_TOKEN_LIMITS 6 | 7 | from tools.fetch_web_content import fetch_web_content_tool 8 | from utils.agent_utils import create_agent_data 9 | from utils.text_utils import sanitize_text 10 | 11 | 12 | def get_workflow_from_agents(agents): 13 | 
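# Descriptive note (not in the original file): this builds the Autogen workflow
# dict (a userproxy sender plus a groupchat receiver whose groupchat_config lists
# every agent) alongside a parallel list of CrewAI agent dicts. A minimal usage
# sketch, assuming `agents` holds the AgentBaseModel instances kept in session
# state (as populated by handle_user_request):
#     workflow, crewai_agents = get_workflow_from_agents(st.session_state.agents)
#     autogen_zip, crewai_zip = zip_files_in_memory(workflow)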
current_timestamp = datetime.datetime.now().isoformat() 14 | temperature_value = st.session_state.get('temperature', 0.3) 15 | selected_model = st.session_state.get('model') # Get the selected model from session state 16 | 17 | workflow = { 18 | "name": "AutoGroq Workflow", 19 | "description": "Workflow auto-generated by AutoGroq.", 20 | "sender": { 21 | "type": "userproxy", 22 | "config": { 23 | "name": "userproxy", 24 | "llm_config": False, 25 | "human_input_mode": "NEVER", 26 | "max_consecutive_auto_reply": 5, 27 | "system_message": "You are a helpful assistant.", 28 | "is_termination_msg": None, 29 | "code_execution_config": { 30 | "work_dir": None, 31 | "use_docker": False 32 | }, 33 | "default_auto_reply": "", 34 | "description": None 35 | }, 36 | "timestamp": current_timestamp, 37 | "user_id": "default", 38 | "tools": [] 39 | }, 40 | "receiver": { 41 | "type": "groupchat", 42 | "config": { 43 | "name": "group_chat_manager", 44 | "llm_config": { 45 | "config_list": [ 46 | { 47 | "user_id": "default", 48 | "timestamp": current_timestamp, 49 | "model": selected_model, # Use the selected model 50 | "base_url": None, 51 | "api_type": None, 52 | "api_version": None, 53 | "description": "OpenAI model configuration" 54 | } 55 | ], 56 | "temperature": temperature_value, 57 | "cache_seed": 42, 58 | "timeout": 600, 59 | "max_tokens": FALLBACK_MODEL_TOKEN_LIMITS.get(selected_model, 4096), # Use the selected model 60 | "extra_body": None 61 | }, 62 | "human_input_mode": "NEVER", 63 | "max_consecutive_auto_reply": 10, 64 | "system_message": "Group chat manager", 65 | "is_termination_msg": None, 66 | "code_execution_config": None, 67 | "default_auto_reply": "", 68 | "description": None 69 | }, 70 | "groupchat_config": { 71 | "agents": [], 72 | "admin_name": "Admin", 73 | "messages": [], 74 | "max_round": 10, 75 | "speaker_selection_method": "auto", 76 | "allow_repeat_speaker": True 77 | }, 78 | "timestamp": current_timestamp, 79 | "user_id": "default", 80 | "tools": [] 81 | }, 82 | "type": "autonomous", 83 | "user_id": "default", 84 | "timestamp": current_timestamp, 85 | "summary_method": "last", 86 | "sample_tasks": [], 87 | } 88 | 89 | for index, agent in enumerate(agents): 90 | agent_dict = agent.to_dict() 91 | agent_name = agent_dict["name"] 92 | description = agent_dict["description"] 93 | formatted_agent_name = sanitize_text(agent_name).lower().replace(' ', '_') 94 | sanitized_description = sanitize_text(description) 95 | 96 | system_message = f"You are a helpful assistant that can act as {agent_name} who {sanitized_description}." 97 | if index == 0: 98 | other_agent_names = [sanitize_text(a.name).lower().replace(' ', '_') for a in agents[1:]] 99 | system_message += f" You are the primary coordinator who will receive suggestions or advice from all the other agents ({', '.join(other_agent_names)}). You must ensure that the final response integrates the suggestions from other agents or team members. YOUR FINAL RESPONSE MUST OFFER THE COMPLETE RESOLUTION TO THE USER'S REQUEST. When the user's request has been satisfied and all perspectives are integrated, you can respond with TERMINATE." 
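        # Each agent is serialized below as an Autogen "assistant" entry:
        # its llm_config mirrors the group chat manager's settings (same
        # selected model, temperature, cache_seed, and fallback max_tokens),
        # while role, goal, and backstory are carried along for the CrewAI
        # export assembled at the end of this function.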
100 | 
101 |         agent_config = {
102 |             "type": "assistant",
103 |             "config": {
104 |                 "name": formatted_agent_name,
105 |                 "llm_config": {
106 |                     "config_list": [
107 |                         {
108 |                             "user_id": "default",
109 |                             "timestamp": current_timestamp,
110 |                             "model": selected_model,  # Use the selected model
111 |                             "base_url": None,
112 |                             "api_type": None,
113 |                             "api_version": None,
114 |                             "description": "OpenAI model configuration"
115 |                         }
116 |                     ],
117 |                     "temperature": temperature_value,
118 |                     "cache_seed": 42,
119 |                     "timeout": 600,
120 |                     "max_tokens": FALLBACK_MODEL_TOKEN_LIMITS.get(selected_model, 4096),  # Use the selected model
121 |                     "extra_body": None
122 |                 },
123 |                 "human_input_mode": "NEVER",
124 |                 "max_consecutive_auto_reply": 8,
125 |                 "system_message": system_message,
126 |                 "is_termination_msg": None,
127 |                 "code_execution_config": None,
128 |                 "default_auto_reply": "",
129 |                 "description": None
130 |             },
131 |             "timestamp": current_timestamp,
132 |             "user_id": "default",
133 |             "tools": [],
134 |             "role": agent_dict["role"],
135 |             "goal": agent_dict["goal"],
136 |             "backstory": agent_dict["backstory"]
137 |         }
138 | 
139 |         if agent.name == "Web Content Retriever":
140 |             agent_config['tools'] = [fetch_web_content_tool.to_dict()]
141 | 
142 |         workflow["receiver"]["groupchat_config"]["agents"].append(agent_config)
143 | 
144 | 
145 | 
146 |     print("Debug: Workflow agents assigned:")
147 |     for agent in workflow["receiver"]["groupchat_config"]["agents"]:
148 |         print(agent)
149 | 
150 |     crewai_agents = []
151 |     for agent in agents:
152 |         _, crewai_agent_data = create_agent_data(agent.to_dict())
153 |         crewai_agents.append(crewai_agent_data)
154 | 
155 |     return workflow, crewai_agents
156 | 
--------------------------------------------------------------------------------
/README.md:
--------------------------------------------------------------------------------
1 | ## Star History
2 | 
3 | [![Star History Chart](https://api.star-history.com/svg?repos=jgravelle/AutoGroq&type=Timeline)](https://star-history.com/#jgravelle/AutoGroq&Timeline)
4 | 
5 | # AutoGroq™
6 | 
7 | AutoGroq is a groundbreaking tool that revolutionizes the way users interact with AI assistants. By dynamically generating tailored teams of AI agents based on your project requirements, AutoGroq eliminates the need for manual configuration and allows you to tackle any question, problem, or project with ease and efficiency.
8 | 
9 | ## NEW THIS WEEK: SKILL GENERATION!
10 | ![image](https://github.com/jgravelle/AutoGroq/assets/3400540/c47f6bc7-03a9-4695-86ab-46dbbda06bec)
11 | 
12 | 
13 | ## Why AutoGroq?
14 | 
15 | AutoGroq was born out of the realization that the traditional approach to building AI agents was backwards. Instead of creating agents in anticipation of problems, AutoGroq uses the wording of each user's actual request as the basis for constructing the perfect AI team. It's how we wished Autogen worked from the very beginning.
16 | 
17 | With AutoGroq, a fully configured workflow, team of agents, and skillset are just a few clicks and a couple of minutes away, without any programming necessary. Our rapidly growing user base of nearly 8000 developers is a testament to the power and effectiveness of AutoGroq.
18 | 
19 | ![image](https://github.com/jgravelle/AutoGroq/assets/3400540/a5294491-2c78-4e07-a587-8a1eacb17a0a)
20 | 
21 | ## Key Features
22 | 
23 | - **Dynamic Expert Agent Generation**: AutoGroq automatically creates expert agents specialized in various domains or topics, ensuring you receive the most relevant support for your inquiries.
24 | - **Dynamic Workflow Generation**: With AutoGroq, you're just minutes away from having a custom team of experts working on your project. Watch our video tutorial to see it in action!
25 | - **Natural Conversation Flow**: Engage in intuitive and contextually aware conversations with AutoGroq's expert agents, facilitating a seamless exchange of information.
26 | - **Code Snippet Extraction**: AutoGroq intelligently extracts and presents code snippets within a dedicated "Whiteboard" section, making it convenient to reference, copy, or modify code during your interaction.
27 | - **Flexible Agent Management**: Customize your panel of expert agents according to your evolving project needs. Add new agents, modify their expertise, or remove them as required.
28 | - **Advanced Prompt Rephrasing**: AutoGroq employs sophisticated natural language processing techniques to rephrase user inputs, enhancing clarity and ensuring accurate responses from expert agents.
29 | - **Bulk File Upload to Autogen**: With AutoGroq, you can import multiple agents, skills, and workflows into Autogen with a single click, saving you time and effort.
30 | - **Support for Multiple LLMs**: AutoGroq supports Groq, ChatGPT, Ollama, and more, making it compatible with a wide range of language models. You can even create your own provider module to integrate with your preferred LLM (a hedged example sketch appears at the end of this document).
31 | - **Skill Integration**: Extend your agents' capabilities by adding custom skills. Simply drop a valid skill file into the skills folder, and it will be automatically available for your agents to use.
32 | 
33 | ## Getting Started
34 | 
35 | To get started with AutoGroq, follow these steps:
36 | 
37 | 1. Install Autogen following Matt Berman's instructions: https://www.youtube.com/watch?v=mUEFwUU0IfE
38 | 2. Install Miniconda: https://docs.anaconda.com/free/miniconda/miniconda-install/
39 | 3. Open a command prompt and run the following commands:
40 |     md c:\AutoGroq
41 |     cd c:\AutoGroq
42 |     conda create -n AutoGroq python=3.11
43 |     conda activate AutoGroq
44 |     git clone https://github.com/jgravelle/AutoGroq.git
45 |     cd AutoGroq
46 |     pip install -r requirements.txt
47 |     streamlit run c:\AutoGroq\AutoGroq\main.py
48 | 
49 | ## Configuration
50 | 
51 | To customize the configurations for your local environment, follow these steps:
52 | 
53 | 1. Create a new file called `config_local.py` in the same directory as `config.py`.
54 | 2. Copy the contents of `config_local.py.example` into `config_local.py`.
55 | 3. Modify the values in `config_local.py` according to your specific setup, such as API keys and URLs.
56 | 4. Save the `config_local.py` file.
57 | 
58 | Note: The `config_local.py` file is not tracked by Git, so your customizations will not be overwritten when pulling updates from the repository. (An illustrative sketch of such a file appears at the end of this document.)
59 | 
60 | ## How It Works
61 | 
62 | 1. **Initiation**: Begin by entering your query or request in the designated input area.
63 | 2. **Engagement**: Click the "Begin" button to initiate the interaction. AutoGroq will rephrase your request and generate the appropriate expert agents.
64 | 3. **Interaction**: Select an expert agent to receive specialized assistance tailored to your needs.
65 | 4. **Dialogue**: Continue the conversation by providing additional input or context as required, guiding the flow of information.
66 | 5. **Review**: The "Discussion" section will display your dialogue history, while the "Whiteboard" section will showcase any extracted code snippets.
67 | 6. 
**Reset**: Use the "Reset" button to clear the current conversation and start a new one whenever needed.
68 | 
69 | ## Live Demo and Video Tutorial
70 | 
71 | Experience AutoGroq's capabilities firsthand by accessing our online beta version: [AutoGroq Live Demo](https://autogroq.streamlit.app/)
72 | 
73 | For a step-by-step guide on using AutoGroq, watch our updated video tutorials: [AutoGroq Video Tutorials](https://www.youtube.com/watch?v=hoMqUmUeifU&list=PLPu97iZ5SLTsGX3WWJjQ5GNHy7ZX66ryP&index=15)
74 | 
75 | ## Contributing
76 | 
77 | We value your feedback and contributions in shaping the future of AutoGroq. If you encounter any issues or have ideas for new features, please share them with us on our [GitHub repository](https://github.com/jgravelle/AutoGroq.git).
78 | 
79 | ## License
80 | 
81 | AutoGroq is proudly open-source and released under the [MIT License](https://opensource.org/licenses/MIT).
82 | 
83 | Thank you for choosing AutoGroq as your AI-powered conversational assistant. We are committed to redefining the boundaries of what AI can achieve and empowering you to tackle any question, problem, or project with ease and efficiency.
84 | 
85 | ## Copyright (c) 2024 J. Gravelle
86 | 
87 | Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions:
88 | 
89 | **1. The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software.**
90 | 
91 | **2. Any modifications made to the Software must clearly indicate that they are derived from the original work, and the name of the original author (J. Gravelle) must remain intact.**
92 | 
93 | **3. Redistributions of the Software in source code form must also include a prominent notice that the code has been modified from the original.**
94 | 
95 | THE SOFTWARE IS PROVIDED "AS IS," WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE, AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES, OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT, OR OTHERWISE, ARISING FROM, OUT OF, OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
96 | 
97 | 
--------------------------------------------------------------------------------
/requirements.txt:
--------------------------------------------------------------------------------
1 | streamlit==1.35.0
2 | requests==2.28.2
3 | beautifulsoup4==4.12.3
4 | langchain-community==0.2.1
5 | tiktoken==0.7.0
6 | matplotlib==3.9.0
7 | openai==0.27.10
8 | anthropic==0.29.0
--------------------------------------------------------------------------------
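A note for readers following the README's Configuration section above: the repository's `config_local.py.example` is not included in this listing, so the exact variable names it expects are unknown here. The sketch below is only a guess at what a `config_local.py` might contain; every name and value in it (`GROQ_API_KEY`, `OPENAI_API_KEY`, `OLLAMA_API_URL`, `DEFAULT_MODEL`) is an illustrative assumption, not a setting confirmed by the repository.

```python
# config_local.py -- illustrative sketch only; copy the real keys from
# config_local.py.example in the repository. All names below are assumptions.

# Hypothetical provider credentials:
GROQ_API_KEY = "gsk_your_key_here"
OPENAI_API_KEY = "sk-your-key-here"  # only needed when using ChatGPT models

# Hypothetical endpoint override for a locally hosted Ollama instance:
OLLAMA_API_URL = "http://localhost:11434/api/generate"

# Hypothetical default model selection:
DEFAULT_MODEL = "llama3-70b-8192"
```

Because `config_local.py` is untracked by Git (per the README), values placed here survive `git pull` without ever being committed.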
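Similarly, the README's multi-LLM bullet mentions writing your own provider module to integrate a preferred LLM. The tree shows `llm_providers/base_provider.py`, but its interface is not included in this listing, so the following is only a rough sketch of the general shape such a provider might take: the class name and the `send_request`/`process_response` method names, along with all parameters, are assumptions rather than the repository's actual API.

```python
# my_provider.py -- hypothetical sketch of a custom LLM provider for an
# OpenAI-style chat-completions endpoint. Method names are assumptions;
# mirror llm_providers/base_provider.py from the repository instead.
import requests


class MyCustomProvider:
    def __init__(self, api_key: str, api_url: str):
        self.api_key = api_key
        self.api_url = api_url

    def send_request(self, data: dict) -> requests.Response:
        # POST the request payload with bearer-token authentication.
        headers = {
            "Authorization": f"Bearer {self.api_key}",
            "Content-Type": "application/json",
        }
        return requests.post(self.api_url, json=data, headers=headers, timeout=600)

    def process_response(self, response: requests.Response) -> dict:
        # Raise on HTTP errors, then return the parsed JSON body.
        response.raise_for_status()
        return response.json()
```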