├── .python-version ├── assets └── transformers.pdf ├── src ├── intermediate │ ├── job_search │ │ ├── resume.pdf │ │ ├── agents.py │ │ ├── tasks.py │ │ ├── tools.py │ │ └── main.py │ ├── script_writer │ │ ├── tools.py │ │ ├── main.py │ │ ├── tasks.py │ │ └── agents.py │ ├── devops │ │ ├── tools.py │ │ ├── main.py │ │ ├── logs │ │ │ ├── kubernetes_deployment_error.txt │ │ │ └── database_connection_error.txt │ │ ├── tasks.py │ │ └── agents.py │ └── investment_advisor │ │ ├── tasks.py │ │ ├── start_server.py │ │ ├── agents.py │ │ ├── tools.py │ │ ├── main.py │ │ └── api_server.py ├── advanced │ └── orion_ai_coding_agent │ │ ├── requirements.txt │ │ ├── run.sh │ │ ├── src │ │ ├── agents │ │ │ ├── __init__.py │ │ │ ├── environment_manager_agent.py │ │ │ ├── repository_scanner_agent.py │ │ │ ├── git_operations_agent.py │ │ │ └── code_tester_agent.py │ │ ├── auth_setup.py │ │ ├── cli_interface.py │ │ ├── code_explainer.py │ │ ├── base_agent.py │ │ ├── workflow.py │ │ └── discord_integration.py │ │ ├── README.md │ │ └── main.py └── beginner │ ├── conflict_detector.py │ ├── pdf_summarizer.py │ └── web_scraper.py ├── pyproject.toml ├── LICENSE ├── README.md └── .gitignore /.python-version: -------------------------------------------------------------------------------- 1 | 3.11 2 | -------------------------------------------------------------------------------- /assets/transformers.pdf: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ishandutta0098/vector-ai-agents-lab/HEAD/assets/transformers.pdf -------------------------------------------------------------------------------- /src/intermediate/job_search/resume.pdf: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ishandutta0098/vector-ai-agents-lab/HEAD/src/intermediate/job_search/resume.pdf -------------------------------------------------------------------------------- /src/advanced/orion_ai_coding_agent/requirements.txt: -------------------------------------------------------------------------------- 1 | langgraph>=0.0.40 2 | langsmith>=0.0.50 3 | langchain-openai>=0.1.0 4 | langchain-core>=0.2.0 5 | composio>=0.3.0 6 | python-dotenv>=1.0.0 7 | pydantic>=2.0.0 8 | openai>=1.0.0 9 | discord.py>=2.3.2 -------------------------------------------------------------------------------- /src/intermediate/script_writer/tools.py: -------------------------------------------------------------------------------- 1 | # Web Search Tool 2 | import os 3 | 4 | from crewai_tools import EXASearchTool 5 | 6 | 7 | def exa_search_tool(): 8 | """ 9 | Search the web for information using ExaSearchTool 10 | """ 11 | exa_search_tool = EXASearchTool() 12 | 13 | return exa_search_tool 14 | -------------------------------------------------------------------------------- /src/advanced/orion_ai_coding_agent/run.sh: -------------------------------------------------------------------------------- 1 | conda activate orion 2 | 3 | python -m main \ 4 | --prompt "Change the checkpoint path to cifar_trained.ckpt in the script src/basic/level_05_pretrained_model/pretrained_model.py and improve the code" \ 5 | --repo-url "https://github.com/ishandutta0098/zero-to-lightning" \ 6 | --no-venv \ 7 | --no-testing \ 8 | --conda-env "ml" -------------------------------------------------------------------------------- /pyproject.toml: -------------------------------------------------------------------------------- 1 | [project] 2 | name = "vector-ai-agents-lab" 
3 | version = "0.1.0" 4 | description = "Add your description here" 5 | readme = "README.md" 6 | requires-python = ">=3.11" 7 | dependencies = [ 8 | "black>=25.9.0", 9 | "crewai==0.148.0", 10 | "crewai_tools==0.17.0", 11 | "exa-py>=1.16.1", 12 | "fastapi>=0.119.0", 13 | "isort>=6.0.1", 14 | "ruff>=0.13.2", 15 | "yfinance>=0.2.66", 16 | ] 17 | -------------------------------------------------------------------------------- /src/intermediate/script_writer/main.py: -------------------------------------------------------------------------------- 1 | from agents import content_explorer, script_writer 2 | from crewai import Crew, Process 3 | from tasks import create_a_script, get_details 4 | 5 | 6 | def main(): 7 | 8 | # Define the crew with agents and tasks in sequential process 9 | crew = Crew( 10 | agents=[content_explorer, script_writer], 11 | tasks=[get_details, create_a_script], 12 | verbose=True, 13 | process=Process.sequential, 14 | ) 15 | 16 | crew.kickoff(inputs={"topic": "AI Agents in the year 2025"}) 17 | 18 | 19 | if __name__ == "__main__": 20 | main() 21 | -------------------------------------------------------------------------------- /src/intermediate/script_writer/tasks.py: -------------------------------------------------------------------------------- 1 | from agents import content_explorer, script_writer 2 | from crewai import Task 3 | 4 | # Task to gather Latest information 5 | get_details = Task( 6 | description="Get latest, trending, interesting information and news about {topic}", 7 | expected_output="Latest news, interesting information and trivia about {topic}", 8 | agent=content_explorer, 9 | ) 10 | 11 | ## Task to create script. 12 | create_a_script = Task( 13 | description="Considering the given details in time order make an interesting conversation", 14 | expected_output="A humorous conversation connecting key details", 15 | agent=script_writer, 16 | context=[get_details], 17 | output_file="src/intermediate/script_writer/task_outputs/script.txt", 18 | ) 19 | -------------------------------------------------------------------------------- /src/intermediate/devops/tools.py: -------------------------------------------------------------------------------- 1 | import os 2 | 3 | from crewai_tools import EXASearchTool, FileReadTool 4 | from dotenv import load_dotenv 5 | 6 | load_dotenv() 7 | 8 | # TOOL 1: FileReadTool 9 | # Initialize FileReadTool for reading log files 10 | log_reader_tool = FileReadTool() 11 | 12 | # TOOL 2: EXASearchTool 13 | os.environ["EXA_API_KEY"] = os.getenv("EXA_API_KEY") 14 | 15 | try: 16 | exa_search_tool = EXASearchTool() 17 | except Exception as e: 18 | print(f"EXA Search Tool initialization failed: {e}") 19 | # Fallback: try with empty lists for domains 20 | try: 21 | exa_search_tool = EXASearchTool(include_domains=[], exclude_domains=[]) 22 | except Exception as e2: 23 | print(f"Fallback EXA Search Tool initialization also failed: {e2}") 24 | # Use basic initialization as last resort 25 | exa_search_tool = EXASearchTool() 26 | -------------------------------------------------------------------------------- /LICENSE: -------------------------------------------------------------------------------- 1 | MIT License 2 | 3 | Copyright (c) 2025 Ishan Dutta 4 | 5 | Permission is hereby granted, free of charge, to any person obtaining a copy 6 | of this software and associated documentation files (the "Software"), to deal 7 | in the Software without restriction, including without limitation the rights 8 | to use, copy, modify, merge, publish, 
distribute, sublicense, and/or sell 9 | copies of the Software, and to permit persons to whom the Software is 10 | furnished to do so, subject to the following conditions: 11 | 12 | The above copyright notice and this permission notice shall be included in all 13 | copies or substantial portions of the Software. 14 | 15 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 16 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 17 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE 18 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 19 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, 20 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE 21 | SOFTWARE. 22 | -------------------------------------------------------------------------------- /src/advanced/orion_ai_coding_agent/src/agents/__init__.py: -------------------------------------------------------------------------------- 1 | """ 2 | Agents Package for Orion AI Agent System 3 | 4 | This package contains all the specialized agents that work together to provide 5 | AI-powered code generation and repository management capabilities. 6 | """ 7 | 8 | from .ai_generator_agent import AIGeneratorAgent 9 | from .code_tester_agent import CodeTesterAgent 10 | from .environment_manager_agent import EnvironmentManagerAgent 11 | from .git_operations_agent import GitOperationsAgent 12 | from .github_integration_agent import GitHubIntegrationAgent 13 | from .langgraph_orchestrator_agent import LangGraphOrchestratorAgent 14 | from .repository_scanner_agent import RepositoryScannerAgent 15 | from .task_classifier_agent import TaskClassifierAgent 16 | 17 | # Keep the old name for backwards compatibility 18 | WorkflowOrchestratorAgent = LangGraphOrchestratorAgent 19 | 20 | __all__ = [ 21 | "AIGeneratorAgent", 22 | "CodeTesterAgent", 23 | "EnvironmentManagerAgent", 24 | "GitOperationsAgent", 25 | "GitHubIntegrationAgent", 26 | "LangGraphOrchestratorAgent", 27 | "WorkflowOrchestratorAgent", # Backwards compatibility 28 | ] 29 | 30 | __version__ = "1.0.0" 31 | -------------------------------------------------------------------------------- /src/advanced/orion_ai_coding_agent/src/auth_setup.py: -------------------------------------------------------------------------------- 1 | import os 2 | 3 | from composio import Composio 4 | from composio.types import auth_scheme 5 | from dotenv import load_dotenv 6 | 7 | load_dotenv() 8 | 9 | # Replace these with your actual values 10 | github_auth_config_id = os.getenv("GITHUB_AUTH_CONFIG_ID") 11 | user_id = os.getenv("USER_ID") 12 | 13 | composio = Composio(api_key=os.getenv("COMPOSIO_API_KEY")) 14 | 15 | print(github_auth_config_id) 16 | print(user_id) 17 | 18 | 19 | def authenticate_toolkit(user_id: str, auth_config_id: str): 20 | connection_request = composio.connected_accounts.initiate( 21 | user_id=user_id, 22 | auth_config_id=auth_config_id, 23 | ) 24 | 25 | print(f"Visit this URL to authenticate GitHub: {connection_request.redirect_url}") 26 | 27 | # This will wait for the auth flow to be completed 28 | connection_request.wait_for_connection(timeout=15) 29 | return connection_request.id 30 | 31 | 32 | connection_id = authenticate_toolkit(user_id, github_auth_config_id) 33 | 34 | # You can also verify the connection status using: 35 | connected_account = composio.connected_accounts.get(connection_id) 36 | print(f"Connected account: {connected_account}") 37 | 
--------------------------------------------------------------------------------
/src/intermediate/script_writer/agents.py:
--------------------------------------------------------------------------------
1 | import os
2 | 
3 | from crewai import Agent
4 | from crewai.llm import LLM
5 | from dotenv import load_dotenv
6 | from tools import exa_search_tool
7 | 
8 | # Load EXA_API_KEY and OPENAI_API_KEY from the .env file
9 | load_dotenv()
10 | 
11 | os.environ["EXA_API_KEY"] = os.getenv("EXA_API_KEY")
12 | os.environ["OPENAI_API_KEY"] = os.getenv("OPENAI_API_KEY")
13 | 
14 | llm = LLM(model="gpt-4o", temperature=0.9)
15 | 
16 | # Agent 1: Content Explorer - Gathers information about the topic from the internet
17 | content_explorer = Agent(
18 |     role="content explorer",
19 |     goal="Gather and provide the latest information about the topic from the internet",
20 |     llm=llm,
21 |     verbose=True,
22 |     backstory=(
23 |         "You are an expert researcher, who can gather detailed information about a topic.\
24 |         Gather at least 10 pieces of information."
25 |     ),
26 |     tools=[exa_search_tool()],
27 |     cache=True,
28 |     max_iter=5,
29 | )
30 | 
31 | # Agent 2: Script Writer - Creates a script out of the information
32 | script_writer = Agent(
33 |     role="Script Writer",
34 |     goal="From the details given to you, create an interesting conversational script",
35 |     llm=llm,
36 |     verbose=True,
37 |     backstory=(
38 |         "You are an expert in literature. You are very good at creating conversations from the given chain of information.\
39 |         Write it as a script of about 200 words."
40 |     ),
41 | )
42 | 
--------------------------------------------------------------------------------
/src/intermediate/devops/main.py:
--------------------------------------------------------------------------------
1 | import os
2 | from pathlib import Path
3 | 
4 | from agents import issue_investigator, log_analyzer, solution_specialist
5 | from crewai import Crew, Process
6 | from tasks import analyze_logs_task, investigate_issue_task, provide_solution_task
7 | 
8 | memory_dir = Path(__file__).parent / "crewai_memory"
9 | memory_dir.mkdir(exist_ok=True)
10 | os.environ["CREWAI_STORAGE_DIR"] = str(memory_dir.absolute())
11 | 
12 | # Enhanced DevOps crew with advanced configuration
13 | devops_crew = Crew(
14 |     agents=[log_analyzer, issue_investigator, solution_specialist],
15 |     tasks=[analyze_logs_task, investigate_issue_task, provide_solution_task],
16 |     verbose=True,
17 |     process=Process.sequential,
18 |     memory=True,
19 |     cache=True,
20 |     max_rpm=30,
21 | )
22 | 
23 | if __name__ == "__main__":
24 |     print("🚀 Starting Enhanced DevOps Issue Analysis...")
25 | 
26 |     # Scenario 1: Analyze Kubernetes deployment error
27 |     print("\n📋 Scenario 1: Kubernetes Deployment Analysis")
28 |     result = devops_crew.kickoff(
29 |         inputs={
30 |             "log_file_path": "src/intermediate/devops/logs/kubernetes_deployment_error.txt"
31 |         }
32 |     )
33 | 
34 |     # Scenario 2: Analyze database connection error
35 |     # print("\n📋 Scenario 2: Database Connection Analysis")
36 |     # result = devops_crew.kickoff(
37 |     #     inputs={
38 |     #         "log_file_path": "src/intermediate/devops/logs/database_connection_error.txt"
39 |     #     }
40 |     # )
41 | 
42 |     print("\n🎉 DevOps analysis completed!")
43 | 
--------------------------------------------------------------------------------
/src/intermediate/investment_advisor/tasks.py:
--------------------------------------------------------------------------------
1 | import os
2 | 
3 | from agents import analyst, data_explorer, fin_expert, news_info_explorer
4 | from crewai import Task
5 | 
6 | # Create output directory for task results
7 | os.makedirs("src/intermediate/investment_advisor/task_outputs", exist_ok=True)
8 | 
9 | # Task to gather financial data of a stock
10 | get_company_financials = Task(
11 |     description="Get financial data like income statements and other fundamental ratios for stock: {stock}",
12 |     expected_output="Detailed information from income statement, key ratios for {stock}. "
13 |     "Also indicate the current financial status and the trend over the period.",
14 |     agent=data_explorer,
15 | )
16 | 
17 | # Task to gather company news
18 | get_company_news = Task(
19 |     description="Get latest news and business information about company: {stock}",
20 |     expected_output="Latest news and business information about the company. Also provide a summary.",
21 |     agent=news_info_explorer,
22 | )
23 | 
24 | # Task to analyze financial data and news
25 | analyse = Task(
26 |     description="Make a thorough analysis based on the given financial data and latest news of a stock",
27 |     expected_output="Comprehensive analysis of a stock outlining financial health, stock valuation, risks, and news. "
28 |     "Mention currency information and number units in Indian context (lakh/crore).",
29 |     agent=analyst,
30 |     context=[get_company_financials, get_company_news],
31 |     output_file="src/intermediate/investment_advisor/task_outputs/financial_analysis.md",
32 | )
33 | 
34 | # Task to provide financial advice
35 | advise = Task(
36 |     description="Make a recommendation about investing in a stock, based on analysis provided and current stock price. "
37 |     "Explain the reasons.",
38 |     expected_output="Recommendation (Buy / Hold / Sell) of a stock backed with elaborated reasons. "
39 |     "Respond in Markdown format.",
40 |     agent=fin_expert,
41 |     context=[analyse],
42 |     output_file="src/intermediate/investment_advisor/task_outputs/investment_recommendation.md",
43 | )
44 | 
--------------------------------------------------------------------------------
/src/intermediate/devops/logs/kubernetes_deployment_error.txt:
--------------------------------------------------------------------------------
1 | 2024-10-10T14:32:15.123Z [INFO] Starting deployment of myapp:v1.2.3
2 | 2024-10-10T14:32:15.456Z [INFO] Applying deployment configuration...
3 | 2024-10-10T14:32:15.789Z [INFO] Creating deployment myapp-deployment in namespace production
4 | 2024-10-10T14:32:16.012Z [INFO] Deployment created successfully
5 | 2024-10-10T14:32:16.234Z [INFO] Waiting for pods to be ready... 
6 | 2024-10-10T14:32:16.567Z [WARNING] Pod myapp-deployment-7b8c9d5f4-abc12 is in Pending state 7 | 2024-10-10T14:32:17.890Z [ERROR] Pod myapp-deployment-7b8c9d5f4-abc12 failed to start 8 | 2024-10-10T14:32:18.123Z [ERROR] Event: Failed to pull image "myapp:v1.2.3": rpc error: code = Unknown desc = Error response from daemon: pull access denied for myapp, repository does not exist or may require 'docker login' 9 | 2024-10-10T14:32:18.456Z [ERROR] Pod myapp-deployment-7b8c9d5f4-abc12 status: ImagePullBackOff 10 | 2024-10-10T14:32:19.789Z [WARNING] Back-off pulling image "myapp:v1.2.3" 11 | 2024-10-10T14:32:20.012Z [ERROR] kubelet: Failed to pull image "myapp:v1.2.3": rpc error: code = Unknown desc = Error response from daemon: pull access denied for myapp, repository does not exist or may require 'docker login' 12 | 2024-10-10T14:32:21.345Z [ERROR] kubelet: Error syncing pod: ErrImagePull 13 | 2024-10-10T14:32:22.678Z [WARNING] Pod myapp-deployment-7b8c9d5f4-abc12 has been in ImagePullBackOff state for 5 seconds 14 | 2024-10-10T14:32:25.901Z [ERROR] Deployment rollout failed: deployment "myapp-deployment" exceeded its progress deadline 15 | 2024-10-10T14:32:26.234Z [ERROR] ReplicaSet myapp-deployment-7b8c9d5f4 has 0 ready replicas out of 3 desired 16 | 2024-10-10T14:32:26.567Z [INFO] Current deployment status: 0/3 pods ready 17 | 2024-10-10T14:32:27.890Z [WARNING] Deployment health check failed: no healthy pods found 18 | 2024-10-10T14:32:28.123Z [ERROR] Service myapp-service has no available endpoints 19 | 2024-10-10T14:32:29.456Z [CRITICAL] Production deployment failed - rollback initiated 20 | 2024-10-10T14:32:30.789Z [INFO] Rolling back to previous version myapp:v1.2.2 21 | 2024-10-10T14:32:31.012Z [INFO] Rollback completed successfully -------------------------------------------------------------------------------- /src/advanced/orion_ai_coding_agent/src/cli_interface.py: -------------------------------------------------------------------------------- 1 | """Utility functions for the command line interface.""" 2 | 3 | from textwrap import dedent 4 | 5 | 6 | def show_help_summary() -> None: 7 | """Print a summary of the available CLI commands with LangGraph features.""" 8 | help_text = dedent( 9 | """ 10 | 🚀 Orion AI Agent - Intelligent Workflow System 11 | =============================================== 12 | ✨ Powered by LangGraph for intelligent orchestration 13 | 14 | 📚 REPOSITORY MANAGEMENT: 15 | --list-repos List repositories from your GitHub account 16 | --repo-url URL GitHub repository URL to operate on 17 | --branch BRANCH Name of the branch to operate on 18 | --setup-auth Run authentication setup 19 | 20 | 🤖 AI WORKFLOW: 21 | --prompt TEXT Instruction for the AI agent 22 | --workdir PATH Working directory for cloning the repository 23 | 24 | 🔧 WORKFLOW OPTIONS: 25 | --debug Enable detailed LangGraph workflow information 26 | --no-testing Disable code testing of generated files 27 | --no-venv Disable virtual environment creation 28 | --strict-testing Abort commit if tests fail (enables smart recovery) 29 | --commit Commit the generated changes 30 | --create-pr Create a pull request (requires --commit) 31 | 32 | 🤖 INTEGRATIONS: 33 | --discord-bot Run the Discord bot to receive prompts 34 | --repo-limit N Number of repositories to list when using --list-repos 35 | --show-commands Show this help summary 36 | 37 | 🎯 LANGGRAPH FEATURES: 38 | • Intelligent workflow routing based on context analysis 39 | • Parallel agent execution for independent tasks 40 | • Advanced error recovery 
with multiple retry strategies
41 |     • State-based decision making throughout workflow
42 |     • Built-in checkpointing and state persistence
43 | 
44 |     💡 EXAMPLES:
45 |       python main.py --prompt "Add logging functionality"
46 |       python main.py --prompt "Build API with tests" --commit --create-pr --debug
47 |       python main.py --prompt "Explain" --repo-url <repo-url> --branch <branch>
48 |       python main.py --discord-bot --debug
49 |     """
50 |     )
51 |     print(help_text.strip())
52 | 
--------------------------------------------------------------------------------
/src/intermediate/devops/logs/database_connection_error.txt:
--------------------------------------------------------------------------------
1 | 2024-10-10T09:15:23.456Z [INFO] Application startup initiated
2 | 2024-10-10T09:15:23.789Z [INFO] Loading configuration from /app/config/production.yaml
3 | 2024-10-10T09:15:24.012Z [INFO] Initializing database connection pool
4 | 2024-10-10T09:15:24.345Z [INFO] Database host: postgres-prod.cluster-xyz.us-west-2.rds.amazonaws.com:5432
5 | 2024-10-10T09:15:24.678Z [INFO] Database name: ecommerce_prod
6 | 2024-10-10T09:15:24.901Z [INFO] Connection pool size: 20
7 | 2024-10-10T09:15:25.234Z [WARNING] Attempting to connect to database...
8 | 2024-10-10T09:15:30.567Z [ERROR] Database connection failed: connection to server at "postgres-prod.cluster-xyz.us-west-2.rds.amazonaws.com" (10.0.1.45), port 5432 failed: FATAL: password authentication failed for user "app_user"
9 | 2024-10-10T09:15:30.890Z [ERROR] Connection attempt 1/5 failed, retrying in 5 seconds...
10 | 2024-10-10T09:15:35.123Z [ERROR] Database connection failed: connection to server at "postgres-prod.cluster-xyz.us-west-2.rds.amazonaws.com" (10.0.1.45), port 5432 failed: FATAL: password authentication failed for user "app_user"
11 | 2024-10-10T09:15:35.456Z [ERROR] Connection attempt 2/5 failed, retrying in 10 seconds...
12 | 2024-10-10T09:15:45.789Z [ERROR] Database connection failed: connection to server at "postgres-prod.cluster-xyz.us-west-2.rds.amazonaws.com" (10.0.1.45), port 5432 failed: FATAL: password authentication failed for user "app_user"
13 | 2024-10-10T09:15:45.012Z [ERROR] Connection attempt 3/5 failed, retrying in 15 seconds...
14 | 2024-10-10T09:16:00.345Z [ERROR] Database connection failed: connection to server at "postgres-prod.cluster-xyz.us-west-2.rds.amazonaws.com" (10.0.1.45), port 5432 failed: FATAL: password authentication failed for user "app_user"
15 | 2024-10-10T09:16:00.678Z [ERROR] Connection attempt 4/5 failed, retrying in 20 seconds...
16 | 2024-10-10T09:16:20.901Z [ERROR] Database connection failed: connection to server at "postgres-prod.cluster-xyz.us-west-2.rds.amazonaws.com" (10.0.1.45), port 5432 failed: FATAL: password authentication failed for user "app_user"
17 | 2024-10-10T09:16:21.234Z [CRITICAL] All database connection attempts failed (5/5)
18 | 2024-10-10T09:16:21.567Z [ERROR] Unable to initialize application: database connection pool creation failed
19 | 2024-10-10T09:16:21.890Z [ERROR] Health check endpoint returning 503 Service Unavailable
20 | 2024-10-10T09:16:22.123Z [WARNING] Load balancer detecting unhealthy instances
21 | 2024-10-10T09:16:22.456Z [ERROR] Application startup failed - exiting with code 1
22 | 2024-10-10T09:16:22.789Z [INFO] Cleanup initiated
23 | 2024-10-10T09:16:23.012Z [INFO] Closing existing connections... 
24 | 2024-10-10T09:16:23.345Z [INFO] Application shutdown complete -------------------------------------------------------------------------------- /src/beginner/conflict_detector.py: -------------------------------------------------------------------------------- 1 | """ 2 | This script creates an agent that can detect conflicts in a text using CrewAI. 3 | """ 4 | 5 | import os 6 | 7 | from crewai import Agent, Crew, Task 8 | from crewai.llm import LLM 9 | from dotenv import load_dotenv 10 | 11 | load_dotenv() 12 | 13 | # Create an instance of OpenAI's LLM 14 | os.environ["OPENAI_API_KEY"] = os.getenv("OPENAI_API_KEY") 15 | 16 | 17 | def create_agent() -> Agent: 18 | """ 19 | Create an agent that can detect conflicts in a text 20 | Returns: 21 | An agent that can detect conflicts in a text 22 | """ 23 | 24 | llm = LLM( 25 | model="gpt-4o", 26 | temperature=0.7, 27 | max_tokens=4000, 28 | timeout=120, 29 | ) 30 | 31 | # Define your agent with OpenAI LLM 32 | agent = Agent( 33 | role="Critical Thinker", 34 | goal="Analyse the text and identify if any conflicting information within", 35 | llm=llm, 36 | backstory=( 37 | "You are a critical thinker who understands details very well and expert negotiator. \ 38 | You can identify conflicting statements, information in given text" 39 | ), 40 | ) 41 | 42 | return agent 43 | 44 | 45 | def create_task(agent: Agent) -> Task: 46 | """ 47 | Create a task that can detect conflicts in a text 48 | Args: 49 | agent: The agent that can detect conflicts in a text 50 | Returns: 51 | A task that can detect conflicts in a text 52 | """ 53 | 54 | task = Task( 55 | description=( 56 | "Find if there are any conflicting statement / information in text. \n Text : \n{text}" 57 | ), 58 | expected_output="Respond with 'conflict' / 'no conflict'", 59 | agent=agent, 60 | ) 61 | return task 62 | 63 | 64 | def create_crew(agent: Agent, task: Task) -> Crew: 65 | """ 66 | Create a crew that can detect conflicts in a text 67 | Args: 68 | agent: The agent that can detect conflicts in a text 69 | task: The task that the agent needs to complete 70 | Returns: 71 | A crew that can detect conflicts in a text 72 | """ 73 | crew = Crew( 74 | agents=[agent], 75 | tasks=[task], 76 | verbose=True, 77 | ) 78 | return crew 79 | 80 | 81 | def main(): 82 | agent = create_agent() 83 | task = create_task(agent) 84 | crew = create_crew(agent, task) 85 | 86 | Text = "After a long day at office, I was going back home in the late evening. Then, I met my friend on the way to office." 87 | # Text = "I love to travel to new places and explore the culture and food of the place." 88 | # Text = "I went to the library to study, but I forgot to bring my books and studied all of them." 89 | # Text = "She said she has never been to Paris, yet she described the Eiffel Tower in great detail from her last trip." 
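# A hypothetical batch variant (the `samples` list below is illustrative and is
# not defined in this script): collect the example texts above into a list and
# run the crew once per sample.
#
#     for sample in samples:
#         crew.kickoff(inputs={"text": sample})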
90 | 91 | crew.kickoff(inputs={"text": Text}) 92 | 93 | 94 | if __name__ == "__main__": 95 | main() 96 | -------------------------------------------------------------------------------- /src/advanced/orion_ai_coding_agent/src/code_explainer.py: -------------------------------------------------------------------------------- 1 | import ast 2 | import os 3 | import subprocess 4 | from typing import Optional 5 | 6 | from openai import OpenAI 7 | 8 | 9 | def _extract_docstring(file_path: str) -> Optional[str]: 10 | """Extract the top-level docstring from a Python file.""" 11 | try: 12 | with open(file_path, "r", encoding="utf-8") as f: 13 | tree = ast.parse(f.read()) 14 | docstring = ast.get_docstring(tree) 15 | if docstring: 16 | first_line = docstring.strip().splitlines()[0] 17 | return first_line 18 | except Exception: 19 | pass 20 | return None 21 | 22 | 23 | def _summarize_repository(repo_path: str) -> str: 24 | """Generate a simple summary of the repository's Python files.""" 25 | lines = [] 26 | for root, _, files in os.walk(repo_path): 27 | for name in files: 28 | if name.endswith(".py"): 29 | path = os.path.join(root, name) 30 | rel_path = os.path.relpath(path, repo_path) 31 | doc = _extract_docstring(path) 32 | if doc: 33 | lines.append(f"{rel_path}: {doc}") 34 | else: 35 | lines.append(f"{rel_path}: (no docstring)") 36 | if not lines: 37 | return "No Python files found in repository." 38 | return "\n".join(sorted(lines)) 39 | 40 | 41 | def explain_repository( 42 | repo_url: str, workdir: str, branch: Optional[str] = None 43 | ) -> str: 44 | """Clone a repository and generate an OpenAI-powered explanation. 45 | 46 | Returns the explanation string so it can be consumed by callers 47 | (e.g., the Discord bot) in addition to being printed to the 48 | terminal. 49 | """ 50 | repo_name = os.path.splitext(os.path.basename(repo_url.rstrip("/")))[0] 51 | repo_path = os.path.join(workdir, repo_name) 52 | 53 | if not os.path.exists(repo_path): 54 | subprocess.run(["git", "clone", repo_url, repo_path], check=True) 55 | if branch: 56 | subprocess.run(["git", "fetch"], cwd=repo_path, check=True) 57 | subprocess.run(["git", "checkout", branch], cwd=repo_path, check=True) 58 | 59 | summary = _summarize_repository(repo_path) 60 | 61 | prompt = ( 62 | "You are an expert software engineer. Given the following summary of a " 63 | "Python repository, provide a clear and detailed explanation of the " 64 | "codebase, describing the purpose of each module and how they work together.\n\n" 65 | f"Repository summary:\n{summary}" 66 | ) 67 | 68 | explanation = None 69 | try: 70 | client = OpenAI() 71 | response = client.chat.completions.create( 72 | model="gpt-5-mini", 73 | messages=[{"role": "user", "content": prompt}], 74 | ) 75 | explanation = response.choices[0].message.content.strip() 76 | except Exception as e: 77 | explanation = ( 78 | f"Failed to generate explanation via OpenAI API: {e}\n" f"\n{summary}" 79 | ) 80 | 81 | print(f"\n📚 Codebase overview for {repo_name} (branch: {branch or 'default'})") 82 | print("=" * 60) 83 | print(explanation) 84 | 85 | return explanation 86 | -------------------------------------------------------------------------------- /src/advanced/orion_ai_coding_agent/README.md: -------------------------------------------------------------------------------- 1 | # 🚀 Orion AI Agent 2 | 3 | A smart multi-agent system powered by **LangGraph** that turns prompts into GitHub PRs with intelligent workflow orchestration, parallel processing, and advanced error recovery. 
4 | 
5 | ## ✨ Key Features
6 | 
7 | - 🧠 **Intelligent Workflow Routing**: Dynamic decision-making based on repository analysis
8 | - ⚡ **Parallel Agent Execution**: Multiple agents working simultaneously for better performance
9 | - 🔄 **Smart Error Recovery**: Multiple retry strategies and alternative workflow paths
10 | - 📊 **Advanced State Management**: Sophisticated state tracking with built-in persistence
11 | - 🎯 **Context-Aware Decisions**: Adaptive workflow based on task complexity and requirements
12 | 
13 | ## 🚀 Quick Start
14 | 
15 | ### Installation
16 | 
17 | ```bash
18 | git clone <repository-url>
19 | cd orion
20 | pip install -r requirements.txt
21 | ```
22 | 
23 | ### Setup Authentication
24 | 
25 | ```bash
26 | python main.py --setup-auth
27 | ```
28 | 
29 | ### Basic Usage
30 | 
31 | ```bash
32 | # Basic code generation
33 | python main.py --prompt "Add logging functionality to the project"
34 | 
35 | # With commit and PR creation
36 | python main.py --prompt "Build REST API with comprehensive tests" --commit --create-pr
37 | 
38 | # Enable debug mode for detailed workflow information
39 | python main.py --prompt "Refactor database layer" --debug
40 | 
41 | # List available repositories
42 | python main.py --list-repos
43 | 
44 | # Discord bot integration
45 | python main.py --discord-bot
46 | 
47 | # Explain repository structure with an OpenAI summary (no changes made)
48 | python main.py --prompt "Explain" --repo-url <repo-url> --branch <branch>
49 | ```
50 | 
51 | ## 📋 Available Commands
52 | 
53 | | Command | Description |
54 | |---------|-------------|
55 | | `--prompt "text"` | Instruction for the AI agent |
56 | | `--repo-url URL` | GitHub repository URL to work with |
57 | | `--branch BRANCH` | Name of the branch to work on |
58 | | `--commit` | Commit the generated changes |
59 | | `--create-pr` | Create a pull request (auto-enables --commit) |
60 | | `--debug` | Enable detailed LangGraph workflow information |
61 | | `--strict-testing` | Abort commit if tests fail |
62 | | `--no-testing` | Disable code testing |
63 | | `--no-venv` | Disable virtual environment creation |
64 | | `--list-repos` | List repositories from your GitHub account |
65 | | `--setup-auth` | Run authentication setup |
66 | | `--discord-bot` | Start Discord bot for interactive prompts |
67 | | `--prompt "Explain"` | Use OpenAI to describe the repository and ignore commit/PR flags |
68 | 
69 | ## 🎯 LangGraph Architecture
70 | 
71 | Orion uses **LangGraph** for production-grade workflow orchestration:
72 | 
73 | - **Intelligent Routing**: Analyzes repository structure to choose optimal workflow paths
74 | - **Parallel Processing**: Runs independent tasks simultaneously (30-50% faster execution)
75 | - **Error Recovery**: Smart retry strategies with context-aware fallbacks
76 | - **State Persistence**: Resume interrupted workflows from any checkpoint
77 | - **Adaptive Workflows**: Dynamic execution based on task complexity
78 | 
79 | ### Workflow Phases
80 | 
81 | 1. **Repository Analysis** → Intelligent context gathering
82 | 2. **Code Generation** → AI-powered implementation
83 | 3. **Environment Setup** → Automated dependency management
84 | 4. **Testing & Validation** → Quality assurance with smart recovery
85 | 5. **Git Operations** → Commit and PR creation
86 | 
--------------------------------------------------------------------------------
/src/beginner/pdf_summarizer.py:
--------------------------------------------------------------------------------
1 | """
2 | This script creates an agent that can summarize a PDF file using CrewAI and the PDFSearchTool.
3 | 
4 | Resources:
5 |     https://docs.crewai.com/en/tools/file-document/pdfsearchtool#pdf-rag-search
6 | """
7 | 
8 | from crewai import Agent, Crew, Task
9 | from crewai_tools import PDFSearchTool
10 | 
11 | 
12 | def create_agent(query: str, pdf_path: str) -> Agent:
13 |     """
14 |     Create an agent that can summarize a PDF file
15 |     Args:
16 |         query: The query to search the PDF file
17 |         pdf_path: The path to the PDF file
18 |     Returns:
19 |         An agent that can summarize the PDF file
20 |     """
21 | 
22 |     pdf_search_tool = PDFSearchTool(
23 |         query=query,
24 |         pdf=pdf_path,
25 |     )
26 | 
27 |     agent = Agent(
28 |         role="PDF Summarizer",
29 |         goal="Given a PDF file, summarize the content",
30 |         backstory="You are a helpful assistant that summarizes PDF files",
31 |         verbose=True,
32 |         llm="gpt-4o-mini",
33 |         max_iter=1,
34 |         max_retry_limit=2,
35 |         respect_context_window=True,
36 |         reasoning=False,
37 |         tools=[pdf_search_tool],
38 |     )
39 | 
40 |     return agent
41 | 
42 | 
43 | def create_task(agent: Agent) -> Task:
44 |     """
45 |     Create a task that can summarize a PDF file
46 |     Args:
47 |         agent: The agent that can summarize the PDF file
48 |     Returns:
49 |         A task that can summarize the PDF file
50 |     """
51 | 
52 |     description = "You are an expert assistant specializing in reading and summarizing PDF documents. \
53 |         Your task is to carefully analyze the provided PDF file, extract the most important points, \
54 |         and generate a clear, concise, and accurate summary. Focus on identifying the main ideas, key arguments, \
55 |         and any significant findings or conclusions presented in the document. Avoid copying text verbatim; instead, \
56 |         paraphrase the content in your own words to ensure clarity and coherence. If the document contains sections, \
57 |         figures, or tables that are crucial to understanding the overall message, include brief explanations of their relevance. 
\ 58 | Your summary should be accessible to someone who has not read the original PDF, providing them with a comprehensive understanding \ 59 | of its core content and purpose" 60 | 61 | task = Task( 62 | description=description, 63 | expected_output="A summary of the PDF file", 64 | agent=agent, 65 | ) 66 | return task 67 | 68 | 69 | def create_crew(agent: Agent, task: Task) -> Crew: 70 | """ 71 | Create a crew that can summarize a PDF file 72 | Args: 73 | agent: The agent that can summarize the PDF file 74 | task: The task that the agent needs to complete 75 | Returns: 76 | A crew that can summarize the PDF file 77 | """ 78 | 79 | crew = Crew( 80 | agents=[agent], 81 | tasks=[task], 82 | verbose=True, 83 | ) 84 | 85 | return crew 86 | 87 | 88 | def main(): 89 | agent = create_agent( 90 | query="What is the main idea of the paper?", 91 | pdf_path="assets/transformers.pdf", 92 | ) 93 | task = create_task(agent) 94 | crew = create_crew(agent, task) 95 | crew.kickoff() 96 | 97 | 98 | if __name__ == "__main__": 99 | main() 100 | -------------------------------------------------------------------------------- /src/intermediate/job_search/agents.py: -------------------------------------------------------------------------------- 1 | import os 2 | 3 | from crewai import Agent 4 | from dotenv import load_dotenv 5 | from langchain_openai import ChatOpenAI 6 | from tools import search_jobs 7 | 8 | load_dotenv() 9 | 10 | # Verify API key is set from .env file 11 | if not os.getenv("OPENAI_API_KEY"): 12 | raise ValueError("Please set OPENAI_API_KEY in your .env file") 13 | 14 | llm = ChatOpenAI(model="gpt-4.1-2025-04-14") 15 | 16 | 17 | def create_agents(resume_content: str = ""): 18 | """ 19 | Create all AI agents with enhanced capabilities 20 | 21 | Args: 22 | resume_content: Parsed resume content for personalized recommendations 23 | 24 | Returns: 25 | Dictionary containing all agents 26 | """ 27 | resume_context = ( 28 | f"\n\nCandidate's Resume Content:\n{resume_content}" if resume_content else "" 29 | ) 30 | 31 | job_searcher_agent = Agent( 32 | role="Senior Job Search Specialist", 33 | goal="Find the most relevant job opportunities that match the candidate's profile and specified criteria", 34 | backstory=f"""You are an expert job search specialist with extensive experience in 35 | identifying high-quality job opportunities. You excel at understanding both job requirements 36 | and candidate profiles to find the perfect matches.{resume_context}""", 37 | verbose=True, 38 | llm=llm, 39 | allow_delegation=True, 40 | tools=[search_jobs], 41 | ) 42 | 43 | skills_development_agent = Agent( 44 | role="Personalized Skills Development Advisor", 45 | goal="Analyze job requirements against the candidate's current skills and provide targeted development recommendations", 46 | backstory=f"""You are a seasoned career development expert who specializes in 47 | identifying skill gaps by comparing job requirements with candidate backgrounds. 48 | You create personalized learning paths based on individual experience and career goals.{resume_context}""", 49 | verbose=True, 50 | allow_delegation=True, 51 | llm=llm, 52 | ) 53 | 54 | interview_preparation_coach = Agent( 55 | role="Personalized Interview Preparation Expert", 56 | goal="Prepare candidates for interviews by leveraging their specific background and experience", 57 | backstory=f"""You are a professional interview coach who creates personalized interview 58 | strategies. 
You help candidates highlight their unique strengths and address potential
59 |         weaknesses based on their specific background and target roles.{resume_context}""",
60 |         verbose=True,
61 |         allow_delegation=True,
62 |         llm=llm,
63 |     )
64 | 
65 |     career_advisor = Agent(
66 |         role="Personalized Career Strategy Advisor",
67 |         goal="Provide strategic career advice tailored to the candidate's specific background and goals",
68 |         backstory=f"""You are a senior career strategist who creates personalized career
69 |         advancement plans. You understand how to position candidates based on their unique
70 |         background, optimize their personal brand, and create targeted networking strategies.{resume_context}""",
71 |         verbose=True,
72 |         allow_delegation=True,
73 |         llm=llm,
74 |     )
75 | 
76 |     return {
77 |         "job_searcher": job_searcher_agent,
78 |         "skills_development": skills_development_agent,
79 |         "interview_prep": interview_preparation_coach,
80 |         "career_advisor": career_advisor,
81 |     }
82 | 
--------------------------------------------------------------------------------
/src/intermediate/devops/tasks.py:
--------------------------------------------------------------------------------
1 | import os
2 | 
3 | from agents import issue_investigator, log_analyzer, solution_specialist
4 | from crewai import Task
5 | 
6 | # Create output directory for task results
7 | os.makedirs("src/intermediate/devops/task_outputs", exist_ok=True)
8 | 
9 | # Task 1: Analyze log file to identify issues
10 | analyze_logs_task = Task(
11 |     description="""Analyze the log file at {log_file_path} to identify and extract specific issues.
12 | 
13 |     Your analysis should:
14 |     1. Read through the entire log file carefully
15 |     2. Identify all ERROR, CRITICAL, and WARNING messages
16 |     3. Extract the main issue or failure pattern
17 |     4. Determine the timeline of events leading to the failure
18 |     5. Identify the root cause from the log entries
19 | 
20 |     Focus on finding the primary issue that needs to be resolved.""",
21 |     expected_output="""A detailed analysis report containing:
22 |     - Primary issue description (clear and concise)
23 |     - Key error messages and codes
24 |     - Timeline of failure events
25 |     - Root cause analysis based on log evidence
26 |     - Relevant technical context and affected components""",
27 |     agent=log_analyzer,
28 |     output_file="src/intermediate/devops/task_outputs/log_analysis.md",
29 | )
30 | 
31 | # Task 2: Investigate the identified issue online
32 | investigate_issue_task = Task(
33 |     description="""Based on the log analysis findings, investigate the identified issue online.
34 | 
35 |     Your investigation should:
36 |     1. Search for similar errors and issues in documentation and forums
37 |     2. Find official documentation related to the error
38 |     3. Look for community solutions and best practices
39 |     4. Identify common causes and scenarios for this type of issue
40 |     5. 
Gather information about proven fixes and workarounds 41 | 42 | Focus on finding reliable, well-documented solutions.""", 43 | expected_output="""A comprehensive investigation report including: 44 | - Similar issues found online with references 45 | - Official documentation links and explanations 46 | - Common causes ranked by likelihood 47 | - Community-verified solutions and workarounds 48 | - Best practices to prevent similar issues""", 49 | agent=issue_investigator, 50 | context=[analyze_logs_task], 51 | output_file="src/intermediate/devops/task_outputs/investigation_report.md", 52 | ) 53 | 54 | # Task 3: Provide actionable solution 55 | provide_solution_task = Task( 56 | description="""Based on the log analysis and investigation findings, provide a complete solution. 57 | 58 | Your solution should: 59 | 1. Create a step-by-step remediation plan 60 | 2. Include specific commands and configurations 61 | 3. Provide verification steps to confirm the fix 62 | 4. Suggest monitoring and prevention measures 63 | 5. Include rollback procedures if needed 64 | 65 | Ensure all solutions are practical and well-tested.""", 66 | expected_output="""A detailed remediation plan with: 67 | - Primary solution with step-by-step commands 68 | - Configuration changes required (if any) 69 | - Verification and testing procedures 70 | - Alternative solutions (if applicable) 71 | - Prevention strategies and monitoring recommendations 72 | - Rollback plan in case of issues 73 | - Links to official documentation and references""", 74 | agent=solution_specialist, 75 | context=[analyze_logs_task, investigate_issue_task], 76 | output_file="src/intermediate/devops/task_outputs/solution_plan.md", 77 | ) 78 | -------------------------------------------------------------------------------- /src/intermediate/investment_advisor/start_server.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python3 2 | """ 3 | Startup script for the Financial Analysis API server. 4 | This script provides an easy way to start the FastAPI server with proper configuration. 
5 | """ 6 | 7 | import os 8 | import subprocess 9 | import sys 10 | from pathlib import Path 11 | 12 | from dotenv import load_dotenv 13 | 14 | load_dotenv() 15 | 16 | 17 | def check_requirements(): 18 | """Check if required packages are installed.""" 19 | try: 20 | import crewai 21 | import fastapi 22 | import uvicorn 23 | 24 | print("✅ All required packages are installed") 25 | return True 26 | except ImportError as e: 27 | print(f"❌ Missing required package: {e}") 28 | print("Please install the package") 29 | return False 30 | 31 | 32 | def check_env_vars(): 33 | """Check if required environment variables are set.""" 34 | required_vars = ["OPENAI_API_KEY", "EXA_API_KEY"] 35 | missing_vars = [] 36 | 37 | for var in required_vars: 38 | if not os.getenv(var): 39 | missing_vars.append(var) 40 | 41 | if missing_vars: 42 | print(f"❌ Missing environment variables: {', '.join(missing_vars)}") 43 | print("Please set these in your .env file or environment") 44 | return False 45 | 46 | print("✅ All required environment variables are set") 47 | return True 48 | 49 | 50 | def start_server(host="0.0.0.0", port=8000, reload=True): 51 | """Start the FastAPI server.""" 52 | print(f"🚀 Starting Financial Analysis API server on {host}:{port}") 53 | print(f"📖 API documentation will be available at: http://{host}:{port}/docs") 54 | print(f"🔄 Auto-reload: {'enabled' if reload else 'disabled'}") 55 | 56 | try: 57 | import uvicorn 58 | 59 | uvicorn.run( 60 | "api_server:app", host=host, port=port, reload=reload, log_level="info" 61 | ) 62 | except KeyboardInterrupt: 63 | print("\n👋 Server stopped by user") 64 | except Exception as e: 65 | print(f"❌ Error starting server: {e}") 66 | 67 | 68 | def main(): 69 | """Main function to start the server with checks.""" 70 | print("🔍 Checking system requirements...") 71 | 72 | # Check if we're in the right directory 73 | if not Path("src/intermediate/investment_advisor/api_server.py").exists(): 74 | print( 75 | "❌ api_server.py not found. Please run this script from the root directory" 76 | ) 77 | sys.exit(1) 78 | 79 | # Check requirements 80 | if not check_requirements(): 81 | sys.exit(1) 82 | 83 | # Check environment variables 84 | if not check_env_vars(): 85 | print("\n💡 Tip: Create a .env file with your API keys:") 86 | print("OPENAI_API_KEY=your_openai_key_here") 87 | print("EXA_API_KEY=your_exa_key_here") 88 | sys.exit(1) 89 | 90 | # Parse command line arguments 91 | import argparse 92 | 93 | parser = argparse.ArgumentParser( 94 | description="Start the Financial Analysis API server" 95 | ) 96 | parser.add_argument( 97 | "--host", default="0.0.0.0", help="Host to bind to (default: 0.0.0.0)" 98 | ) 99 | parser.add_argument( 100 | "--port", type=int, default=8000, help="Port to bind to (default: 8000)" 101 | ) 102 | parser.add_argument("--no-reload", action="store_true", help="Disable auto-reload") 103 | 104 | args = parser.parse_args() 105 | 106 | # Start the server 107 | start_server(host=args.host, port=args.port, reload=not args.no_reload) 108 | 109 | 110 | if __name__ == "__main__": 111 | main() 112 | -------------------------------------------------------------------------------- /src/beginner/web_scraper.py: -------------------------------------------------------------------------------- 1 | """ 2 | This script creates an agent that can scrape a website using CrewAI the ScrapeWebsiteTool. 
3 | 4 | Resources: 5 | https://docs.crewai.com/en/tools/web-scraping/scrapewebsitetool 6 | """ 7 | 8 | import os 9 | from pathlib import Path 10 | 11 | from crewai import Agent, Crew, Task 12 | from crewai_tools import ScrapeWebsiteTool 13 | from dotenv import load_dotenv 14 | 15 | load_dotenv() 16 | 17 | os.environ["OPENAI_API_KEY"] = os.getenv("OPENAI_API_KEY") 18 | 19 | # Get absolute path for memory directory 20 | memory_dir = Path(__file__).parent / "crewai_memory" 21 | memory_dir.mkdir(exist_ok=True) 22 | os.environ["CREWAI_STORAGE_DIR"] = str(memory_dir.absolute()) 23 | 24 | 25 | def create_agent(website_url: str) -> Agent: 26 | """ 27 | Create an agent that can scrape a website 28 | Args: 29 | website_url: The URL of the website to scrape 30 | Returns: 31 | An agent that can scrape the website 32 | """ 33 | 34 | scrape_website_tool = ScrapeWebsiteTool(website_url=website_url) 35 | 36 | agent = Agent( 37 | role="Website Scraper", 38 | goal="Given a website, scrape the content", 39 | backstory="You are a helpful assistant that scrapes websites", 40 | verbose=True, 41 | llm="gpt-4o-mini", 42 | max_iter=2, 43 | max_retry_limit=2, 44 | respect_context_window=True, 45 | reasoning=False, 46 | tools=[scrape_website_tool], 47 | memory=True, 48 | ) 49 | 50 | return agent 51 | 52 | 53 | def create_task(agent: Agent) -> Task: 54 | """ 55 | Create a task that can scrape a website 56 | Args: 57 | agent: The agent that can scrape the website 58 | Returns: 59 | A task that can scrape the website 60 | """ 61 | 62 | description = "You are an expert assistant specializing in web data extraction and analysis. \ 63 | Your task is to thoroughly scrape the provided website, identify and extract the most relevant and valuable information, \ 64 | and present a clear, concise, and well-organized summary of the website's content. Focus on capturing the main topics, key sections, \ 65 | important data points, and any notable features or insights presented on the site. Avoid copying large blocks of text verbatim; instead, \ 66 | paraphrase and synthesize the information to ensure clarity and coherence. If the website contains structured data such as tables, lists, \ 67 | or infographics, summarize their contents and explain their significance. Your summary should be accessible to someone who has not visited \ 68 | the website, providing them with a comprehensive understanding of its purpose, structure, and core content." 
69 | 70 | task = Task( 71 | description=description, 72 | expected_output="A summary of the website's content", 73 | agent=agent, 74 | ) 75 | return task 76 | 77 | 78 | def create_crew(agent: Agent, task: Task) -> Crew: 79 | """ 80 | Create a crew that can scrape a website 81 | Args: 82 | agent: The agent that can scrape the website 83 | task: The task that the agent needs to complete 84 | Returns: 85 | A crew that can scrape the website 86 | """ 87 | 88 | crew = Crew( 89 | agents=[agent], 90 | tasks=[task], 91 | verbose=True, 92 | memory=True, 93 | ) 94 | 95 | return crew 96 | 97 | 98 | def main(): 99 | agent = create_agent( 100 | website_url="https://www.crewai.com/", 101 | ) 102 | task = create_task(agent) 103 | crew = create_crew(agent, task) 104 | crew.kickoff() 105 | 106 | 107 | if __name__ == "__main__": 108 | main() 109 | -------------------------------------------------------------------------------- /src/intermediate/investment_advisor/agents.py: -------------------------------------------------------------------------------- 1 | import os 2 | 3 | from crewai import Agent 4 | from crewai.llm import LLM 5 | from dotenv import load_dotenv 6 | from tools import ( 7 | exa_search_tool, 8 | get_company_info, 9 | get_current_stock_price, 10 | get_income_statements, 11 | ) 12 | 13 | load_dotenv() 14 | 15 | os.environ["OPENAI_API_KEY"] = os.getenv("OPENAI_API_KEY") 16 | 17 | llm = LLM( 18 | model="gpt-4.1-2025-04-14", 19 | temperature=0.7, 20 | max_tokens=4000, 21 | timeout=120, # 2 minutes timeout 22 | ) 23 | # Agent for gathering company news and information 24 | news_info_explorer = Agent( 25 | role="News and Info Researcher", 26 | goal="Gather and provide the latest news and information about a company from the internet", 27 | llm=llm, 28 | verbose=True, 29 | backstory=( 30 | "You are an expert researcher, who can gather detailed information about a company." 31 | ), 32 | tools=[exa_search_tool], 33 | cache=True, 34 | max_iter=5, 35 | max_rpm=15, # Rate limiting: max 15 requests per minute 36 | memory=True, # Enable memory for learning from previous searches 37 | max_execution_time=600, # 10 minutes max execution time 38 | respect_context_window=True, # Respect model's context window 39 | ) 40 | 41 | # Agent for gathering financial data 42 | data_explorer = Agent( 43 | role="Data Researcher", 44 | goal="Gather and provide financial data and company information about a stock", 45 | llm=llm, 46 | verbose=True, 47 | backstory=( 48 | "You are an expert researcher, who can gather detailed information about a company or stock. " 49 | 'When using tools, use the stock symbol and add a suffix ".NS" to it. try with and without the suffix and see what works' 50 | ), 51 | tools=[get_company_info, get_income_statements], 52 | cache=True, 53 | max_iter=5, 54 | max_rpm=12, # Rate limiting: max 12 requests per minute 55 | memory=True, # Enable memory for learning from previous data searches 56 | max_execution_time=450, # 7.5 minutes max execution time 57 | respect_context_window=True, # Respect model's context window 58 | ) 59 | 60 | # Agent for analyzing data 61 | analyst = Agent( 62 | role="Data Analyst", 63 | goal="Consolidate financial data, stock information, and provide a summary", 64 | llm=llm, 65 | verbose=True, 66 | backstory=( 67 | "You are an expert in analyzing financial data, stock/company-related current information, and " 68 | "making a comprehensive analysis. Use Indian units for numbers (lakh, crore)." 
69 | ), 70 | max_iter=4, 71 | max_rpm=10, # Rate limiting: max 10 requests per minute 72 | memory=True, # Enable memory for learning from previous analyses 73 | max_execution_time=300, # 5 minutes max execution time 74 | respect_context_window=True, # Respect model's context window 75 | ) 76 | 77 | # Agent for financial recommendations 78 | fin_expert = Agent( 79 | role="Financial Expert", 80 | goal="Considering financial analysis of a stock, make investment recommendations", 81 | llm=llm, 82 | verbose=True, 83 | tools=[get_current_stock_price], 84 | max_iter=5, 85 | max_rpm=8, # Conservative rate limit for recommendation generation 86 | memory=True, # Remember successful recommendations for similar stocks 87 | max_execution_time=360, # 6 minutes max execution time 88 | respect_context_window=True, # Respect model's context window 89 | backstory=( 90 | "You are an expert financial advisor who can provide investment recommendations. " 91 | "Consider the financial analysis, current information about the company, current stock price, " 92 | "and make recommendations about whether to buy/hold/sell a stock along with reasons." 93 | 'When using tools, try with and without the suffix ".NS" to the stock symbol and see what works.' 94 | ), 95 | ) 96 | -------------------------------------------------------------------------------- /src/intermediate/devops/agents.py: -------------------------------------------------------------------------------- 1 | import os 2 | 3 | from crewai import Agent 4 | from crewai.llm import LLM 5 | from dotenv import load_dotenv 6 | 7 | load_dotenv() 8 | 9 | from tools import exa_search_tool, log_reader_tool 10 | 11 | os.environ["OPENAI_API_KEY"] = os.getenv("OPENAI_API_KEY") 12 | 13 | llm = LLM( 14 | model="gpt-4o", 15 | temperature=0.1, 16 | max_tokens=4000, 17 | timeout=120, # 2 minutes timeout 18 | ) 19 | 20 | 21 | def system_template_devops(): 22 | """Custom system template for DevOps agents""" 23 | return """You are an expert DevOps engineer with extensive experience in: 24 | - Infrastructure automation and orchestration 25 | - Container technologies (Docker, Kubernetes) 26 | - CI/CD pipelines and deployment strategies 27 | - Monitoring, logging, and observability 28 | - Cloud platforms (AWS, GCP, Azure) 29 | - Security best practices and compliance 30 | 31 | Always provide: 32 | 1. Detailed technical analysis 33 | 2. Step-by-step solutions 34 | 3. Best practices and recommendations 35 | 4. Risk assessment and mitigation strategies 36 | 5. References to official documentation 37 | 38 | Focus on practical, production-ready solutions.""" 39 | 40 | 41 | # Agent 1: Log Analyzer - Analyzes log files to identify issues 42 | log_analyzer = Agent( 43 | role="DevOps Log Analyzer", 44 | goal="Analyze log files to identify and extract specific issues, errors, and failure patterns", 45 | llm=llm, 46 | backstory="""You are a senior DevOps engineer with 10 years of experience in 47 | analyzing production logs and identifying critical issues. 
You excel at parsing 48 | through complex log files, identifying error patterns, extracting relevant error 49 | messages, and determining the root cause of failures from log data.""", 50 | tools=[log_reader_tool], 51 | verbose=True, 52 | max_iter=3, 53 | max_rpm=10, # Rate limiting: max 10 requests per minute 54 | memory=True, # Enable memory for learning from previous analyses 55 | system_template=system_template_devops(), 56 | max_execution_time=300, # 5 minutes max execution time 57 | respect_context_window=True, # Respect model's context window 58 | ) 59 | 60 | # Agent 2: Issue Investigator - Searches for solutions online 61 | issue_investigator = Agent( 62 | role="DevOps Issue Investigator", 63 | goal="Investigate identified issues by searching documentation, forums, and known solutions online", 64 | llm=llm, 65 | backstory="""You are a DevOps troubleshooting specialist who excels at quickly 66 | finding solutions to technical problems. You know how to search effectively for 67 | similar issues, identify reliable sources, and gather comprehensive information 68 | about error patterns and their solutions.""", 69 | tools=[exa_search_tool], 70 | verbose=True, 71 | max_iter=5, 72 | max_rpm=15, # Higher rate limit for search operations 73 | memory=True, # Remember previous search patterns and results 74 | system_template=system_template_devops(), 75 | max_execution_time=600, # 10 minutes for thorough investigation 76 | respect_context_window=True, 77 | ) 78 | 79 | # Agent 3: Solution Specialist - Provides actionable solutions 80 | solution_specialist = Agent( 81 | role="DevOps Solution Specialist", 82 | goal="Provide clear, actionable solutions with step-by-step instructions based on investigation findings", 83 | llm=llm, 84 | backstory="""You are a DevOps solutions architect who specializes in creating 85 | reliable, step-by-step remediation plans for infrastructure and deployment issues. 86 | You always provide official documentation references, tested solutions, and 87 | preventive measures to avoid future occurrences.""", 88 | verbose=True, 89 | max_iter=4, 90 | max_rpm=8, # Conservative rate limit for solution generation 91 | memory=True, # Remember successful solutions for similar issues 92 | system_template=system_template_devops(), 93 | max_execution_time=450, # 7.5 minutes for comprehensive solutions 94 | respect_context_window=True, 95 | ) 96 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | # Vector: AI Agents Lab 2 | 3 | Vector is an AI Agents Lab developed for people willing to learn how to build AI Agents. 4 | 5 | --- 6 | 7 | ## Sponsored By: 8 |

9 | 10 | Outskill 11 | 12 | 13 | DeepStation 14 | 15 |

16 | 
17 | ---
18 | 
19 | ## Project Overview
20 | 
21 | 
22 | The projects in Vector are divided into 3 categories based on their complexity -
23 | 1. `src/beginner` -
24 |    - Entry Level Projects meant for people just starting out with Agents.
25 |    - All agents are under 100 lines of code.
26 |    - Single Agent, Single Tool systems.
27 | 
28 | 2. `src/intermediate` -
29 |    - These projects are aimed at people well versed with the basics of AI Agents.
30 |    - Multi Agent, Multi Tool projects.
31 | 
32 | 3. `src/advanced` -
33 |    - End to End projects utilizing multiple concepts within AI Agents.
34 |    - They follow best practices and are aimed at building products.
35 | 
36 | 
37 | ## Project Setup
38 | 
39 | This project uses [uv](https://docs.astral.sh/uv/) as a package manager for fast, reliable dependency management.
40 | 
41 | ### Prerequisites
42 | - Python 3.11 or higher
43 | - pip (for installing uv)
44 | 
45 | ### Setup Steps
46 | 
47 | 1. **Install `uv`**
48 |    Follow instructions at the [installation](https://docs.astral.sh/uv/getting-started/installation/) page or run:
49 |    ```bash
50 |    pip install uv
51 |    ```
52 | 
53 | 2. **Clone the repository**
54 |    ```bash
55 |    git clone https://github.com/ishandutta0098/vector-ai-agents-lab.git
56 |    cd vector-ai-agents-lab
57 |    ```
58 | 
59 | 3. **Install dependencies**
60 |    ```bash
61 |    uv sync
62 |    ```
63 |    This command:
64 |    - Creates a virtual environment in `.venv/`
65 |    - Installs all required packages from `pyproject.toml`
66 |    - Locks dependencies for reproducible builds
67 | 
68 | 4. **Run a script**
69 |    ```bash
70 |    uv run src/beginner/pdf_summarizer.py
71 |    ```
72 | 
73 | ## Usage
74 | 
75 | ### Running Scripts
76 | Use `uv run` to execute any Python script in the project:
77 | ```bash
78 | uv run <path-to-script>.py
79 | ```
80 | 
81 | The `uv run` command automatically:
82 | - Uses the project's virtual environment
83 | - Ensures all dependencies are available
84 | - Handles Python version compatibility
85 | 
86 | ### Adding Dependencies
87 | ```bash
88 | uv add <package-name>
89 | ```
90 | 
91 | ### Removing Dependencies
92 | ```bash
93 | uv remove <package-name>
94 | ```
95 | 
96 | ## Notes
97 | 
98 | - Dependencies are managed in `pyproject.toml`
99 | - The virtual environment is created in `.venv/` (git-ignored)
100 | - `uv.lock` ensures reproducible installations across different machines
101 | - No need to manually activate the virtual environment when using `uv run`
102 | - The intermediate projects also read API keys from a `.env` file (see the Environment Variables example below the License section)
103 | 
104 | ## Troubleshooting
105 | 
106 | **Issue**: Import errors when running scripts
107 | **Solution**: Make sure you've run `uv sync` to install all dependencies
108 | 
109 | **Issue**: Python version mismatch
110 | **Solution**: Ensure you have Python 3.11+ installed: `python --version`
111 | 
112 | **Issue**: `uv` command not found
113 | **Solution**: Reinstall uv with `pip install uv` and ensure pip's bin directory is in your PATH
114 | 
115 | ## Contributing
116 | Contributions to Vector are welcome!
117 | 
118 | If you have a project idea, raise an Issue describing it along with how you plan to implement it.
119 | Please don't raise a PR until your project has been approved.
120 | 
121 | ## License
122 | This project is under the MIT License.
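123 | 
124 | ## Environment Variables
125 | The intermediate projects load API keys from a `.env` file in the project directory via `python-dotenv`. A minimal example (the values are placeholders; each project's `tools.py` and `main.py` list the exact variables it reads):
126 | 
127 | ```bash
128 | OPENAI_API_KEY=your_openai_api_key_here
129 | ADZUNA_APP_ID=your_adzuna_app_id_here
130 | ADZUNA_API_KEY=your_adzuna_api_key_here
131 | EXA_API_KEY=your_exa_api_key_here
132 | ```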
133 | 
134 | ## Contact
135 | Email - duttaishan0098@gmail.com
136 | [LinkedIn](https://www.linkedin.com/in/ishandutta0098/)
137 | [X](https://x.com/ishandutta0098)
138 | [Instagram](https://www.instagram.com/ishandutta.ai)
139 | 
--------------------------------------------------------------------------------
/src/intermediate/investment_advisor/tools.py:
--------------------------------------------------------------------------------
1 | import json
2 | import os
3 | import time
4 | 
5 | import yfinance as yf
6 | from crewai.tools import tool
7 | from crewai_tools import EXASearchTool
8 | from curl_cffi import requests
9 | from dotenv import load_dotenv
10 | 
11 | load_dotenv()
12 | 
13 | session = requests.Session(impersonate="chrome")
14 | 
15 | 
16 | os.environ["EXA_API_KEY"] = os.getenv("EXA_API_KEY")
17 | 
18 | try:
19 |     exa_search_tool = EXASearchTool()
20 | except Exception as e:
21 |     print(f"EXA Search Tool initialization failed: {e}")
22 |     # Fallback: try with empty lists for domains
23 |     try:
24 |         exa_search_tool = EXASearchTool(include_domains=[], exclude_domains=[])
25 |     except Exception as e2:
26 |         print(f"Fallback EXA Search Tool initialization also failed: {e2}")
27 |         # Both attempts failed; re-raise instead of retrying the same constructor a third time
28 |         raise
29 | 
30 | 
31 | # Define Finance Tools
32 | @tool("Get current stock price")
33 | def get_current_stock_price(symbol: str) -> str:
34 |     """Use this function to get the current stock price for a given symbol.
35 | 
36 |     Args:
37 |         symbol (str): The stock symbol.
38 | 
39 |     Returns:
40 |         str: The current stock price or error message.
41 |     """
42 |     try:
43 |         time.sleep(0.5)
44 |         stock = yf.Ticker(symbol, session=session)
45 | 
46 |         current_price = stock.info.get(
47 |             "regularMarketPrice", stock.info.get("currentPrice")
48 |         )
49 |         return (
50 |             f"{current_price:.2f}"
51 |             if current_price
52 |             else f"Could not fetch current price for {symbol}"
53 |         )
54 |     except Exception as e:
55 |         return f"Error fetching current price for {symbol}: {e}"
56 | 
57 | 
58 | @tool
59 | def get_company_info(symbol: str):
60 |     """Use this function to get company information and current financial snapshot for a given stock symbol.
61 | 
62 |     Args:
63 |         symbol (str): The stock symbol.
64 | 
65 |     Returns:
66 |         JSON containing company profile and current financial snapshot.
67 | """ 68 | try: 69 | company_info_full = yf.Ticker(symbol, session=session).info 70 | if company_info_full is None: 71 | return f"Could not fetch company info for {symbol}" 72 | 73 | company_info_cleaned = { 74 | "Name": company_info_full.get("shortName"), 75 | "Symbol": company_info_full.get("symbol"), 76 | "Current Stock Price": f"{company_info_full.get('regularMarketPrice', company_info_full.get('currentPrice'))} {company_info_full.get('currency', 'USD')}", 77 | "Market Cap": f"{company_info_full.get('marketCap', company_info_full.get('enterpriseValue'))} {company_info_full.get('currency', 'USD')}", 78 | "Sector": company_info_full.get("sector"), 79 | "Industry": company_info_full.get("industry"), 80 | "City": company_info_full.get("city"), 81 | "Country": company_info_full.get("country"), 82 | "EPS": company_info_full.get("trailingEps"), 83 | "P/E Ratio": company_info_full.get("trailingPE"), 84 | "52 Week Low": company_info_full.get("fiftyTwoWeekLow"), 85 | "52 Week High": company_info_full.get("fiftyTwoWeekHigh"), 86 | "50 Day Average": company_info_full.get("fiftyDayAverage"), 87 | "200 Day Average": company_info_full.get("twoHundredDayAverage"), 88 | "Employees": company_info_full.get("fullTimeEmployees"), 89 | "Total Cash": company_info_full.get("totalCash"), 90 | "Free Cash flow": company_info_full.get("freeCashflow"), 91 | "Operating Cash flow": company_info_full.get("operatingCashflow"), 92 | "EBITDA": company_info_full.get("ebitda"), 93 | "Revenue Growth": company_info_full.get("revenueGrowth"), 94 | "Gross Margins": company_info_full.get("grossMargins"), 95 | "Ebitda Margins": company_info_full.get("ebitdaMargins"), 96 | } 97 | return json.dumps(company_info_cleaned) 98 | except Exception as e: 99 | return f"Error fetching company profile for {symbol}: {e}" 100 | 101 | 102 | @tool 103 | def get_income_statements(symbol: str): 104 | """Use this function to get income statements for a given stock symbol. 105 | 106 | Args: 107 | symbol (str): The stock symbol. 108 | 109 | Returns: 110 | JSON containing income statements or an empty dictionary. 111 | """ 112 | try: 113 | stock = yf.Ticker(symbol, session=session) 114 | financials = stock.financials 115 | return financials.to_json(orient="index") 116 | except Exception as e: 117 | return f"Error fetching income statements for {symbol}: {e}" 118 | -------------------------------------------------------------------------------- /src/intermediate/job_search/tasks.py: -------------------------------------------------------------------------------- 1 | from crewai import Task 2 | from crewai.tasks.task_output import TaskOutput 3 | from tools import search_jobs 4 | 5 | 6 | def callback_function(output: TaskOutput): 7 | """Save task output to file""" 8 | try: 9 | with open("task_output.txt", "a", encoding="utf-8") as file: 10 | file.write(f"=== {output.agent} - {output.description} ===\n") 11 | file.write(f"{output.result}\n\n") 12 | print(f"✅ Result saved to task_output.txt") 13 | except Exception as e: 14 | print(f"❌ Error saving output: {e}") 15 | 16 | 17 | def create_tasks(agents: dict, resume_content: str = ""): 18 | """ 19 | Create all tasks for the agents with resume context 20 | 21 | Args: 22 | agents: Dictionary containing all agents 23 | resume_content: Parsed resume content for personalized recommendations 24 | 25 | Returns: 26 | Dictionary containing all tasks 27 | """ 28 | 29 | job_search_task = Task( 30 | description="""Search for current job openings based on the specified role and location. 
31 |     Use the Job Search tool with the following parameters:
32 |     - Find 5-10 relevant positions
33 |     - Focus on quality over quantity
34 |     - Include detailed job descriptions and requirements
35 |     - Highlight key qualifications and skills needed
36 | 
37 |     Format your search as JSON: {'role': '<role>', 'location': '<location>', 'num_results': <number>}""",
38 |         expected_output="A formatted list of job openings with titles, companies, locations, salaries, descriptions, and URLs",
39 |         agent=agents["job_searcher"],
40 |         tools=[search_jobs],
41 |         callback=callback_function,
42 |     )
43 | 
44 |     skills_analysis_task = Task(
45 |         description=f"""Analyze the job openings and create a PERSONALIZED skills assessment:
46 | 
47 |     1. Compare the candidate's current skills (from resume) with job requirements
48 |     2. Identify SPECIFIC skill gaps and strengths
49 |     3. Categorize skills as: Already Have, Need to Improve, Need to Learn
50 |     4. Provide targeted recommendations including:
51 |        - Specific courses/certifications for identified gaps
52 |        - How to better highlight existing skills
53 |        - Timeline for skill development based on current level
54 |        - Which skills to prioritize for maximum impact
55 |     5. Create a personalized learning roadmap
56 | 
57 |     {f'Use the candidate resume content for context: {resume_content}' if resume_content else 'No resume provided - provide general recommendations.'}""",
58 |         expected_output="A personalized skills gap analysis with specific recommendations tailored to the candidate's background",
59 |         agent=agents["skills_development"],
60 |         context=[job_search_task],
61 |         callback=callback_function,
62 |     )
63 | 
64 |     interview_prep_task = Task(
65 |         description=f"""Create a PERSONALIZED interview preparation strategy:
66 | 
67 |     1. Generate role-specific questions tailored to the candidate's background
68 |     2. Create STAR method examples using the candidate's actual experience
69 |     3. Identify potential interview challenges based on resume gaps or career changes
70 |     4. Provide specific talking points to highlight candidate's unique strengths
71 |     5. Address potential concerns employers might have
72 |     6. Create customized salary negotiation strategy based on experience level
73 |     7. Develop elevator pitch based on candidate's background
74 | 
75 |     {f'Base recommendations on candidate resume: {resume_content}' if resume_content else 'Provide general interview preparation advice.'}""",
76 |         expected_output="A personalized interview preparation guide with customized questions, answers, and strategies",
77 |         agent=agents["interview_prep"],
78 |         context=[job_search_task, skills_analysis_task],
79 |         callback=callback_function,
80 |     )
81 | 
82 |     career_strategy_task = Task(
83 |         description=f"""Develop a PERSONALIZED career strategy plan:
84 | 
85 |     1. Analyze current resume and suggest specific improvements for target roles
86 |     2. Create LinkedIn optimization strategy based on existing profile content
87 |     3. Identify networking opportunities relevant to candidate's industry/background
88 |     4. Suggest specific portfolio projects based on current skills and target roles
89 |     5. Create personal branding strategy that highlights unique value proposition
90 |     6. Develop application strategy tailored to candidate's experience level
91 |     7. Provide specific action items with timeline for career advancement
92 | 
93 |     {f'Base all recommendations on candidate background: {resume_content}' if resume_content else 'Provide general career strategy advice.'}""",
94 |         expected_output="A personalized career strategy plan with specific, actionable recommendations",
95 |         agent=agents["career_advisor"],
96 |         context=[job_search_task, skills_analysis_task],
97 |         callback=callback_function,
98 |     )
99 | 
100 |     return {
101 |         "job_search": job_search_task,
102 |         "skills_analysis": skills_analysis_task,
103 |         "interview_prep": interview_prep_task,
104 |         "career_strategy": career_strategy_task,
105 |     }
106 | 
--------------------------------------------------------------------------------
/src/intermediate/job_search/tools.py:
--------------------------------------------------------------------------------
1 | import json
2 | import os
3 | 
4 | import pdfplumber
5 | import PyPDF2
6 | import requests
7 | from crewai.tools import tool
8 | 
9 | 
10 | @tool("Resume Parser Tool")
11 | def parse_resume(file_path: str) -> str:
12 |     """
13 |     Parse resume PDF and extract text content.
14 | 
15 |     Args:
16 |         file_path: Path to the resume PDF file
17 | 
18 |     Returns:
19 |         Extracted text content from the resume
20 |     """
21 |     if not os.path.exists(file_path):
22 |         return (
23 |             f"Error: Resume file not found at {file_path}. Please check the file path."
24 |         )
25 | 
26 |     try:
27 |         # Try using pdfplumber first (better text extraction)
28 |         with pdfplumber.open(file_path) as pdf:
29 |             text = ""
30 |             for page in pdf.pages:
31 |                 page_text = page.extract_text()
32 |                 if page_text:
33 |                     text += page_text + "\n"
34 | 
35 |             if text.strip():
36 |                 return f"✅ Resume parsed successfully!\n\nResume Content:\n{text}"
37 |     except Exception as e:
38 |         print(f"pdfplumber failed: {e}, trying PyPDF2...")
39 | 
40 |     try:
41 |         # Fallback to PyPDF2
42 |         with open(file_path, "rb") as file:
43 |             pdf_reader = PyPDF2.PdfReader(file)
44 |             text = ""
45 |             for page in pdf_reader.pages:
46 |                 text += page.extract_text() + "\n"
47 | 
48 |             if text.strip():
49 |                 return f"✅ Resume parsed successfully!\n\nResume Content:\n{text}"
50 |             else:
51 |                 return "Error: Could not extract text from PDF. The file might be image-based or corrupted."
52 | 
53 |     except Exception as e:
54 |         return f"Error: Failed to parse resume PDF. {str(e)}"
55 | 
56 | 
57 | @tool("Job Search Tool")
58 | def search_jobs(input_json: str) -> str:
59 |     """
60 |     Search for job listings using the Adzuna API.
61 | 
62 |     Args:
63 |         input_json: JSON string with schema {'role': '<role>', 'location': '<location>', 'num_results': <number>}
64 | 
65 |     Returns:
66 |         Formatted string of job listings
67 |     """
68 |     try:
69 |         # Check if required environment variables are loaded
70 |         required_vars = ["OPENAI_API_KEY", "ADZUNA_APP_ID", "ADZUNA_API_KEY"]
71 |         missing_vars = [var for var in required_vars if not os.getenv(var)]
72 | 
73 |         if missing_vars:
74 |             error_msg = "❌ Missing required environment variables in .env file:\n"
75 |             for var in missing_vars:
76 |                 error_msg += f" - {var}\n"
77 |             error_msg += "\n📝 Create a .env file in your project directory with:\n"
78 |             error_msg += "OPENAI_API_KEY=your_openai_api_key_here\n"
79 |             error_msg += "ADZUNA_APP_ID=your_adzuna_app_id_here\n"
80 |             error_msg += "ADZUNA_API_KEY=your_adzuna_api_key_here"
81 |             return error_msg
82 | 
83 |         input_data = json.loads(input_json)
84 |         role = input_data["role"]
85 |         location = input_data["location"]
86 |         num_results = input_data.get("num_results", 5)
87 |     except (json.JSONDecodeError, KeyError):
88 |         return """Error: The tool accepts input in JSON format with the
89 |         following schema: {'role': '<role>', 'location': '<location>', 'num_results': <number>}.
90 |         Ensure to format the input accordingly."""
91 | 
92 |     app_id = os.getenv("ADZUNA_APP_ID")
93 |     api_key = os.getenv("ADZUNA_API_KEY")
94 | 
95 |     if not app_id or not api_key:
96 |         return "Error: Please set ADZUNA_APP_ID and ADZUNA_API_KEY in your .env file."
97 | 
98 |     base_url = "https://api.adzuna.com/v1/api/jobs"
99 |     url = f"{base_url}/us/search/1"
100 | 
101 |     params = {
102 |         "app_id": app_id,
103 |         "app_key": api_key,
104 |         "results_per_page": num_results,
105 |         "what": role,
106 |         "where": location,
107 |         "content-type": "application/json",
108 |     }
109 | 
110 |     try:
111 |         response = requests.get(url, params=params)
112 |         response.raise_for_status()
113 |         jobs_data = response.json()
114 | 
115 |         job_listings = []
116 |         for job in jobs_data.get("results", []):
117 |             job_details = {
118 |                 "title": job.get("title", "N/A"),
119 |                 "company": job.get("company", {}).get("display_name", "N/A"),
120 |                 "location": job.get("location", {}).get("display_name", "N/A"),
121 |                 "salary": job.get("salary_min", "Not specified"),
122 |                 "description": (
123 |                     job.get("description", "")[:300] + "..."
124 |                     if job.get("description")
125 |                     else "No description"
126 |                 ),
127 |                 "url": job.get("redirect_url", "N/A"),
128 |             }
129 | 
130 |             formatted_job = f"""
131 | Title: {job_details['title']}
132 | Company: {job_details['company']}
133 | Location: {job_details['location']}
134 | Salary: {job_details['salary']}
135 | Description: {job_details['description']}
136 | URL: {job_details['url']}
137 | ---"""
138 |             job_listings.append(formatted_job)
139 | 
140 |         return (
141 |             "\n".join(job_listings)
142 |             if job_listings
143 |             else "No jobs found for the specified criteria."
144 | ) 145 | 146 | except requests.exceptions.HTTPError as err: 147 | return f"HTTP Error: {err}" 148 | except requests.exceptions.RequestException as e: 149 | return f"Request Error: {e}" 150 | except Exception as e: 151 | return f"Unexpected error: {e}" 152 | -------------------------------------------------------------------------------- /.gitignore: -------------------------------------------------------------------------------- 1 | # Byte-compiled / optimized / DLL files 2 | __pycache__/ 3 | *.py[codz] 4 | *$py.class 5 | 6 | # C extensions 7 | *.so 8 | 9 | # Distribution / packaging 10 | .Python 11 | build/ 12 | develop-eggs/ 13 | dist/ 14 | downloads/ 15 | eggs/ 16 | .eggs/ 17 | lib/ 18 | lib64/ 19 | parts/ 20 | sdist/ 21 | var/ 22 | wheels/ 23 | share/python-wheels/ 24 | *.egg-info/ 25 | .installed.cfg 26 | *.egg 27 | MANIFEST 28 | 29 | # PyInstaller 30 | # Usually these files are written by a python script from a template 31 | # before PyInstaller builds the exe, so as to inject date/other infos into it. 32 | *.manifest 33 | *.spec 34 | 35 | # Installer logs 36 | pip-log.txt 37 | pip-delete-this-directory.txt 38 | 39 | # Unit test / coverage reports 40 | htmlcov/ 41 | .tox/ 42 | .nox/ 43 | .coverage 44 | .coverage.* 45 | .cache 46 | nosetests.xml 47 | coverage.xml 48 | *.cover 49 | *.py.cover 50 | .hypothesis/ 51 | .pytest_cache/ 52 | cover/ 53 | 54 | # Translations 55 | *.mo 56 | *.pot 57 | 58 | # Django stuff: 59 | *.log 60 | local_settings.py 61 | db.sqlite3 62 | db.sqlite3-journal 63 | 64 | # Flask stuff: 65 | instance/ 66 | .webassets-cache 67 | 68 | # Scrapy stuff: 69 | .scrapy 70 | 71 | # Sphinx documentation 72 | docs/_build/ 73 | 74 | # PyBuilder 75 | .pybuilder/ 76 | target/ 77 | 78 | # Jupyter Notebook 79 | .ipynb_checkpoints 80 | 81 | # IPython 82 | profile_default/ 83 | ipython_config.py 84 | 85 | # pyenv 86 | # For a library or package, you might want to ignore these files since the code is 87 | # intended to run in multiple environments; otherwise, check them in: 88 | # .python-version 89 | 90 | # pipenv 91 | # According to pypa/pipenv#598, it is recommended to include Pipfile.lock in version control. 92 | # However, in case of collaboration, if having platform-specific dependencies or dependencies 93 | # having no cross-platform support, pipenv may install dependencies that don't work, or not 94 | # install all needed dependencies. 95 | #Pipfile.lock 96 | 97 | # UV 98 | # Similar to Pipfile.lock, it is generally recommended to include uv.lock in version control. 99 | # This is especially recommended for binary packages to ensure reproducibility, and is more 100 | # commonly ignored for libraries. 101 | #uv.lock 102 | 103 | # poetry 104 | # Similar to Pipfile.lock, it is generally recommended to include poetry.lock in version control. 105 | # This is especially recommended for binary packages to ensure reproducibility, and is more 106 | # commonly ignored for libraries. 107 | # https://python-poetry.org/docs/basic-usage/#commit-your-poetrylock-file-to-version-control 108 | #poetry.lock 109 | #poetry.toml 110 | 111 | # pdm 112 | # Similar to Pipfile.lock, it is generally recommended to include pdm.lock in version control. 113 | # pdm recommends including project-wide configuration in pdm.toml, but excluding .pdm-python. 
114 | # https://pdm-project.org/en/latest/usage/project/#working-with-version-control 115 | #pdm.lock 116 | #pdm.toml 117 | .pdm-python 118 | .pdm-build/ 119 | 120 | # pixi 121 | # Similar to Pipfile.lock, it is generally recommended to include pixi.lock in version control. 122 | #pixi.lock 123 | # Pixi creates a virtual environment in the .pixi directory, just like venv module creates one 124 | # in the .venv directory. It is recommended not to include this directory in version control. 125 | .pixi 126 | 127 | # PEP 582; used by e.g. github.com/David-OConnor/pyflow and github.com/pdm-project/pdm 128 | __pypackages__/ 129 | 130 | # Celery stuff 131 | celerybeat-schedule 132 | celerybeat.pid 133 | 134 | # SageMath parsed files 135 | *.sage.py 136 | 137 | # Environments 138 | .env 139 | .envrc 140 | .venv 141 | env/ 142 | venv/ 143 | ENV/ 144 | env.bak/ 145 | venv.bak/ 146 | 147 | # Spyder project settings 148 | .spyderproject 149 | .spyproject 150 | 151 | # Rope project settings 152 | .ropeproject 153 | 154 | # mkdocs documentation 155 | /site 156 | 157 | # mypy 158 | .mypy_cache/ 159 | .dmypy.json 160 | dmypy.json 161 | 162 | # Pyre type checker 163 | .pyre/ 164 | 165 | # pytype static type analyzer 166 | .pytype/ 167 | 168 | # Cython debug symbols 169 | cython_debug/ 170 | 171 | # PyCharm 172 | # JetBrains specific template is maintained in a separate JetBrains.gitignore that can 173 | # be found at https://github.com/github/gitignore/blob/main/Global/JetBrains.gitignore 174 | # and can be added to the global gitignore or merged into this file. For a more nuclear 175 | # option (not recommended) you can uncomment the following to ignore the entire idea folder. 176 | #.idea/ 177 | 178 | # Abstra 179 | # Abstra is an AI-powered process automation framework. 180 | # Ignore directories containing user credentials, local state, and settings. 181 | # Learn more at https://abstra.io/docs 182 | .abstra/ 183 | 184 | # Visual Studio Code 185 | # Visual Studio Code specific template is maintained in a separate VisualStudioCode.gitignore 186 | # that can be found at https://github.com/github/gitignore/blob/main/Global/VisualStudioCode.gitignore 187 | # and can be added to the global gitignore or merged into this file. However, if you prefer, 188 | # you could uncomment the following to ignore the entire vscode folder 189 | # .vscode/ 190 | 191 | # Ruff stuff: 192 | .ruff_cache/ 193 | 194 | # PyPI configuration file 195 | .pypirc 196 | 197 | # Cursor 198 | # Cursor is an AI-powered code editor. `.cursorignore` specifies files/directories to 199 | # exclude from AI features like autocomplete and code analysis. Recommended for sensitive data 200 | # refer to https://docs.cursor.com/context/ignore-files 201 | .cursorignore 202 | .cursorindexingignore 203 | 204 | # Marimo 205 | marimo/_static/ 206 | marimo/_lsp/ 207 | __marimo__/ 208 | 209 | # Miscellaneous 210 | .DS_Store 211 | *.db 212 | *.bin 213 | *.sqlite3 214 | *outputs* -------------------------------------------------------------------------------- /src/advanced/orion_ai_coding_agent/src/base_agent.py: -------------------------------------------------------------------------------- 1 | """ 2 | Base Agent Class for Orion AI Agent System 3 | 4 | This module provides the base agent functionality that all specific agents inherit from. 
5 | """ 6 | 7 | import logging 8 | import time 9 | from abc import ABC, abstractmethod 10 | from typing import Any, Dict, List, Optional 11 | 12 | 13 | class BaseAgent(ABC): 14 | """ 15 | Base class for all agents in the Orion AI Agent system. 16 | 17 | Provides common functionality like logging, state management, and error handling. 18 | """ 19 | 20 | def __init__(self, name: str, debug: bool = False): 21 | """ 22 | Initialize the base agent. 23 | 24 | Args: 25 | name: Name of the agent 26 | debug: Whether to enable debug mode 27 | """ 28 | self.name = name 29 | self.debug = debug 30 | self.state = {} 31 | self.execution_history = [] 32 | 33 | # Setup logging 34 | self.logger = logging.getLogger(f"orion.{name}") 35 | if debug: 36 | self.logger.setLevel(logging.DEBUG) 37 | else: 38 | self.logger.setLevel(logging.INFO) 39 | 40 | # Create console handler if not exists 41 | if not self.logger.handlers: 42 | handler = logging.StreamHandler() 43 | formatter = logging.Formatter(f"🤖 [{name}] %(levelname)s: %(message)s") 44 | handler.setFormatter(formatter) 45 | self.logger.addHandler(handler) 46 | 47 | def log(self, message: str, level: str = "info") -> None: 48 | """ 49 | Log a message with the specified level. 50 | 51 | Args: 52 | message: Message to log 53 | level: Log level (info, debug, warning, error) 54 | """ 55 | getattr(self.logger, level.lower())(message) 56 | 57 | def update_state(self, key: str, value: Any) -> None: 58 | """ 59 | Update the agent's state. 60 | 61 | Args: 62 | key: State key 63 | value: State value 64 | """ 65 | self.state[key] = value 66 | self.log(f"State updated: {key} = {value}", "debug") 67 | 68 | def get_state(self, key: str, default: Any = None) -> Any: 69 | """ 70 | Get a value from the agent's state. 71 | 72 | Args: 73 | key: State key 74 | default: Default value if key not found 75 | 76 | Returns: 77 | State value or default 78 | """ 79 | return self.state.get(key, default) 80 | 81 | def record_execution(self, action: str, result: Any, duration: float) -> None: 82 | """ 83 | Record an execution in the agent's history. 84 | 85 | Args: 86 | action: Action performed 87 | result: Result of the action 88 | duration: Time taken to execute 89 | """ 90 | execution_record = { 91 | "timestamp": time.time(), 92 | "action": action, 93 | "result": result, 94 | "duration": duration, 95 | "success": result is not None and not isinstance(result, Exception), 96 | } 97 | self.execution_history.append(execution_record) 98 | self.log(f"Recorded execution: {action} (took {duration:.2f}s)", "debug") 99 | 100 | def execute_with_tracking(self, action_name: str, func, *args, **kwargs) -> Any: 101 | """ 102 | Execute a function with automatic tracking and error handling. 
103 | 104 | Args: 105 | action_name: Name of the action being performed 106 | func: Function to execute 107 | *args: Function arguments 108 | **kwargs: Function keyword arguments 109 | 110 | Returns: 111 | Function result or None if error occurred 112 | """ 113 | self.log(f"Starting action: {action_name}") 114 | start_time = time.time() 115 | 116 | try: 117 | result = func(*args, **kwargs) 118 | duration = time.time() - start_time 119 | self.record_execution(action_name, result, duration) 120 | self.log(f"Completed action: {action_name} ✅") 121 | return result 122 | 123 | except Exception as e: 124 | duration = time.time() - start_time 125 | self.record_execution(action_name, e, duration) 126 | self.log(f"Failed action: {action_name} ❌ Error: {e}", "error") 127 | return None 128 | 129 | def get_execution_summary(self) -> Dict[str, Any]: 130 | """ 131 | Get a summary of the agent's execution history. 132 | 133 | Returns: 134 | Dictionary containing execution statistics 135 | """ 136 | if not self.execution_history: 137 | return {"total_actions": 0, "success_rate": 0, "total_time": 0} 138 | 139 | successful = sum(1 for record in self.execution_history if record["success"]) 140 | total_time = sum(record["duration"] for record in self.execution_history) 141 | 142 | return { 143 | "total_actions": len(self.execution_history), 144 | "successful_actions": successful, 145 | "failed_actions": len(self.execution_history) - successful, 146 | "success_rate": successful / len(self.execution_history) * 100, 147 | "total_time": total_time, 148 | "average_time": total_time / len(self.execution_history), 149 | } 150 | 151 | @abstractmethod 152 | def execute(self, *args, **kwargs) -> Any: 153 | """ 154 | Main execution method that each agent must implement. 155 | 156 | Args: 157 | *args: Variable arguments 158 | **kwargs: Keyword arguments 159 | 160 | Returns: 161 | Execution result 162 | """ 163 | pass 164 | 165 | def __str__(self) -> str: 166 | """String representation of the agent.""" 167 | return f"{self.__class__.__name__}(name='{self.name}')" 168 | 169 | def __repr__(self) -> str: 170 | """Detailed representation of the agent.""" 171 | return f"{self.__class__.__name__}(name='{self.name}', state={self.state})" 172 | -------------------------------------------------------------------------------- /src/intermediate/investment_advisor/main.py: -------------------------------------------------------------------------------- 1 | import argparse 2 | import asyncio 3 | import os 4 | import time 5 | from concurrent.futures import ThreadPoolExecutor, as_completed 6 | 7 | from agents import analyst, data_explorer, fin_expert, llm, news_info_explorer 8 | from crewai import Crew, Process 9 | from tasks import advise, analyse, get_company_financials, get_company_news 10 | 11 | os.environ["CREWAI_STORAGE_DIR"] = "src/intermediate/investment_advisor/crewai_memory" 12 | 13 | # Configuration 14 | ENABLE_PARALLEL_EXECUTION = True # Set to False for sequential execution 15 | 16 | # Create separate crews for parallel execution 17 | financial_crew = Crew( 18 | agents=[data_explorer], 19 | tasks=[get_company_financials], 20 | verbose=True, 21 | process=Process.sequential, 22 | memory=True, 23 | cache=True, 24 | max_rpm=15, 25 | ) 26 | 27 | news_crew = Crew( 28 | agents=[news_info_explorer], 29 | tasks=[get_company_news], 30 | verbose=True, 31 | process=Process.sequential, 32 | memory=True, 33 | cache=True, 34 | max_rpm=15, 35 | ) 36 | 37 | # Analysis crew for sequential tasks that depend on parallel results 38 | 
analysis_crew = Crew( 39 | agents=[analyst, fin_expert], 40 | tasks=[analyse, advise], 41 | verbose=True, 42 | process=Process.sequential, 43 | memory=True, 44 | cache=True, 45 | max_rpm=15, 46 | ) 47 | 48 | # Traditional sequential crew (for when parallel execution is disabled) 49 | sequential_crew = Crew( 50 | agents=[data_explorer, news_info_explorer, analyst, fin_expert], 51 | tasks=[get_company_financials, get_company_news, analyse, advise], 52 | verbose=True, 53 | process=Process.sequential, 54 | memory=True, 55 | cache=True, 56 | max_rpm=35, 57 | ) 58 | 59 | 60 | def run_crew_task(crew, inputs, task_name): 61 | """Helper function to run a crew task.""" 62 | print(f"🚀 Starting {task_name}...") 63 | result = crew.kickoff(inputs=inputs) 64 | print(f"✅ Completed {task_name}") 65 | return result 66 | 67 | 68 | def run_parallel_execution(stock_input): 69 | """Run financial analysis with parallel execution.""" 70 | print("🚀 Starting Enhanced Financial Analysis with Parallel Execution...") 71 | 72 | # Phase 1: Run financial data gathering and news gathering in parallel 73 | print("\n🔄 Phase 1: Running Financial Data & News Gathering in Parallel...") 74 | parallel_start = time.time() 75 | 76 | with ThreadPoolExecutor(max_workers=2) as executor: 77 | # Submit both tasks to run in parallel 78 | financial_future = executor.submit( 79 | run_crew_task, financial_crew, stock_input, "Financial Data Gathering" 80 | ) 81 | news_future = executor.submit( 82 | run_crew_task, news_crew, stock_input, "News Gathering" 83 | ) 84 | 85 | # Wait for both tasks to complete 86 | financial_result = financial_future.result() 87 | news_result = news_future.result() 88 | 89 | parallel_end = time.time() 90 | parallel_time = parallel_end - parallel_start 91 | print(f"✅ Phase 1 completed in {parallel_time:.2f} seconds") 92 | 93 | # Phase 2: Run analysis and recommendation sequentially (they depend on Phase 1 results) 94 | print("\n🔄 Phase 2: Running Analysis & Recommendation...") 95 | analysis_start = time.time() 96 | 97 | # The analysis crew will use the context from the completed tasks 98 | analysis_result = analysis_crew.kickoff(inputs=stock_input) 99 | 100 | analysis_end = time.time() 101 | analysis_time = analysis_end - analysis_start 102 | print(f"✅ Phase 2 completed in {analysis_time:.2f} seconds") 103 | 104 | return parallel_time, analysis_time 105 | 106 | 107 | def run_sequential_execution(stock_input): 108 | """Run financial analysis with traditional sequential execution.""" 109 | print("🚀 Starting Enhanced Financial Analysis with Sequential Execution...") 110 | 111 | print("\n🔄 Running All Tasks Sequentially...") 112 | sequential_start = time.time() 113 | 114 | result = sequential_crew.kickoff(inputs=stock_input) 115 | 116 | sequential_end = time.time() 117 | sequential_time = sequential_end - sequential_start 118 | print(f"✅ All tasks completed in {sequential_time:.2f} seconds") 119 | 120 | return sequential_time, 0 # Return 0 for analysis_time since it's all combined 121 | 122 | 123 | def main(): 124 | """Main function to run the financial analysis with configurable execution mode.""" 125 | # Parse command line arguments 126 | parser = argparse.ArgumentParser( 127 | description="Financial Analysis with Configurable Execution Mode" 128 | ) 129 | parser.add_argument( 130 | "--parallel", 131 | action="store_true", 132 | help="Enable parallel execution (overrides config)", 133 | ) 134 | parser.add_argument( 135 | "--sequential", 136 | action="store_true", 137 | help="Enable sequential execution (overrides 
config)", 138 | ) 139 | parser.add_argument( 140 | "--stock", 141 | default="RELIANCE", 142 | help="Stock symbol to analyze (default: RELIANCE)", 143 | ) 144 | 145 | args = parser.parse_args() 146 | 147 | # Determine execution mode 148 | if args.parallel: 149 | use_parallel = True 150 | elif args.sequential: 151 | use_parallel = False 152 | else: 153 | use_parallel = ENABLE_PARALLEL_EXECUTION 154 | 155 | # Record start time 156 | start_time = time.time() 157 | 158 | # Scenario: Analyze specified stock 159 | print(f"\n📋 Stock Analysis: {args.stock}") 160 | stock_input = {"stock": args.stock} 161 | 162 | # Execute based on mode 163 | if use_parallel: 164 | parallel_time, analysis_time = run_parallel_execution(stock_input) 165 | 166 | # Calculate and display results 167 | end_time = time.time() 168 | execution_time = end_time - start_time 169 | 170 | print("\n🎉 Financial analysis completed!") 171 | print(f"⏱️ Phase 1 (Parallel): {parallel_time:.2f} seconds") 172 | print(f"⏱️ Phase 2 (Sequential): {analysis_time:.2f} seconds") 173 | print( 174 | f"⏱️ Total execution time: {execution_time:.2f} seconds ({execution_time/60:.2f} minutes)" 175 | ) 176 | 177 | # Show potential time savings 178 | estimated_sequential_time = parallel_time * 2 + analysis_time 179 | time_saved = estimated_sequential_time - execution_time 180 | print( 181 | f"💡 Estimated time saved by parallel execution: {time_saved:.2f} seconds" 182 | ) 183 | 184 | else: 185 | sequential_time, _ = run_sequential_execution(stock_input) 186 | 187 | # Calculate and display results 188 | end_time = time.time() 189 | execution_time = end_time - start_time 190 | 191 | print("\n🎉 Financial analysis completed!") 192 | print( 193 | f"⏱️ Total execution time: {execution_time:.2f} seconds ({execution_time/60:.2f} minutes)" 194 | ) 195 | print( 196 | "💡 Running in sequential mode - use --parallel flag for faster execution" 197 | ) 198 | 199 | 200 | if __name__ == "__main__": 201 | main() 202 | -------------------------------------------------------------------------------- /src/intermediate/investment_advisor/api_server.py: -------------------------------------------------------------------------------- 1 | import os 2 | import time 3 | from typing import Dict, Optional 4 | 5 | from fastapi import BackgroundTasks, FastAPI, HTTPException 6 | from fastapi.middleware.cors import CORSMiddleware 7 | from main import ( 8 | analysis_crew, 9 | financial_crew, 10 | news_crew, 11 | run_crew_task, 12 | run_parallel_execution, 13 | run_sequential_execution, 14 | sequential_crew, 15 | ) 16 | from pydantic import BaseModel 17 | 18 | # FastAPI app initialization 19 | app = FastAPI( 20 | title="Financial Analysis API", 21 | description="A FastAPI server for running financial analysis using CrewAI agents", 22 | version="1.0.0", 23 | ) 24 | 25 | # Add CORS middleware 26 | app.add_middleware( 27 | CORSMiddleware, 28 | allow_origins=["*"], 29 | allow_credentials=True, 30 | allow_methods=["*"], 31 | allow_headers=["*"], 32 | ) 33 | 34 | 35 | # Pydantic models for request/response 36 | class AnalysisRequest(BaseModel): 37 | stock: str 38 | execution_mode: str = "parallel" # "parallel" or "sequential" 39 | 40 | 41 | class AnalysisResponse(BaseModel): 42 | status: str 43 | message: str 44 | execution_time: Optional[float] = None 45 | parallel_time: Optional[float] = None 46 | analysis_time: Optional[float] = None 47 | time_saved: Optional[float] = None 48 | task_id: Optional[str] = None 49 | 50 | 51 | class TaskStatus(BaseModel): 52 | task_id: str 53 | status: str 54 | 
result: Optional[Dict] = None
55 |     execution_time: Optional[float] = None
56 |     error: Optional[str] = None
57 | 
58 | 
59 | # In-memory storage for task results
60 | task_results = {}
61 | 
62 | 
63 | async def run_analysis_background(task_id: str, stock: str, execution_mode: str):
64 |     """Background task to run the analysis."""
65 |     start_time = time.time()
66 |     stock_input = {"stock": stock}
67 | 
68 |     task_results[task_id] = {
69 |         "status": "running",
70 |         "start_time": start_time,
71 |         "stock": stock,
72 |         "execution_mode": execution_mode,
73 |     }
74 | 
75 |     try:
76 |         if execution_mode == "parallel":
77 |             parallel_time, analysis_time = run_parallel_execution(stock_input)
78 | 
79 |             end_time = time.time()
80 |             execution_time = end_time - start_time
81 | 
82 |             # Calculate time savings
83 |             estimated_sequential_time = parallel_time * 2 + analysis_time
84 |             time_saved = estimated_sequential_time - execution_time
85 | 
86 |             task_results[task_id] = {
87 |                 "status": "completed",
88 |                 "execution_time": execution_time,
89 |                 "parallel_time": parallel_time,
90 |                 "analysis_time": analysis_time,
91 |                 "time_saved": time_saved,
92 |                 "stock": stock,
93 |                 "execution_mode": execution_mode,
94 |             }
95 |         else:
96 |             sequential_time, _ = run_sequential_execution(stock_input)
97 | 
98 |             end_time = time.time()
99 |             execution_time = end_time - start_time
100 | 
101 |             task_results[task_id] = {
102 |                 "status": "completed",
103 |                 "execution_time": execution_time,
104 |                 "stock": stock,
105 |                 "execution_mode": execution_mode,
106 |             }
107 |     except Exception as e:
108 |         # Record the failure so /status/{task_id} reports it instead of the
109 |         # task appearing to run forever
110 |         task_results[task_id] = {
111 |             "status": "failed",
112 |             "error": str(e),
113 |             "stock": stock,
114 |             "execution_mode": execution_mode,
115 |         }
116 | 
117 | 
118 | @app.get("/")
119 | async def root():
120 |     """Root endpoint with API information."""
121 |     return {
122 |         "message": "Financial Analysis API",
123 |         "version": "1.0.0",
124 |         "endpoints": {
125 |             "POST /analyze": "Start financial analysis",
126 |             "GET /status/{task_id}": "Get analysis status",
127 |             "GET /health": "Health check",
128 |         },
129 |     }
130 | 
131 | 
132 | @app.get("/health")
133 | async def health_check():
134 |     """Health check endpoint."""
135 |     return {"status": "healthy", "timestamp": time.time()}
136 | 
137 | 
138 | @app.post("/analyze", response_model=AnalysisResponse)
139 | async def start_analysis(request: AnalysisRequest, background_tasks: BackgroundTasks):
140 |     """Start financial analysis for a given stock."""
141 |     # Generate unique task ID
142 |     task_id = f"{request.stock}_{int(time.time())}"
143 | 
144 |     # Start background task
145 |     background_tasks.add_task(
146 |         run_analysis_background, task_id, request.stock, request.execution_mode
147 |     )
148 | 
149 |     return AnalysisResponse(
150 |         status="started",
151 |         message=f"Analysis started for {request.stock} in {request.execution_mode} mode",
152 |         task_id=task_id,
153 |     )
154 | 
155 | 
156 | @app.get("/status/{task_id}", response_model=TaskStatus)
157 | async def get_task_status(task_id: str):
158 |     """Get the status of a running analysis task."""
159 |     # An unknown ID previously raised KeyError (an HTTP 500); return a proper 404
160 |     if task_id not in task_results:
161 |         raise HTTPException(status_code=404, detail=f"Task {task_id} not found")
162 | 
163 |     task_data = task_results[task_id]
164 | 
165 |     return TaskStatus(
166 |         task_id=task_id,
167 |         status=task_data["status"],
168 |         result=task_data.get("results"),
169 |         execution_time=task_data.get("execution_time"),
170 |         error=task_data.get("error"),
171 |     )
172 | 
173 | 
174 | @app.get("/tasks")
175 | async def list_tasks():
176 |     """List all tasks and their statuses."""
177 |     return {
178 |         "tasks": [
179 |             {
180 |                 "task_id": task_id,
181 |                 "status": data["status"],
182 |                 "stock": data.get("stock"),
183 |                 "execution_mode": data.get("execution_mode"),
184 |                 "execution_time": data.get("execution_time"),
185 |             }
186 |             for task_id, data in task_results.items()
187 |         ]
188 |     }
189 | 
190 | 
191 | @app.delete("/tasks/{task_id}")
192 | async def delete_task(task_id: str):
193 |     """Delete a task from memory."""
194 |     if task_id not in task_results:
195 |         raise HTTPException(status_code=404, detail=f"Task {task_id} not found")
196 | 
197 |     del task_results[task_id]
198 |     return {"message": f"Task {task_id} deleted successfully"}
199 | 
200 | 
201 | @app.post("/analyze/sync", response_model=AnalysisResponse)
202 | async def analyze_sync(request: AnalysisRequest):
203 |     """Run financial analysis synchronously (blocking)."""
204 |     start_time = time.time()
205 |     stock_input = {"stock": request.stock}
206 | 
207 |     if request.execution_mode == "parallel":
208 |         parallel_time, analysis_time = run_parallel_execution(stock_input)
209 | 
210 |         end_time = time.time()
211 |         execution_time = end_time - start_time
212 | 
213 |         # Calculate time savings
214 |         estimated_sequential_time = parallel_time * 2 + analysis_time
215 |         time_saved = estimated_sequential_time - execution_time
216 | 
217 |         return AnalysisResponse(
218 |             status="completed",
219 |             message=f"Analysis completed for {request.stock}",
220 |             execution_time=execution_time,
221 |             parallel_time=parallel_time,
222 |             analysis_time=analysis_time,
223 |             time_saved=time_saved,
224 |         )
225 |     else:
226 |         sequential_time, _ = run_sequential_execution(stock_input)
227 | 
228 |         end_time = time.time()
229 |         execution_time = end_time - start_time
230 | 
231 |         return AnalysisResponse(
232 |             status="completed",
233 |             message=f"Analysis completed for {request.stock}",
234 |             execution_time=execution_time,
235 |         )
236 | 
237 | 
238 | if __name__ == "__main__":
239 |     import uvicorn
240 | 
241 |     uvicorn.run(app, host="0.0.0.0", port=8000)
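242 | 
243 | # A minimal (hypothetical) client for the background-task flow above; the host
244 | # and port match the uvicorn defaults at the bottom of this file, and the stock
245 | # symbol is the project's default:
246 | #
247 | #   import requests
248 | #
249 | #   resp = requests.post(
250 | #       "http://localhost:8000/analyze",
251 | #       json={"stock": "RELIANCE", "execution_mode": "parallel"},
252 | #   )
253 | #   task_id = resp.json()["task_id"]
254 | #   # Poll until status is "completed" or "failed"
255 | #   print(requests.get(f"http://localhost:8000/status/{task_id}").json())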
--------------------------------------------------------------------------------
/src/advanced/orion_ai_coding_agent/main.py:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env python3
2 | """
3 | Orion AI Agent - Main Entry Point
4 | 
5 | This script orchestrates AI-powered code generation for GitHub repositories using
6 | LangGraph for intelligent workflow orchestration, parallel processing, and advanced
7 | error recovery capabilities.
8 | """
9 | 
10 | import argparse
11 | import os
12 | import subprocess
13 | import sys
14 | 
15 | from dotenv import load_dotenv
16 | from src.agents import GitHubIntegrationAgent
17 | from src.cli_interface import show_help_summary
18 | from src.code_explainer import explain_repository
19 | from src.discord_integration import start_discord_bot
20 | 
21 | # Import LangGraph workflow as the default
22 | from src.workflow import run_intelligent_workflow
23 | 
24 | # Load environment variables
25 | load_dotenv()
26 | 
27 | 
28 | def main():
29 |     """Main entry point for the Orion AI Agent with LangGraph orchestration."""
30 |     parser = argparse.ArgumentParser(
31 |         description="Run Orion AI agent with intelligent LangGraph workflow orchestration",
32 |         formatter_class=argparse.RawDescriptionHelpFormatter,
33 |         epilog="""
34 | 🚀 LANGGRAPH FEATURES:
35 | • Intelligent workflow routing based on context analysis
36 | • Parallel agent execution for improved performance
37 | • Advanced error recovery with multiple retry strategies
38 | • State-based decision making throughout workflow
39 | • Built-in checkpointing and state persistence
40 | • Dynamic workflow adaptation based on repository analysis
41 | 
42 | 💡 TIP: Use --debug for detailed workflow information!
43 | 💡 TIP: Use --conda-env [env_name] to specify conda environment (default: ml) 44 | 💡 TIP: Use --no-venv to skip environment creation and use conda instead 45 | """, 46 | ) 47 | parser.add_argument( 48 | "--list-repos", 49 | action="store_true", 50 | help="List repositories from your GitHub account", 51 | ) 52 | parser.add_argument( 53 | "--repo-url", 54 | help="GitHub repository URL", 55 | default="https://github.com/ishandutta0098/open-clip", 56 | ) 57 | parser.add_argument( 58 | "--branch", 59 | help="Name of the branch to work on", 60 | default=None, 61 | ) 62 | parser.add_argument( 63 | "--prompt", 64 | help="Instruction for the AI agent", 65 | default="Create a python script to use clip model from transformers library", 66 | ) 67 | parser.add_argument( 68 | "--workdir", 69 | help="Working directory for cloning", 70 | default="/Users/ishandutta/Documents/code/orion-backend", 71 | ) 72 | parser.add_argument( 73 | "--repo-limit", 74 | type=int, 75 | default=5, 76 | help="Number of repositories to list (default: 5)", 77 | ) 78 | parser.add_argument( 79 | "--setup-auth", action="store_true", help="Run authentication setup" 80 | ) 81 | parser.add_argument( 82 | "--debug", 83 | action="store_true", 84 | help="Enable debug mode to show raw API responses", 85 | ) 86 | parser.add_argument( 87 | "--show-commands", 88 | action="store_true", 89 | help="Show available commands and examples", 90 | ) 91 | parser.add_argument( 92 | "--no-testing", action="store_true", help="Disable code testing" 93 | ) 94 | parser.add_argument( 95 | "--no-venv", action="store_true", help="Disable virtual environment creation" 96 | ) 97 | parser.add_argument( 98 | "--conda-env", 99 | help="Conda environment to use for running code", 100 | default="ml", 101 | ) 102 | parser.add_argument( 103 | "--strict-testing", action="store_true", help="Abort commit if tests fail" 104 | ) 105 | parser.add_argument( 106 | "--commit", action="store_true", help="Commit the generated changes" 107 | ) 108 | parser.add_argument( 109 | "--create-pr", 110 | action="store_true", 111 | help="Create a pull request (requires --commit)", 112 | ) 113 | parser.add_argument( 114 | "--discord-bot", 115 | action="store_true", 116 | help="Run Discord bot to receive prompts", 117 | ) 118 | args = parser.parse_args() 119 | 120 | if args.create_pr: 121 | args.commit = True 122 | 123 | # Set debug mode if requested 124 | if args.debug: 125 | os.environ["DEBUG"] = "true" 126 | print( 127 | "🔧 Debug mode enabled - detailed LangGraph workflow information will be shown" 128 | ) 129 | 130 | # Validate argument combinations 131 | if args.create_pr and not args.commit: 132 | print("❌ Error: --create-pr requires --commit") 133 | print( 134 | "💡 Use: --commit --create-pr to commit changes and create a pull request" 135 | ) 136 | sys.exit(1) 137 | 138 | if args.discord_bot: 139 | start_discord_bot( 140 | repo_url=args.repo_url, 141 | workdir=args.workdir, 142 | commit_changes=args.commit, 143 | create_pr=args.create_pr, 144 | enable_testing=not args.no_testing, 145 | create_venv=not args.no_venv, 146 | conda_env=args.conda_env, 147 | strict_testing=args.strict_testing, 148 | ) 149 | return 150 | 151 | if args.show_commands: 152 | show_help_summary() 153 | elif args.setup_auth: 154 | print("🔧 Running authentication setup...") 155 | subprocess.run(["python", "src/auth_setup.py"]) 156 | elif args.list_repos: 157 | print("📚 Listing repositories from your GitHub account...") 158 | 159 | # Use the new GitHub Integration Agent 160 | github_agent = 
GitHubIntegrationAgent(debug=args.debug) 161 | result = github_agent.list_repositories(args.repo_limit) 162 | 163 | if result: 164 | print(result) 165 | else: 166 | print("❌ Failed to list repositories") 167 | 168 | print("\n💡 Tip: Use --debug flag for detailed LangGraph workflow information") 169 | print("💡 Tip: LangGraph provides intelligent routing and error recovery") 170 | print( 171 | "💡 Tip: Run 'python main.py --show-commands' to see all available commands" 172 | ) 173 | else: 174 | # Check authentication before running the main workflow 175 | github_agent = GitHubIntegrationAgent(debug=args.debug) 176 | if not github_agent.check_authentication(): 177 | print( 178 | "\n💡 Tip: Run 'python main.py --setup-auth' to set up authentication" 179 | ) 180 | print( 181 | "💡 Tip: Run 'python main.py --show-commands' to see all available commands" 182 | ) 183 | sys.exit(1) 184 | 185 | if args.prompt.strip().lower() == "explain": 186 | if args.commit or args.create_pr: 187 | print("ℹ️ Explanation mode ignores commit and PR options.") 188 | explain_repository( 189 | args.repo_url, 190 | args.workdir, 191 | branch=args.branch, 192 | ) 193 | return 194 | 195 | print(f"🤖 Running AI agent on repository: {args.repo_url}") 196 | print(f"📝 Task: {args.prompt}") 197 | print("✨ Using LangGraph for intelligent workflow orchestration") 198 | run_intelligent_workflow( 199 | args.repo_url, 200 | args.prompt, 201 | args.workdir, 202 | enable_testing=not args.no_testing, 203 | create_venv=not args.no_venv, 204 | conda_env=args.conda_env, 205 | strict_testing=args.strict_testing, 206 | commit_changes=args.commit, 207 | create_pr=args.create_pr, 208 | branch=args.branch, 209 | ) 210 | 211 | 212 | if __name__ == "__main__": 213 | main() 214 | -------------------------------------------------------------------------------- /src/advanced/orion_ai_coding_agent/src/workflow.py: -------------------------------------------------------------------------------- 1 | """ 2 | Workflow Module for Orion AI Agent System 3 | 4 | This module provides the main workflow function using LangGraph for intelligent 5 | agent coordination, parallel processing, and enhanced error recovery capabilities. 6 | """ 7 | 8 | import os 9 | from typing import Optional 10 | 11 | from .agents.langgraph_orchestrator_agent import LangGraphOrchestratorAgent 12 | 13 | 14 | def run_intelligent_workflow( 15 | repo_url: str, 16 | user_prompt: str, 17 | workdir: Optional[str] = None, 18 | enable_testing: bool = True, 19 | create_venv: bool = True, 20 | conda_env: str = "ml", 21 | strict_testing: bool = False, 22 | commit_changes: bool = False, 23 | create_pr: bool = False, 24 | branch: Optional[str] = None, 25 | ) -> Optional[dict]: 26 | """ 27 | Main workflow for the agent using LangGraph for intelligent coordination. 
28 | 29 | This workflow provides significant enhancements over traditional approaches: 30 | 31 | ✨ Key Features: 32 | - 🧠 Intelligent workflow routing based on repository analysis 33 | - ⚡ Parallel agent execution for independent tasks 34 | - 🔄 Smart error recovery with multiple retry strategies 35 | - 📊 State-based decision making throughout the workflow 36 | - 🎯 Conditional workflow paths based on context 37 | - 💾 Built-in state persistence and checkpointing 38 | 39 | Args: 40 | repo_url: GitHub repository URL 41 | user_prompt: Task description for the AI 42 | workdir: Working directory for cloning 43 | enable_testing: Whether to test the generated code 44 | create_venv: Whether to create a virtual environment 45 | conda_env: Conda environment to use for running code 46 | strict_testing: Whether to abort on test failures 47 | commit_changes: Whether to commit the changes 48 | create_pr: Whether to create a pull request 49 | branch: Target branch to clone and work on 50 | 51 | Returns: 52 | Optional[dict]: Enhanced workflow result with detailed state tracking 53 | """ 54 | # Determine debug mode from environment 55 | debug_mode = os.getenv("DEBUG", "false").lower() == "true" 56 | 57 | # Initialize the LangGraph orchestrator 58 | orchestrator = LangGraphOrchestratorAgent(debug=debug_mode) 59 | 60 | if debug_mode: 61 | print("🚀 Starting Enhanced AI Workflow with LangGraph") 62 | print("=" * 60) 63 | print("🔥 FEATURES ENABLED:") 64 | print(" 🧠 Intelligent workflow routing") 65 | print(" ⚡ Parallel agent processing") 66 | print(" 🔄 Smart error recovery") 67 | print(" 📊 Advanced state management") 68 | print(" 🎯 Context-aware decisions") 69 | print("=" * 60) 70 | 71 | # Run the intelligent workflow 72 | result = orchestrator.run_intelligent_workflow( 73 | repo_url=repo_url, 74 | user_prompt=user_prompt, 75 | workdir=workdir, 76 | enable_testing=enable_testing, 77 | create_venv=create_venv, 78 | conda_env=conda_env, 79 | strict_testing=strict_testing, 80 | commit_changes=commit_changes, 81 | create_pr=create_pr, 82 | branch=branch, 83 | ) 84 | 85 | # Enhanced result processing and display 86 | if result: 87 | print("\n" + "=" * 60) 88 | print("📊 LANGGRAPH WORKFLOW SUMMARY") 89 | print("=" * 60) 90 | 91 | status = result.get("status", "unknown") 92 | session_id = result.get("session_id", "unknown") 93 | 94 | if debug_mode: 95 | print(f"🆔 Session ID: {session_id}") 96 | 97 | if status == "completed": 98 | print("✅ Status: Completed Successfully") 99 | elif status == "failed": 100 | print("❌ Status: Failed") 101 | error = result.get("error", "Unknown error") 102 | print(f"❌ Error: {error}") 103 | 104 | # Show retry information 105 | retry_count = result.get("retry_count", 0) 106 | if retry_count > 0: 107 | print(f"🔄 Retry attempts: {retry_count}") 108 | else: 109 | print(f"⚠️ Status: {status}") 110 | 111 | # Show completed phases 112 | completed_phases = result.get("completed_phases", []) 113 | if completed_phases: 114 | print("✅ Completed Phases:") 115 | for phase in completed_phases: 116 | print(f" ✓ {phase.replace('_', ' ').title()}") 117 | 118 | # Show failed phases (if any) 119 | failed_phases = result.get("failed_phases", []) 120 | if failed_phases: 121 | print("❌ Failed Phases:") 122 | for phase in failed_phases: 123 | print(f" ✗ {phase.replace('_', ' ').title()}") 124 | 125 | # Show created files 126 | created_files = result.get("created_files", []) 127 | if created_files: 128 | print(f"📁 Created Files: {', '.join(created_files)}") 129 | 130 | # Show current phase 131 | current_phase = 
result.get("current_phase") 132 | if current_phase and debug_mode: 133 | print(f"📍 Last Phase: {current_phase.replace('_', ' ').title()}") 134 | 135 | # Show PR URL if available 136 | pr_url = result.get("pr_url") 137 | if pr_url: 138 | print(f"🔗 Pull Request: {pr_url}") 139 | elif result.get("create_pr"): 140 | # If PR creation was requested but no URL found, show debug info 141 | pr_info = result.get("pr_info") 142 | if pr_info: 143 | print(f"🔧 PR Info Available: {pr_info}") 144 | else: 145 | print("⚠️ PR creation was requested but no PR info found in result") 146 | 147 | # Show intelligent workflow benefits 148 | if debug_mode: 149 | print("\n🎯 LANGGRAPH ADVANTAGES UTILIZED:") 150 | 151 | parallel_tasks = result.get("parallel_tasks", []) 152 | if parallel_tasks: 153 | print(f" ⚡ Parallel processing: {', '.join(parallel_tasks)}") 154 | 155 | if failed_phases and status != "failed": 156 | print(" 🔄 Smart error recovery: Continued despite failures") 157 | 158 | if len(completed_phases) > 0: 159 | print(f" 📊 State management: Tracked {len(completed_phases)} phases") 160 | 161 | # Show duration if available 162 | duration = result.get("duration") 163 | if duration: 164 | print(f"⏱️ Duration: {duration:.2f} seconds") 165 | 166 | print("=" * 60) 167 | 168 | # Enhanced debug information 169 | if debug_mode: 170 | print("\n🔧 LANGGRAPH DEBUG INFORMATION") 171 | print("=" * 60) 172 | 173 | # Show state transitions 174 | messages = result.get("messages", []) 175 | if messages: 176 | print(f"💬 Message history: {len(messages)} state transitions") 177 | 178 | # Show retry information 179 | retry_count = result.get("retry_count", 0) 180 | print(f"🔄 Retry count: {retry_count}") 181 | 182 | # Show next agent if workflow was interrupted 183 | next_agent = result.get("next_agent") 184 | if next_agent: 185 | print(f"➡️ Next planned agent: {next_agent}") 186 | 187 | print("=" * 60) 188 | 189 | else: 190 | print("❌ LangGraph workflow failed to complete - no result returned") 191 | 192 | return result 193 | 194 | 195 | # Keep the old function name for backwards compatibility 196 | def run( 197 | repo_url: str, 198 | user_prompt: str, 199 | workdir: Optional[str] = None, 200 | enable_testing: bool = True, 201 | create_venv: bool = True, 202 | strict_testing: bool = False, 203 | commit_changes: bool = False, 204 | create_pr: bool = False, 205 | branch: Optional[str] = None, 206 | ) -> Optional[dict]: 207 | """ 208 | Backwards compatibility wrapper for the intelligent workflow. 209 | 210 | This function maintains API compatibility while using LangGraph underneath. 211 | """ 212 | return run_intelligent_workflow( 213 | repo_url=repo_url, 214 | user_prompt=user_prompt, 215 | workdir=workdir, 216 | enable_testing=enable_testing, 217 | create_venv=create_venv, 218 | strict_testing=strict_testing, 219 | commit_changes=commit_changes, 220 | create_pr=create_pr, 221 | branch=branch, 222 | ) 223 | -------------------------------------------------------------------------------- /src/intermediate/job_search/main.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python3 2 | """ 3 | Enhanced CrewAI Job Search Agent System with Resume Analysis 4 | A comprehensive AI-powered job search automation system that analyzes your resume 5 | for personalized recommendations using CrewAI framework. 
6 | 7 | Required .env file format: 8 | OPENAI_API_KEY=your_openai_api_key_here 9 | ADZUNA_APP_ID=your_adzuna_app_id_here 10 | ADZUNA_API_KEY=your_adzuna_api_key_here 11 | 12 | Installation: 13 | pip install crewai langchain langchain-openai requests python-dotenv PyPDF2 pdfplumber 14 | """ 15 | 16 | import json 17 | import os 18 | from datetime import datetime 19 | 20 | import pdfplumber 21 | import PyPDF2 22 | from agents import create_agents, llm 23 | from crewai import Crew, Process 24 | from dotenv import load_dotenv 25 | from langchain_openai import ChatOpenAI 26 | from tasks import create_tasks 27 | 28 | # Load environment variables from .env file 29 | load_dotenv() 30 | 31 | 32 | class EnhancedJobSearchAgentSystem: 33 | """Enhanced Job Search Agent System with Resume Analysis""" 34 | 35 | def __init__(self, resume_path: str = None): 36 | """ 37 | Initialize the Enhanced Job Search Agent System 38 | 39 | Args: 40 | resume_path: Path to the resume PDF file for personalized analysis 41 | """ 42 | self.resume_path = resume_path 43 | self.resume_content = "" 44 | 45 | # Parse resume if provided 46 | if resume_path: 47 | self.parse_resume() 48 | 49 | # Create agents and tasks 50 | self.agents = create_agents(self.resume_content) 51 | self.tasks = create_tasks(self.agents, self.resume_content) 52 | self.setup_crew() 53 | 54 | def parse_resume(self): 55 | """Parse the resume and store content for agent context""" 56 | if self.resume_path: 57 | print(f"📄 Parsing resume from: {self.resume_path}") 58 | self.resume_content = self._parse_resume_direct(self.resume_path) 59 | if "✅ Resume parsed successfully!" in self.resume_content: 60 | print("✅ Resume parsed and ready for analysis!") 61 | else: 62 | print("❌ Resume parsing failed. Proceeding without resume context.") 63 | self.resume_content = "" 64 | 65 | def _parse_resume_direct(self, file_path: str) -> str: 66 | """Direct resume parsing function without tool decorator""" 67 | if not os.path.exists(file_path): 68 | return f"Error: Resume file not found at {file_path}. Please check the file path." 69 | 70 | try: 71 | # Try using pdfplumber first (better text extraction) 72 | with pdfplumber.open(file_path) as pdf: 73 | text = "" 74 | for page in pdf.pages: 75 | page_text = page.extract_text() 76 | if page_text: 77 | text += page_text + "\n" 78 | 79 | if text.strip(): 80 | return f"✅ Resume parsed successfully!\n\nResume Content:\n{text}" 81 | except Exception as e: 82 | print(f"pdfplumber failed: {e}, trying PyPDF2...") 83 | 84 | try: 85 | # Fallback to PyPDF2 86 | with open(file_path, "rb") as file: 87 | pdf_reader = PyPDF2.PdfReader(file) 88 | text = "" 89 | for page in pdf_reader.pages: 90 | text += page.extract_text() + "\n" 91 | 92 | if text.strip(): 93 | return f"✅ Resume parsed successfully!\n\nResume Content:\n{text}" 94 | else: 95 | return "Error: Could not extract text from PDF. The file might be image-based or corrupted." 96 | 97 | except Exception as e: 98 | return f"Error: Failed to parse resume PDF. 
{str(e)}" 99 | 100 | def setup_crew(self): 101 | """Initialize the CrewAI crew""" 102 | self.crew = Crew( 103 | agents=[ 104 | self.agents["job_searcher"], 105 | self.agents["skills_development"], 106 | self.agents["interview_prep"], 107 | self.agents["career_advisor"], 108 | ], 109 | tasks=[ 110 | self.tasks["job_search"], 111 | self.tasks["skills_analysis"], 112 | self.tasks["interview_prep"], 113 | self.tasks["career_strategy"], 114 | ], 115 | process=Process.hierarchical, 116 | manager_llm=llm, 117 | verbose=True, 118 | ) 119 | 120 | def search_jobs(self, role: str, location: str, num_results: int = 5): 121 | """ 122 | Execute the personalized job search process 123 | 124 | Args: 125 | role: Job title or role to search for 126 | location: Geographic location for job search 127 | num_results: Number of job results to return (default: 5) 128 | 129 | Returns: 130 | Complete personalized analysis and recommendations from all agents 131 | """ 132 | print(f"🚀 Starting PERSONALIZED job search for '{role}' in '{location}'...") 133 | if self.resume_content: 134 | print("📄 Using resume content for personalized recommendations") 135 | else: 136 | print("⚠️ No resume provided - using general recommendations") 137 | 138 | print("📝 This process will:") 139 | print(" 1. Search for relevant job openings") 140 | print(" 2. Compare job requirements with your background") 141 | print(" 3. Create personalized skill development plan") 142 | print(" 4. Prepare customized interview strategies") 143 | print(" 5. Generate targeted career optimization plan") 144 | print(" 6. Provide actionable next steps") 145 | print("\n" + "=" * 50) 146 | 147 | # Clear previous output file 148 | with open("task_output.txt", "w") as file: 149 | file.write(f"PERSONALIZED Job Search Analysis Report\n") 150 | file.write(f"Role: {role}\n") 151 | file.write(f"Location: {location}\n") 152 | file.write(f"Date: {datetime.now().strftime('%Y-%m-%d %H:%M:%S')}\n") 153 | file.write(f"Resume Analyzed: {'Yes' if self.resume_content else 'No'}\n") 154 | file.write("=" * 50 + "\n\n") 155 | 156 | # Update the job search task with specific parameters 157 | search_params = json.dumps( 158 | {"role": role, "location": location, "num_results": num_results} 159 | ) 160 | 161 | self.tasks[ 162 | "job_search" 163 | ].description = f"""Search for current job openings for the {role} role in {location} 164 | using the Job Search tool. Find {num_results} relevant positions that would be suitable for the candidate's background. 165 | Use this exact input: {search_params}""" 166 | 167 | try: 168 | # Execute the crew 169 | result = self.crew.kickoff() 170 | 171 | print("\n" + "=" * 50) 172 | print("✅ Personalized job search analysis complete!") 173 | print("📄 Detailed results saved to 'task_output.txt'") 174 | if self.resume_content: 175 | print( 176 | "🎯 All recommendations are tailored to your specific background!" 
177 | ) 178 | print("=" * 50) 179 | 180 | return result 181 | 182 | except Exception as e: 183 | print(f"❌ Error during job search execution: {e}") 184 | return None 185 | 186 | 187 | def main(): 188 | """Main function to run the enhanced job search system""" 189 | 190 | print("🔧 Enhanced Job Search System Setup:") 191 | print("✅ Loading configuration from .env file...") 192 | print( 193 | "📦 Required packages: pip install crewai langchain langchain-openai requests python-dotenv PyPDF2 pdfplumber" 194 | ) 195 | print("\n" + "=" * 50) 196 | 197 | try: 198 | # Resume file path - using 'resume.pdf' as default 199 | resume_path = "src/intermediate/job_search/resume.pdf" 200 | 201 | # Check if resume file exists 202 | if os.path.exists(resume_path): 203 | print(f"📄 Found resume file: {resume_path}") 204 | job_search_system = EnhancedJobSearchAgentSystem(resume_path=resume_path) 205 | else: 206 | print(f"⚠️ Resume file not found at: {resume_path}") 207 | print("💡 Proceeding without resume analysis (general recommendations)") 208 | print( 209 | "💡 To use resume analysis, place your resume.pdf in the project directory" 210 | ) 211 | job_search_system = EnhancedJobSearchAgentSystem() 212 | 213 | # Example usage - CUSTOMIZE THESE PARAMETERS 214 | role = "Senior Data Scientist" 215 | location = "New York" 216 | num_results = 5 217 | 218 | # Execute personalized job search 219 | result = job_search_system.search_jobs( 220 | role=role, location=location, num_results=num_results 221 | ) 222 | 223 | if result: 224 | print("\n📊 Final Summary:") 225 | print(result) 226 | 227 | except ValueError as e: 228 | print(f"❌ .env Configuration Error: {e}") 229 | print("💡 Make sure your .env file exists and contains all required API keys") 230 | except Exception as e: 231 | print(f"❌ Unexpected Error: {e}") 232 | 233 | 234 | if __name__ == "__main__": 235 | main() 236 | -------------------------------------------------------------------------------- /src/advanced/orion_ai_coding_agent/src/discord_integration.py: -------------------------------------------------------------------------------- 1 | import asyncio 2 | import logging 3 | import os 4 | import re 5 | 6 | import discord 7 | 8 | from .code_explainer import explain_repository 9 | from .workflow import run_intelligent_workflow 10 | 11 | 12 | def parse_discord_input(message_content: str) -> tuple[str, str, str] | None: 13 | """ 14 | Parse Discord message input in the specified format. 
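A doctest-style sketch; the URL and task text are illustrative:

    >>> parse_discord_input("URL: https://github.com/user/repo\nTASK: fix a bug")
    ('https://github.com/user/repo', 'main', 'fix a bug')

With no BRANCH line in the message, the branch falls back to 'main'.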
15 | 16 | Expected format: 17 | URL: 18 | BRANCH: (optional, defaults to 'main') 19 | TASK: 20 | 21 | Args: 22 | message_content: The Discord message content 23 | 24 | Returns: 25 | tuple[str, str, str] | None: (repo_url, branch, task) or None if parsing fails 26 | """ 27 | # Clean up the message content 28 | content = message_content.strip() 29 | 30 | # Use regex to extract URL, BRANCH, and TASK 31 | url_pattern = r"URL:\s*(.+)" 32 | branch_pattern = r"BRANCH:\s*(.+)" 33 | task_pattern = r"TASK:\s*(.+)" 34 | 35 | url_match = re.search(url_pattern, content, re.IGNORECASE | re.MULTILINE) 36 | branch_match = re.search(branch_pattern, content, re.IGNORECASE | re.MULTILINE) 37 | task_match = re.search(task_pattern, content, re.IGNORECASE | re.MULTILINE) 38 | 39 | # URL and TASK are required 40 | if not (url_match and task_match): 41 | return None 42 | 43 | repo_url = url_match.group(1).strip() 44 | task = task_match.group(1).strip() 45 | 46 | # Default branch to 'main' if not provided 47 | if branch_match and branch_match.group(1).strip(): 48 | branch = branch_match.group(1).strip() 49 | else: 50 | branch = "main" 51 | 52 | return repo_url, branch, task 53 | 54 | 55 | def _chunk_text(text: str, max_length: int = 1900): 56 | """Yield successive chunks of text under the Discord message limit.""" 57 | for i in range(0, len(text), max_length): 58 | yield text[i : i + max_length] 59 | 60 | 61 | class OrionClient(discord.Client): 62 | def __init__( 63 | self, 64 | repo_url: str | None = None, 65 | workdir: str | None = None, 66 | commit_changes: bool = False, 67 | create_pr: bool = False, 68 | enable_testing: bool = True, 69 | create_venv: bool = True, 70 | conda_env: str = "ml", 71 | strict_testing: bool = False, 72 | **kwargs, 73 | ) -> None: 74 | # Setup intents from kwargs or use defaults with required permissions 75 | intents = kwargs.get("intents", discord.Intents.default()) 76 | intents.message_content = True 77 | intents.messages = True 78 | 79 | super().__init__(intents=intents) 80 | # Note: repo_url will now be extracted from message, but keeping for backwards compatibility 81 | self.default_repo_url = repo_url or os.environ.get( 82 | "REPO_URL", "https://github.com/ishandutta0098/open-clip" 83 | ) 84 | self.workdir = workdir or os.environ.get("WORKDIR", os.getcwd()) 85 | self.commit_changes = commit_changes 86 | self.create_pr = create_pr 87 | self.enable_testing = enable_testing 88 | self.create_venv = create_venv 89 | self.conda_env = conda_env 90 | self.strict_testing = strict_testing 91 | 92 | async def on_ready(self) -> None: 93 | print("=" * 60) 94 | print(f"🤖 **ORION AI AGENT ONLINE** 🚀") 95 | print(f"👤 Logged in as: {self.user}") 96 | print("=" * 60) 97 | print(f"⚙️ **CONFIGURATION:**") 98 | print(f" 📦 Default Repository: {self.default_repo_url}") 99 | print(f" 📂 Working Dir: {self.workdir}") 100 | print(f" 💾 Auto-commit: {'✅' if self.commit_changes else '❌'}") 101 | print(f" 🚀 Auto-PR: {'✅' if self.create_pr else '❌'}") 102 | print(f" 🧪 Testing: {'✅' if self.enable_testing else '❌'}") 103 | print(f" 🐍 Virtual Env: {'✅' if self.create_venv else '❌'}") 104 | print(f" 🐍 Conda Env: {self.conda_env}") 105 | print(f" 🔒 Strict Testing: {'✅' if self.strict_testing else '❌'}") 106 | print("=" * 60) 107 | print(f"📝 **Expected Input Format:**") 108 | print(f" URL: ") 109 | print(f" BRANCH: (optional, default: 'main')") 110 | print(f" TASK: ") 111 | print("=" * 60) 112 | print(f"✨ **Ready to process AI tasks!** ✨") 113 | print("=" * 60) 114 | 115 | async def on_message(self, message: 
discord.Message) -> None: 116 | if message.author == self.user: 117 | return 118 | text = message.content.strip() 119 | if not text: 120 | return 121 | 122 | try: 123 | # Parse the Discord input format 124 | parsed_input = parse_discord_input(text) 125 | 126 | if not parsed_input: 127 | # Send format error message 128 | error_msg = ( 129 | "❌ **Invalid Input Format** ❌\n\n" 130 | "📝 **Expected Format:**\n" 131 | "```\n" 132 | "URL: \n" 133 | "BRANCH: (optional, defaults to 'main')\n" 134 | "TASK: \n" 135 | "```\n\n" 136 | "📌 **Example:**\n" 137 | "```\n" 138 | "URL: https://github.com/username/repo\n" 139 | "TASK: Add a new feature to calculate fibonacci numbers\n" 140 | "```\n\n" 141 | "🤖 **Orion AI Agent** - Please try again with the correct format!" 142 | ) 143 | await message.channel.send(error_msg) 144 | return 145 | 146 | repo_url, branch, task = parsed_input 147 | 148 | if task.strip().lower() == "explain": 149 | await message.channel.send( 150 | f"📚 Generating codebase explanation for {repo_url} (branch: {branch})" 151 | ) 152 | loop = asyncio.get_event_loop() 153 | explanation = await loop.run_in_executor( 154 | None, explain_repository, repo_url, self.workdir, branch 155 | ) 156 | for chunk in _chunk_text(explanation or "No explanation generated"): 157 | await message.channel.send(f"```{chunk}```") 158 | return 159 | 160 | # Send initial response 161 | status_msg = ( 162 | "🤖 **Hello Sir!** 👋\n\n" 163 | "🚀 **AI Agent Initiated** 🚀\n" 164 | f"📦 **Repository:** {repo_url}\n" 165 | f"🌿 **Branch:** {branch}\n" 166 | f"📝 **Task:** {task}\n\n" 167 | "⚡ **Status:** Processing your request...\n" 168 | ) 169 | if self.create_pr: 170 | status_msg += ( 171 | "📋 **Action:** Will create a Pull Request after completion 🎯" 172 | ) 173 | elif self.commit_changes: 174 | status_msg += "💾 **Action:** Will commit changes after completion ✨" 175 | else: 176 | status_msg += ( 177 | "🔄 **Action:** Will update you once processing is complete 📊" 178 | ) 179 | 180 | await message.channel.send(status_msg) 181 | 182 | # Send progress update 183 | progress_msg = ( 184 | "⚙️ **Processing in progress...** ⚙️\n\n" 185 | f"🔄 Cloning repository from branch '{branch}'...\n" 186 | "🤖 Generating AI code...\n" 187 | "🧪 Running tests...\n" 188 | "📝 Preparing output...\n\n" 189 | "⏳ This may take a few moments..." 
190 | ) 191 | progress_message = await message.channel.send(progress_msg) 192 | 193 | # Run the LangGraph workflow in a separate thread to avoid blocking 194 | loop = asyncio.get_event_loop() 195 | result = await loop.run_in_executor( 196 | None, 197 | run_intelligent_workflow, 198 | repo_url, 199 | task, 200 | self.workdir, 201 | self.enable_testing, 202 | self.create_venv, 203 | self.conda_env, 204 | self.strict_testing, 205 | self.commit_changes, 206 | self.create_pr, 207 | branch, # Add branch parameter 208 | ) 209 | 210 | # Delete the progress message 211 | try: 212 | await progress_message.delete() 213 | except: 214 | pass # Ignore if message was already deleted 215 | 216 | # Enhanced completion message with more details 217 | completion_msg = "🎉 **Task Completed Successfully!** 🎉\n\n" 218 | 219 | if self.create_pr: 220 | completion_msg += "✅ **Pull Request Created** 🚀\n" 221 | completion_msg += "📦 **Repository Updated** with AI-generated code\n" 222 | 223 | # Try multiple ways to get the PR URL 224 | pr_url = None 225 | if result: 226 | pr_url = result.get("pr_url") 227 | if not pr_url and result.get("pr_info"): 228 | pr_url = result.get("pr_info", {}).get("pr_url") 229 | 230 | if pr_url: 231 | completion_msg += f"\n🔗 **PR Link:** {pr_url}\n" 232 | completion_msg += "👀 **Ready for Review** - Check out the changes!" 233 | else: 234 | completion_msg += ( 235 | "\n⚠️ **Note:** PR was created but link unavailable" 236 | ) 237 | # Add debug information if available 238 | if result: 239 | completion_msg += ( 240 | f"\n🔧 **Debug:** Status={result.get('status', 'unknown')}" 241 | ) 242 | elif self.commit_changes: 243 | completion_msg += "✅ **Changes Committed** 💾\n" 244 | completion_msg += "📦 **Repository Updated** with AI-generated code\n" 245 | completion_msg += "🎯 **Status:** Ready for next steps" 246 | else: 247 | completion_msg += "✅ **Processing Complete** 🎯\n" 248 | completion_msg += "📊 **Analysis Finished** - Check logs for details" 249 | 250 | # Add execution summary if available 251 | if result: 252 | duration = result.get("duration") 253 | if duration: 254 | completion_msg += ( 255 | f"\n\n⏱️ **Execution Time:** {duration:.1f} seconds" 256 | ) 257 | 258 | created_files = result.get("created_files", []) 259 | if created_files: 260 | completion_msg += ( 261 | f"\n📁 **Files Created:** {len(created_files)} file(s)" 262 | ) 263 | if len(created_files) <= 3: 264 | completion_msg += f" ({', '.join(created_files)})" 265 | 266 | # Add status indicators 267 | status = result.get("status") 268 | if status == "completed": 269 | completion_msg += "\n\n🟢 **Status:** All operations successful" 270 | elif status == "failed": 271 | completion_msg += "\n\n🔴 **Status:** Some operations failed" 272 | error = result.get("error") 273 | if error: 274 | completion_msg += f"\n❌ **Error:** {error[:100]}..." 275 | 276 | completion_msg += "\n\n🤖 **Powered by Orion AI Agent** ⚡" 277 | 278 | await message.channel.send(completion_msg) 279 | 280 | except Exception as e: 281 | error_msg = ( 282 | "🚨 **Oops! Something went wrong** 🚨\n\n" 283 | "❌ **Error Details:**\n" 284 | f"```{str(e)[:200]}{'...' if len(str(e)) > 200 else ''}```\n\n" 285 | "🔧 **Next Steps:**\n" 286 | "• Check your input format\n" 287 | "• Verify repository access\n" 288 | "• Ensure branch exists\n" 289 | "• Contact support if issue persists\n\n" 290 | "🤖 **Orion AI Agent** - We'll fix this!" 
291 | ) 292 | await message.channel.send(error_msg) 293 | print(f"Error in on_message: {e}") 294 | 295 | 296 | def start_discord_bot( 297 | repo_url: str | None = None, 298 | workdir: str | None = None, 299 | commit_changes: bool = False, 300 | create_pr: bool = False, 301 | enable_testing: bool = True, 302 | create_venv: bool = True, 303 | conda_env: str = "ml", 304 | strict_testing: bool = False, 305 | ) -> None: 306 | """Start a Discord bot to receive prompts and run the workflow.""" 307 | token = os.environ.get("DISCORD_BOT_TOKEN") 308 | if not token: 309 | print("=" * 60) 310 | print("❌ **MISSING DISCORD TOKEN** ❌") 311 | print("🔑 DISCORD_BOT_TOKEN environment variable not found") 312 | print("💡 Please set your Discord bot token:") 313 | print(" export DISCORD_BOT_TOKEN='your_token_here'") 314 | print("=" * 60) 315 | return 316 | 317 | # Enable proper intents 318 | intents = discord.Intents.default() 319 | intents.message_content = True 320 | intents.messages = True 321 | 322 | client = OrionClient( 323 | repo_url=repo_url, 324 | workdir=workdir, 325 | commit_changes=commit_changes, 326 | create_pr=create_pr, 327 | enable_testing=enable_testing, 328 | create_venv=create_venv, 329 | conda_env=conda_env, 330 | strict_testing=strict_testing, 331 | intents=intents, 332 | ) 333 | 334 | print("=" * 60) 335 | print("🚀 **STARTING ORION DISCORD BOT** 🚀") 336 | print("=" * 60) 337 | print("🔧 Required permissions: 68608") 338 | print(" 📖 Read Messages/View Channels") 339 | print(" 💬 Send Messages") 340 | print(" 📚 Read Message History") 341 | print("=" * 60) 342 | 343 | try: 344 | client.run(token) 345 | except discord.LoginFailure: 346 | print("=" * 60) 347 | print("❌ **LOGIN FAILED** ❌") 348 | print("🔑 Invalid Discord token") 349 | print("💡 Please check your DISCORD_BOT_TOKEN environment variable") 350 | print("=" * 60) 351 | except discord.ConnectionClosed: 352 | print("=" * 60) 353 | print("❌ **CONNECTION CLOSED** ❌") 354 | print("🌐 Discord connection was lost") 355 | print("💡 Please check your internet connection") 356 | print("=" * 60) 357 | except Exception as e: 358 | print(f"❌ Bot error: {e}") 359 | -------------------------------------------------------------------------------- /src/advanced/orion_ai_coding_agent/src/agents/environment_manager_agent.py: -------------------------------------------------------------------------------- 1 | """ 2 | Environment Manager Agent for Orion AI Agent System 3 | 4 | This agent handles virtual environment management and dependency installation. 5 | """ 6 | 7 | import os 8 | import platform 9 | import subprocess 10 | import sys 11 | import time 12 | from typing import Dict, List, Optional 13 | 14 | sys.path.append(os.path.dirname(os.path.dirname(os.path.abspath(__file__)))) 15 | from src.base_agent import BaseAgent 16 | 17 | 18 | class EnvironmentManagerAgent(BaseAgent): 19 | """ 20 | Agent responsible for environment management operations. 21 | 22 | Capabilities: 23 | - Create virtual environments 24 | - Install dependencies 25 | - Manage Python environments 26 | - Generate requirements files 27 | """ 28 | 29 | def __init__(self, debug: bool = False): 30 | """ 31 | Initialize the Environment Manager Agent. 
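A minimal lifecycle sketch; the repository path is illustrative and the
import assumes the package root is on sys.path:

    from src.agents.environment_manager_agent import EnvironmentManagerAgent

    agent = EnvironmentManagerAgent(debug=True)
    venv_path = agent.create_virtual_environment("/tmp/demo_repo")
    agent.install_dependencies("/tmp/demo_repo")  # uses the venv created above
    agent.create_requirements_file("/tmp/demo_repo")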
32 | 33 | Args: 34 | debug: Whether to enable debug mode 35 | """ 36 | super().__init__("EnvironmentManager", debug) 37 | self.update_state("environments", {}) 38 | self.update_state("current_environment", None) 39 | self.update_state("installed_packages", {}) 40 | 41 | def create_virtual_environment( 42 | self, repo_path: str, env_name: str = ".venv" 43 | ) -> Optional[str]: 44 | """ 45 | Create a virtual environment for the repository. 46 | 47 | Args: 48 | repo_path: Path to the repository 49 | env_name: Name of the virtual environment directory 50 | 51 | Returns: 52 | str: Path to the virtual environment, or None if failed 53 | """ 54 | 55 | def _create_venv_operation(): 56 | venv_path = os.path.join(repo_path, env_name) 57 | 58 | if os.path.exists(venv_path): 59 | self.log("🔄 Virtual environment already exists, using existing one") 60 | self.update_state("current_environment", venv_path) 61 | return venv_path 62 | 63 | self.log("🐍 Creating virtual environment...") 64 | subprocess.run( 65 | ["python", "-m", "venv", venv_path], check=True, cwd=repo_path 66 | ) 67 | self.log("✅ Virtual environment created successfully") 68 | 69 | # Update state 70 | self.update_state("current_environment", venv_path) 71 | environments = self.get_state("environments", {}) 72 | environments[repo_path] = { 73 | "path": venv_path, 74 | "created_at": time.time(), 75 | "python_version": platform.python_version(), 76 | "packages": [], 77 | } 78 | self.update_state("environments", environments) 79 | 80 | return venv_path 81 | 82 | return self.execute_with_tracking( 83 | "create_virtual_environment", _create_venv_operation 84 | ) 85 | 86 | def get_venv_python(self, venv_path: str) -> str: 87 | """ 88 | Get the path to the Python executable in the virtual environment. 89 | 90 | Args: 91 | venv_path: Path to the virtual environment 92 | 93 | Returns: 94 | str: Path to the Python executable 95 | """ 96 | if platform.system() == "Windows": 97 | return os.path.join(venv_path, "Scripts", "python.exe") 98 | else: 99 | return os.path.join(venv_path, "bin", "python") 100 | 101 | def install_dependencies( 102 | self, repo_path: str, venv_python: Optional[str] = None 103 | ) -> bool: 104 | """ 105 | Install dependencies from requirements.txt or detect and install common ones. 
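If requirements.txt is absent, a small default set of ML packages
(torch, transformers, pillow, numpy, requests) is attempted instead;
packages that fail to install are logged and skipped rather than
aborting the run.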
106 | 107 | Args: 108 | repo_path: Path to the repository 109 | venv_python: Path to the virtual environment Python executable 110 | 111 | Returns: 112 | bool: True if installation was successful 113 | """ 114 | 115 | def _install_deps_operation(): 116 | if not venv_python: 117 | current_env = self.get_state("current_environment") 118 | if not current_env: 119 | raise ValueError( 120 | "No virtual environment specified and no current environment set" 121 | ) 122 | python_exec = self.get_venv_python(current_env) 123 | else: 124 | python_exec = venv_python 125 | 126 | # First, upgrade pip 127 | self.log("📦 Upgrading pip...") 128 | subprocess.run( 129 | [python_exec, "-m", "pip", "install", "--upgrade", "pip"], 130 | check=True, 131 | cwd=repo_path, 132 | capture_output=True, 133 | ) 134 | 135 | # Check for requirements.txt 136 | requirements_file = os.path.join(repo_path, "requirements.txt") 137 | installed_packages = [] 138 | 139 | if os.path.exists(requirements_file): 140 | self.log("📋 Installing dependencies from requirements.txt...") 141 | subprocess.run( 142 | [python_exec, "-m", "pip", "install", "-r", "requirements.txt"], 143 | check=True, 144 | cwd=repo_path, 145 | ) 146 | self.log("✅ Dependencies installed from requirements.txt") 147 | 148 | # Read requirements to track installed packages 149 | with open(requirements_file, "r") as f: 150 | installed_packages = [ 151 | line.strip() 152 | for line in f 153 | if line.strip() and not line.startswith("#") 154 | ] 155 | else: 156 | # If no requirements.txt, install common dependencies for AI/ML projects 157 | self.log( 158 | "📦 No requirements.txt found, installing common dependencies..." 159 | ) 160 | common_deps = ["torch", "transformers", "pillow", "numpy", "requests"] 161 | 162 | for dep in common_deps: 163 | try: 164 | self.log(f" Installing {dep}...") 165 | subprocess.run( 166 | [python_exec, "-m", "pip", "install", dep], 167 | check=True, 168 | cwd=repo_path, 169 | capture_output=True, 170 | ) 171 | installed_packages.append(dep) 172 | except subprocess.CalledProcessError: 173 | self.log(f" ⚠️ Failed to install {dep}, skipping...", "warning") 174 | 175 | self.log("✅ Common dependencies installed") 176 | 177 | # Update state 178 | environments = self.get_state("environments", {}) 179 | if repo_path in environments: 180 | environments[repo_path]["packages"] = installed_packages 181 | environments[repo_path]["last_install"] = time.time() 182 | self.update_state("environments", environments) 183 | 184 | installed_packages_state = self.get_state("installed_packages", {}) 185 | installed_packages_state[repo_path] = installed_packages 186 | self.update_state("installed_packages", installed_packages_state) 187 | 188 | return True 189 | 190 | return ( 191 | self.execute_with_tracking("install_dependencies", _install_deps_operation) 192 | is not None 193 | ) 194 | 195 | def create_requirements_file( 196 | self, repo_path: str, venv_python: Optional[str] = None 197 | ) -> bool: 198 | """ 199 | Create or update requirements.txt with installed packages. 
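Roughly equivalent to running, with the environment's interpreter:

    python -m pip freeze > requirements.txt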
200 | 201 | Args: 202 | repo_path: Path to the repository 203 | venv_python: Path to the virtual environment Python executable 204 | 205 | Returns: 206 | bool: True if successful, False otherwise 207 | """ 208 | 209 | def _create_requirements_operation(): 210 | if not venv_python: 211 | current_env = self.get_state("current_environment") 212 | if not current_env: 213 | raise ValueError( 214 | "No virtual environment specified and no current environment set" 215 | ) 216 | python_exec = self.get_venv_python(current_env) 217 | else: 218 | python_exec = venv_python 219 | 220 | self.log("📝 Generating requirements.txt...") 221 | result = subprocess.run( 222 | [python_exec, "-m", "pip", "freeze"], 223 | capture_output=True, 224 | text=True, 225 | check=True, 226 | cwd=repo_path, 227 | ) 228 | 229 | requirements_path = os.path.join(repo_path, "requirements.txt") 230 | with open(requirements_path, "w") as f: 231 | f.write(result.stdout) 232 | 233 | self.log("✅ requirements.txt created/updated") 234 | 235 | # Update state 236 | package_count = ( 237 | len(result.stdout.strip().split("\n")) if result.stdout.strip() else 0 238 | ) 239 | environments = self.get_state("environments", {}) 240 | if repo_path in environments: 241 | environments[repo_path]["requirements_generated"] = time.time() 242 | environments[repo_path]["package_count"] = package_count 243 | self.update_state("environments", environments) 244 | 245 | return True 246 | 247 | return ( 248 | self.execute_with_tracking( 249 | "create_requirements_file", _create_requirements_operation 250 | ) 251 | is not None 252 | ) 253 | 254 | def get_environment_info(self, repo_path: str) -> Optional[Dict]: 255 | """ 256 | Get information about the environment for a specific repository. 257 | 258 | Args: 259 | repo_path: Path to the repository 260 | 261 | Returns: 262 | Dict: Environment information, or None if not found 263 | """ 264 | environments = self.get_state("environments", {}) 265 | if repo_path in environments: 266 | env_info = environments[repo_path].copy() 267 | 268 | # Add current status 269 | venv_path = env_info.get("path") 270 | if venv_path and os.path.exists(venv_path): 271 | env_info["status"] = "active" 272 | env_info["python_executable"] = self.get_venv_python(venv_path) 273 | else: 274 | env_info["status"] = "missing" 275 | 276 | return env_info 277 | 278 | return None 279 | 280 | def list_installed_packages( 281 | self, repo_path: str, venv_python: Optional[str] = None 282 | ) -> Optional[List[str]]: 283 | """ 284 | List all installed packages in the virtual environment. 
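Entries use pip's freeze format, for example (versions illustrative):

    ["numpy==1.26.4", "requests==2.31.0"]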
285 | 286 | Args: 287 | repo_path: Path to the repository 288 | venv_python: Path to the virtual environment Python executable 289 | 290 | Returns: 291 | List[str]: List of installed packages, or None if failed 292 | """ 293 | 294 | def _list_packages_operation(): 295 | if not venv_python: 296 | current_env = self.get_state("current_environment") 297 | if not current_env: 298 | raise ValueError( 299 | "No virtual environment specified and no current environment set" 300 | ) 301 | python_exec = self.get_venv_python(current_env) 302 | else: 303 | python_exec = venv_python 304 | 305 | result = subprocess.run( 306 | [python_exec, "-m", "pip", "list", "--format=freeze"], 307 | capture_output=True, 308 | text=True, 309 | check=True, 310 | cwd=repo_path, 311 | ) 312 | 313 | packages = [] 314 | for line in result.stdout.strip().split("\n"): 315 | if line.strip(): 316 | packages.append(line.strip()) 317 | 318 | return packages 319 | 320 | return self.execute_with_tracking( 321 | "list_installed_packages", _list_packages_operation 322 | ) 323 | 324 | def cleanup_environment(self, repo_path: str) -> bool: 325 | """ 326 | Clean up the virtual environment for a repository. 327 | 328 | Args: 329 | repo_path: Path to the repository 330 | 331 | Returns: 332 | bool: True if successful, False otherwise 333 | """ 334 | 335 | def _cleanup_operation(): 336 | environments = self.get_state("environments", {}) 337 | if repo_path not in environments: 338 | self.log(f"No environment found for {repo_path}", "warning") 339 | return False 340 | 341 | env_info = environments[repo_path] 342 | venv_path = env_info.get("path") 343 | 344 | if venv_path and os.path.exists(venv_path): 345 | import shutil 346 | 347 | shutil.rmtree(venv_path) 348 | self.log(f"🗑️ Removed virtual environment: {venv_path}") 349 | 350 | # Remove from state 351 | del environments[repo_path] 352 | self.update_state("environments", environments) 353 | 354 | installed_packages = self.get_state("installed_packages", {}) 355 | if repo_path in installed_packages: 356 | del installed_packages[repo_path] 357 | self.update_state("installed_packages", installed_packages) 358 | 359 | return True 360 | 361 | return ( 362 | self.execute_with_tracking("cleanup_environment", _cleanup_operation) 363 | is not None 364 | ) 365 | 366 | def get_environment_summary(self) -> Dict[str, any]: 367 | """ 368 | Get a summary of all managed environments. 369 | 370 | Returns: 371 | Dict: Summary of environments 372 | """ 373 | environments = self.get_state("environments", {}) 374 | 375 | summary = { 376 | "total_environments": len(environments), 377 | "active_environments": 0, 378 | "total_packages": 0, 379 | "environments": [], 380 | } 381 | 382 | for repo_path, env_info in environments.items(): 383 | venv_path = env_info.get("path") 384 | is_active = venv_path and os.path.exists(venv_path) 385 | 386 | if is_active: 387 | summary["active_environments"] += 1 388 | 389 | package_count = env_info.get("package_count", 0) 390 | summary["total_packages"] += package_count 391 | 392 | env_summary = { 393 | "repo_path": repo_path, 394 | "status": "active" if is_active else "missing", 395 | "created_at": env_info.get("created_at"), 396 | "package_count": package_count, 397 | "python_version": env_info.get("python_version"), 398 | } 399 | summary["environments"].append(env_summary) 400 | 401 | return summary 402 | 403 | def execute(self, action: str, **kwargs) -> any: 404 | """ 405 | Main execution method for the Environment Manager Agent. 
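A dispatch sketch; the action names mirror the action_map below and
the path is illustrative:

    agent.execute("create_venv", repo_path="/tmp/demo_repo")
    agent.execute("summary")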
406 | 407 | Args: 408 | action: The action to perform 409 | **kwargs: Action-specific arguments 410 | 411 | Returns: 412 | Result of the action 413 | """ 414 | action_map = { 415 | "create_venv": self.create_virtual_environment, 416 | "install_deps": self.install_dependencies, 417 | "create_requirements": self.create_requirements_file, 418 | "get_info": self.get_environment_info, 419 | "list_packages": self.list_installed_packages, 420 | "cleanup": self.cleanup_environment, 421 | "summary": self.get_environment_summary, 422 | } 423 | 424 | if action not in action_map: 425 | self.log(f"Unknown action: {action}", "error") 426 | return None 427 | 428 | try: 429 | return action_map[action](**kwargs) 430 | except Exception as e: 431 | self.log(f"Error executing action {action}: {e}", "error") 432 | return None 433 | -------------------------------------------------------------------------------- /src/advanced/orion_ai_coding_agent/src/agents/repository_scanner_agent.py: -------------------------------------------------------------------------------- 1 | """ 2 | Repository Scanner Agent for Orion AI Agent System 3 | 4 | This agent handles repository scanning, file discovery, and basic code analysis 5 | to understand the structure and content of existing codebases. 6 | """ 7 | 8 | import os 9 | import re 10 | import sys 11 | from pathlib import Path 12 | from typing import Dict, List, Optional, Set, Tuple 13 | 14 | sys.path.append(os.path.dirname(os.path.dirname(os.path.abspath(__file__)))) 15 | from src.base_agent import BaseAgent 16 | 17 | 18 | class RepositoryScannerAgent(BaseAgent): 19 | """ 20 | Agent responsible for scanning and analyzing repository structure and content. 21 | 22 | Capabilities: 23 | - Scan repository file structure 24 | - Analyze file types and sizes 25 | - Extract basic code information (functions, classes, imports) 26 | - Build repository inventory 27 | - Identify modification targets 28 | """ 29 | 30 | def __init__(self, debug: bool = False): 31 | """ 32 | Initialize the Repository Scanner Agent. 33 | 34 | Args: 35 | debug: Whether to enable debug mode 36 | """ 37 | super().__init__("RepositoryScanner", debug) 38 | self.update_state("scanned_repositories", {}) 39 | self.update_state("file_inventory", {}) 40 | self.update_state("code_analysis", {}) 41 | 42 | def scan_repository(self, repo_path: str) -> Dict: 43 | """ 44 | Perform a comprehensive scan of the repository. 
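The returned summary includes "total_files", "file_types",
"python_files", and "modification_candidates" alongside the raw file
inventory and per-file code analysis.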
45 | 46 | Args: 47 | repo_path: Path to the repository to scan 48 | 49 | Returns: 50 | Dict: Comprehensive repository analysis 51 | """ 52 | 53 | def _scan_operation(): 54 | self.log(f"🔍 Scanning repository: {repo_path}") 55 | 56 | if not os.path.exists(repo_path): 57 | raise Exception(f"Repository path does not exist: {repo_path}") 58 | 59 | # Build file inventory 60 | file_inventory = self._build_file_inventory(repo_path) 61 | 62 | # Analyze code files 63 | code_analysis = self._analyze_code_files(repo_path, file_inventory) 64 | 65 | # Create repository summary 66 | repo_summary = { 67 | "repo_path": repo_path, 68 | "total_files": len(file_inventory), 69 | "file_inventory": file_inventory, 70 | "code_analysis": code_analysis, 71 | "file_types": self._categorize_files(file_inventory), 72 | "python_files": [f for f in file_inventory.keys() if f.endswith(".py")], 73 | "modification_candidates": self._identify_modification_candidates( 74 | file_inventory, code_analysis 75 | ), 76 | } 77 | 78 | # Update state 79 | scanned_repos = self.get_state("scanned_repositories", {}) 80 | scanned_repos[repo_path] = repo_summary 81 | self.update_state("scanned_repositories", scanned_repos) 82 | self.update_state("file_inventory", file_inventory) 83 | self.update_state("code_analysis", code_analysis) 84 | 85 | self.log(f"✅ Repository scan complete. Found {len(file_inventory)} files") 86 | return repo_summary 87 | 88 | return self.execute_with_tracking("scan_repository", _scan_operation) 89 | 90 | def _build_file_inventory(self, repo_path: str) -> Dict[str, Dict]: 91 | """ 92 | Build an inventory of all files in the repository. 93 | 94 | Args: 95 | repo_path: Path to the repository 96 | 97 | Returns: 98 | Dict: File inventory with metadata 99 | """ 100 | file_inventory = {} 101 | repo_pathobj = Path(repo_path) 102 | 103 | # Define files/directories to ignore 104 | ignore_patterns = { 105 | ".git", 106 | "__pycache__", 107 | ".pytest_cache", 108 | "node_modules", 109 | ".venv", 110 | "venv", 111 | ".env", 112 | "dist", 113 | "build", 114 | ".DS_Store", 115 | } 116 | 117 | for file_path in repo_pathobj.rglob("*"): 118 | if file_path.is_file(): 119 | # Skip ignored files/directories 120 | if any(ignore in file_path.parts for ignore in ignore_patterns): 121 | continue 122 | 123 | relative_path = str(file_path.relative_to(repo_pathobj)) 124 | 125 | try: 126 | file_stats = file_path.stat() 127 | file_inventory[relative_path] = { 128 | "absolute_path": str(file_path), 129 | "size_bytes": file_stats.st_size, 130 | "extension": file_path.suffix, 131 | "is_python": file_path.suffix == ".py", 132 | "is_text": self._is_text_file(file_path), 133 | "last_modified": file_stats.st_mtime, 134 | } 135 | except (OSError, PermissionError): 136 | # Skip files we can't access 137 | continue 138 | 139 | return file_inventory 140 | 141 | def _is_text_file(self, file_path: Path) -> bool: 142 | """ 143 | Check if a file is likely a text file. 
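Detection is heuristic: a known text extension passes immediately,
while an extensionless file (e.g. a Dockerfile) is sniffed by reading
its first 1 KB and checking that it is ASCII.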
144 | 145 | Args: 146 | file_path: Path to the file 147 | 148 | Returns: 149 | bool: True if file appears to be text 150 | """ 151 | text_extensions = { 152 | ".py", 153 | ".txt", 154 | ".md", 155 | ".json", 156 | ".yaml", 157 | ".yml", 158 | ".toml", 159 | ".cfg", 160 | ".ini", 161 | ".sh", 162 | ".bash", 163 | ".zsh", 164 | ".fish", 165 | ".ps1", 166 | ".js", 167 | ".ts", 168 | ".html", 169 | ".css", 170 | ".xml", 171 | ".csv", 172 | ".sql", 173 | ".dockerfile", 174 | ".gitignore", 175 | ".gitattributes", 176 | } 177 | 178 | if file_path.suffix.lower() in text_extensions: 179 | return True 180 | 181 | # Check if file has no extension but might be text (like Dockerfile) 182 | if not file_path.suffix: 183 | try: 184 | with open(file_path, "rb") as f: 185 | chunk = f.read(1024) 186 | return chunk.isascii() or not chunk 187 | except: 188 | return False 189 | 190 | return False 191 | 192 | def _analyze_code_files(self, repo_path: str, file_inventory: Dict) -> Dict: 193 | """ 194 | Analyze Python code files to extract structure information. 195 | 196 | Args: 197 | repo_path: Path to the repository 198 | file_inventory: File inventory from scanning 199 | 200 | Returns: 201 | Dict: Code analysis results 202 | """ 203 | code_analysis = {} 204 | 205 | for relative_path, file_info in file_inventory.items(): 206 | if file_info["is_python"]: 207 | try: 208 | analysis = self._analyze_python_file(file_info["absolute_path"]) 209 | if analysis: 210 | code_analysis[relative_path] = analysis 211 | except Exception as e: 212 | self.log(f"⚠️ Could not analyze {relative_path}: {e}", "warning") 213 | 214 | return code_analysis 215 | 216 | def _analyze_python_file(self, file_path: str) -> Optional[Dict]: 217 | """ 218 | Analyze a single Python file to extract structure information. 
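The analysis dict carries "imports", "classes", "functions",
"constants", "docstring", "line_count", and "has_main_guard"; files
that cannot be read as UTF-8 text yield None.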
219 | 220 | Args: 221 | file_path: Path to the Python file 222 | 223 | Returns: 224 | Optional[Dict]: Analysis results or None if failed 225 | """ 226 | try: 227 | with open(file_path, "r", encoding="utf-8") as f: 228 | content = f.read() 229 | 230 | analysis = { 231 | "imports": self._extract_imports(content), 232 | "classes": self._extract_classes(content), 233 | "functions": self._extract_functions(content), 234 | "constants": self._extract_constants(content), 235 | "docstring": self._extract_module_docstring(content), 236 | "line_count": len(content.split("\n")), 237 | "has_main_guard": 'if __name__ == "__main__"' in content, 238 | } 239 | 240 | return analysis 241 | 242 | except (UnicodeDecodeError, OSError): 243 | return None 244 | 245 | def _extract_imports(self, content: str) -> List[str]: 246 | """Extract import statements from Python code.""" 247 | imports = [] 248 | 249 | # Match import statements 250 | import_patterns = [ 251 | r"^import\s+([^\n]+)", 252 | r"^from\s+([^\s]+)\s+import\s+([^\n]+)", 253 | ] 254 | 255 | for line in content.split("\n"): 256 | line = line.strip() 257 | for pattern in import_patterns: 258 | match = re.match(pattern, line) 259 | if match: 260 | imports.append(line) 261 | break 262 | 263 | return imports 264 | 265 | def _extract_classes(self, content: str) -> List[Dict]: 266 | """Extract class definitions from Python code.""" 267 | classes = [] 268 | 269 | class_pattern = r"^class\s+(\w+)(?:\([^)]*\))?:" 270 | 271 | for i, line in enumerate(content.split("\n"), 1): 272 | line = line.strip() 273 | match = re.match(class_pattern, line) 274 | if match: 275 | class_name = match.group(1) 276 | classes.append( 277 | {"name": class_name, "line_number": i, "definition": line} 278 | ) 279 | 280 | return classes 281 | 282 | def _extract_functions(self, content: str) -> List[Dict]: 283 | """Extract function definitions from Python code.""" 284 | functions = [] 285 | 286 | function_pattern = r"^def\s+(\w+)\s*\([^)]*\):" 287 | 288 | for i, line in enumerate(content.split("\n"), 1): 289 | line = line.strip() 290 | match = re.match(function_pattern, line) 291 | if match: 292 | func_name = match.group(1) 293 | functions.append( 294 | {"name": func_name, "line_number": i, "definition": line} 295 | ) 296 | 297 | return functions 298 | 299 | def _extract_constants(self, content: str) -> List[str]: 300 | """Extract constants (ALL_CAPS variables) from Python code.""" 301 | constants = [] 302 | 303 | constant_pattern = r"^([A-Z_][A-Z0-9_]*)\s*=" 304 | 305 | for line in content.split("\n"): 306 | line = line.strip() 307 | match = re.match(constant_pattern, line) 308 | if match: 309 | constants.append(match.group(1)) 310 | 311 | return constants 312 | 313 | def _extract_module_docstring(self, content: str) -> Optional[str]: 314 | """Extract module-level docstring.""" 315 | # Look for triple-quoted strings at the beginning of the file 316 | docstring_pattern = r'^\s*"""(.*?)"""' 317 | match = re.search(docstring_pattern, content, re.DOTALL) 318 | 319 | if match: 320 | return match.group(1).strip() 321 | 322 | # Try single quotes 323 | docstring_pattern = r"^\s*'''(.*?)'''" 324 | match = re.search(docstring_pattern, content, re.DOTALL) 325 | 326 | if match: 327 | return match.group(1).strip() 328 | 329 | return None 330 | 331 | def _categorize_files(self, file_inventory: Dict) -> Dict[str, int]: 332 | """ 333 | Categorize files by type. 
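Example shape (counts illustrative):

    {".py": 12, ".md": 3, "no_extension": 1}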
334 | 335 | Args: 336 | file_inventory: File inventory 337 | 338 | Returns: 339 | Dict: File type counts 340 | """ 341 | categories = {} 342 | 343 | for file_info in file_inventory.values(): 344 | ext = file_info["extension"].lower() 345 | if not ext: 346 | ext = "no_extension" 347 | 348 | categories[ext] = categories.get(ext, 0) + 1 349 | 350 | return categories 351 | 352 | def _identify_modification_candidates( 353 | self, file_inventory: Dict, code_analysis: Dict 354 | ) -> List[str]: 355 | """ 356 | Identify files that are good candidates for modification. 357 | 358 | Args: 359 | file_inventory: File inventory 360 | code_analysis: Code analysis results 361 | 362 | Returns: 363 | List[str]: List of files suitable for modification 364 | """ 365 | candidates = [] 366 | 367 | for relative_path, file_info in file_inventory.items(): 368 | # Python files are primary candidates 369 | if file_info["is_python"]: 370 | # Skip very large files (>10KB) or very small files (<50 bytes) 371 | if 50 <= file_info["size_bytes"] <= 10000: 372 | candidates.append(relative_path) 373 | 374 | # Configuration files 375 | elif file_info["extension"] in [".json", ".yaml", ".yml", ".toml", ".cfg"]: 376 | candidates.append(relative_path) 377 | 378 | return candidates 379 | 380 | def get_file_content( 381 | self, repo_path: str, relative_file_path: str 382 | ) -> Optional[str]: 383 | """ 384 | Get the content of a specific file. 385 | 386 | Args: 387 | repo_path: Path to the repository 388 | relative_file_path: Relative path to the file within the repository 389 | 390 | Returns: 391 | Optional[str]: File content or None if failed 392 | """ 393 | 394 | def _read_operation(): 395 | file_path = os.path.join(repo_path, relative_file_path) 396 | 397 | if not os.path.exists(file_path): 398 | raise Exception(f"File does not exist: {file_path}") 399 | 400 | try: 401 | with open(file_path, "r", encoding="utf-8") as f: 402 | content = f.read() 403 | 404 | self.log(f"📖 Read file: {relative_file_path}") 405 | return content 406 | 407 | except UnicodeDecodeError: 408 | # Try with different encoding 409 | with open(file_path, "r", encoding="latin-1") as f: 410 | content = f.read() 411 | 412 | self.log(f"📖 Read file (latin-1): {relative_file_path}") 413 | return content 414 | 415 | return self.execute_with_tracking("get_file_content", _read_operation) 416 | 417 | def find_files_by_pattern(self, repo_path: str, pattern: str) -> List[str]: 418 | """ 419 | Find files matching a specific pattern. 420 | 421 | Args: 422 | repo_path: Path to the repository 423 | pattern: Pattern to match (can be filename, extension, or regex) 424 | 425 | Returns: 426 | List[str]: List of matching file paths 427 | """ 428 | scanned_repos = self.get_state("scanned_repositories", {}) 429 | 430 | if repo_path not in scanned_repos: 431 | self.log(f"Repository not scanned: {repo_path}. Scanning now...") 432 | self.scan_repository(repo_path) 433 | scanned_repos = self.get_state("scanned_repositories", {}) 434 | 435 | file_inventory = scanned_repos[repo_path]["file_inventory"] 436 | matching_files = [] 437 | 438 | for relative_path in file_inventory.keys(): 439 | # Simple pattern matching 440 | if ( 441 | pattern.lower() in relative_path.lower() 442 | or relative_path.endswith(pattern) 443 | or re.search(pattern, relative_path, re.IGNORECASE) 444 | ): 445 | matching_files.append(relative_path) 446 | 447 | return matching_files 448 | 449 | def get_repository_summary(self, repo_path: str) -> Optional[Dict]: 450 | """ 451 | Get a summary of a scanned repository. 
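Returns the cached result of an earlier scan_repository() call for
this path; repositories that have not been scanned yield None.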
452 | 453 | Args: 454 | repo_path: Path to the repository 455 | 456 | Returns: 457 | Optional[Dict]: Repository summary or None if not scanned 458 | """ 459 | scanned_repos = self.get_state("scanned_repositories", {}) 460 | return scanned_repos.get(repo_path) 461 | 462 | def execute(self, action: str, **kwargs) -> any: 463 | """ 464 | Main execution method for the Repository Scanner Agent. 465 | 466 | Args: 467 | action: The action to perform 468 | **kwargs: Action-specific arguments 469 | 470 | Returns: 471 | Result of the action 472 | """ 473 | action_map = { 474 | "scan": self.scan_repository, 475 | "read": self.get_file_content, 476 | "find": self.find_files_by_pattern, 477 | "summary": self.get_repository_summary, 478 | } 479 | 480 | if action not in action_map: 481 | self.log(f"Unknown action: {action}", "error") 482 | return None 483 | 484 | try: 485 | return action_map[action](**kwargs) 486 | except Exception as e: 487 | self.log(f"Error executing action {action}: {e}", "error") 488 | return None 489 | -------------------------------------------------------------------------------- /src/advanced/orion_ai_coding_agent/src/agents/git_operations_agent.py: -------------------------------------------------------------------------------- 1 | """ 2 | Git Operations Agent for Orion AI Agent System 3 | 4 | This agent handles all git-related operations including cloning, branching, and repository management. 5 | """ 6 | 7 | import os 8 | import shutil 9 | import subprocess 10 | import sys 11 | import time 12 | from typing import Optional 13 | 14 | sys.path.append(os.path.dirname(os.path.dirname(os.path.abspath(__file__)))) 15 | from src.base_agent import BaseAgent 16 | 17 | 18 | class GitOperationsAgent(BaseAgent): 19 | """ 20 | Agent responsible for all git operations. 21 | 22 | Capabilities: 23 | - Clone repositories 24 | - Manage branches 25 | - Handle git state and history 26 | """ 27 | 28 | def __init__(self, debug: bool = False): 29 | """ 30 | Initialize the Git Operations Agent. 31 | 32 | Args: 33 | debug: Whether to enable debug mode 34 | """ 35 | super().__init__("GitOperations", debug) 36 | self.update_state("repositories", {}) 37 | self.update_state("current_repo", None) 38 | self.update_state("current_branch", None) 39 | 40 | def clone_repository( 41 | self, repo_url: str, clone_path: str, target_branch: Optional[str] = None 42 | ) -> bool: 43 | """ 44 | Clone a GitHub repository to the specified path. 
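Equivalent to `git clone [-b <target_branch>] <repo_url> <clone_path>`;
if the path already exists, the checkout is validated, fetched, and
reused instead of re-cloning.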
45 | 46 | Args: 47 | repo_url: GitHub repository URL 48 | clone_path: Path where to clone the repository 49 | target_branch: Specific branch to clone (optional) 50 | 51 | Returns: 52 | bool: True if successful, False otherwise 53 | """ 54 | 55 | def _clone_operation(): 56 | if os.path.exists(clone_path): 57 | self.log(f"Repository already exists at {clone_path}") 58 | return self._handle_existing_repository( 59 | clone_path, repo_url, target_branch 60 | ) 61 | 62 | if target_branch: 63 | self.log( 64 | f"Cloning repository {repo_url} (branch: {target_branch}) to {clone_path}" 65 | ) 66 | subprocess.run( 67 | ["git", "clone", "-b", target_branch, repo_url, clone_path], 68 | check=True, 69 | ) 70 | else: 71 | self.log(f"Cloning repository {repo_url} to {clone_path}") 72 | subprocess.run(["git", "clone", repo_url, clone_path], check=True) 73 | 74 | # Update state 75 | repo_name = os.path.basename(clone_path) 76 | self.update_state("current_repo", clone_path) 77 | repositories = self.get_state("repositories", {}) 78 | repositories[repo_name] = { 79 | "url": repo_url, 80 | "path": clone_path, 81 | "target_branch": target_branch, 82 | "cloned_at": time.time(), 83 | } 84 | self.update_state("repositories", repositories) 85 | 86 | return True 87 | 88 | return ( 89 | self.execute_with_tracking("clone_repository", _clone_operation) is not None 90 | ) 91 | 92 | def _handle_existing_repository( 93 | self, clone_path: str, repo_url: str, target_branch: Optional[str] = None 94 | ) -> bool: 95 | """ 96 | Handle an existing repository by validating and updating it. 97 | 98 | Args: 99 | clone_path: Path to the existing repository 100 | repo_url: Expected repository URL 101 | target_branch: Specific branch to switch to (optional) 102 | 103 | Returns: 104 | bool: True if successful, False otherwise 105 | """ 106 | try: 107 | original_dir = os.getcwd() 108 | os.chdir(clone_path) 109 | 110 | # Check if it's a valid git repository 111 | subprocess.run(["git", "status"], check=True, capture_output=True) 112 | self.log("✅ Using existing repository") 113 | 114 | # Fetch latest changes 115 | self.log("🔄 Fetching latest changes...") 116 | subprocess.run(["git", "fetch", "origin"], check=True, capture_output=True) 117 | 118 | # Switch to target branch or main/master branch 119 | if target_branch: 120 | self.log(f"🌿 Switching to target branch: {target_branch}") 121 | try: 122 | subprocess.run( 123 | ["git", "checkout", target_branch], 124 | check=True, 125 | capture_output=True, 126 | ) 127 | self.update_state("current_branch", target_branch) 128 | except subprocess.CalledProcessError: 129 | self.log( 130 | f"⚠️ Could not switch to target branch {target_branch}, trying origin/{target_branch}" 131 | ) 132 | try: 133 | subprocess.run( 134 | [ 135 | "git", 136 | "checkout", 137 | "-b", 138 | target_branch, 139 | f"origin/{target_branch}", 140 | ], 141 | check=True, 142 | capture_output=True, 143 | ) 144 | self.update_state("current_branch", target_branch) 145 | except subprocess.CalledProcessError: 146 | self.log( 147 | f"❌ Could not switch to target branch {target_branch}" 148 | ) 149 | raise Exception(f"Target branch {target_branch} does not exist") 150 | else: 151 | # Switch to main/master branch 152 | self._switch_to_main_branch() 153 | 154 | # Update state 155 | self.update_state("current_repo", clone_path) 156 | 157 | return True 158 | 159 | except subprocess.CalledProcessError: 160 | self.log( 161 | "⚠️ Directory exists but is not a valid git repository. Removing and cloning fresh..." 
162 | ) 163 | shutil.rmtree(clone_path) 164 | if target_branch: 165 | subprocess.run( 166 | ["git", "clone", "-b", target_branch, repo_url, clone_path], 167 | check=True, 168 | ) 169 | else: 170 | subprocess.run(["git", "clone", repo_url, clone_path], check=True) 171 | return True 172 | 173 | finally: 174 | os.chdir(original_dir) 175 | 176 | def _switch_to_main_branch(self) -> None: 177 | """Switch to the main or master branch.""" 178 | try: 179 | subprocess.run(["git", "checkout", "main"], check=True, capture_output=True) 180 | self.update_state("current_branch", "main") 181 | except subprocess.CalledProcessError: 182 | try: 183 | subprocess.run( 184 | ["git", "checkout", "master"], check=True, capture_output=True 185 | ) 186 | self.update_state("current_branch", "master") 187 | except subprocess.CalledProcessError: 188 | self.log( 189 | "⚠️ Could not switch to main/master branch, staying on current branch", 190 | "warning", 191 | ) 192 | 193 | def create_unique_branch( 194 | self, base_name: str, repo_path: Optional[str] = None 195 | ) -> Optional[str]: 196 | """ 197 | Create a unique branch name that doesn't conflict with existing branches. 198 | 199 | Args: 200 | base_name: The base name for the branch 201 | repo_path: Path to the git repository (uses current repo if None) 202 | 203 | Returns: 204 | str: A unique branch name, or None if failed 205 | """ 206 | 207 | def _create_branch_operation(): 208 | target_repo = repo_path or self.get_state("current_repo") 209 | if not target_repo: 210 | raise ValueError("No repository path specified and no current repo set") 211 | 212 | # Get list of all branches 213 | result = subprocess.run( 214 | ["git", "branch", "-a"], 215 | capture_output=True, 216 | text=True, 217 | check=True, 218 | cwd=target_repo, 219 | ) 220 | 221 | existing_branches = set() 222 | for line in result.stdout.split("\n"): 223 | line = line.strip() 224 | if line and not line.startswith("*"): 225 | # Remove 'remotes/origin/' prefix if present 226 | branch_name = line.replace("remotes/origin/", "").strip() 227 | if branch_name and branch_name != "HEAD": 228 | existing_branches.add(branch_name) 229 | 230 | # Create the full branch name with orion prefix 231 | full_base_name = f"orion/{base_name}" 232 | 233 | # Check if full base name is available 234 | if full_base_name not in existing_branches: 235 | return full_base_name 236 | 237 | # Generate unique name with counter 238 | counter = 1 239 | while f"orion/{base_name}-{counter}" in existing_branches: 240 | counter += 1 241 | 242 | return f"orion/{base_name}-{counter}" 243 | 244 | branch_name = self.execute_with_tracking( 245 | "create_unique_branch", _create_branch_operation 246 | ) 247 | 248 | if branch_name: 249 | self.log(f"Generated unique branch name: {branch_name}") 250 | self.update_state("planned_branch", branch_name) 251 | 252 | return branch_name 253 | 254 | def create_and_switch_branch( 255 | self, branch_name: str, repo_path: Optional[str] = None 256 | ) -> bool: 257 | """ 258 | Create and switch to a new branch. 
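Equivalent to `git checkout -b <branch_name>` executed inside the
target repository.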
259 | 260 | Args: 261 | branch_name: Name of the branch to create 262 | repo_path: Path to the git repository (uses current repo if None) 263 | 264 | Returns: 265 | bool: True if successful, False otherwise 266 | """ 267 | 268 | def _branch_operation(): 269 | target_repo = repo_path or self.get_state("current_repo") 270 | if not target_repo: 271 | raise ValueError("No repository path specified and no current repo set") 272 | 273 | original_dir = os.getcwd() 274 | try: 275 | os.chdir(target_repo) 276 | self.log(f"Creating and switching to branch: {branch_name}") 277 | subprocess.run(["git", "checkout", "-b", branch_name], check=True) 278 | 279 | # Update state 280 | self.update_state("current_branch", branch_name) 281 | return True 282 | 283 | finally: 284 | os.chdir(original_dir) 285 | 286 | return ( 287 | self.execute_with_tracking("create_and_switch_branch", _branch_operation) 288 | is not None 289 | ) 290 | 291 | def commit_changes(self, message: str, repo_path: Optional[str] = None) -> bool: 292 | """ 293 | Stage and commit all changes in the repository. 294 | 295 | Args: 296 | message: Commit message 297 | repo_path: Path to the git repository (uses current repo if None) 298 | 299 | Returns: 300 | bool: True if successful, False otherwise 301 | """ 302 | 303 | def _commit_operation(): 304 | # Don't add prefix if message already has orion prefix 305 | if not message.startswith(":robot: [orion]"): 306 | formatted_message = f":robot: [orion] {message}" 307 | else: 308 | formatted_message = message 309 | 310 | target_repo = repo_path or self.get_state("current_repo") 311 | if not target_repo: 312 | raise ValueError("No repository path specified and no current repo set") 313 | 314 | original_dir = os.getcwd() 315 | try: 316 | os.chdir(target_repo) 317 | 318 | # Stage all changes 319 | self.log("Staging all changes...") 320 | subprocess.run(["git", "add", "."], check=True) 321 | 322 | # Commit changes 323 | self.log(f"Committing with message: {formatted_message}") 324 | subprocess.run(["git", "commit", "-m", formatted_message], check=True) 325 | 326 | # Update state 327 | commit_info = { 328 | "message": formatted_message, 329 | "timestamp": time.time(), 330 | "branch": self.get_state("current_branch"), 331 | } 332 | commits = self.get_state("commits", []) 333 | commits.append(commit_info) 334 | self.update_state("commits", commits) 335 | 336 | return True 337 | 338 | finally: 339 | os.chdir(original_dir) 340 | 341 | return ( 342 | self.execute_with_tracking("commit_changes", _commit_operation) is not None 343 | ) 344 | 345 | def push_branch( 346 | self, branch_name: Optional[str] = None, repo_path: Optional[str] = None 347 | ) -> bool: 348 | """ 349 | Push the current branch to origin. 
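Equivalent to `git push origin <branch>`; when arguments are omitted,
the agent's tracked current repository and branch are used.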
350 | 
351 |         Args:
352 |             branch_name: Name of the branch to push (uses current branch if None)
353 |             repo_path: Path to the git repository (uses current repo if None)
354 | 
355 |         Returns:
356 |             bool: True if successful, False otherwise
357 |         """
358 | 
359 |         def _push_operation():
360 |             target_repo = repo_path or self.get_state("current_repo")
361 |             target_branch = branch_name or self.get_state("current_branch")
362 | 
363 |             if not target_repo:
364 |                 raise ValueError("No repository path specified and no current repo set")
365 |             if not target_branch:
366 |                 raise ValueError("No branch name specified and no current branch set")
367 | 
368 |             original_dir = os.getcwd()
369 |             try:
370 |                 os.chdir(target_repo)
371 |                 self.log(f"Pushing branch: {target_branch}")
372 |                 subprocess.run(["git", "push", "origin", target_branch], check=True)
373 | 
374 |                 # Update state
375 |                 self.update_state("last_pushed_branch", target_branch)
376 |                 return True
377 | 
378 |             finally:
379 |                 os.chdir(original_dir)
380 | 
381 |         return self.execute_with_tracking("push_branch", _push_operation) is not None
382 | 
383 |     def get_repository_status(self, repo_path: Optional[str] = None) -> Optional[dict]:
384 |         """
385 |         Get the current status of the repository.
386 | 
387 |         Args:
388 |             repo_path: Path to the git repository (uses current repo if None)
389 | 
390 |         Returns:
391 |             dict: Repository status information, or None if failed
392 |         """
393 | 
394 |         def _status_operation():
395 |             target_repo = repo_path or self.get_state("current_repo")
396 |             if not target_repo:
397 |                 raise ValueError("No repository path specified and no current repo set")
398 | 
399 |             original_dir = os.getcwd()
400 |             try:
401 |                 os.chdir(target_repo)
402 | 
403 |                 # Get current branch
404 |                 branch_result = subprocess.run(
405 |                     ["git", "branch", "--show-current"],
406 |                     capture_output=True,
407 |                     text=True,
408 |                     check=True,
409 |                 )
410 |                 current_branch = branch_result.stdout.strip()
411 | 
412 |                 # Get status
413 |                 status_result = subprocess.run(
414 |                     ["git", "status", "--porcelain"],
415 |                     capture_output=True,
416 |                     text=True,
417 |                     check=True,
418 |                 )
419 | 
420 |                 # Count changes
421 |                 status_lines = (
422 |                     status_result.stdout.strip().split("\n")
423 |                     if status_result.stdout.strip()
424 |                     else []
425 |                 )
426 |                 modified_files = [
427 |                     line for line in status_lines if "M" in line[:2]  # staged or unstaged edits
428 |                 ]
429 |                 added_files = [line for line in status_lines if line.startswith("A")]
430 |                 untracked_files = [
431 |                     line for line in status_lines if line.startswith("??")
432 |                 ]
433 | 
434 |                 return {
435 |                     "current_branch": current_branch,
436 |                     "modified_files": len(modified_files),
437 |                     "added_files": len(added_files),
438 |                     "untracked_files": len(untracked_files),
439 |                     "has_changes": len(status_lines) > 0,
440 |                     "repo_path": target_repo,
441 |                 }
442 | 
443 |             finally:
444 |                 os.chdir(original_dir)
445 | 
446 |         return self.execute_with_tracking("get_repository_status", _status_operation)
447 | 
448 |     def execute(self, action: str, **kwargs) -> any:
449 |         """
450 |         Main execution method for the Git Operations Agent.
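        Example (illustrative; each action name maps to one of the methods
        above, and kwargs are forwarded unchanged):
            agent.execute("commit", message="update checkpoint path")
            agent.execute("push")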
451 | 
452 |         Args:
453 |             action: The action to perform
454 |             **kwargs: Action-specific arguments
455 | 
456 |         Returns:
457 |             Result of the action
458 |         """
459 |         action_map = {
460 |             "clone": self.clone_repository,
461 |             "create_branch": self.create_unique_branch,
462 |             "switch_branch": self.create_and_switch_branch,
463 |             "commit": self.commit_changes,
464 |             "push": self.push_branch,
465 |             "status": self.get_repository_status,
466 |         }
467 | 
468 |         if action not in action_map:
469 |             self.log(f"Unknown action: {action}", "error")
470 |             return None
471 | 
472 |         try:
473 |             return action_map[action](**kwargs)
474 |         except Exception as e:
475 |             self.log(f"Error executing action {action}: {e}", "error")
476 |             return None
477 | 
--------------------------------------------------------------------------------
/src/advanced/orion_ai_coding_agent/src/agents/code_tester_agent.py:
--------------------------------------------------------------------------------
1 | """
2 | Code Tester Agent for Orion AI Agent System
3 | 
4 | This agent handles code testing and validation operations.
5 | """
6 | 
7 | import io
8 | import os
9 | import subprocess
10 | import sys
11 | import tempfile
12 | import time
13 | from typing import Any, Dict, List, Optional
14 | 
15 | sys.path.append(os.path.dirname(os.path.dirname(os.path.dirname(os.path.abspath(__file__)))))  # package root, so `src.*` imports resolve
16 | from src.base_agent import BaseAgent
17 | 
18 | 
19 | class CodeTesterAgent(BaseAgent):
20 |     """
21 |     Agent responsible for code testing and validation operations.
22 | 
23 |     Capabilities:
24 |     - Test generated code
25 |     - Create test wrappers
26 |     - Validate syntax
27 |     - Generate test reports
28 |     """
29 | 
30 |     def __init__(self, debug: bool = False):
31 |         """
32 |         Initialize the Code Tester Agent.
33 | 
34 |         Args:
35 |             debug: Whether to enable debug mode
36 |         """
37 |         super().__init__("CodeTester", debug)
38 |         self.update_state("test_results", {})
39 |         self.update_state("test_history", [])
40 |         self.update_state("last_test_session", None)
41 | 
42 |     def test_generated_code(
43 |         self, repo_path: str, venv_python: str, created_files: List[str]
44 |     ) -> bool:
45 |         """
46 |         Test the generated code to ensure it runs without errors.
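        Example (illustrative; all paths are hypothetical):
            tester = CodeTesterAgent(debug=True)
            ok = tester.test_generated_code(
                "/tmp/repo", "/tmp/repo/venv/bin/python", ["generated_script.py"]
            )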
47 | 48 | Args: 49 | repo_path: Path to the repository 50 | venv_python: Path to the virtual environment Python executable 51 | created_files: List of files that were created 52 | 53 | Returns: 54 | bool: True if all tests passed 55 | """ 56 | 57 | def _test_operation(): 58 | if not created_files: 59 | self.log("⚠️ No files to test", "warning") 60 | return True 61 | 62 | self.log("🧪 Testing generated code...") 63 | 64 | test_session = { 65 | "repo_path": repo_path, 66 | "venv_python": venv_python, 67 | "files_tested": [], 68 | "results": {}, 69 | "start_time": time.time(), 70 | "all_passed": True, 71 | } 72 | 73 | all_tests_passed = True 74 | 75 | for filename in created_files: 76 | if not filename.endswith(".py"): 77 | continue 78 | 79 | filepath = os.path.join(repo_path, filename) 80 | if not os.path.exists(filepath): 81 | continue 82 | 83 | try: 84 | self.log(f" Testing {filename}...") 85 | 86 | # First, check syntax by compiling 87 | with open(filepath, "r") as f: 88 | code = f.read() 89 | 90 | syntax_ok = self._check_syntax(code, filepath, filename) 91 | if not syntax_ok: 92 | all_tests_passed = False 93 | test_session["results"][filename] = { 94 | "syntax_check": False, 95 | "execution_check": False, 96 | "error": "Syntax error", 97 | } 98 | continue 99 | 100 | # Create and run test wrapper 101 | execution_ok = self._create_and_run_test_wrapper( 102 | repo_path, filename, venv_python 103 | ) 104 | 105 | test_session["results"][filename] = { 106 | "syntax_check": True, 107 | "execution_check": execution_ok, 108 | "error": None if execution_ok else "Execution failed", 109 | } 110 | test_session["files_tested"].append(filename) 111 | 112 | if execution_ok: 113 | self.log(f" ✅ {filename} - All checks passed") 114 | else: 115 | self.log(f" ❌ {filename} - Execution failed") 116 | all_tests_passed = False 117 | 118 | except Exception as e: 119 | self.log(f" ❌ {filename} - Test failed: {e}", "error") 120 | test_session["results"][filename] = { 121 | "syntax_check": False, 122 | "execution_check": False, 123 | "error": str(e), 124 | } 125 | all_tests_passed = False 126 | 127 | test_session["end_time"] = time.time() 128 | test_session["duration"] = ( 129 | test_session["end_time"] - test_session["start_time"] 130 | ) 131 | test_session["all_passed"] = all_tests_passed 132 | 133 | # Update state 134 | self.update_state("last_test_session", test_session) 135 | test_history = self.get_state("test_history", []) 136 | test_history.append(test_session) 137 | self.update_state("test_history", test_history) 138 | 139 | test_results = self.get_state("test_results", {}) 140 | test_results[repo_path] = test_session 141 | self.update_state("test_results", test_results) 142 | 143 | if all_tests_passed: 144 | self.log("✅ All code tests passed") 145 | else: 146 | self.log("❌ Some tests failed") 147 | 148 | return all_tests_passed 149 | 150 | return ( 151 | self.execute_with_tracking("test_generated_code", _test_operation) or False 152 | ) 153 | 154 | def _check_syntax(self, code: str, filepath: str, filename: str) -> bool: 155 | """ 156 | Check the syntax of the code by compiling it. 
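        Example (illustrative):
            self._check_syntax("print('ok')", "/tmp/x.py", "x.py")   # -> True
            self._check_syntax("def broken(:", "/tmp/y.py", "y.py")  # -> False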
157 | 
158 |         Args:
159 |             code: Code content
160 |             filepath: Full path to the file
161 |             filename: Name of the file
162 | 
163 |         Returns:
164 |             bool: True if syntax is valid
165 |         """
166 |         try:
167 |             compile(code, filepath, "exec")
168 |             self.log(f" ✅ {filename} - Syntax OK")
169 |             return True
170 |         except SyntaxError as e:
171 |             self.log(f" ❌ {filename} - Syntax Error: {e}", "error")
172 |             return False
173 | 
174 |     def _create_and_run_test_wrapper(
175 |         self, repo_path: str, filename: str, venv_python: str
176 |     ) -> bool:
177 |         """
178 |         Create a test wrapper script that provides dummy inputs and runs the target script.
179 | 
180 |         Args:
181 |             repo_path: Path to the repository
182 |             filename: Name of the file to test
183 |             venv_python: Path to the virtual environment Python executable
184 | 
185 |         Returns:
186 |             bool: True if execution was successful
187 |         """
188 |         try:
189 |             # Read the original file to analyze what it needs
190 |             filepath = os.path.join(repo_path, filename)
191 |             with open(filepath, "r") as f:
192 |                 code = f.read()
193 | 
194 |             # Create a test wrapper script
195 |             test_wrapper_content = self._generate_test_wrapper(code, filename)
196 | 
197 |             # Write the test wrapper
198 |             test_wrapper_path = os.path.join(repo_path, f"test_{filename}")
199 |             with open(test_wrapper_path, "w") as f:
200 |                 f.write(test_wrapper_content)
201 | 
202 |             # Run the test wrapper
203 |             try:
204 |                 result = subprocess.run(
205 |                     [venv_python, f"test_{filename}"],
206 |                     cwd=repo_path,
207 |                     capture_output=True,
208 |                     text=True,
209 |                     timeout=60,  # 60 second timeout
210 |                 )
211 | 
212 |                 # Clean up the test wrapper
213 |                 os.remove(test_wrapper_path)
214 | 
215 |                 if result.returncode == 0:
216 |                     if result.stdout:
217 |                         self.log(f" Output: {result.stdout[:200]}...", "debug")
218 |                     return True
219 |                 else:
220 |                     self.log(f" ❌ Exit code: {result.returncode}")
221 |                     if result.stderr:
222 |                         self.log(f" Error: {result.stderr[:300]}...", "error")
223 |                     if result.stdout:
224 |                         self.log(f" Output: {result.stdout[:200]}...", "debug")
225 |                     return False
226 | 
227 |             except subprocess.TimeoutExpired:
228 |                 self.log(" ❌ Execution timeout (60s)", "error")
229 |                 # Clean up the test wrapper
230 |                 try:
231 |                     os.remove(test_wrapper_path)
232 |                 except OSError:
233 |                     pass
234 |                 return False
235 |             except Exception as e:
236 |                 self.log(f" ❌ Execution error: {e}", "error")
237 |                 # Clean up the test wrapper
238 |                 try:
239 |                     os.remove(test_wrapper_path)
240 |                 except OSError:
241 |                     pass
242 |                 return False
243 | 
244 |         except Exception as e:
245 |             self.log(f" ❌ Test wrapper creation failed: {e}", "error")
246 |             return False
247 | 
248 |     def _generate_test_wrapper(self, code: str, filename: str) -> str:
249 |         """
250 |         Generate a test wrapper script that provides dummy inputs and handles common scenarios.
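        Example (illustrative; the returned source is what
        _create_and_run_test_wrapper writes to disk as test_<filename> and runs):
            wrapper_src = self._generate_test_wrapper(code, "script.py")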
251 | 
252 |         Args:
253 |             code: The original code content
254 |             filename: Name of the original file
255 | 
256 |         Returns:
257 |             str: Test wrapper script content
258 |         """
259 |         # Analyze the code to determine what kind of dummy inputs we need
260 |         needs_image = any(
261 |             keyword in code.lower()
262 |             for keyword in ["image", "pil", "cv2", "pillow", "imread"]
263 |         )
264 |         needs_text = any(
265 |             keyword in code.lower() for keyword in ["text", "input(", "clip"]
266 |         )
267 |         needs_file_path = "file" in code.lower() and any(
268 |             keyword in code for keyword in ["open(", "load", "read"]
269 |         )
270 |         has_main_guard = 'if __name__ == "__main__"' in code
271 | 
272 |         wrapper = f'''#!/usr/bin/env python3
273 | """
274 | Test wrapper for {filename}
275 | This script provides dummy inputs and tests the execution.
276 | """
277 | 
278 | import sys
279 | import os
280 | import tempfile
281 | import io
282 | from unittest.mock import patch, MagicMock
283 | 
284 | # Add current directory to path
285 | sys.path.insert(0, '.')
286 | 
287 | def create_dummy_image():
288 |     """Create a dummy image for testing."""
289 |     try:
290 |         from PIL import Image
291 |         import numpy as np
292 |         # Create a small dummy image
293 |         dummy_img = Image.fromarray(np.random.randint(0, 255, (224, 224, 3), dtype=np.uint8))
294 |         return dummy_img
295 |     except ImportError:
296 |         return None
297 | 
298 | def create_dummy_file():
299 |     """Create a dummy file for testing."""
300 |     temp_file = tempfile.NamedTemporaryFile(mode='w', delete=False, suffix='.txt')
301 |     temp_file.write("This is a dummy file for testing purposes.\\n")
302 |     temp_file.write("It contains sample text content.\\n")
303 |     temp_file.close()
304 |     return temp_file.name
305 | 
306 | def mock_input_function(prompt=""):
307 |     """Mock input function that returns dummy values."""
308 |     if 'image' in prompt.lower() or 'file' in prompt.lower():
309 |         return create_dummy_file()
310 |     elif 'text' in prompt.lower():
311 |         return "This is a sample text for testing"
312 |     else:
313 |         return "dummy_input"
314 | 
315 | def main():
316 |     """Run the test with proper mocking and error handling."""
317 |     print(f"🧪 Testing {filename}...")
318 | 
319 |     # Prepare dummy data
320 |     dummy_inputs = ["dummy_input", "test", "sample", create_dummy_file()]
321 |     input_counter = 0
322 | 
323 |     def mock_input_with_counter(prompt=""):
324 |         nonlocal input_counter
325 |         if input_counter < len(dummy_inputs):
326 |             result = dummy_inputs[input_counter]
327 |             input_counter += 1
328 |             return str(result)
329 |         return mock_input_function(prompt)
330 | 
331 |     # input() is patched below via unittest.mock.patch; file access is left
332 |     # unmocked so scripts that read real files keep working, and a missing
333 |     # file is exactly the kind of failure this wrapper should surface.
334 |     # (No literal mock table is built here: unescaped braces inside the
335 |     # enclosing f-string would be parsed as a replacement field.)
336 | 
337 |     # Additional mocks for image processing
338 |     if {needs_image}:
339 |         try:
340 |             from PIL import Image
341 |             dummy_image = create_dummy_image()
342 |             if dummy_image:
343 |                 Image.open = lambda *args, **kwargs: dummy_image
344 |         except ImportError:
345 |             pass
346 | 
347 |     try:
348 |         # Capture stdout/stderr
349 |         old_stdout = sys.stdout
350 |         old_stderr = sys.stderr
351 | 
352 |         captured_output = io.StringIO()
353 |         captured_errors = io.StringIO()
354 | 
355 |         sys.stdout = captured_output
356 |         sys.stderr = captured_errors
357 | 
358 |         # Mock input and other problematic functions
359 |         with patch('builtins.input', side_effect=mock_input_with_counter):
360 | '''
361 | 
362 |         # Add the import and execution of the original module
363 |         module_name = filename.replace(".py", "")
364 | 
365 |         if has_main_guard:
366 |             # If the script has a main guard, we can import it safely
367 |             wrapper += f"""
368 |             # Import and run the module
369 |             import {module_name}
370 | 
371 |             # If there's a main function, try to call it
372 |             if hasattr({module_name}, 'main'):
373 |                 {module_name}.main()
374 |             elif hasattr({module_name}, 'run'):
375 |                 {module_name}.run()
376 | """
377 |         else:
378 |             # If no main guard, execute the file directly but carefully
379 |             wrapper += f"""
380 |             # Execute the file content
381 |             with open('{filename}', 'r') as f:
382 |                 code_content = f.read()
383 | 
384 |             # Execute in a controlled environment
385 |             exec(code_content, {{'__name__': '__main__'}})
386 | """
387 | 
388 |         wrapper += f"""
389 |         # Restore stdout/stderr
390 |         sys.stdout = old_stdout
391 |         sys.stderr = old_stderr
392 | 
393 |         # Check for any errors
394 |         error_output = captured_errors.getvalue()
395 |         if error_output and ('error' in error_output.lower() or 'exception' in error_output.lower()):
396 |             print("❌ Errors detected in output:")
397 |             print(error_output[:500])
398 |             return False
399 | 
400 |         # Print captured output (truncated)
401 |         output = captured_output.getvalue()
402 |         if output:
403 |             print("✅ Script executed successfully. Output:")
404 |             print(output[:300] + "..." if len(output) > 300 else output)
405 |         else:
406 |             print("✅ Script executed successfully (no output)")
407 | 
408 |         return True
409 | 
410 |     except Exception as e:
411 |         # Restore stdout/stderr
412 |         sys.stdout = old_stdout
413 |         sys.stderr = old_stderr
414 | 
415 |         print(f"❌ Execution failed: {{type(e).__name__}}: {{e}}")
416 |         return False
417 | 
418 |     finally:
419 |         # Clean up any temporary files
420 |         for dummy_input in dummy_inputs:
421 |             if isinstance(dummy_input, str) and dummy_input.startswith(tempfile.gettempdir()) and os.path.exists(dummy_input):
422 |                 try:
423 |                     os.unlink(dummy_input)
424 |                 except OSError:
425 |                     pass
426 | 
427 | if __name__ == "__main__":
428 |     success = main()
429 |     sys.exit(0 if success else 1)
430 | """
431 | 
432 |         return wrapper
433 | 
434 |     def get_test_results(self, repo_path: Optional[str] = None) -> Dict:
435 |         """
436 |         Get test results for a specific repository or all repositories.
437 | 
438 |         Args:
439 |             repo_path: Path to the repository (optional)
440 | 
441 |         Returns:
442 |             Dict: Test results
443 |         """
444 |         test_results = self.get_state("test_results", {})
445 | 
446 |         if repo_path:
447 |             return test_results.get(repo_path, {})
448 | 
449 |         return test_results
450 | 
451 |     def get_test_summary(self) -> Dict[str, Any]:
452 |         """
453 |         Get a summary of all test sessions.
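        Example (illustrative; keys mirror the dict assembled below):
            summary = tester.get_test_summary()
            print(summary["success_rate"], summary["total_files_tested"])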
454 | 
455 |         Returns:
456 |             Dict: Test summary
457 |         """
458 |         test_history = self.get_state("test_history", [])
459 | 
460 |         if not test_history:
461 |             return {
462 |                 "total_sessions": 0,
463 |                 "total_files_tested": 0,
464 |                 "success_rate": 0,
465 |                 "average_duration": 0,
466 |             }
467 | 
468 |         total_sessions = len(test_history)
469 |         total_files_tested = sum(
470 |             len(session.get("files_tested", [])) for session in test_history
471 |         )
472 |         successful_sessions = sum(
473 |             1 for session in test_history if session.get("all_passed", False)
474 |         )
475 |         total_duration = sum(session.get("duration", 0) for session in test_history)
476 | 
477 |         return {
478 |             "total_sessions": total_sessions,
479 |             "successful_sessions": successful_sessions,
480 |             "failed_sessions": total_sessions - successful_sessions,
481 |             "success_rate": (
482 |                 (successful_sessions / total_sessions * 100)
483 |                 if total_sessions > 0
484 |                 else 0
485 |             ),
486 |             "total_files_tested": total_files_tested,
487 |             "average_files_per_session": (
488 |                 total_files_tested / total_sessions if total_sessions > 0 else 0
489 |             ),
490 |             "total_duration": total_duration,
491 |             "average_duration": (
492 |                 total_duration / total_sessions if total_sessions > 0 else 0
493 |             ),
494 |         }
495 | 
496 |     def execute(self, action: str, **kwargs) -> Any:
497 |         """
498 |         Main execution method for the Code Tester Agent.
499 | 
500 |         Args:
501 |             action: The action to perform
502 |             **kwargs: Action-specific arguments
503 | 
504 |         Returns:
505 |             Result of the action
506 |         """
507 |         action_map = {
508 |             "test": self.test_generated_code,
509 |             "results": self.get_test_results,
510 |             "summary": self.get_test_summary,
511 |         }
512 | 
513 |         if action not in action_map:
514 |             self.log(f"Unknown action: {action}", "error")
515 |             return None
516 | 
517 |         try:
518 |             return action_map[action](**kwargs)
519 |         except Exception as e:
520 |             self.log(f"Error executing action {action}: {e}", "error")
521 |             return None
522 | 
--------------------------------------------------------------------------------
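
Illustrative usage sketch (hypothetical; not a file in the repository): driving GitOperationsAgent and CodeTesterAgent together through their execute() dispatchers, using only the actions defined above. All paths and names are made up for the example, and the repository is assumed to be already cloned.

    git_agent = GitOperationsAgent()
    tester = CodeTesterAgent(debug=True)
    repo = "/tmp/demo-repo"  # assumed: an already-cloned working tree

    # Pick a non-conflicting orion/ branch name, then create and switch to it
    branch = git_agent.execute("create_branch", base_name="demo-change", repo_path=repo)
    git_agent.execute("switch_branch", branch_name=branch, repo_path=repo)

    # ... edit or generate files in the working tree ...

    # Only commit and push if the generated code passes the wrapper tests
    if tester.execute(
        "test",
        repo_path=repo,
        venv_python="/tmp/demo-venv/bin/python",  # hypothetical interpreter path
        created_files=["generated_script.py"],
    ):
        git_agent.execute("commit", message="demo change", repo_path=repo)
        git_agent.execute("push", branch_name=branch, repo_path=repo)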