├── agentforge ├── ui │ └── __init__.py ├── core │ ├── __init__.py │ ├── event_bus_patch.py │ ├── config.py │ ├── llm_provider.py │ ├── ai_tool_creator.py │ ├── intelligent_tool_creator.py │ ├── master_agent_tools.py │ └── file_based_crew_designer.py ├── memory │ └── __init__.py ├── tools │ ├── __init__.py │ └── guardrails.py ├── knowledge_base │ └── __init__.py ├── templates │ ├── __init__.py │ └── template_manager.py ├── agents │ ├── __init__.py │ └── crew_orchestrator_agent.py ├── database │ ├── __init__.py │ ├── models.py │ └── database.py ├── logging │ ├── __init__.py │ ├── error_handler.py │ └── logger.py ├── analytics │ └── __init__.py ├── __init__.py └── examples │ └── adaptive_agent_example.py ├── crews ├── simple_writer │ ├── src │ │ └── simple_writer │ │ │ ├── tools │ │ │ ├── __init__.py │ │ │ └── custom_tools.py │ │ │ ├── __init__.py │ │ │ ├── main.py │ │ │ └── crew.py │ ├── pyproject.toml │ ├── run.sh │ ├── config │ │ ├── tasks.yaml │ │ └── agents.yaml │ └── README.md └── tech_blog_writer_final │ ├── src │ └── tech_blog_writer_final │ │ ├── tools │ │ ├── __init__.py │ │ └── custom_tools.py │ │ ├── __init__.py │ │ ├── main.py │ │ └── crew.py │ ├── pyproject.toml │ ├── run.sh │ ├── config │ ├── tasks.yaml │ └── agents.yaml │ └── README.md ├── MANIFEST.in ├── .pre-commit-config.yaml ├── LICENSE ├── pyproject.toml ├── .github └── workflows │ └── ci.yml └── .gitignore /agentforge/ui/__init__.py: -------------------------------------------------------------------------------- 1 | """User interface components.""" -------------------------------------------------------------------------------- /agentforge/core/__init__.py: -------------------------------------------------------------------------------- 1 | """Core functionality for agentforge.""" -------------------------------------------------------------------------------- /agentforge/memory/__init__.py: -------------------------------------------------------------------------------- 1 | """Memory management for agents.""" -------------------------------------------------------------------------------- /agentforge/tools/__init__.py: -------------------------------------------------------------------------------- 1 | """Tools and integrations for agents.""" -------------------------------------------------------------------------------- /agentforge/knowledge_base/__init__.py: -------------------------------------------------------------------------------- 1 | """Knowledge base and RAG functionality.""" -------------------------------------------------------------------------------- /crews/simple_writer/src/simple_writer/tools/__init__.py: -------------------------------------------------------------------------------- 1 | """Custom tools package.""" 2 | -------------------------------------------------------------------------------- /crews/tech_blog_writer_final/src/tech_blog_writer_final/tools/__init__.py: -------------------------------------------------------------------------------- 1 | """Custom tools package.""" 2 | -------------------------------------------------------------------------------- /crews/simple_writer/src/simple_writer/__init__.py: -------------------------------------------------------------------------------- 1 | """ 2 | simple_writer - CrewAI Project 3 | 4 | Generated by agentforge. 
5 | """ 6 | 7 | from .crew import SimpleWriterCrew 8 | 9 | __version__ = "1.0.0" 10 | __all__ = ["SimpleWriterCrew"] 11 | -------------------------------------------------------------------------------- /crews/tech_blog_writer_final/src/tech_blog_writer_final/__init__.py: -------------------------------------------------------------------------------- 1 | """ 2 | tech_blog_writer_final - CrewAI Project 3 | 4 | Generated by agentforge. 5 | """ 6 | 7 | from .crew import TechBlogWriterFinalCrew 8 | 9 | __version__ = "1.0.0" 10 | __all__ = ["TechBlogWriterFinalCrew"] 11 | -------------------------------------------------------------------------------- /agentforge/templates/__init__.py: -------------------------------------------------------------------------------- 1 | """ 2 | Crew Templates and Pattern Library for agentforge. 3 | 4 | This module provides pre-built crew patterns for common use cases, 5 | making it easier for users to get started with specific types of tasks. 6 | """ 7 | 8 | from .crew_template_library import CrewTemplateLibrary, CrewTemplate 9 | from .template_manager import TemplateManager 10 | 11 | __all__ = ["CrewTemplateLibrary", "CrewTemplate", "TemplateManager"] 12 | -------------------------------------------------------------------------------- /agentforge/agents/__init__.py: -------------------------------------------------------------------------------- 1 | """ 2 | agentforge AI Agents. 3 | 4 | This module contains intelligent AI agents that handle different aspects 5 | of crew creation and management using CrewAI architecture. 6 | """ 7 | 8 | from .task_analyzer_agent import TaskAnalyzerAgent 9 | from .agent_designer_agent import AgentDesignerAgent 10 | from .crew_orchestrator_agent import CrewOrchestratorAgent 11 | 12 | __all__ = [ 13 | "TaskAnalyzerAgent", 14 | "AgentDesignerAgent", 15 | "CrewOrchestratorAgent" 16 | ] -------------------------------------------------------------------------------- /MANIFEST.in: -------------------------------------------------------------------------------- 1 | include README.md 2 | include LICENSE 3 | include CHANGELOG.md 4 | include pyproject.toml 5 | include uv.lock 6 | recursive-include agentforge *.py 7 | recursive-include agentforge *.yaml 8 | recursive-include agentforge *.yml 9 | recursive-include agentforge *.json 10 | recursive-exclude * __pycache__ 11 | recursive-exclude * *.py[co] 12 | recursive-exclude * .DS_Store 13 | exclude .gitignore 14 | exclude .pre-commit-config.yaml 15 | prune venv 16 | prune .git 17 | prune db 18 | prune crews 19 | prune agentforge.egg-info -------------------------------------------------------------------------------- /agentforge/database/__init__.py: -------------------------------------------------------------------------------- 1 | """ 2 | Database module for AgentForge. 3 | 4 | This module provides database functionality for storing and managing 5 | agents, crews, and execution logs. 6 | """ 7 | 8 | from .database import Database, AgentRepository, CrewRepository, ExecutionLogRepository 9 | from .models import CrewModel, AgentModel, ExecutionResult 10 | 11 | __all__ = [ 12 | "Database", 13 | "AgentRepository", 14 | "CrewRepository", 15 | "ExecutionLogRepository", 16 | "CrewModel", 17 | "AgentModel", 18 | "ExecutionResult" 19 | ] 20 | -------------------------------------------------------------------------------- /agentforge/logging/__init__.py: -------------------------------------------------------------------------------- 1 | """ 2 | Enhanced logging and error handling for agentforge. 
3 | 4 | This module provides comprehensive logging, error tracking, and debugging capabilities 5 | to improve reliability and troubleshooting. 6 | """ 7 | 8 | from .logger import AgentForgeLogger, LogLevel 9 | from .error_handler import ErrorHandler, ErrorContext 10 | from .debug_tracer import DebugTracer, TraceEvent 11 | 12 | __all__ = [ 13 | "AgentForgeLogger", 14 | "LogLevel", 15 | "ErrorHandler", 16 | "ErrorContext", 17 | "DebugTracer", 18 | "TraceEvent" 19 | ] 20 | -------------------------------------------------------------------------------- /crews/simple_writer/pyproject.toml: -------------------------------------------------------------------------------- 1 | [project] 2 | name = "simple_writer" 3 | version = "1.0.0" 4 | description = "AI-orchestrated crew for: Create a simple text generator that writes basic content without external tools" 5 | readme = "README.md" 6 | requires-python = ">=3.10" 7 | dependencies = [ 8 | "crewai", 9 | "crewai-tools", 10 | "pydantic", 11 | "pyyaml" 12 | ] 13 | 14 | [project.scripts] 15 | simple_writer = "src.simple_writer.main:main" 16 | 17 | [build-system] 18 | requires = ["hatchling"] 19 | build-backend = "hatchling.build" 20 | 21 | [tool.uv] 22 | dev-dependencies = [ 23 | "pytest>=7.0.0", 24 | "black>=23.0.0", 25 | "flake8>=6.0.0" 26 | ] 27 | -------------------------------------------------------------------------------- /agentforge/analytics/__init__.py: -------------------------------------------------------------------------------- 1 | """ 2 | Performance Analytics for agentforge. 3 | 4 | This module provides comprehensive analytics and monitoring for crew execution, 5 | including performance tracking, cost analysis, and optimization recommendations. 6 | """ 7 | 8 | from .performance_tracker import PerformanceTracker, ExecutionMetrics 9 | from .cost_analyzer import CostAnalyzer, CostEstimate 10 | from .optimization_engine import OptimizationEngine, OptimizationRecommendation 11 | 12 | __all__ = [ 13 | "PerformanceTracker", 14 | "ExecutionMetrics", 15 | "CostAnalyzer", 16 | "CostEstimate", 17 | "OptimizationEngine", 18 | "OptimizationRecommendation" 19 | ] 20 | -------------------------------------------------------------------------------- /crews/tech_blog_writer_final/pyproject.toml: -------------------------------------------------------------------------------- 1 | [project] 2 | name = "tech_blog_writer_final" 3 | version = "1.0.0" 4 | description = "AI-orchestrated crew for: Create a simple blog writer who can write informative blog posts about technology topics" 5 | readme = "README.md" 6 | requires-python = ">=3.10" 7 | dependencies = [ 8 | "crewai", 9 | "crewai-tools", 10 | "pydantic", 11 | "pyyaml" 12 | ] 13 | 14 | [project.scripts] 15 | tech_blog_writer_final = "src.tech_blog_writer_final.main:main" 16 | 17 | [build-system] 18 | requires = ["hatchling"] 19 | build-backend = "hatchling.build" 20 | 21 | [tool.uv] 22 | dev-dependencies = [ 23 | "pytest>=7.0.0", 24 | "black>=23.0.0", 25 | "flake8>=6.0.0" 26 | ] 27 | -------------------------------------------------------------------------------- /crews/simple_writer/run.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | # Run script for simple_writer crew 3 | 4 | set -e 5 | 6 | echo "Starting simple_writer crew..." 7 | 8 | # Check if uv is installed 9 | if ! command -v uv &> /dev/null; then 10 | echo "uv is not installed. Installing uv..." 
11 | curl -LsSf https://astral.sh/uv/install.sh | sh 12 | export PATH="$HOME/.cargo/bin:$PATH" 13 | fi 14 | 15 | # Create virtual environment if it doesn't exist 16 | if [ ! -d ".venv" ]; then 17 | echo "Creating virtual environment with uv..." 18 | uv venv 19 | fi 20 | 21 | # Install dependencies 22 | echo "Installing dependencies with uv..." 23 | uv sync 24 | 25 | # Run the crew 26 | echo "Running crew with arguments: $@" 27 | uv run python -m src.simple_writer.main "$@" 28 | -------------------------------------------------------------------------------- /crews/tech_blog_writer_final/run.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | # Run script for tech_blog_writer_final crew 3 | 4 | set -e 5 | 6 | echo "Starting tech_blog_writer_final crew..." 7 | 8 | # Check if uv is installed 9 | if ! command -v uv &> /dev/null; then 10 | echo "uv is not installed. Installing uv..." 11 | curl -LsSf https://astral.sh/uv/install.sh | sh 12 | export PATH="$HOME/.cargo/bin:$PATH" 13 | fi 14 | 15 | # Create virtual environment if it doesn't exist 16 | if [ ! -d ".venv" ]; then 17 | echo "Creating virtual environment with uv..." 18 | uv venv 19 | fi 20 | 21 | # Install dependencies 22 | echo "Installing dependencies with uv..." 23 | uv sync 24 | 25 | # Run the crew 26 | echo "Running crew with arguments: $@" 27 | uv run python -m src.tech_blog_writer_final.main "$@" 28 | -------------------------------------------------------------------------------- /crews/simple_writer/config/tasks.yaml: -------------------------------------------------------------------------------- 1 | main_task: 2 | description: Create a simple text generator that writes basic content without external 3 | tools 4 | expected_output: 'Complete results for: Create a simple text generator that writes 5 | basic content without external tools' 6 | agent: Social Media Content Text_Generator Specialist_specialist 7 | social media content language_model specialist_task: 8 | description: As a Social Media Content Language_Model Specialist, process the research 9 | findings and create deliverable outputs based on your expertise. 
10 | expected_output: Actionable deliverables and outputs from a Social Media Content 11 | Language_Model Specialist perspective 12 | agent: Social Media Content Language_Model Specialist_specialist 13 | context: 14 | - main_task 15 | -------------------------------------------------------------------------------- /.pre-commit-config.yaml: -------------------------------------------------------------------------------- 1 | repos: 2 | - repo: https://github.com/pre-commit/pre-commit-hooks 3 | rev: v4.6.0 4 | hooks: 5 | - id: trailing-whitespace 6 | - id: end-of-file-fixer 7 | - id: check-yaml 8 | - id: check-added-large-files 9 | - id: check-merge-conflict 10 | - id: check-toml 11 | - id: debug-statements 12 | 13 | - repo: https://github.com/psf/black 14 | rev: 24.10.0 15 | hooks: 16 | - id: black 17 | language_version: python3 18 | 19 | - repo: https://github.com/astral-sh/ruff-pre-commit 20 | rev: v0.8.4 21 | hooks: 22 | - id: ruff 23 | args: [--fix, --exit-non-zero-on-fix] 24 | - id: ruff-format 25 | 26 | - repo: https://github.com/pre-commit/mirrors-mypy 27 | rev: v1.13.0 28 | hooks: 29 | - id: mypy 30 | additional_dependencies: [types-PyYAML, types-requests] 31 | args: [--ignore-missing-imports] -------------------------------------------------------------------------------- /LICENSE: -------------------------------------------------------------------------------- 1 | MIT License 2 | 3 | Copyright (c) 2025 agentforge Team 4 | 5 | Permission is hereby granted, free of charge, to any person obtaining a copy 6 | of this software and associated documentation files (the "Software"), to deal 7 | in the Software without restriction, including without limitation the rights 8 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell 9 | copies of the Software, and to permit persons to whom the Software is 10 | furnished to do so, subject to the following conditions: 11 | 12 | The above copyright notice and this permission notice shall be included in all 13 | copies or substantial portions of the Software. 14 | 15 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 16 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 17 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE 18 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 19 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, 20 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE 21 | SOFTWARE. -------------------------------------------------------------------------------- /agentforge/__init__.py: -------------------------------------------------------------------------------- 1 | """ 2 | AgentForge: Forge intelligent AI agents with CrewAI. 3 | 4 | A Python package for building intelligent multi-agent systems using CrewAI. 5 | This package provides a CLI and framework for automatically generating, managing, 6 | and executing multi-agent crews based on natural language tasks. 
7 | """ 8 | 9 | import warnings 10 | import os 11 | 12 | # Suppress common deprecation warnings globally 13 | warnings.filterwarnings("ignore", category=DeprecationWarning) 14 | warnings.filterwarnings("ignore", message=".*Pydantic.*deprecated.*") 15 | warnings.filterwarnings("ignore", message=".*PydanticDeprecatedSince20.*") 16 | warnings.filterwarnings("ignore", message=".*extra keyword arguments.*") 17 | warnings.filterwarnings("ignore", message=".*Field.*deprecated.*") 18 | warnings.filterwarnings("ignore", message=".*event loop.*") 19 | 20 | # Set environment variable 21 | os.environ["PYTHONWARNINGS"] = "ignore::DeprecationWarning" 22 | 23 | __version__ = "0.2.0" 24 | __author__ = "AgentForge Team" 25 | __email__ = "team@agentforge.dev" 26 | 27 | from .core.master_agent_crew import MasterAgentCrew 28 | from .core.file_generator import CrewFileGenerator 29 | 30 | __all__ = [ 31 | "MasterAgentCrew", 32 | "CrewFileGenerator", 33 | ] -------------------------------------------------------------------------------- /crews/simple_writer/src/simple_writer/main.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python3 2 | """ 3 | simple_writer - CrewAI Project 4 | 5 | AI-orchestrated crew for: Create a simple text generator that writes basic content without external tools 6 | 7 | Generated by agentforge on 2025-09-21 16:37:19 8 | """ 9 | 10 | import sys 11 | from .crew import SimpleWriterCrew 12 | 13 | 14 | def main(): 15 | """Main entry point for the crew.""" 16 | # Get task input from command line arguments 17 | if len(sys.argv) > 1: 18 | task_input = " ".join(sys.argv[1:]) 19 | print(f" Command line arguments: {sys.argv[1:]}") 20 | print(f"[INFO] Task input: {task_input}") 21 | else: 22 | task_input = "Create a simple text generator that writes basic content without external tools" 23 | print(f"[INFO] No command line arguments provided, using default task") 24 | 25 | # Initialize and run the crew 26 | print(f"[INFO] Initializing SimpleWriterCrew crew...") 27 | crew = SimpleWriterCrew() 28 | result = crew.run(task_input) 29 | 30 | print("\n" + "="*50) 31 | print("CREW EXECUTION COMPLETED") 32 | print("="*50) 33 | print(result) 34 | 35 | return result 36 | 37 | 38 | if __name__ == "__main__": 39 | main() 40 | -------------------------------------------------------------------------------- /crews/tech_blog_writer_final/src/tech_blog_writer_final/main.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python3 2 | """ 3 | tech_blog_writer_final - CrewAI Project 4 | 5 | AI-orchestrated crew for: Create a simple blog writer who can write informative blog posts about technology topics 6 | 7 | Generated by agentforge on 2025-09-21 16:01:37 8 | """ 9 | 10 | import sys 11 | from .crew import TechBlogWriterFinalCrew 12 | 13 | 14 | def main(): 15 | """Main entry point for the crew.""" 16 | # Get task input from command line arguments 17 | if len(sys.argv) > 1: 18 | task_input = " ".join(sys.argv[1:]) 19 | print(f" Command line arguments: {sys.argv[1:]}") 20 | print(f"[INFO] Task input: {task_input}") 21 | else: 22 | task_input = "Create a simple blog writer who can write informative blog posts about technology topics" 23 | print(f"[INFO] No command line arguments provided, using default task") 24 | 25 | # Initialize and run the crew 26 | print(f"[INFO] Initializing TechBlogWriterFinalCrew crew...") 27 | crew = TechBlogWriterFinalCrew() 28 | result = crew.run(task_input) 29 | 30 | print("\n" + 
"="*50) 31 | print("CREW EXECUTION COMPLETED") 32 | print("="*50) 33 | print(result) 34 | 35 | return result 36 | 37 | 38 | if __name__ == "__main__": 39 | main() 40 | -------------------------------------------------------------------------------- /crews/simple_writer/config/agents.yaml: -------------------------------------------------------------------------------- 1 | Social Media Content Language_Model Specialist_specialist: 2 | allow_delegation: false 3 | backstory: You are an experienced Social Media Content Language_Model Specialist 4 | with a proven track record of delivering high-quality results in collaborative 5 | environments. 6 | goal: Execute Social Media Content Language_Model Specialist tasks with excellence 7 | and collaborate effectively with the team 8 | llm: 9 | max_tokens: 2000 10 | model: ollama/llama3.2:latest 11 | provider: ollama 12 | temperature: 0.7 13 | max_iter: 5 14 | role: Social Media Content Language_Model Specialist 15 | tools: 16 | - FileReadTool 17 | - CodeInterpreterTool 18 | verbose: true 19 | Social Media Content Text_Generator Specialist_specialist: 20 | allow_delegation: false 21 | backstory: You are an experienced Social Media Content Text_Generator Specialist 22 | with a proven track record of delivering high-quality results in collaborative 23 | environments. 24 | goal: Execute Social Media Content Text_Generator Specialist tasks with excellence 25 | and collaborate effectively with the team 26 | llm: 27 | max_tokens: 2000 28 | model: ollama/llama3.2:latest 29 | provider: ollama 30 | temperature: 0.7 31 | max_iter: 5 32 | role: Social Media Content Text_Generator Specialist 33 | tools: 34 | - FileReadTool 35 | - CodeInterpreterTool 36 | verbose: true 37 | -------------------------------------------------------------------------------- /crews/tech_blog_writer_final/config/tasks.yaml: -------------------------------------------------------------------------------- 1 | main_task: 2 | description: Create a simple blog writer who can write informative blog posts about 3 | technology topics 4 | expected_output: 'Complete results for: Create a simple blog writer who can write 5 | informative blog posts about technology topics' 6 | agent: Social Media Content Research Specialist_specialist 7 | social media content research_assistant specialist_task: 8 | description: As a Social Media Content Research_Assistant Specialist, process the 9 | research findings and create deliverable outputs based on your expertise. 10 | expected_output: Actionable deliverables and outputs from a Social Media Content 11 | Research_Assistant Specialist perspective 12 | agent: Social Media Content Research_Assistant Specialist_specialist 13 | context: 14 | - main_task 15 | social media content content creator_task: 16 | description: As a Social Media Content Content Creator, process the research findings 17 | and create deliverable outputs based on your expertise. 18 | expected_output: Actionable deliverables and outputs from a Social Media Content 19 | Content Creator perspective 20 | agent: Social Media Content Content Creator_specialist 21 | context: 22 | - social media content research_assistant specialist_task 23 | social media content editor specialist_task: 24 | description: As a Social Media Content Editor Specialist, process the research findings 25 | and create deliverable outputs based on your expertise. 
26 | expected_output: Actionable deliverables and outputs from a Social Media Content 27 | Editor Specialist perspective 28 | agent: Social Media Content Editor Specialist_specialist 29 | context: 30 | - social media content content creator_task 31 | -------------------------------------------------------------------------------- /crews/simple_writer/README.md: -------------------------------------------------------------------------------- 1 | # simple_writer 2 | 3 | AI-orchestrated crew for: Create a simple text generator that writes basic content without external tools 4 | 5 | ## Overview 6 | 7 | This crew consists of multiple AI agents working together to accomplish complex tasks. 8 | 9 | **Task**: Create a simple text generator that writes basic content without external tools 10 | 11 | **Expected Output**: None 12 | 13 | ## Agents 14 | 15 | - **Social Media Content Text_Generator Specialist_specialist**: Social Media Content Text_Generator Specialist 16 | - **Social Media Content Language_Model Specialist_specialist**: Social Media Content Language_Model Specialist 17 | 18 | ## Tools Used 19 | 20 | - code_execution 21 | - document_search 22 | - data_processing 23 | - api_calls 24 | 25 | ## Usage 26 | 27 | ### Quick Start 28 | 29 | ```bash 30 | # Make the run script executable (if not already) 31 | chmod +x run.sh 32 | 33 | # Run the crew 34 | ./run.sh "Your task description here" 35 | ``` 36 | 37 | ### Manual Setup (using uv) 38 | 39 | ```bash 40 | # Install uv if not already installed 41 | curl -LsSf https://astral.sh/uv/install.sh | sh 42 | 43 | # Create virtual environment and install dependencies 44 | uv venv 45 | uv sync 46 | 47 | # Run the crew 48 | uv run python -m src.simple_writer.main "Your task description here" 49 | ``` 50 | 51 | ### Alternative Setup (using pip) 52 | 53 | ```bash 54 | # Create and activate virtual environment 55 | python3 -m venv venv 56 | source venv/bin/activate 57 | 58 | # Install dependencies 59 | pip install -e . 60 | 61 | # Run the crew 62 | python -m src.simple_writer.main "Your task description here" 63 | ``` 64 | 65 | ## Configuration 66 | 67 | - **Agents**: Configure in `config/agents.yaml` 68 | - **Tasks**: Configure in `config/tasks.yaml` 69 | - **Tools**: Customize in `src/simple_writer/tools/custom_tools.py` 70 | - **Dependencies**: Manage in `pyproject.toml` 71 | 72 | ## Generated by agentforge 73 | 74 | This project was generated by agentforge on 2025-09-21 16:37:19. 75 | 76 | For more information about CrewAI, visit: https://docs.crewai.com/ 77 | -------------------------------------------------------------------------------- /agentforge/database/models.py: -------------------------------------------------------------------------------- 1 | """ 2 | Database models for AgentForge. 
3 | """ 4 | 5 | from typing import Optional, List, Dict, Any 6 | from datetime import datetime 7 | from dataclasses import dataclass 8 | from enum import Enum 9 | 10 | 11 | class ExecutionStatus(Enum): 12 | """Execution status enum.""" 13 | PENDING = "pending" 14 | RUNNING = "running" 15 | COMPLETED = "completed" 16 | FAILED = "failed" 17 | CANCELLED = "cancelled" 18 | 19 | 20 | @dataclass 21 | class AgentModel: 22 | """Agent model for database storage.""" 23 | id: str 24 | name: str 25 | role: str 26 | goal: str 27 | backstory: str 28 | tools: List[str] 29 | memory_type: str = "short_term" 30 | max_iter: int = 5 31 | allow_delegation: bool = False 32 | created_at: datetime = None 33 | updated_at: datetime = None 34 | 35 | def __post_init__(self): 36 | if self.created_at is None: 37 | self.created_at = datetime.utcnow() 38 | if self.updated_at is None: 39 | self.updated_at = datetime.utcnow() 40 | 41 | 42 | @dataclass 43 | class CrewModel: 44 | """Crew model for database storage.""" 45 | id: str 46 | name: str 47 | task: str 48 | description: str 49 | agents: List[AgentModel] 50 | expected_output: str 51 | complexity: str = "moderate" 52 | estimated_time: int = 15 53 | process_type: str = "sequential" 54 | created_at: datetime = None 55 | updated_at: datetime = None 56 | 57 | def __post_init__(self): 58 | if self.created_at is None: 59 | self.created_at = datetime.utcnow() 60 | if self.updated_at is None: 61 | self.updated_at = datetime.utcnow() 62 | 63 | 64 | @dataclass 65 | class ExecutionResult: 66 | """Execution result model for database storage.""" 67 | id: str 68 | crew_id: str 69 | input_data: str 70 | output: str 71 | status: ExecutionStatus 72 | execution_time: int 73 | cost: float = 0.0 74 | quality_score: float = 0.0 75 | error_message: Optional[str] = None 76 | logs: List[Dict[str, Any]] = None 77 | created_at: datetime = None 78 | 79 | def __post_init__(self): 80 | if self.created_at is None: 81 | self.created_at = datetime.utcnow() 82 | if self.logs is None: 83 | self.logs = [] 84 | -------------------------------------------------------------------------------- /crews/tech_blog_writer_final/README.md: -------------------------------------------------------------------------------- 1 | # tech_blog_writer_final 2 | 3 | AI-orchestrated crew for: Create a simple blog writer who can write informative blog posts about technology topics 4 | 5 | ## Overview 6 | 7 | This crew consists of multiple AI agents working together to accomplish complex tasks. 
8 | 9 | **Task**: Create a simple blog writer who can write informative blog posts about technology topics 10 | 11 | **Expected Output**: None 12 | 13 | ## Agents 14 | 15 | - **Social Media Content Research Specialist_specialist**: Social Media Content Research Specialist 16 | - **Social Media Content Research_Assistant Specialist_specialist**: Social Media Content Research_Assistant Specialist 17 | - **Social Media Content Content Creator_specialist**: Social Media Content Content Creator 18 | - **Social Media Content Editor Specialist_specialist**: Social Media Content Editor Specialist 19 | 20 | ## Tools Used 21 | 22 | - api_calls 23 | - file_operations 24 | - database_search 25 | - github_search 26 | - code_execution 27 | - document_search 28 | 29 | ## Usage 30 | 31 | ### Quick Start 32 | 33 | ```bash 34 | # Make the run script executable (if not already) 35 | chmod +x run.sh 36 | 37 | # Run the crew 38 | ./run.sh "Your task description here" 39 | ``` 40 | 41 | ### Manual Setup (using uv) 42 | 43 | ```bash 44 | # Install uv if not already installed 45 | curl -LsSf https://astral.sh/uv/install.sh | sh 46 | 47 | # Create virtual environment and install dependencies 48 | uv venv 49 | uv sync 50 | 51 | # Run the crew 52 | uv run python -m src.tech_blog_writer_final.main "Your task description here" 53 | ``` 54 | 55 | ### Alternative Setup (using pip) 56 | 57 | ```bash 58 | # Create and activate virtual environment 59 | python3 -m venv venv 60 | source venv/bin/activate 61 | 62 | # Install dependencies 63 | pip install -e . 64 | 65 | # Run the crew 66 | python -m src.tech_blog_writer_final.main "Your task description here" 67 | ``` 68 | 69 | ## Configuration 70 | 71 | - **Agents**: Configure in `config/agents.yaml` 72 | - **Tasks**: Configure in `config/tasks.yaml` 73 | - **Tools**: Customize in `src/tech_blog_writer_final/tools/custom_tools.py` 74 | - **Dependencies**: Manage in `pyproject.toml` 75 | 76 | ## Generated by agentforge 77 | 78 | This project was generated by agentforge on 2025-09-21 16:01:37. 79 | 80 | For more information about CrewAI, visit: https://docs.crewai.com/ 81 | -------------------------------------------------------------------------------- /crews/simple_writer/src/simple_writer/tools/custom_tools.py: -------------------------------------------------------------------------------- 1 | """ 2 | Custom tools for simple_writer crew. 3 | 4 | This module handles tool initialization and provides tools to agents. 
5 | """ 6 | 7 | from typing import List, Any 8 | 9 | 10 | def get_tools_for_agent(tool_names: List[str]) -> List[Any]: 11 | """Get actual CrewAI tools for an agent based on tool names.""" 12 | # If no tools requested, return empty list 13 | if not tool_names or len(tool_names) == 0: 14 | return [] 15 | 16 | # Try to import available tools 17 | available_tools = {} 18 | 19 | try: 20 | from crewai_tools import ( 21 | WebsiteSearchTool, SerperDevTool, FileReadTool, ScrapeWebsiteTool, GithubSearchTool, 22 | YoutubeVideoSearchTool, YoutubeChannelSearchTool, CodeInterpreterTool, 23 | PDFSearchTool, DOCXSearchTool, CSVSearchTool, JSONSearchTool, 24 | XMLSearchTool, TXTSearchTool, MDXSearchTool, DirectoryReadTool, 25 | DirectorySearchTool 26 | ) 27 | 28 | available_tools = { 29 | 'SerperDevTool': SerperDevTool, 30 | 'FileReadTool': FileReadTool, 31 | 'ScrapeWebsiteTool': ScrapeWebsiteTool, 32 | 'GithubSearchTool': GithubSearchTool, 33 | 'YoutubeVideoSearchTool': YoutubeVideoSearchTool, 34 | 'YoutubeChannelSearchTool': YoutubeChannelSearchTool, 35 | 'CodeInterpreterTool': CodeInterpreterTool, 36 | 'PDFSearchTool': PDFSearchTool, 37 | 'DOCXSearchTool': DOCXSearchTool, 38 | 'CSVSearchTool': CSVSearchTool, 39 | 'JSONSearchTool': JSONSearchTool, 40 | 'XMLSearchTool': XMLSearchTool, 41 | 'TXTSearchTool': TXTSearchTool, 42 | 'MDXSearchTool': MDXSearchTool, 43 | 'DirectoryReadTool': DirectoryReadTool, 44 | 'DirectorySearchTool': DirectorySearchTool, 45 | 'WebsiteSearchTool': WebsiteSearchTool 46 | } 47 | 48 | except ImportError: 49 | print("Warning: crewai-tools not installed, using mock tools") 50 | return [] 51 | 52 | tools = [] 53 | 54 | for tool_name in tool_names: 55 | try: 56 | if tool_name in available_tools: 57 | tool_class = available_tools[tool_name] 58 | tools.append(tool_class()) 59 | else: 60 | print(f"Warning: Unknown tool '{tool_name}', skipping") 61 | except Exception as e: 62 | print(f"Warning: Could not instantiate {tool_name}: {e}") 63 | 64 | return tools 65 | 66 | 67 | # Note: Using actual CrewAI tools instead of custom implementations 68 | # Tools are imported and instantiated directly from crewai_tools package 69 | -------------------------------------------------------------------------------- /pyproject.toml: -------------------------------------------------------------------------------- 1 | [build-system] 2 | requires = ["setuptools>=61.0", "wheel"] 3 | build-backend = "setuptools.build_meta" 4 | 5 | [project] 6 | name = "agentforge" 7 | version = "0.2.0" 8 | description = "🔥 Forge intelligent AI agents with CrewAI - Transform ideas into powerful multi-agent systems" 9 | readme = "README.md" 10 | authors = [ 11 | {name = "AgentForge Team", email = "team@agentforge.dev"} 12 | ] 13 | license = {text = "MIT"} 14 | classifiers = [ 15 | "Development Status :: 4 - Beta", 16 | "Intended Audience :: Developers", 17 | "License :: OSI Approved :: MIT License", 18 | "Programming Language :: Python :: 3", 19 | "Programming Language :: Python :: 3.9", 20 | "Programming Language :: Python :: 3.10", 21 | "Programming Language :: Python :: 3.11", 22 | "Programming Language :: Python :: 3.12", 23 | ] 24 | keywords = ["ai", "multi-agent", "crewai", "automation", "agents", "forge", "intelligent", "ai-agents"] 25 | requires-python = ">=3.10" 26 | dependencies = [ 27 | "crewai", 28 | "crewai-tools", 29 | "pydantic", 30 | "python-dotenv", 31 | "rich", 32 | "typer", 33 | "openai", 34 | "pyyaml", 35 | "langchain-anthropic", 36 | "langchain-google-genai", 37 | "langchain-openai", 38 | ] 39 | 40 | 
[project.optional-dependencies] 41 | dev = [ 42 | "pytest>=7.0.0", 43 | "pytest-asyncio>=0.21.0", 44 | "black>=23.0.0", 45 | "ruff>=0.1.0", 46 | "mypy>=1.0.0", 47 | "pre-commit>=3.0.0", 48 | ] 49 | web = [ 50 | "fastapi>=0.100.0", 51 | "uvicorn>=0.23.0", 52 | "jinja2>=3.1.0", 53 | ] 54 | 55 | [project.urls] 56 | Homepage = "https://github.com/AgentForge/agentforge" 57 | Documentation = "https://github.com/AgentForge/agentforge#readme" 58 | Repository = "https://github.com/AgentForge/agentforge" 59 | Issues = "https://github.com/AgentForge/agentforge/issues" 60 | Changelog = "https://github.com/AgentForge/agentforge/blob/main/CHANGELOG.md" 61 | 62 | [project.scripts] 63 | agentforge = "agentforge.cli:main" 64 | 65 | [tool.setuptools.packages.find] 66 | where = ["."] 67 | include = ["agentforge*"] 68 | 69 | [tool.black] 70 | line-length = 88 71 | target-version = ['py39'] 72 | 73 | [tool.ruff] 74 | line-length = 88 75 | target-version = "py39" 76 | select = ["E", "F", "W", "I", "N", "UP", "S", "B", "A", "COM", "C4", "DTZ", "T10", "EM", "EXE", "ISC", "ICN", "G", "PIE", "T20", "PYI", "PT", "Q", "RSE", "RET", "SLF", "SIM", "TID", "TCH", "INT", "ARG", "PTH", "PD", "PGH", "PL", "TRY", "NPY", "RUF"] 77 | ignore = ["S101", "S104", "S105", "S106", "S107", "S108", "COM812", "ISC001"] 78 | 79 | [tool.mypy] 80 | python_version = "3.9" 81 | warn_return_any = true 82 | warn_unused_configs = true 83 | disallow_untyped_defs = true 84 | 85 | [tool.uv.workspace] 86 | members = [ 87 | "crews/blog_writer_04", 88 | ] 89 | -------------------------------------------------------------------------------- /crews/tech_blog_writer_final/config/agents.yaml: -------------------------------------------------------------------------------- 1 | Social Media Content Content Creator_specialist: 2 | allow_delegation: false 3 | backstory: You are an experienced Social Media Content Content Creator with a proven 4 | track record of delivering high-quality results in collaborative environments. 5 | goal: Execute Social Media Content Content Creator tasks with excellence and collaborate 6 | effectively with the team 7 | llm: 8 | max_tokens: 2000 9 | model: ollama/llama3.2:latest 10 | provider: ollama 11 | temperature: 0.7 12 | max_iter: 5 13 | role: Social Media Content Content Creator 14 | tools: 15 | - api_calls 16 | - file_operations 17 | - document_search 18 | - code_execution 19 | verbose: true 20 | Social Media Content Editor Specialist_specialist: 21 | allow_delegation: false 22 | backstory: You are an experienced Social Media Content Editor Specialist with a 23 | proven track record of delivering high-quality results in collaborative environments. 24 | goal: Execute Social Media Content Editor Specialist tasks with excellence and collaborate 25 | effectively with the team 26 | llm: 27 | max_tokens: 2000 28 | model: ollama/llama3.2:latest 29 | provider: ollama 30 | temperature: 0.7 31 | max_iter: 5 32 | role: Social Media Content Editor Specialist 33 | tools: 34 | - api_calls 35 | - file_operations 36 | verbose: true 37 | Social Media Content Research Specialist_specialist: 38 | allow_delegation: false 39 | backstory: You are an experienced Social Media Content Research Specialist with 40 | a proven track record of delivering high-quality results in collaborative environments. 
41 | goal: Execute Social Media Content Research Specialist tasks with excellence and 42 | collaborate effectively with the team 43 | llm: 44 | max_tokens: 2000 45 | model: ollama/llama3.2:latest 46 | provider: ollama 47 | temperature: 0.7 48 | max_iter: 5 49 | role: Social Media Content Research Specialist 50 | tools: 51 | - api_calls 52 | - github_search 53 | - file_operations 54 | - code_execution 55 | verbose: true 56 | Social Media Content Research_Assistant Specialist_specialist: 57 | allow_delegation: false 58 | backstory: You are an experienced Social Media Content Research_Assistant Specialist 59 | with a proven track record of delivering high-quality results in collaborative 60 | environments. 61 | goal: Execute Social Media Content Research_Assistant Specialist tasks with excellence 62 | and collaborate effectively with the team 63 | llm: 64 | max_tokens: 2000 65 | model: ollama/llama3.2:latest 66 | provider: ollama 67 | temperature: 0.7 68 | max_iter: 5 69 | role: Social Media Content Research_Assistant Specialist 70 | tools: 71 | - database_search 72 | - api_calls 73 | - file_operations 74 | - document_search 75 | verbose: true 76 | -------------------------------------------------------------------------------- /.github/workflows/ci.yml: -------------------------------------------------------------------------------- 1 | name: CI 2 | 3 | on: 4 | push: 5 | branches: [ main, develop ] 6 | pull_request: 7 | branches: [ main, develop ] 8 | 9 | jobs: 10 | test: 11 | runs-on: ${{ matrix.os }} 12 | strategy: 13 | matrix: 14 | os: [ubuntu-latest, windows-latest, macos-latest] 15 | python-version: ['3.9', '3.10', '3.11', '3.12'] 16 | 17 | steps: 18 | - uses: actions/checkout@v4 19 | 20 | - name: Set up Python ${{ matrix.python-version }} 21 | uses: actions/setup-python@v4 22 | with: 23 | python-version: ${{ matrix.python-version }} 24 | 25 | - name: Install UV 26 | uses: astral-sh/setup-uv@v3 27 | 28 | - name: Install dependencies 29 | run: | 30 | uv sync --extra dev 31 | 32 | - name: Run linting with ruff 33 | run: | 34 | uv run ruff check agentforge/ 35 | 36 | - name: Run formatting check with ruff 37 | run: | 38 | uv run ruff format --check agentforge/ 39 | 40 | - name: Run type checking with mypy 41 | run: | 42 | uv run mypy agentforge/ 43 | 44 | - name: Run tests with pytest 45 | run: | 46 | uv run pytest tests/ --cov=agentforge --cov-report=xml 47 | 48 | - name: Upload coverage to Codecov 49 | uses: codecov/codecov-action@v3 50 | with: 51 | file: ./coverage.xml 52 | flags: unittests 53 | name: codecov-umbrella 54 | fail_ci_if_error: false 55 | 56 | security: 57 | runs-on: ubuntu-latest 58 | steps: 59 | - uses: actions/checkout@v4 60 | 61 | - name: Set up Python 62 | uses: actions/setup-python@v4 63 | with: 64 | python-version: '3.11' 65 | 66 | - name: Install UV 67 | uses: astral-sh/setup-uv@v3 68 | 69 | - name: Install dependencies 70 | run: | 71 | uv sync --extra dev 72 | 73 | - name: Run security check with bandit 74 | run: | 75 | uv run bandit -r agentforge/ -f json -o bandit-report.json 76 | continue-on-error: true 77 | 78 | - name: Upload security report 79 | uses: actions/upload-artifact@v3 80 | with: 81 | name: security-report 82 | path: bandit-report.json 83 | 84 | build: 85 | runs-on: ubuntu-latest 86 | needs: [test, security] 87 | steps: 88 | - uses: actions/checkout@v4 89 | 90 | - name: Set up Python 91 | uses: actions/setup-python@v4 92 | with: 93 | python-version: '3.11' 94 | 95 | - name: Install UV 96 | uses: astral-sh/setup-uv@v3 97 | 98 | - name: Build package 
99 | run: | 100 | uv build 101 | 102 | - name: Upload build artifacts 103 | uses: actions/upload-artifact@v3 104 | with: 105 | name: dist 106 | path: dist/ 107 | 108 | pre-commit: 109 | runs-on: ubuntu-latest 110 | steps: 111 | - uses: actions/checkout@v4 112 | 113 | - name: Set up Python 114 | uses: actions/setup-python@v4 115 | with: 116 | python-version: '3.11' 117 | 118 | - name: Install pre-commit 119 | run: | 120 | pip install pre-commit 121 | 122 | - name: Run pre-commit 123 | run: | 124 | pre-commit run --all-files -------------------------------------------------------------------------------- /crews/tech_blog_writer_final/src/tech_blog_writer_final/tools/custom_tools.py: -------------------------------------------------------------------------------- 1 | """ 2 | Custom tools for tech_blog_writer_final crew. 3 | 4 | This module handles tool initialization and provides tools to agents. 5 | """ 6 | 7 | from typing import List, Any 8 | 9 | 10 | def get_tools_for_agent(tool_names: List[str]) -> List[Any]: 11 | """Get actual CrewAI tools for an agent based on tool names.""" 12 | # Try to import available tools 13 | available_tools = {} 14 | 15 | try: 16 | from crewai_tools import ( 17 | WebsiteSearchTool, SerperDevTool, FileReadTool, ScrapeWebsiteTool, GithubSearchTool, 18 | YoutubeVideoSearchTool, YoutubeChannelSearchTool, CodeInterpreterTool, 19 | PDFSearchTool, DOCXSearchTool, CSVSearchTool, JSONSearchTool, 20 | XMLSearchTool, TXTSearchTool, MDXSearchTool, DirectoryReadTool, 21 | DirectorySearchTool 22 | ) 23 | 24 | available_tools = { 25 | 'SerperDevTool': SerperDevTool, 26 | 'FileReadTool': FileReadTool, 27 | 'ScrapeWebsiteTool': ScrapeWebsiteTool, 28 | 'GithubSearchTool': GithubSearchTool, 29 | 'YoutubeVideoSearchTool': YoutubeVideoSearchTool, 30 | 'YoutubeChannelSearchTool': YoutubeChannelSearchTool, 31 | 'CodeInterpreterTool': CodeInterpreterTool, 32 | 'PDFSearchTool': PDFSearchTool, 33 | 'DOCXSearchTool': DOCXSearchTool, 34 | 'CSVSearchTool': CSVSearchTool, 35 | 'JSONSearchTool': JSONSearchTool, 36 | 'XMLSearchTool': XMLSearchTool, 37 | 'TXTSearchTool': TXTSearchTool, 38 | 'MDXSearchTool': MDXSearchTool, 39 | 'DirectoryReadTool': DirectoryReadTool, 40 | 'DirectorySearchTool': DirectorySearchTool, 41 | 'WebsiteSearchTool': WebsiteSearchTool 42 | } 43 | 44 | except ImportError: 45 | print("Warning: crewai-tools not installed, using mock tools") 46 | return [] 47 | 48 | tools = [] 49 | 50 | for tool_name in tool_names: 51 | try: 52 | if tool_name in available_tools: 53 | tool_class = available_tools[tool_name] 54 | tools.append(tool_class()) 55 | else: 56 | print(f"Warning: Unknown tool '{tool_name}', using SerperDevTool as fallback") 57 | if 'SerperDevTool' in available_tools and not any(type(t).__name__ == 'SerperDevTool' for t in tools): 58 | tools.append(available_tools['SerperDevTool']()) 59 | except Exception as e: 60 | print(f"Warning: Could not instantiate {tool_name}: {e}") 61 | # Try to use SerperDevTool as fallback 62 | if 'SerperDevTool' in available_tools and not any(type(t).__name__ == 'SerperDevTool' for t in tools): 63 | try: 64 | tools.append(available_tools['SerperDevTool']()) 65 | except Exception: 66 | pass 67 | 68 | # Ensure we have at least one tool 69 | if not tools and 'SerperDevTool' in available_tools: 70 | try: 71 | tools.append(available_tools['SerperDevTool']()) 72 | except Exception: 73 | print("Warning: Could not create fallback tool") 74 | 75 | return tools 76 | 77 | 78 | # Note: Using actual CrewAI tools instead of custom implementations 79 | # 
Tools are imported and instantiated directly from crewai_tools package 80 | -------------------------------------------------------------------------------- /agentforge/core/event_bus_patch.py: -------------------------------------------------------------------------------- 1 | """ 2 | Event bus patch to fix CrewAI EventBus errors. 3 | """ 4 | 5 | import warnings 6 | from crewai.events.event_bus import crewai_event_bus 7 | from crewai.events.types.task_events import TaskStartedEvent 8 | 9 | 10 | def patch_event_bus(): 11 | """Patch the event bus to handle None values gracefully.""" 12 | 13 | # Store the original handler 14 | original_handlers = {} 15 | 16 | # Get all registered handlers for TaskStartedEvent 17 | if hasattr(crewai_event_bus, '_handlers') and TaskStartedEvent in crewai_event_bus._handlers: 18 | original_handlers[TaskStartedEvent] = crewai_event_bus._handlers[TaskStartedEvent].copy() 19 | 20 | # Clear the problematic handlers 21 | crewai_event_bus._handlers[TaskStartedEvent] = [] 22 | 23 | # Add a safe handler 24 | @crewai_event_bus.on(TaskStartedEvent) 25 | def safe_on_task_started(source, event: TaskStartedEvent): 26 | try: 27 | # Check if source has required attributes 28 | if not hasattr(source, 'agent') or source.agent is None: 29 | return 30 | if not hasattr(source.agent, 'crew') or source.agent.crew is None: 31 | return 32 | 33 | # Call original handlers if they exist 34 | if TaskStartedEvent in original_handlers: 35 | for handler in original_handlers[TaskStartedEvent]: 36 | try: 37 | handler(source, event) 38 | except Exception as e: 39 | # Suppress the error but log it 40 | warnings.warn(f"Event handler failed: {e}", UserWarning) 41 | 42 | except Exception as e: 43 | # Suppress all errors from event handling 44 | warnings.warn(f"Event handling failed: {e}", UserWarning) 45 | 46 | 47 | def apply_patch(): 48 | """Apply the event bus patch and emoji encoding fix.""" 49 | try: 50 | # Apply emoji encoding fix first 51 | patch_emoji_encoding() 52 | 53 | # Apply event bus patch 54 | patch_event_bus() 55 | print("[INFO] Event bus patch applied successfully") 56 | except Exception as e: 57 | print(f"[WARNING] Could not apply event bus patch: {e}") 58 | 59 | 60 | def patch_emoji_encoding(): 61 | """Patch stdout to handle emoji encoding issues.""" 62 | import sys 63 | 64 | # Store original stdout 65 | original_stdout = sys.stdout 66 | 67 | class SafeStdout: 68 | def __init__(self, original_stdout): 69 | self.original_stdout = original_stdout 70 | 71 | def write(self, text): 72 | # Replace emojis with safe text 73 | safe_text = text.replace('🚀', '[START]').replace('⚡', '[LIGHTNING]').replace('🔥', '[FIRE]') 74 | safe_text = safe_text.replace('📊', '[CHART]').replace('💡', '[IDEA]').replace('🔍', '[SEARCH]') 75 | safe_text = safe_text.replace('✅', '[OK]').replace('❌', '[ERROR]').replace('⚠', '[WARN]') 76 | safe_text = safe_text.replace('🤖', '[AI]').replace('📋', '[INFO]').replace('🔧', '[TOOL]') 77 | safe_text = safe_text.replace('📁', '[FOLDER]').replace('🚨', '[ALERT]').replace('⚒', '[HAMMER]') 78 | safe_text = safe_text.replace('🔄', '[REFRESH]').replace('📄', '[DOCUMENT]').replace('📦', '[PACKAGE]') 79 | safe_text = safe_text.replace('🛠', '[TOOLS]').replace('💾', '[SAVE]').replace('🎭', '[MASK]') 80 | safe_text = safe_text.replace('👥', '[PEOPLE]').replace('🎨', '[ART]').replace('✨', '[SPARKLE]') 81 | safe_text = safe_text.replace('📚', '[BOOKS]').replace('🎉', '[PARTY]').replace('🏃', '[RUN]') 82 | safe_text = safe_text.replace('🧪', '[TEST]').replace('🎓', '[GRADUATE]').replace('🎯', 
'[TARGET]') 83 | safe_text = safe_text.replace('•', '[BULLET]').replace('═', '=').replace('║', '|') 84 | safe_text = safe_text.replace('█', '#').replace('╗', '+').replace('╚', '+') 85 | safe_text = safe_text.replace('╝', '+').replace('╔', '+').replace('└', '+') 86 | safe_text = safe_text.replace('├', '+').replace('─', '-').replace('️', '') 87 | 88 | try: 89 | self.original_stdout.write(safe_text) 90 | except UnicodeEncodeError: 91 | # If still having encoding issues, encode as ASCII with replacement 92 | safe_text = safe_text.encode('ascii', 'replace').decode('ascii') 93 | self.original_stdout.write(safe_text) 94 | 95 | def flush(self): 96 | self.original_stdout.flush() 97 | 98 | def __getattr__(self, name): 99 | return getattr(self.original_stdout, name) 100 | 101 | # Apply the safe stdout wrapper 102 | sys.stdout = SafeStdout(original_stdout) 103 | -------------------------------------------------------------------------------- /.gitignore: -------------------------------------------------------------------------------- 1 | # Byte-compiled / optimized / DLL files 2 | __pycache__/ 3 | *.py[cod] 4 | *$py.class 5 | 6 | # C extensions 7 | *.so 8 | 9 | # Distribution / packaging 10 | .Python 11 | build/ 12 | develop-eggs/ 13 | dist/ 14 | downloads/ 15 | eggs/ 16 | .eggs/ 17 | lib/ 18 | lib64/ 19 | parts/ 20 | sdist/ 21 | var/ 22 | wheels/ 23 | share/python-wheels/ 24 | *.egg-info/ 25 | .installed.cfg 26 | *.egg 27 | MANIFEST 28 | 29 | # PyInstaller 30 | # Usually these files are written by a python script from a template 31 | # before PyInstaller builds the exe, so as to inject date/other infos into it. 32 | *.manifest 33 | *.spec 34 | 35 | # Installer logs 36 | pip-log.txt 37 | pip-delete-this-directory.txt 38 | 39 | # Unit test / coverage reports 40 | htmlcov/ 41 | .tox/ 42 | .nox/ 43 | .coverage 44 | .coverage.* 45 | .cache 46 | nosetests.xml 47 | coverage.xml 48 | *.cover 49 | *.py,cover 50 | .hypothesis/ 51 | .pytest_cache/ 52 | cover/ 53 | 54 | # Translations 55 | *.mo 56 | *.pot 57 | 58 | # Django stuff: 59 | *.log 60 | local_settings.py 61 | db.sqlite3 62 | db.sqlite3-journal 63 | 64 | # Flask stuff: 65 | instance/ 66 | .webassets-cache 67 | 68 | # Scrapy stuff: 69 | .scrapy 70 | 71 | # Sphinx documentation 72 | docs/_build/ 73 | 74 | # PyBuilder 75 | .pybuilder/ 76 | target/ 77 | 78 | # Jupyter Notebook 79 | .ipynb_checkpoints 80 | 81 | # IPython 82 | profile_default/ 83 | ipython_config.py 84 | 85 | # pyenv 86 | # For a library or package, you might want to ignore these files since the code is 87 | # intended to run in multiple environments; otherwise, check them in: 88 | # .python-version 89 | 90 | # pipenv 91 | # According to pypa/pipenv#598, it is recommended to include Pipfile.lock in version control. 92 | # However, in case of collaboration, if having platform-specific dependencies or dependencies 93 | # having no cross-platform support, pipenv may install dependencies that don't work, or not 94 | # install all needed dependencies. 95 | #Pipfile.lock 96 | 97 | # poetry 98 | # Similar to Pipfile.lock, it is generally recommended to include poetry.lock in version control. 99 | # This is especially recommended for binary packages to ensure reproducibility, and is more 100 | # commonly ignored for libraries. 101 | # https://python-poetry.org/docs/basic-usage/#commit-your-poetrylock-file-to-version-control 102 | #poetry.lock 103 | 104 | # pdm 105 | # Similar to Pipfile.lock, it is generally recommended to include pdm.lock in version control. 
106 | #pdm.lock 107 | # pdm stores project-wide configurations in .pdm.toml, but it is recommended to not include it 108 | # in version control. 109 | # https://pdm.fming.dev/#use-with-ide 110 | .pdm.toml 111 | 112 | # PEP 582; used by e.g. github.com/David-OConnor/pyflow and github.com/pdm-project/pdm 113 | __pypackages__/ 114 | 115 | # Celery stuff 116 | celerybeat-schedule 117 | celerybeat.pid 118 | 119 | # SageMath parsed files 120 | *.sage.py 121 | 122 | # Environments 123 | .env 124 | .venv 125 | env/ 126 | venv/ 127 | ENV/ 128 | env.bak/ 129 | venv.bak/ 130 | 131 | # Spyder project settings 132 | .spyderproject 133 | .spyproject 134 | 135 | # Rope project settings 136 | .ropeproject 137 | 138 | # mkdocs documentation 139 | /site 140 | 141 | # mypy 142 | .mypy_cache/ 143 | .dmypy.json 144 | dmypy.json 145 | 146 | # Pyre type checker 147 | .pyre/ 148 | 149 | # pytype static type analyzer 150 | .pytype/ 151 | 152 | # Cython debug symbols 153 | cython_debug/ 154 | 155 | # PyCharm 156 | # JetBrains specific template is maintained in a separate JetBrains.gitignore that can 157 | # be added to the global gitignore or merged into this project gitignore. For a PyCharm 158 | # project, it is expected that all the files below are tracked. However, it can be 159 | # added to this gitignore file if there are multiple developers working on this project 160 | # and not all of them are using PyCharm. 161 | .idea/ 162 | 163 | # agentforge specific 164 | *.db 165 | *.sqlite3 166 | .agentforge/ 167 | vector_stores/ 168 | logs/ 169 | agentforge.zip 170 | db/ 171 | 172 | # Test agents and temporary crews (keep only production crews) 173 | crews/test_* 174 | crews/*_v[0-9]/ 175 | crews/*_test*/ 176 | crews/*_temp*/ 177 | 178 | # Keep only production crews 179 | !crews/simple_writer/ 180 | !crews/tech_blog_writer_final/ 181 | 182 | # IDE and OS 183 | .DS_Store 184 | .DS_Store? 185 | ._* 186 | .Spotlight-V100 187 | .Trashes 188 | ehthumbs.db 189 | Thumbs.db 190 | .vscode/ 191 | *.swp 192 | *.swo 193 | *~ 194 | 195 | # Temporary files 196 | *.tmp 197 | *.temp 198 | *.bak 199 | *.backup 200 | *.orig 201 | 202 | # Cache and temporary directories 203 | .cache/ 204 | .temp/ 205 | .tmp/ 206 | __pycache__/ 207 | *.pyc 208 | *.pyo 209 | *.pyd 210 | 211 | # Test and development files 212 | test_*.py 213 | *_test.py 214 | *_test_*.py 215 | debug_*.py 216 | temp_*.py 217 | 218 | # API keys and secrets 219 | .env.local 220 | .env.development 221 | .env.test 222 | .env.production 223 | secrets.json 224 | config.local.json 225 | 226 | # Generated files 227 | generated_*/ 228 | output/ 229 | results/ 230 | *.generated.* 231 | 232 | # Log files 233 | *.log 234 | logs/ 235 | *.out 236 | 237 | # OS generated files 238 | .DS_Store 239 | .DS_Store? 240 | ._* 241 | .Spotlight-V100 242 | .Trashes 243 | ehthumbs.db 244 | Thumbs.db 245 | Desktop.ini 246 | -------------------------------------------------------------------------------- /agentforge/core/config.py: -------------------------------------------------------------------------------- 1 | """ 2 | Configuration management for agentforge. 
3 | """ 4 | 5 | import os 6 | import yaml 7 | import time 8 | from typing import Dict, Any, Optional, List, Tuple 9 | from pydantic import BaseModel, Field 10 | from pathlib import Path 11 | 12 | 13 | class LLMConfig(BaseModel): 14 | """LLM configuration.""" 15 | provider: str = "openai" # openai, google, anthropic, deepseek, ollama, llamacpp, custom 16 | model: str = "gpt-4" 17 | temperature: float = 0.7 18 | max_tokens: int = 2000 19 | api_key: Optional[str] = None 20 | base_url: Optional[str] = None 21 | project_id: Optional[str] = None # For Google Cloud 22 | region: Optional[str] = None # For Google Cloud 23 | auth_file: Optional[str] = None # For Google Service Account 24 | 25 | # Ollama specific parameters 26 | ollama_host: Optional[str] = "http://localhost:11434" # Ollama server URL 27 | 28 | # LlamaCpp specific parameters 29 | model_path: Optional[str] = None # Path to model file 30 | n_ctx: Optional[int] = 2048 # Context window size 31 | n_gpu_layers: Optional[int] = 0 # Number of layers to offload to GPU 32 | 33 | class MemoryConfig(BaseModel): 34 | """Memory configuration.""" 35 | enabled: bool = True 36 | short_term_limit: int = 10 37 | long_term_limit: int = 100 38 | embedding_model: str = "sentence-transformers/all-MiniLM-L6-v2" 39 | 40 | class ToolConfig(BaseModel): 41 | """Tool configuration.""" 42 | enabled_categories: List[str] = ["web_search", "file_ops", "code_exec", "api_calls"] 43 | max_tools_per_agent: int = 5 44 | custom_tools_path: Optional[str] = None 45 | 46 | class agentforgeConfig(BaseModel): 47 | """Main AgentForge configuration.""" 48 | llm: LLMConfig = Field(default_factory=LLMConfig) 49 | memory: MemoryConfig = Field(default_factory=MemoryConfig) 50 | tools: ToolConfig = Field(default_factory=ToolConfig) 51 | 52 | # Agent generation settings 53 | max_agents_per_crew: int = 5 54 | default_agent_verbose: bool = True 55 | default_agent_max_iter: int = 5 56 | 57 | # Crew execution settings 58 | default_process: str = "sequential" 59 | execution_timeout: int = 300 # seconds 60 | 61 | # Logging and debugging 62 | log_level: str = "INFO" 63 | log_file: Optional[str] = None 64 | debug_mode: bool = False 65 | 66 | class Config: 67 | """Configuration manager for agentforge.""" 68 | 69 | def __init__(self, config_path: Optional[str] = None): 70 | """Initialize configuration.""" 71 | self.config_path = config_path or self._get_default_config_path() 72 | self._config = self._load_config() 73 | 74 | def _get_default_config_path(self) -> str: 75 | """Get default configuration file path.""" 76 | # First check for .agentforge/config.yaml in current directory 77 | local_config = Path(".agentforge/config.yaml") 78 | if local_config.exists(): 79 | return str(local_config) 80 | 81 | # Fallback to home directory 82 | config_dir = Path.home() / ".agentforge" 83 | config_dir.mkdir(exist_ok=True) 84 | return str(config_dir / "config.yaml") 85 | 86 | def _load_config(self) -> agentforgeConfig: 87 | """Load configuration from file or create default.""" 88 | if os.path.exists(self.config_path): 89 | try: 90 | with open(self.config_path, 'r') as f: 91 | config_data = yaml.safe_load(f) or {} 92 | return agentforgeConfig(**config_data) 93 | except Exception as e: 94 | print(f"Warning: Failed to load config from {self.config_path}: {e}") 95 | print("Using default configuration.") 96 | 97 | # Create default config and save it 98 | config = agentforgeConfig() 99 | self.save_config(config) 100 | return config 101 | 102 | def save_config(self, config: Optional[agentforgeConfig] = None): 103 
| """Save configuration to file.""" 104 | config_to_save = config or self._config 105 | 106 | try: 107 | os.makedirs(os.path.dirname(self.config_path), exist_ok=True) 108 | with open(self.config_path, 'w') as f: 109 | yaml.dump(config_to_save.model_dump(), f, default_flow_style=False) 110 | except Exception as e: 111 | print(f"Warning: Failed to save config to {self.config_path}: {e}") 112 | 113 | def get(self, key: str = None) -> Any: 114 | """Get configuration value or entire config.""" 115 | if key is None: 116 | return self._config 117 | 118 | # Support nested key access like "database.url" 119 | value = self._config 120 | for part in key.split('.'): 121 | value = getattr(value, part, None) 122 | if value is None: 123 | break 124 | return value 125 | 126 | def set(self, key: str, value: Any): 127 | """Set configuration value.""" 128 | # This would need more complex logic for nested updates 129 | # For now, we'll keep it simple 130 | if hasattr(self._config, key): 131 | setattr(self._config, key, value) 132 | self.save_config() 133 | 134 | @property 135 | def llm(self) -> LLMConfig: 136 | """Get LLM configuration.""" 137 | return self._config.llm 138 | 139 | @property 140 | def memory(self) -> MemoryConfig: 141 | """Get memory configuration.""" 142 | return self._config.memory 143 | 144 | @property 145 | def tools(self) -> ToolConfig: 146 | """Get tools configuration.""" 147 | return self._config.tools 148 | 149 | def update_from_env(self): 150 | """Update configuration from environment variables. (DISABLED - Use .agentforge/config.yaml only)""" 151 | # Environment variable override is disabled to force users to use .agentforge/config.yaml 152 | # This ensures all configuration is managed through the CLI and config file 153 | pass -------------------------------------------------------------------------------- /agentforge/examples/adaptive_agent_example.py: -------------------------------------------------------------------------------- 1 | """ 2 | Adaptive Agent Creation Example for AgentForge. 3 | 4 | This example demonstrates how the adaptive agent creation system works 5 | with reinforcement learning to automatically create specialized agents. 
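The module is standard-library only and every result it prints is simulated, so it
can be run directly as a walkthrough of the intended CLI workflow. The invocation
below is a sketch based on the repository layout, not a packaged entry point:

    python agentforge/examples/adaptive_agent_example.py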
6 | """ 7 | 8 | import time 9 | import json 10 | from pathlib import Path 11 | 12 | def demonstrate_adaptive_system(): 13 | """Demonstrate the adaptive agent creation system.""" 14 | print("🧠 AgentForge Adaptive Agent Creation Demo") 15 | print("=" * 50) 16 | 17 | # Simulate a scenario where we need adaptive agents 18 | print("\n📊 Scenario: E-commerce Analysis Crew") 19 | print("We have a crew that analyzes e-commerce data, but performance is degrading...") 20 | 21 | # Step 1: Analyze current performance 22 | print("\n🔍 Step 1: Analyzing current performance...") 23 | print("Command: agentforge adaptive analyze --crew ecommerce_analysis") 24 | 25 | # Simulate analysis results 26 | analysis_results = { 27 | "overall_score": 0.65, # Below threshold 28 | "recommendations": [ 29 | { 30 | "type": "performance_improvement", 31 | "issue": "low_success_rate", 32 | "current_value": 0.65, 33 | "threshold": 0.7, 34 | "suggestion": "Create specialized agent for failed task types" 35 | }, 36 | { 37 | "type": "quality_improvement", 38 | "issue": "low_quality", 39 | "current_value": 0.6, 40 | "threshold": 0.6, 41 | "suggestion": "Create quality-focused specialist agent" 42 | } 43 | ] 44 | } 45 | 46 | print(f"Overall Score: {analysis_results['overall_score']}") 47 | print(f"Recommendations: {len(analysis_results['recommendations'])}") 48 | 49 | # Step 2: RL system decides to create agent 50 | print("\n🤖 Step 2: RL system evaluating agent creation...") 51 | print("Command: agentforge rl step --crew ecommerce_analysis --context 'complex data analysis'") 52 | 53 | # Simulate RL decision 54 | rl_decision = { 55 | "state": "performance_degraded", 56 | "action": "create_optimization_agent", 57 | "confidence": 0.85, 58 | "reasoning": "Performance below threshold, optimization agent needed" 59 | } 60 | 61 | print(f"State: {rl_decision['state']}") 62 | print(f"Action: {rl_decision['action']}") 63 | print(f"Confidence: {rl_decision['confidence']}") 64 | 65 | # Step 3: Create adaptive agent 66 | print("\n⚒️ Step 3: Creating adaptive agent...") 67 | print("Command: agentforge adaptive create --crew ecommerce_analysis --context 'optimization needed'") 68 | 69 | # Simulate agent creation 70 | agent_creation = { 71 | "agent_name": "adaptive_optimization_1703123456", 72 | "specialization": "optimization_ecommerce", 73 | "role": "E-commerce Data Optimization Specialist", 74 | "tools": ["performance_analyzer", "optimizer", "benchmark"], 75 | "expected_improvement": 0.25 76 | } 77 | 78 | print(f"Agent Created: {agent_creation['agent_name']}") 79 | print(f"Specialization: {agent_creation['specialization']}") 80 | print(f"Expected Improvement: {agent_creation['expected_improvement']}") 81 | 82 | # Step 4: Train RL system 83 | print("\n🎓 Step 4: Training RL system...") 84 | print("Command: agentforge rl train --crew ecommerce_analysis --episodes 20") 85 | 86 | # Simulate training 87 | training_results = { 88 | "episodes_completed": 20, 89 | "average_reward": 0.75, 90 | "exploration_rate": 0.15, 91 | "learning_progress": "improving" 92 | } 93 | 94 | print(f"Episodes: {training_results['episodes_completed']}") 95 | print(f"Average Reward: {training_results['average_reward']}") 96 | print(f"Learning Progress: {training_results['learning_progress']}") 97 | 98 | # Step 5: Show insights 99 | print("\n📈 Step 5: RL System Insights...") 100 | print("Command: agentforge rl insights") 101 | 102 | insights = { 103 | "total_episodes": 20, 104 | "average_reward": 0.75, 105 | "exploration_rate": 0.15, 106 | "q_table_size": 24, 107 | 
"learning_progress": "improving" 108 | } 109 | 110 | print(f"Total Episodes: {insights['total_episodes']}") 111 | print(f"Q-Table Size: {insights['q_table_size']}") 112 | print(f"Exploration Rate: {insights['exploration_rate']}") 113 | 114 | print("\n✅ Demo completed! The adaptive system has learned to create specialized agents.") 115 | print("\n💡 Key Benefits:") 116 | print(" • Automatic agent creation based on performance") 117 | print(" • Reinforcement learning for optimal decisions") 118 | print(" • Specialized agents for specific tasks") 119 | print(" • Continuous learning and improvement") 120 | 121 | def demonstrate_rl_states(): 122 | """Demonstrate different RL states and actions.""" 123 | print("\n🧠 RL States and Actions") 124 | print("=" * 30) 125 | 126 | states_actions = { 127 | "NORMAL_OPERATION": ["NO_ACTION", "WAIT_AND_OBSERVE"], 128 | "PERFORMANCE_DEGRADED": ["CREATE_OPTIMIZATION_AGENT", "MODIFY_EXISTING_AGENT", "WAIT_AND_OBSERVE"], 129 | "HIGH_COMPLEXITY_TASK": ["CREATE_SPECIALIST_AGENT", "CREATE_CAPABILITY_AGENT", "WAIT_AND_OBSERVE"], 130 | "NEW_CAPABILITY_NEEDED": ["CREATE_CAPABILITY_AGENT", "MODIFY_EXISTING_AGENT", "WAIT_AND_OBSERVE"], 131 | "FAILURE_RECOVERY": ["CREATE_RECOVERY_AGENT", "MODIFY_EXISTING_AGENT", "WAIT_AND_OBSERVE"], 132 | "OPTIMIZATION_OPPORTUNITY": ["CREATE_OPTIMIZATION_AGENT", "MODIFY_EXISTING_AGENT", "WAIT_AND_OBSERVE"] 133 | } 134 | 135 | for state, actions in states_actions.items(): 136 | print(f"\n{state}:") 137 | for action in actions: 138 | print(f" • {action}") 139 | 140 | def demonstrate_specialization_patterns(): 141 | """Demonstrate specialization patterns for different domains.""" 142 | print("\n🎯 Specialization Patterns") 143 | print("=" * 30) 144 | 145 | patterns = { 146 | "research": ["web_search", "data_analysis", "report_generation"], 147 | "creative": ["content_generation", "design", "storytelling"], 148 | "technical": ["code_generation", "debugging", "system_analysis"], 149 | "analytical": ["data_processing", "statistical_analysis", "pattern_recognition"], 150 | "communication": ["writing", "presentation", "translation"] 151 | } 152 | 153 | for domain, tools in patterns.items(): 154 | print(f"\n{domain.upper()}:") 155 | for tool in tools: 156 | print(f" • {tool}") 157 | 158 | if __name__ == "__main__": 159 | demonstrate_adaptive_system() 160 | demonstrate_rl_states() 161 | demonstrate_specialization_patterns() 162 | -------------------------------------------------------------------------------- /agentforge/core/llm_provider.py: -------------------------------------------------------------------------------- 1 | """ 2 | LLM Provider factory for supporting multiple AI providers. 
3 | """ 4 | 5 | import os 6 | from typing import Dict, Any, Optional 7 | from abc import ABC, abstractmethod 8 | 9 | 10 | class LLMProvider(ABC): 11 | """Abstract base class for LLM providers.""" 12 | 13 | @abstractmethod 14 | def get_llm_config(self, config) -> Dict[str, Any]: 15 | """Get the LLM configuration for this provider.""" 16 | pass 17 | 18 | @abstractmethod 19 | def validate_config(self, config) -> bool: 20 | """Validate provider-specific configuration.""" 21 | pass 22 | 23 | 24 | class OpenAIProvider(LLMProvider): 25 | """OpenAI provider implementation.""" 26 | 27 | def get_llm_config(self, config) -> Dict[str, Any]: 28 | return { 29 | "model": config.llm.model or "gpt-4", 30 | "api_key": config.llm.api_key, 31 | "base_url": config.llm.base_url 32 | } 33 | 34 | def validate_config(self, config) -> bool: 35 | return bool(config.llm.api_key) 36 | 37 | 38 | class GoogleProvider(LLMProvider): 39 | """Google Gemini/Vertex AI provider implementation.""" 40 | 41 | def get_llm_config(self, config) -> Dict[str, Any]: 42 | return { 43 | "model": config.llm.model or "gemini-pro", 44 | "api_key": config.llm.api_key, 45 | "project_id": config.llm.project_id, 46 | "region": config.llm.region or 'us-central1', 47 | "auth_file": config.llm.auth_file 48 | } 49 | 50 | def validate_config(self, config) -> bool: 51 | return bool(config.llm.api_key or config.llm.auth_file) 52 | 53 | 54 | class AnthropicProvider(LLMProvider): 55 | """Anthropic Claude provider implementation.""" 56 | 57 | def get_llm_config(self, config) -> Dict[str, Any]: 58 | return { 59 | "model": config.llm.model or "claude-3-sonnet-20240229", 60 | "api_key": config.llm.api_key, 61 | "base_url": config.llm.base_url 62 | } 63 | 64 | def validate_config(self, config) -> bool: 65 | return bool(config.llm.api_key) 66 | 67 | 68 | class DeepSeekProvider(LLMProvider): 69 | """DeepSeek provider implementation.""" 70 | 71 | def get_llm_config(self, config) -> Dict[str, Any]: 72 | return { 73 | "model": config.llm.model or "deepseek-chat", 74 | "api_key": config.llm.api_key, 75 | "base_url": config.llm.base_url or "https://api.deepseek.com/v1" 76 | } 77 | 78 | def validate_config(self, config) -> bool: 79 | return bool(config.llm.api_key) 80 | 81 | 82 | class OllamaProvider(LLMProvider): 83 | """Ollama provider implementation for local models.""" 84 | 85 | def get_llm_config(self, config) -> Dict[str, Any]: 86 | return { 87 | "model": config.llm.model or "llama3.1", 88 | "base_url": config.llm.ollama_host or "http://localhost:11434", 89 | "api_key": "ollama" # Ollama doesn't require API key 90 | } 91 | 92 | def validate_config(self, config) -> bool: 93 | # Ollama doesn't require API key, just needs to be running 94 | return True 95 | 96 | 97 | class LlamaCppProvider(LLMProvider): 98 | """LlamaCpp provider implementation for local models.""" 99 | 100 | def get_llm_config(self, config) -> Dict[str, Any]: 101 | return { 102 | "model": config.llm.model or "llama-3.1-8b", 103 | "model_path": config.llm.model_path, 104 | "n_ctx": config.llm.n_ctx or 2048, 105 | "n_gpu_layers": config.llm.n_gpu_layers or 0, 106 | "temperature": config.llm.temperature or 0.7, 107 | "max_tokens": config.llm.max_tokens or 2000 108 | } 109 | 110 | def validate_config(self, config) -> bool: 111 | # LlamaCpp requires model_path 112 | return bool(config.llm.model_path) 113 | 114 | 115 | class CustomProvider(LLMProvider): 116 | """Custom provider for any OpenAI-compatible API.""" 117 | 118 | def get_llm_config(self, config) -> Dict[str, Any]: 119 | return { 120 | 
"model": config.llm.model, 121 | "api_key": config.llm.api_key, 122 | "base_url": config.llm.base_url 123 | } 124 | 125 | def validate_config(self, config) -> bool: 126 | return bool(config.llm.api_key and config.llm.base_url) 127 | 128 | 129 | class LLMProviderFactory: 130 | """Factory for creating LLM providers.""" 131 | 132 | _providers = { 133 | "openai": OpenAIProvider(), 134 | "google": GoogleProvider(), 135 | "anthropic": AnthropicProvider(), 136 | "deepseek": DeepSeekProvider(), 137 | "ollama": OllamaProvider(), 138 | "llamacpp": LlamaCppProvider(), 139 | "custom": CustomProvider() 140 | } 141 | 142 | @classmethod 143 | def get_provider(cls, provider_name: str) -> LLMProvider: 144 | """Get a provider by name.""" 145 | provider = cls._providers.get(provider_name.lower()) 146 | if not provider: 147 | raise ValueError(f"Unsupported provider: {provider_name}") 148 | return provider 149 | 150 | @classmethod 151 | def get_llm_config(cls, config) -> Dict[str, Any]: 152 | """Get LLM configuration for the specified provider.""" 153 | provider = cls.get_provider(config.llm.provider) 154 | return provider.get_llm_config(config) 155 | 156 | @classmethod 157 | def validate_config(cls, config) -> bool: 158 | """Validate configuration for the specified provider.""" 159 | try: 160 | provider = cls.get_provider(config.llm.provider) 161 | return provider.validate_config(config) 162 | except ValueError: 163 | return False 164 | 165 | @classmethod 166 | def list_providers(cls) -> list: 167 | """List all available providers.""" 168 | return list(cls._providers.keys()) 169 | 170 | 171 | def get_llm_config_for_crewai(config) -> Dict[str, Any]: 172 | """ 173 | Get LLM configuration formatted for CrewAI usage. 174 | This function adapts provider configs to CrewAI's expected format. 
175 | """ 176 | base_config = LLMProviderFactory.get_llm_config(config) 177 | 178 | # Format model name with provider prefix for LiteLLM 179 | provider = config.llm.provider.lower() 180 | model = base_config.get("model", "") 181 | 182 | # Apply provider prefix if not already present 183 | if provider == "anthropic" and not model.startswith("anthropic/"): 184 | base_config["model"] = f"anthropic/{model}" 185 | elif provider == "openai" and not model.startswith("openai/") and not model.startswith("gpt"): 186 | base_config["model"] = f"openai/{model}" 187 | elif provider == "google" and not model.startswith("google/") and not model.startswith("gemini"): 188 | base_config["model"] = f"google/{model}" 189 | elif provider == "deepseek" and not model.startswith("deepseek/"): 190 | base_config["model"] = f"deepseek/{model}" 191 | elif provider == "ollama": 192 | # Ollama uses ollama/ prefix for litellm 193 | base_config["model"] = f"ollama/{model}" 194 | base_config["api_base"] = base_config.get("base_url") 195 | elif provider == "llamacpp": 196 | # LlamaCpp uses direct model names 197 | base_config["model"] = model 198 | # Add LlamaCpp specific parameters 199 | if "model_path" in base_config: 200 | base_config["model_path"] = base_config["model_path"] 201 | if "n_ctx" in base_config: 202 | base_config["n_ctx"] = base_config["n_ctx"] 203 | if "n_gpu_layers" in base_config: 204 | base_config["n_gpu_layers"] = base_config["n_gpu_layers"] 205 | elif provider == "custom": 206 | # For custom providers, use model name as-is since custom base_url handles routing 207 | base_config["model"] = model 208 | # Ensure LiteLLM gets the custom configuration 209 | base_config["api_base"] = base_config.get("base_url") 210 | 211 | # Add common CrewAI parameters 212 | base_config.update({ 213 | "provider": provider, 214 | "temperature": config.llm.temperature, 215 | "max_tokens": config.llm.max_tokens 216 | }) 217 | 218 | return base_config -------------------------------------------------------------------------------- /agentforge/core/ai_tool_creator.py: -------------------------------------------------------------------------------- 1 | """ 2 | AI-Powered Tool Creator for agentforge. 3 | 4 | This module provides an intelligent tool creation system using CrewAI agents 5 | that can analyze user requirements and generate complete, working tools. 6 | """ 7 | 8 | import os 9 | from typing import Optional, Dict, Any, List 10 | from pathlib import Path 11 | 12 | from ..agents.custom_tool_generator_agent import CustomToolGeneratorAgent, GeneratedToolResult 13 | 14 | 15 | class AIToolCreator: 16 | """AI-powered tool creator using CrewAI agents for intelligent tool generation.""" 17 | 18 | def __init__(self, llm_config: Optional[Dict[str, Any]] = None, require_llm: bool = True): 19 | """Initialize the AI tool creator.""" 20 | self.llm_config = llm_config or {} 21 | 22 | # Only validate LLM config if we require it (not for stats/listing) 23 | if require_llm and not self._validate_llm_config(): 24 | raise ValueError("LLM configuration required for AI tool creation. 
Please set OPENAI_API_KEY.") 25 | 26 | # Only initialize the generator agent if LLM is required 27 | self.tool_generator_agent = None 28 | if require_llm: 29 | self.tool_generator_agent = CustomToolGeneratorAgent(llm_config) 30 | 31 | self.tools_directory = Path("/tmp/agentforge_custom_tools") 32 | self.tools_directory.mkdir(exist_ok=True) 33 | 34 | def _validate_llm_config(self) -> bool: 35 | """Validate that LLM configuration is available.""" 36 | # Check for OpenAI API key 37 | if os.getenv('OPENAI_API_KEY'): 38 | return True 39 | 40 | # Check for other LLM configurations 41 | if self.llm_config.get('api_key'): 42 | return True 43 | 44 | return False 45 | 46 | def create_custom_tool(self, user_description: str, 47 | show_code: bool = True, 48 | auto_confirm: bool = False) -> Dict[str, Any]: 49 | """Create a custom tool from user description using AI agents.""" 50 | 51 | if not self.tool_generator_agent: 52 | return { 53 | "success": False, 54 | "message": "AI tool generator not initialized. Please provide LLM configuration.", 55 | "error": "Missing LLM configuration" 56 | } 57 | 58 | try: 59 | print(f"\\n[bold blue]🤖 AI-Powered Tool Creation with CrewAI Agents[/bold blue]") 60 | print(f"[cyan]Description:[/cyan] {user_description}") 61 | print(f"[dim]Using intelligent agents to analyze and generate your tool...[/dim]") 62 | 63 | # Generate the tool using AI agents 64 | result = self.tool_generator_agent.generate_custom_tool( 65 | user_description=user_description, 66 | show_code=show_code, 67 | auto_confirm=auto_confirm 68 | ) 69 | 70 | # Process the results 71 | if result.validation_passed: 72 | # Check if there were validation warnings (like missing dependencies) 73 | warnings = [err for err in result.validation_errors if 'dependency' in err.lower()] 74 | 75 | return { 76 | "success": True, 77 | "message": f"Successfully created {result.name}", 78 | "tool_name": result.name, 79 | "tool_file": result.file_path, 80 | "category": result.category, 81 | "description": result.description, 82 | "dependencies": result.dependencies, 83 | "generated_with": "AI Agents", 84 | "warnings": warnings 85 | } 86 | else: 87 | return { 88 | "success": False, 89 | "message": f"Tool creation failed validation: {'; '.join(result.validation_errors)}", 90 | "errors": result.validation_errors, 91 | "generated_code": result.full_code if result.full_code else None 92 | } 93 | 94 | except Exception as e: 95 | return { 96 | "success": False, 97 | "message": f"AI tool creation failed: {str(e)}", 98 | "error": str(e) 99 | } 100 | 101 | def list_ai_generated_tools(self) -> List[Dict[str, Any]]: 102 | """List all AI-generated custom tools.""" 103 | tools = [] 104 | 105 | if not self.tools_directory.exists(): 106 | return tools 107 | 108 | for tool_file in self.tools_directory.glob("*_generated.py"): 109 | try: 110 | # Read file to extract metadata 111 | with open(tool_file, 'r') as f: 112 | content = f.read() 113 | 114 | # Extract tool info 115 | tool_info = { 116 | "file": str(tool_file), 117 | "name": tool_file.stem.replace("_generated", ""), 118 | "created": tool_file.stat().st_mtime, 119 | "type": "AI Generated", 120 | "size": len(content) 121 | } 122 | 123 | # Try to extract description from docstring or comments 124 | lines = content.split('\\n') 125 | for line in lines: 126 | if 'description' in line.lower() and '=' in line: 127 | # Extract description from assignment 128 | desc_part = line.split('=', 1)[1].strip().strip('"').strip("'") 129 | tool_info["description"] = desc_part[:100] + "..." 
if len(desc_part) > 100 else desc_part 130 | break 131 | else: 132 | tool_info["description"] = "AI-generated CrewAI tool" 133 | 134 | tools.append(tool_info) 135 | 136 | except Exception as e: 137 | # Skip files that can't be processed 138 | continue 139 | 140 | return sorted(tools, key=lambda x: x['created'], reverse=True) 141 | 142 | def get_generation_stats(self) -> Dict[str, Any]: 143 | """Get statistics about AI-generated tools.""" 144 | tools = self.list_ai_generated_tools() 145 | 146 | if not tools: 147 | return { 148 | "total_tools": 0, 149 | "total_size": 0, 150 | "avg_size": 0, 151 | "newest_tool": None, 152 | "oldest_tool": None, 153 | "tools_directory": str(self.tools_directory) 154 | } 155 | 156 | total_size = sum(tool['size'] for tool in tools) 157 | 158 | return { 159 | "total_tools": len(tools), 160 | "total_size": total_size, 161 | "avg_size": total_size // len(tools) if tools else 0, 162 | "newest_tool": tools[0] if tools else None, 163 | "oldest_tool": tools[-1] if tools else None, 164 | "tools_directory": str(self.tools_directory) 165 | } 166 | 167 | def delete_ai_tool(self, tool_name: str) -> Dict[str, Any]: 168 | """Delete an AI-generated tool.""" 169 | try: 170 | tool_file = self.tools_directory / f"{tool_name.lower()}_generated.py" 171 | 172 | if not tool_file.exists(): 173 | return {"success": False, "message": f"AI-generated tool file not found: {tool_name}"} 174 | 175 | tool_file.unlink() 176 | 177 | return {"success": True, "message": f"AI-generated tool {tool_name} deleted successfully"} 178 | 179 | except Exception as e: 180 | return {"success": False, "message": f"Failed to delete AI tool: {str(e)}"} 181 | 182 | def validate_ai_tool(self, tool_name: str) -> Dict[str, Any]: 183 | """Validate an AI-generated tool.""" 184 | try: 185 | tool_file = self.tools_directory / f"{tool_name.lower()}_generated.py" 186 | 187 | if not tool_file.exists(): 188 | return {"valid": False, "errors": [f"Tool file not found: {tool_name}"]} 189 | 190 | # Use the same validation logic as the generator agent 191 | validation_passed, validation_errors = self.tool_generator_agent._test_generated_tool( 192 | str(tool_file), tool_name 193 | ) 194 | 195 | return { 196 | "valid": validation_passed, 197 | "errors": validation_errors, 198 | "file_path": str(tool_file) 199 | } 200 | 201 | except Exception as e: 202 | return {"valid": False, "errors": [f"Validation failed: {str(e)}"]} 203 | 204 | 205 | # Compatibility function for existing code 206 | def create_intelligent_tool(user_description: str, llm_config: Optional[Dict[str, Any]] = None, 207 | show_code: bool = True, auto_confirm: bool = False) -> Dict[str, Any]: 208 | """ 209 | Convenience function for creating intelligent tools. 210 | 211 | This function provides backward compatibility and a simple interface 212 | for creating AI-powered tools. 213 | """ 214 | try: 215 | creator = AIToolCreator(llm_config) 216 | return creator.create_custom_tool(user_description, show_code, auto_confirm) 217 | except Exception as e: 218 | return { 219 | "success": False, 220 | "message": f"Failed to initialize AI tool creator: {str(e)}", 221 | "error": str(e) 222 | } -------------------------------------------------------------------------------- /crews/simple_writer/src/simple_writer/crew.py: -------------------------------------------------------------------------------- 1 | """ 2 | simple_writer Crew Implementation 3 | 4 | This module contains the main crew logic and orchestration. 
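Typical usage (a sketch; assumes the config/ YAML files shipped with this crew are
present and that an API key such as OPENAI_API_KEY is available in the environment):

    from simple_writer.crew import SimpleWriterCrew

    crew = SimpleWriterCrew()
    print(crew.get_crew_info())
    result = crew.run("Write a short introduction to Python dataclasses")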
5 | """ 6 | 7 | import yaml 8 | import os 9 | from pathlib import Path 10 | from crewai import Agent, Task, Crew, Process, LLM 11 | from crewai.tools import BaseTool 12 | from .tools.custom_tools import get_tools_for_agent 13 | 14 | 15 | class SimpleWriterCrew: 16 | """Main crew class for simple_writer.""" 17 | 18 | def __init__(self): 19 | """Initialize the crew.""" 20 | self.config_path = Path(__file__).parent.parent.parent / "config" 21 | self.agents_config = self._load_config("agents.yaml") 22 | self.tasks_config = self._load_config("tasks.yaml") 23 | 24 | # Setup LLM configuration 25 | self.llm = self._setup_llm() 26 | 27 | # Initialize agents and tasks 28 | self.agents = self._create_agents() 29 | self.tasks = self._create_tasks() 30 | 31 | # Create the crew 32 | self.crew = Crew( 33 | agents=list(self.agents.values()), 34 | tasks=list(self.tasks.values()), 35 | process=Process.sequential, 36 | verbose=True, 37 | memory=False # Can be enabled as needed 38 | ) 39 | 40 | def _load_config(self, filename: str) -> dict: 41 | """Load configuration from YAML file.""" 42 | config_file = self.config_path / filename 43 | with open(config_file, 'r') as f: 44 | return yaml.safe_load(f) 45 | 46 | def _setup_llm(self) -> LLM: 47 | """Setup LLM configuration for CrewAI.""" 48 | # Check for agentforge environment variables first 49 | provider = os.getenv('agentforge_LLM_PROVIDER', 'openai') 50 | model = os.getenv('agentforge_LLM_MODEL', 'gpt-4') 51 | api_key = os.getenv('agentforge_LLM_API_KEY') 52 | base_url = os.getenv('agentforge_LLM_BASE_URL') 53 | 54 | # If custom provider configuration exists, use it 55 | if provider == 'custom' and api_key and base_url: 56 | return LLM( 57 | model=model, 58 | api_key=api_key, 59 | base_url=base_url, 60 | temperature=0.7, 61 | max_tokens=2000 62 | ) 63 | 64 | # Check if agents_config has LLM configuration 65 | if hasattr(self.agents_config, 'get') and self.agents_config.get('llm'): 66 | llm_config = self.agents_config['llm'] 67 | return LLM( 68 | model=llm_config.get('model', 'gpt-4'), 69 | api_key=llm_config.get('api_key'), 70 | base_url=llm_config.get('base_url'), 71 | temperature=llm_config.get('temperature', 0.7), 72 | max_tokens=llm_config.get('max_tokens', 2000) 73 | ) 74 | 75 | # Otherwise, use standard providers (OpenAI, Anthropic, etc.) 
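        # A single provider key is enough for this fallback chain; for example
        # (sketch, the key values are placeholders):
        #   export OPENAI_API_KEY=sk-...      selects LLM(model='gpt-4') below
        #   export ANTHROPIC_API_KEY=...      selects claude-3-sonnet-20240229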
76 | # CrewAI will auto-detect based on environment variables 77 | if os.getenv('OPENAI_API_KEY'): 78 | return LLM(model='gpt-4', temperature=0.7) 79 | elif os.getenv('ANTHROPIC_API_KEY'): 80 | return LLM(model='claude-3-sonnet-20240229', temperature=0.7) 81 | elif os.getenv('GOOGLE_API_KEY'): 82 | return LLM(model='gemini-pro', temperature=0.7) 83 | else: 84 | # Default to OpenAI (will fail if no API key, but that's expected) 85 | return LLM(model='gpt-3.5-turbo', temperature=0.7) 86 | 87 | def _create_agent_llm(self, llm_config: dict) -> LLM: 88 | """Create LLM instance for a specific agent.""" 89 | provider = llm_config.get('provider', 'openai') 90 | model = llm_config.get('model', 'gpt-4') 91 | 92 | # Extract all possible LLM parameters from config 93 | llm_params = { 94 | 'model': model, 95 | 'temperature': llm_config.get('temperature', 0.7), 96 | 'max_tokens': llm_config.get('max_tokens'), 97 | 'top_p': llm_config.get('top_p'), 98 | 'frequency_penalty': llm_config.get('frequency_penalty'), 99 | 'presence_penalty': llm_config.get('presence_penalty'), 100 | 'stop': llm_config.get('stop'), 101 | 'timeout': llm_config.get('timeout'), 102 | 'max_retries': llm_config.get('max_retries'), 103 | 'api_key': llm_config.get('api_key'), 104 | 'base_url': llm_config.get('base_url'), 105 | 'api_version': llm_config.get('api_version'), 106 | 'organization': llm_config.get('organization') 107 | } 108 | 109 | # Remove None values to avoid passing them to LLM constructor 110 | llm_params = {k: v for k, v in llm_params.items() if v is not None} 111 | 112 | # Check if environment variables override the config 113 | env_provider = os.getenv('agentforge_LLM_PROVIDER') 114 | env_model = os.getenv('agentforge_LLM_MODEL') 115 | env_api_key = os.getenv('agentforge_LLM_API_KEY') 116 | env_base_url = os.getenv('agentforge_LLM_BASE_URL') 117 | 118 | # If environment variables are set, use them (highest priority) 119 | if env_provider and env_model: 120 | llm_params['model'] = env_model 121 | if env_provider == 'custom' and env_api_key and env_base_url: 122 | llm_params['api_key'] = env_api_key 123 | llm_params['base_url'] = env_base_url 124 | return LLM(**llm_params) 125 | 126 | # Otherwise use agent-specific config 127 | return LLM(**llm_params) 128 | 129 | def _create_agents(self) -> dict: 130 | """Create agents from configuration.""" 131 | agents = {} 132 | 133 | # Check if agents_config is properly loaded 134 | if not self.agents_config: 135 | raise ValueError("agents_config is empty or not loaded properly") 136 | 137 | for agent_name, agent_config in self.agents_config.items(): 138 | # Get tools for this agent 139 | tools = get_tools_for_agent(agent_config.get('tools', [])) 140 | 141 | # Use agent-specific LLM config if available, otherwise use default 142 | agent_llm_config = agent_config.get('llm', {}) 143 | if agent_llm_config: 144 | agent_llm = self._create_agent_llm(agent_llm_config) 145 | else: 146 | agent_llm = self.llm 147 | 148 | # Create agent with proper error handling 149 | try: 150 | agent = Agent( 151 | role=agent_config.get('role', f'Agent {agent_name}'), 152 | goal=agent_config.get('goal', 'Complete assigned tasks'), 153 | backstory=agent_config.get('backstory', 'A helpful AI agent'), 154 | llm=agent_llm, 155 | tools=tools, 156 | verbose=agent_config.get('verbose', True), 157 | allow_delegation=agent_config.get('allow_delegation', False), 158 | max_iter=agent_config.get('max_iter', 3), 159 | max_execution_time=agent_config.get('max_execution_time') 160 | ) 161 | agents[agent_name] = agent 162 
| except Exception as e: 163 | print(f"Error creating agent {agent_name}: {e}") 164 | continue 165 | 166 | return agents 167 | 168 | def _create_tasks(self) -> dict: 169 | """Create tasks from configuration.""" 170 | tasks = {} 171 | 172 | # Check if tasks_config is properly loaded 173 | if not self.tasks_config: 174 | raise ValueError("tasks_config is empty or not loaded properly") 175 | 176 | # First pass: Create all tasks without context 177 | for task_name, task_config in self.tasks_config.items(): 178 | # Get the agent for this task 179 | agent_name = task_config['agent'] 180 | agent = self.agents[agent_name] 181 | 182 | task = Task( 183 | description=task_config['description'], 184 | expected_output=task_config['expected_output'], 185 | agent=agent, 186 | context=None # Will be set in second pass 187 | ) 188 | 189 | tasks[task_name] = task 190 | 191 | # Second pass: Set up context relationships 192 | for task_name, task_config in self.tasks_config.items(): 193 | context_tasks = [] 194 | if 'context' in task_config: 195 | for context_task_name in task_config['context']: 196 | if context_task_name in tasks: 197 | context_tasks.append(tasks[context_task_name]) 198 | print(f"[INFO] Context linked: {task_name} <- {context_task_name}") 199 | else: 200 | print(f"[WARN] Context task '{context_task_name}' not found for task '{task_name}'") 201 | 202 | # Update task with context 203 | if context_tasks: 204 | tasks[task_name].context = context_tasks 205 | print(f"[INFO] Task '{task_name}' has {len(context_tasks)} context task(s)") 206 | else: 207 | print(f"[INFO] Task '{task_name}' has no context (root task)") 208 | 209 | return tasks 210 | 211 | def run(self, task_input: str = None) -> str: 212 | """Run the crew with optional task input.""" 213 | try: 214 | # If task input is provided, update the main task description 215 | if task_input and task_input.strip(): 216 | print(f"\n[INFO] Task Input Received: {task_input}") 217 | 218 | # Update the main task with the specific input 219 | if 'main_task' in self.tasks: 220 | original_desc = self.tasks_config['main_task']['description'] 221 | enhanced_desc = f"{original_desc}\n\nSpecific Task: {task_input}" 222 | self.tasks['main_task'].description = enhanced_desc 223 | print(f"[INFO] Updated main task description with input") 224 | else: 225 | print("[WARN] No main_task found to update") 226 | else: 227 | print("[INFO] No specific task input provided, using default task description") 228 | 229 | # Execute the crew 230 | print("\n[INFO] Starting crew execution...") 231 | result = self.crew.kickoff() 232 | return str(result) 233 | 234 | except Exception as e: 235 | error_msg = f"Crew execution failed: {str(e)}" 236 | print(f"[ERROR] {error_msg}") 237 | return error_msg 238 | 239 | def get_crew_info(self) -> dict: 240 | """Get information about the crew configuration.""" 241 | return { 242 | 'name': 'simple_writer', 243 | 'description': 'AI-orchestrated crew for: Create a simple text generator that writes basic content without external tools', 244 | 'agents': list(self.agents.keys()), 245 | 'tasks': list(self.tasks.keys()), 246 | 'process_type': 'sequential' 247 | } 248 | -------------------------------------------------------------------------------- /crews/tech_blog_writer_final/src/tech_blog_writer_final/crew.py: -------------------------------------------------------------------------------- 1 | """ 2 | tech_blog_writer_final Crew Implementation 3 | 4 | This module contains the main crew logic and orchestration. 
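The crew is configured through config/agents.yaml and config/tasks.yaml (environment
variables can override the LLM settings). A sketch of the shape the loaders below
expect; only the keys read by _create_agents, _create_agent_llm and _create_tasks are
shown, and every value here is illustrative:

    # agents.yaml
    writer:
      role: "Technology Blog Writer"
      goal: "Write clear, informative blog posts"
      backstory: "An experienced technology journalist"
      tools: []
      max_iter: 3
      llm:                      # optional per-agent override
        model: "gpt-4"
        temperature: 0.7

    # tasks.yaml
    main_task:
      description: "Write a blog post about the requested topic"
      expected_output: "A complete blog post in markdown"
      agent: writer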
5 | """ 6 | 7 | import yaml 8 | import os 9 | from pathlib import Path 10 | from crewai import Agent, Task, Crew, Process, LLM 11 | from crewai.tools import BaseTool 12 | from .tools.custom_tools import get_tools_for_agent 13 | 14 | 15 | class TechBlogWriterFinalCrew: 16 | """Main crew class for tech_blog_writer_final.""" 17 | 18 | def __init__(self): 19 | """Initialize the crew.""" 20 | self.config_path = Path(__file__).parent.parent.parent / "config" 21 | self.agents_config = self._load_config("agents.yaml") 22 | self.tasks_config = self._load_config("tasks.yaml") 23 | 24 | # Setup LLM configuration 25 | self.llm = self._setup_llm() 26 | 27 | # Initialize agents and tasks 28 | self.agents = self._create_agents() 29 | self.tasks = self._create_tasks() 30 | 31 | # Create the crew 32 | self.crew = Crew( 33 | agents=list(self.agents.values()), 34 | tasks=list(self.tasks.values()), 35 | process=Process.sequential, 36 | verbose=True, 37 | memory=False # Can be enabled as needed 38 | ) 39 | 40 | def _load_config(self, filename: str) -> dict: 41 | """Load configuration from YAML file.""" 42 | config_file = self.config_path / filename 43 | with open(config_file, 'r') as f: 44 | return yaml.safe_load(f) 45 | 46 | def _setup_llm(self) -> LLM: 47 | """Setup LLM configuration for CrewAI.""" 48 | # Check for agentforge environment variables first 49 | provider = os.getenv('agentforge_LLM_PROVIDER', 'openai') 50 | model = os.getenv('agentforge_LLM_MODEL', 'gpt-4') 51 | api_key = os.getenv('agentforge_LLM_API_KEY') 52 | base_url = os.getenv('agentforge_LLM_BASE_URL') 53 | 54 | # If custom provider configuration exists, use it 55 | if provider == 'custom' and api_key and base_url: 56 | return LLM( 57 | model=model, 58 | api_key=api_key, 59 | base_url=base_url, 60 | temperature=0.7, 61 | max_tokens=2000 62 | ) 63 | 64 | # Check if agents_config has LLM configuration 65 | if hasattr(self.agents_config, 'get') and self.agents_config.get('llm'): 66 | llm_config = self.agents_config['llm'] 67 | return LLM( 68 | model=llm_config.get('model', 'gpt-4'), 69 | api_key=llm_config.get('api_key'), 70 | base_url=llm_config.get('base_url'), 71 | temperature=llm_config.get('temperature', 0.7), 72 | max_tokens=llm_config.get('max_tokens', 2000) 73 | ) 74 | 75 | # Otherwise, use standard providers (OpenAI, Anthropic, etc.) 
76 | # CrewAI will auto-detect based on environment variables 77 | if os.getenv('OPENAI_API_KEY'): 78 | return LLM(model='gpt-4', temperature=0.7) 79 | elif os.getenv('ANTHROPIC_API_KEY'): 80 | return LLM(model='claude-3-sonnet-20240229', temperature=0.7) 81 | elif os.getenv('GOOGLE_API_KEY'): 82 | return LLM(model='gemini-pro', temperature=0.7) 83 | else: 84 | # Default to OpenAI (will fail if no API key, but that's expected) 85 | return LLM(model='gpt-3.5-turbo', temperature=0.7) 86 | 87 | def _create_agent_llm(self, llm_config: dict) -> LLM: 88 | """Create LLM instance for a specific agent.""" 89 | provider = llm_config.get('provider', 'openai') 90 | model = llm_config.get('model', 'gpt-4') 91 | 92 | # Extract all possible LLM parameters from config 93 | llm_params = { 94 | 'model': model, 95 | 'temperature': llm_config.get('temperature', 0.7), 96 | 'max_tokens': llm_config.get('max_tokens'), 97 | 'top_p': llm_config.get('top_p'), 98 | 'frequency_penalty': llm_config.get('frequency_penalty'), 99 | 'presence_penalty': llm_config.get('presence_penalty'), 100 | 'stop': llm_config.get('stop'), 101 | 'timeout': llm_config.get('timeout'), 102 | 'max_retries': llm_config.get('max_retries'), 103 | 'api_key': llm_config.get('api_key'), 104 | 'base_url': llm_config.get('base_url'), 105 | 'api_version': llm_config.get('api_version'), 106 | 'organization': llm_config.get('organization') 107 | } 108 | 109 | # Remove None values to avoid passing them to LLM constructor 110 | llm_params = {k: v for k, v in llm_params.items() if v is not None} 111 | 112 | # Check if environment variables override the config 113 | env_provider = os.getenv('agentforge_LLM_PROVIDER') 114 | env_model = os.getenv('agentforge_LLM_MODEL') 115 | env_api_key = os.getenv('agentforge_LLM_API_KEY') 116 | env_base_url = os.getenv('agentforge_LLM_BASE_URL') 117 | 118 | # If environment variables are set, use them (highest priority) 119 | if env_provider and env_model: 120 | llm_params['model'] = env_model 121 | if env_provider == 'custom' and env_api_key and env_base_url: 122 | llm_params['api_key'] = env_api_key 123 | llm_params['base_url'] = env_base_url 124 | return LLM(**llm_params) 125 | 126 | # Otherwise use agent-specific config 127 | return LLM(**llm_params) 128 | 129 | def _create_agents(self) -> dict: 130 | """Create agents from configuration.""" 131 | agents = {} 132 | 133 | # Check if agents_config is properly loaded 134 | if not self.agents_config: 135 | raise ValueError("agents_config is empty or not loaded properly") 136 | 137 | for agent_name, agent_config in self.agents_config.items(): 138 | # Get tools for this agent 139 | tools = get_tools_for_agent(agent_config.get('tools', [])) 140 | 141 | # Use agent-specific LLM config if available, otherwise use default 142 | agent_llm_config = agent_config.get('llm', {}) 143 | if agent_llm_config: 144 | agent_llm = self._create_agent_llm(agent_llm_config) 145 | else: 146 | agent_llm = self.llm 147 | 148 | # Create agent with proper error handling 149 | try: 150 | agent = Agent( 151 | role=agent_config.get('role', f'Agent {agent_name}'), 152 | goal=agent_config.get('goal', 'Complete assigned tasks'), 153 | backstory=agent_config.get('backstory', 'A helpful AI agent'), 154 | llm=agent_llm, 155 | tools=tools, 156 | verbose=agent_config.get('verbose', True), 157 | allow_delegation=agent_config.get('allow_delegation', False), 158 | max_iter=agent_config.get('max_iter', 3), 159 | max_execution_time=agent_config.get('max_execution_time') 160 | ) 161 | agents[agent_name] = agent 162 
| except Exception as e: 163 | print(f"Error creating agent {agent_name}: {e}") 164 | continue 165 | 166 | return agents 167 | 168 | def _create_tasks(self) -> dict: 169 | """Create tasks from configuration.""" 170 | tasks = {} 171 | 172 | # Check if tasks_config is properly loaded 173 | if not self.tasks_config: 174 | raise ValueError("tasks_config is empty or not loaded properly") 175 | 176 | # First pass: Create all tasks without context 177 | for task_name, task_config in self.tasks_config.items(): 178 | # Get the agent for this task 179 | agent_name = task_config['agent'] 180 | agent = self.agents[agent_name] 181 | 182 | task = Task( 183 | description=task_config['description'], 184 | expected_output=task_config['expected_output'], 185 | agent=agent, 186 | context=None # Will be set in second pass 187 | ) 188 | 189 | tasks[task_name] = task 190 | 191 | # Second pass: Set up context relationships 192 | for task_name, task_config in self.tasks_config.items(): 193 | context_tasks = [] 194 | if 'context' in task_config: 195 | for context_task_name in task_config['context']: 196 | if context_task_name in tasks: 197 | context_tasks.append(tasks[context_task_name]) 198 | print(f"[INFO] Context linked: {task_name} <- {context_task_name}") 199 | else: 200 | print(f"[WARN] Context task '{context_task_name}' not found for task '{task_name}'") 201 | 202 | # Update task with context 203 | if context_tasks: 204 | tasks[task_name].context = context_tasks 205 | print(f"[INFO] Task '{task_name}' has {len(context_tasks)} context task(s)") 206 | else: 207 | print(f"[INFO] Task '{task_name}' has no context (root task)") 208 | 209 | return tasks 210 | 211 | def run(self, task_input: str = None) -> str: 212 | """Run the crew with optional task input.""" 213 | try: 214 | # If task input is provided, update the main task description 215 | if task_input and task_input.strip(): 216 | print(f"\n[INFO] Task Input Received: {task_input}") 217 | 218 | # Update the main task with the specific input 219 | if 'main_task' in self.tasks: 220 | original_desc = self.tasks_config['main_task']['description'] 221 | enhanced_desc = f"{original_desc}\n\nSpecific Task: {task_input}" 222 | self.tasks['main_task'].description = enhanced_desc 223 | print(f"[INFO] Updated main task description with input") 224 | else: 225 | print("[WARN] No main_task found to update") 226 | else: 227 | print("[INFO] No specific task input provided, using default task description") 228 | 229 | # Execute the crew 230 | print("\n[INFO] Starting crew execution...") 231 | result = self.crew.kickoff() 232 | return str(result) 233 | 234 | except Exception as e: 235 | error_msg = f"Crew execution failed: {str(e)}" 236 | print(f"[ERROR] {error_msg}") 237 | return error_msg 238 | 239 | def get_crew_info(self) -> dict: 240 | """Get information about the crew configuration.""" 241 | return { 242 | 'name': 'tech_blog_writer_final', 243 | 'description': 'AI-orchestrated crew for: Create a simple blog writer who can write informative blog posts about technology topics', 244 | 'agents': list(self.agents.keys()), 245 | 'tasks': list(self.tasks.keys()), 246 | 'process_type': 'sequential' 247 | } 248 | -------------------------------------------------------------------------------- /agentforge/agents/crew_orchestrator_agent.py: -------------------------------------------------------------------------------- 1 | """ 2 | CrewOrchestratorAgent - AI-powered agent for coordinating crew creation and execution. 
3 | 4 | This agent orchestrates the entire process of creating, configuring, and managing crews 5 | by coordinating with TaskAnalyzerAgent and AgentDesignerAgent. 6 | """ 7 | 8 | from typing import Dict, List, Any, Optional 9 | from crewai import Agent, Task, Crew 10 | from crewai.tools import BaseTool 11 | from pydantic import BaseModel, Field 12 | from ..core.task_analyzer import CrewSpec, AgentSpec 13 | from .task_analyzer_agent import TaskAnalyzerAgent 14 | from .agent_designer_agent import AgentDesignerAgent, AgentDesignRequest 15 | import json 16 | from datetime import datetime 17 | 18 | 19 | class CrewOrchestrationRequest(BaseModel): 20 | """Request for orchestrating crew creation.""" 21 | task_description: str = Field(description="The task to be executed") 22 | crew_name: Optional[str] = Field(None, description="Optional custom crew name") 23 | preferences: Dict[str, Any] = Field(default_factory=dict, description="User preferences") 24 | constraints: List[str] = Field(default_factory=list, description="Task constraints") 25 | resources: Dict[str, Any] = Field(default_factory=dict, description="Available resources") 26 | 27 | 28 | class CrewOrchestrationResult(BaseModel): 29 | """Result of crew orchestration.""" 30 | crew_spec: Dict[str, Any] = Field(description="Complete crew specification") 31 | orchestration_log: List[str] = Field(description="Log of orchestration steps") 32 | recommendations: List[str] = Field(description="Optimization recommendations") 33 | estimated_performance: Dict[str, float] = Field(description="Performance predictions") 34 | 35 | 36 | class CrewOrchestratorAgent: 37 | """AI-powered crew orchestrator using CrewAI.""" 38 | 39 | def __init__(self, llm_config: Optional[Dict[str, Any]] = None): 40 | """Initialize the CrewOrchestratorAgent.""" 41 | self.llm_config = llm_config or {} 42 | 43 | # Initialize sub-agents for AI collaboration 44 | self.task_analyzer = TaskAnalyzerAgent(llm_config) 45 | self.agent_designer = AgentDesignerAgent(llm_config) 46 | 47 | def orchestrate_crew_creation(self, request: CrewOrchestrationRequest) -> CrewOrchestrationResult: 48 | """ 49 | Orchestrate the complete crew creation process using AI collaboration. 50 | 51 | This method coordinates between TaskAnalyzerAgent and AgentDesignerAgent to create 52 | fully AI-generated crew specifications with no hardcoded templates. 
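        A minimal call sketch (assumes the underlying analyzer/designer agents can
        reach a configured LLM; the task string is just an example):

            orchestrator = CrewOrchestratorAgent()
            request = CrewOrchestrationRequest(
                task_description="Research and summarize weekly sales data"
            )
            result = orchestrator.orchestrate_crew_creation(request)
            result.crew_spec["agents"]        # AI-designed agent specifications
            result.estimated_performance      # e.g. {"success_probability": ..., ...}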
53 | 54 | Args: 55 | request: Orchestration request with task and preferences 56 | 57 | Returns: 58 | CrewOrchestrationResult: Complete orchestration result with AI-generated crew spec 59 | """ 60 | orchestration_log = [] 61 | recommendations = [] 62 | 63 | try: 64 | # Step 1: AI Task Analysis - Get intelligent task breakdown 65 | orchestration_log.append("🔍 Starting AI task analysis...") 66 | task_analysis = self.task_analyzer.analyze_task(request.task_description) 67 | orchestration_log.append(f"📊 Task analyzed - Complexity: {task_analysis.complexity.value}, Agents needed: {len(task_analysis.agents)}") 68 | 69 | # Step 2: AI Agent Design - Enhance each agent with AI-generated specifications 70 | orchestration_log.append("🎨 Starting AI agent design...") 71 | enhanced_agents = [] 72 | 73 | print(f"🔧 DEBUG: TaskAnalyzer found {len(task_analysis.agents)} agents") 74 | for i, agent_spec in enumerate(task_analysis.agents): 75 | print(f"🔧 DEBUG: Processing agent {i+1}: {agent_spec.role} - {agent_spec.name}") 76 | 77 | # Create design request for each agent 78 | design_request = AgentDesignRequest( 79 | role=agent_spec.role, 80 | task_context=request.task_description, 81 | required_capabilities=agent_spec.required_tools, 82 | preferences=request.preferences, 83 | constraints=request.constraints 84 | ) 85 | 86 | # Use AI to design each agent - NO hardcoding! 87 | try: 88 | designed_agent = self.agent_designer.design_agent(design_request) 89 | print(f"🔧 DEBUG: AgentDesigner created: {designed_agent.role} - {designed_agent.name}") 90 | except Exception as e: 91 | print(f"🔧 DEBUG: AgentDesigner failed for {agent_spec.role}: {e}") 92 | continue 93 | 94 | # Create enhanced agent spec with AI-generated properties 95 | enhanced_agent_spec = AgentSpec( 96 | role=designed_agent.role, 97 | name=designed_agent.name, 98 | goal=designed_agent.goal, 99 | backstory=designed_agent.backstory, 100 | required_tools=designed_agent.tools, 101 | memory_type=designed_agent.memory_type, 102 | max_iter=designed_agent.max_iterations, 103 | allow_delegation=designed_agent.allow_delegation 104 | ) 105 | enhanced_agents.append(enhanced_agent_spec) 106 | 107 | orchestration_log.append(f"✨ Designed {len(enhanced_agents)} AI-powered agents") 108 | 109 | # Step 3: Create crew specification using ONLY AI outputs 110 | crew_spec = { 111 | "name": request.crew_name or self._generate_ai_crew_name(task_analysis, enhanced_agents), 112 | "task": task_analysis.task, # Use normalized task from AI analysis 113 | "description": f"AI-orchestrated crew for: {task_analysis.task}", 114 | "agents": [ 115 | { 116 | "role": agent.role, 117 | "name": agent.name, 118 | "goal": agent.goal, 119 | "backstory": agent.backstory, 120 | "required_tools": agent.required_tools, 121 | "memory_type": agent.memory_type, 122 | "max_iter": agent.max_iter, 123 | "allow_delegation": agent.allow_delegation 124 | } 125 | for agent in enhanced_agents 126 | ], 127 | "expected_output": task_analysis.expected_output, 128 | "complexity": task_analysis.complexity.value, 129 | "estimated_time": task_analysis.estimated_time, 130 | "process_type": task_analysis.process_type 131 | } 132 | 133 | orchestration_log.append("🎯 Crew specification completed using AI collaboration") 134 | 135 | # Step 4: AI Performance estimation 136 | estimated_performance = self._estimate_ai_performance(enhanced_agents, task_analysis) 137 | 138 | # Step 5: AI Recommendations 139 | recommendations = self._generate_ai_recommendations(enhanced_agents, task_analysis) 140 | 141 | return 
CrewOrchestrationResult( 142 | crew_spec=crew_spec, 143 | orchestration_log=orchestration_log, 144 | recommendations=recommendations, 145 | estimated_performance=estimated_performance 146 | ) 147 | 148 | except Exception as e: 149 | orchestration_log.append(f"❌ AI orchestration failed: {str(e)}") 150 | raise e 151 | 152 | def _generate_ai_crew_name(self, task_analysis: CrewSpec, agents: List[AgentSpec]) -> str: 153 | """Generate a crew name using AI analysis insights - NO hardcoding.""" 154 | # Extract meaningful keywords from the AI-analyzed task 155 | task_words = task_analysis.task.lower().replace(',', ' ').replace('.', ' ').split() 156 | 157 | # Filter out common words and focus on task-specific terms 158 | meaningful_words = [ 159 | word for word in task_words[:5] 160 | if word not in {'the', 'and', 'or', 'to', 'of', 'a', 'an', 'in', 'on', 'for', 'with', 'by', 'that', 'this'} 161 | and len(word) > 2 162 | ] 163 | 164 | # Create meaningful crew name from AI analysis 165 | if meaningful_words: 166 | name_parts = meaningful_words[:3] 167 | else: 168 | # Fallback to agent roles if no meaningful task words 169 | name_parts = [agents[0].role if agents else 'ai', 'task'] 170 | 171 | crew_name = '_'.join(name_parts) + '_crew' 172 | return crew_name 173 | 174 | def _estimate_ai_performance(self, agents: List[AgentSpec], task_analysis: CrewSpec) -> Dict[str, float]: 175 | """Estimate performance based on AI analysis - dynamic, not hardcoded.""" 176 | # Base performance on AI analysis quality 177 | complexity_score = { 178 | "simple": 0.9, 179 | "moderate": 0.8, 180 | "complex": 0.7 181 | }.get(task_analysis.complexity.value, 0.7) 182 | 183 | # Agent specialization bonus - AI-designed agents are more specialized 184 | agent_specialization = len(set(agent.role for agent in agents)) / len(agents) if agents else 0.5 185 | 186 | # Tool diversity - AI selects optimal tools 187 | all_tools = set() 188 | for agent in agents: 189 | all_tools.update(agent.required_tools) 190 | tool_diversity = min(1.0, len(all_tools) / 5) # Normalize to max 5 tools 191 | 192 | base_performance = 0.75 + (agent_specialization * 0.15) + (tool_diversity * 0.1) 193 | 194 | return { 195 | "success_probability": base_performance * complexity_score, 196 | "efficiency_score": base_performance * 0.95, 197 | "quality_score": base_performance * 1.1, 198 | "coordination_score": agent_specialization, 199 | "tool_optimization": tool_diversity 200 | } 201 | 202 | def _generate_ai_recommendations(self, agents: List[AgentSpec], task_analysis: CrewSpec) -> List[str]: 203 | """Generate recommendations based on AI analysis - dynamic insights.""" 204 | recommendations = [] 205 | 206 | # Dynamic recommendations based on AI analysis 207 | recommendations.append("All agent specifications generated by AI for optimal task alignment") 208 | 209 | if task_analysis.complexity.value == "complex" and len(agents) >= 3: 210 | recommendations.append("Complex task appropriately handled with specialized agent team") 211 | 212 | if len(agents) <= 2: 213 | recommendations.append("Efficient team size for streamlined coordination") 214 | 215 | # Tool analysis 216 | all_tools = set() 217 | for agent in agents: 218 | all_tools.update(agent.required_tools) 219 | 220 | if len(all_tools) >= 5: 221 | recommendations.append("Comprehensive tool coverage for task requirements") 222 | 223 | recommendations.append(f"AI-optimized crew with {len(agents)} specialized agents") 224 | recommendations.append("Agent goals and backstories tailored specifically for this task") 225 
| 226 | return recommendations -------------------------------------------------------------------------------- /agentforge/database/database.py: -------------------------------------------------------------------------------- 1 | """ 2 | Database implementation for AgentForge. 3 | 4 | This module provides a simple in-memory database implementation 5 | for storing agents, crews, and execution logs. 6 | """ 7 | 8 | import json 9 | import uuid 10 | from typing import Optional, List, Dict, Any 11 | from datetime import datetime 12 | from pathlib import Path 13 | 14 | from .models import AgentModel, CrewModel, ExecutionResult, ExecutionStatus 15 | 16 | 17 | class Database: 18 | """Simple in-memory database for AgentForge.""" 19 | 20 | def __init__(self, data_dir: str = ".agentforge/data"): 21 | """Initialize the database.""" 22 | self.data_dir = Path(data_dir) 23 | self.data_dir.mkdir(parents=True, exist_ok=True) 24 | 25 | # In-memory storage 26 | self.agents: Dict[str, AgentModel] = {} 27 | self.crews: Dict[str, CrewModel] = {} 28 | self.executions: Dict[str, ExecutionResult] = {} 29 | 30 | # Load existing data 31 | self._load_data() 32 | 33 | def _load_data(self): 34 | """Load data from files.""" 35 | try: 36 | # Load agents 37 | agents_file = self.data_dir / "agents.json" 38 | if agents_file.exists(): 39 | with open(agents_file, 'r') as f: 40 | agents_data = json.load(f) 41 | for agent_data in agents_data: 42 | agent = AgentModel(**agent_data) 43 | self.agents[agent.id] = agent 44 | 45 | # Load crews 46 | crews_file = self.data_dir / "crews.json" 47 | if crews_file.exists(): 48 | with open(crews_file, 'r') as f: 49 | crews_data = json.load(f) 50 | for crew_data in crews_data: 51 | # Convert agent data back to AgentModel objects 52 | agents = [AgentModel(**agent_data) for agent_data in crew_data.get('agents', [])] 53 | crew_data['agents'] = agents 54 | crew = CrewModel(**crew_data) 55 | self.crews[crew.id] = crew 56 | 57 | # Load executions 58 | executions_file = self.data_dir / "executions.json" 59 | if executions_file.exists(): 60 | with open(executions_file, 'r') as f: 61 | executions_data = json.load(f) 62 | for exec_data in executions_data: 63 | exec_result = ExecutionResult(**exec_data) 64 | self.executions[exec_result.id] = exec_result 65 | except Exception as e: 66 | print(f"Warning: Could not load existing data: {e}") 67 | 68 | def _save_data(self): 69 | """Save data to files.""" 70 | try: 71 | # Save agents 72 | agents_file = self.data_dir / "agents.json" 73 | agents_data = [] 74 | for agent in self.agents.values(): 75 | agent_dict = { 76 | 'id': agent.id, 77 | 'name': agent.name, 78 | 'role': agent.role, 79 | 'goal': agent.goal, 80 | 'backstory': agent.backstory, 81 | 'tools': agent.tools, 82 | 'memory_type': agent.memory_type, 83 | 'max_iter': agent.max_iter, 84 | 'allow_delegation': agent.allow_delegation, 85 | 'created_at': agent.created_at.isoformat() if agent.created_at else None, 86 | 'updated_at': agent.updated_at.isoformat() if agent.updated_at else None 87 | } 88 | agents_data.append(agent_dict) 89 | 90 | with open(agents_file, 'w') as f: 91 | json.dump(agents_data, f, indent=2) 92 | 93 | # Save crews 94 | crews_file = self.data_dir / "crews.json" 95 | crews_data = [] 96 | for crew in self.crews.values(): 97 | crew_dict = { 98 | 'id': crew.id, 99 | 'name': crew.name, 100 | 'task': crew.task, 101 | 'description': crew.description, 102 | 'agents': [ 103 | { 104 | 'id': agent.id, 105 | 'name': agent.name, 106 | 'role': agent.role, 107 | 'goal': agent.goal, 108 | 'backstory': 
agent.backstory, 109 | 'tools': agent.tools, 110 | 'memory_type': agent.memory_type, 111 | 'max_iter': agent.max_iter, 112 | 'allow_delegation': agent.allow_delegation, 113 | 'created_at': agent.created_at.isoformat() if agent.created_at else None, 114 | 'updated_at': agent.updated_at.isoformat() if agent.updated_at else None 115 | } 116 | for agent in crew.agents 117 | ], 118 | 'expected_output': crew.expected_output, 119 | 'complexity': crew.complexity, 120 | 'estimated_time': crew.estimated_time, 121 | 'process_type': crew.process_type, 122 | 'created_at': crew.created_at.isoformat() if crew.created_at else None, 123 | 'updated_at': crew.updated_at.isoformat() if crew.updated_at else None 124 | } 125 | crews_data.append(crew_dict) 126 | 127 | with open(crews_file, 'w') as f: 128 | json.dump(crews_data, f, indent=2) 129 | 130 | # Save executions 131 | executions_file = self.data_dir / "executions.json" 132 | executions_data = [] 133 | for exec_result in self.executions.values(): 134 | exec_dict = { 135 | 'id': exec_result.id, 136 | 'crew_id': exec_result.crew_id, 137 | 'input_data': exec_result.input_data, 138 | 'output': exec_result.output, 139 | 'status': exec_result.status.value, 140 | 'execution_time': exec_result.execution_time, 141 | 'cost': exec_result.cost, 142 | 'quality_score': exec_result.quality_score, 143 | 'error_message': exec_result.error_message, 144 | 'logs': exec_result.logs, 145 | 'created_at': exec_result.created_at.isoformat() if exec_result.created_at else None 146 | } 147 | executions_data.append(exec_dict) 148 | 149 | with open(executions_file, 'w') as f: 150 | json.dump(executions_data, f, indent=2) 151 | except Exception as e: 152 | print(f"Warning: Could not save data: {e}") 153 | 154 | 155 | class AgentRepository: 156 | """Repository for managing agents.""" 157 | 158 | def __init__(self, db: Database): 159 | self.db = db 160 | 161 | def create(self, agent: AgentModel) -> AgentModel: 162 | """Create a new agent.""" 163 | if not agent.id: 164 | agent.id = str(uuid.uuid4()) 165 | agent.created_at = datetime.utcnow() 166 | agent.updated_at = datetime.utcnow() 167 | self.db.agents[agent.id] = agent 168 | self.db._save_data() 169 | return agent 170 | 171 | def get_by_id(self, agent_id: str) -> Optional[AgentModel]: 172 | """Get agent by ID.""" 173 | return self.db.agents.get(agent_id) 174 | 175 | def get_by_name(self, name: str) -> Optional[AgentModel]: 176 | """Get agent by name.""" 177 | for agent in self.db.agents.values(): 178 | if agent.name == name: 179 | return agent 180 | return None 181 | 182 | def list_all(self) -> List[AgentModel]: 183 | """List all agents.""" 184 | return list(self.db.agents.values()) 185 | 186 | def update(self, agent: AgentModel) -> AgentModel: 187 | """Update an agent.""" 188 | agent.updated_at = datetime.utcnow() 189 | self.db.agents[agent.id] = agent 190 | self.db._save_data() 191 | return agent 192 | 193 | def delete(self, agent_id: str) -> bool: 194 | """Delete an agent.""" 195 | if agent_id in self.db.agents: 196 | del self.db.agents[agent_id] 197 | self.db._save_data() 198 | return True 199 | return False 200 | 201 | 202 | class CrewRepository: 203 | """Repository for managing crews.""" 204 | 205 | def __init__(self, db: Database): 206 | self.db = db 207 | 208 | def create(self, crew: CrewModel) -> CrewModel: 209 | """Create a new crew.""" 210 | if not crew.id: 211 | crew.id = str(uuid.uuid4()) 212 | crew.created_at = datetime.utcnow() 213 | crew.updated_at = datetime.utcnow() 214 | self.db.crews[crew.id] = crew 215 | 
self.db._save_data() 216 | return crew 217 | 218 | def get_by_id(self, crew_id: str) -> Optional[CrewModel]: 219 | """Get crew by ID.""" 220 | return self.db.crews.get(crew_id) 221 | 222 | def get_by_name(self, name: str) -> Optional[CrewModel]: 223 | """Get crew by name.""" 224 | for crew in self.db.crews.values(): 225 | if crew.name == name: 226 | return crew 227 | return None 228 | 229 | def list_all(self) -> List[CrewModel]: 230 | """List all crews.""" 231 | return list(self.db.crews.values()) 232 | 233 | def update(self, crew: CrewModel) -> CrewModel: 234 | """Update a crew.""" 235 | crew.updated_at = datetime.utcnow() 236 | self.db.crews[crew.id] = crew 237 | self.db._save_data() 238 | return crew 239 | 240 | def delete(self, crew_id: str) -> bool: 241 | """Delete a crew.""" 242 | if crew_id in self.db.crews: 243 | del self.db.crews[crew_id] 244 | self.db._save_data() 245 | return True 246 | return False 247 | 248 | 249 | class ExecutionLogRepository: 250 | """Repository for managing execution logs.""" 251 | 252 | def __init__(self, db: Database): 253 | self.db = db 254 | 255 | def create(self, execution: ExecutionResult) -> ExecutionResult: 256 | """Create a new execution log.""" 257 | if not execution.id: 258 | execution.id = str(uuid.uuid4()) 259 | execution.created_at = datetime.utcnow() 260 | self.db.executions[execution.id] = execution 261 | self.db._save_data() 262 | return execution 263 | 264 | def get_by_id(self, execution_id: str) -> Optional[ExecutionResult]: 265 | """Get execution by ID.""" 266 | return self.db.executions.get(execution_id) 267 | 268 | def get_by_crew_id(self, crew_id: str) -> List[ExecutionResult]: 269 | """Get executions by crew ID.""" 270 | return [exec for exec in self.db.executions.values() if exec.crew_id == crew_id] 271 | 272 | def list_all(self) -> List[ExecutionResult]: 273 | """List all executions.""" 274 | return list(self.db.executions.values()) 275 | 276 | def update(self, execution: ExecutionResult) -> ExecutionResult: 277 | """Update an execution log.""" 278 | self.db.executions[execution.id] = execution 279 | self.db._save_data() 280 | return execution 281 | -------------------------------------------------------------------------------- /agentforge/core/intelligent_tool_creator.py: -------------------------------------------------------------------------------- 1 | """ 2 | Intelligent Tool Creator for agentforge. 3 | 4 | This module provides AI-powered custom tool creation that generates proper 5 | CrewAI BaseTool implementations with code preview and validation. 
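Typical usage might look like the following sketch (illustrative only; the
tool description is a made-up example and an LLM provider is assumed to be
configured for the designer agent):

    creator = IntelligentToolCreator()
    result = creator.create_custom_tool(
        "A tool that fetches the current weather for a given city",
        show_code=True,
        auto_confirm=False,
    )
    if result["success"]:
        print(f"Created {result['tool_name']} at {result['tool_file']}")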
6 | """ 7 | 8 | import os 9 | import tempfile 10 | import importlib.util 11 | from typing import Optional, Dict, Any, List 12 | from pathlib import Path 13 | 14 | from ..agents.tool_designer_agent import ToolDesignerAgent, GeneratedTool 15 | from ..tools.registry import ToolRegistry 16 | 17 | 18 | class IntelligentToolCreator: 19 | """AI-powered tool creator that generates proper CrewAI tools.""" 20 | 21 | def __init__(self, llm_config: Optional[Dict[str, Any]] = None): 22 | """Initialize the intelligent tool creator.""" 23 | self.designer_agent = ToolDesignerAgent(llm_config) 24 | self.tool_registry = ToolRegistry() 25 | self.tools_directory = Path("/tmp/agentforge_custom_tools") 26 | self.tools_directory.mkdir(exist_ok=True) 27 | 28 | def create_custom_tool(self, user_description: str, 29 | show_code: bool = True, 30 | auto_confirm: bool = False) -> Dict[str, Any]: 31 | """Create a custom tool from user description.""" 32 | 33 | try: 34 | # Step 1: Analyze requirements 35 | print("🔍 Analyzing your tool requirements...") 36 | requirements = self.designer_agent.analyze_tool_requirements(user_description) 37 | 38 | print(f"✅ Analysis complete:") 39 | print(f" Tool Name: {requirements.name}") 40 | print(f" Category: {requirements.category}") 41 | print(f" Inputs: {[inp['name'] for inp in requirements.inputs]}") 42 | print(f" Dependencies: {requirements.dependencies}") 43 | 44 | # Step 2: Generate tool code 45 | print("\\n🛠️ Generating CrewAI tool code...") 46 | generated_tool = self.designer_agent.generate_tool_code(requirements) 47 | 48 | # Step 3: Show code preview 49 | if show_code: 50 | self._display_generated_code(generated_tool) 51 | 52 | # Step 4: Get user confirmation 53 | if not auto_confirm: 54 | confirm = input("\\n✅ Do you want to create this tool? 
(y/n): ").lower().strip() 55 | if confirm != 'y': 56 | return {"success": False, "message": "Tool creation cancelled by user"} 57 | 58 | # Step 5: Create and validate tool 59 | print("\\n Creating tool files...") 60 | tool_file_path = self._create_tool_file(generated_tool) 61 | 62 | print("🧪 Testing tool implementation...") 63 | test_result = self._test_generated_tool(generated_tool, tool_file_path) 64 | 65 | # Step 6: Register tool 66 | if test_result["success"]: 67 | print("📋 Registering tool with agentforge...") 68 | registration_result = self._register_tool(generated_tool, tool_file_path) 69 | 70 | if registration_result["success"]: 71 | return { 72 | "success": True, 73 | "message": f"Successfully created {generated_tool.name}", 74 | "tool_name": generated_tool.name, 75 | "tool_file": tool_file_path, 76 | "category": generated_tool.category, 77 | "description": generated_tool.description 78 | } 79 | else: 80 | return { 81 | "success": False, 82 | "message": f"Tool created but registration failed: {registration_result['error']}" 83 | } 84 | else: 85 | return { 86 | "success": False, 87 | "message": f"Tool test failed: {test_result['error']}" 88 | } 89 | 90 | except Exception as e: 91 | return { 92 | "success": False, 93 | "message": f"Tool creation failed: {str(e)}" 94 | } 95 | 96 | def _display_generated_code(self, tool: GeneratedTool): 97 | """Display the generated tool code for user review.""" 98 | print("\\n" + "="*80) 99 | print(f"📄 GENERATED CODE PREVIEW: {tool.name}") 100 | print("="*80) 101 | 102 | print("\\n🔧 Input Schema:") 103 | print("-" * 40) 104 | print(tool.input_schema_code) 105 | 106 | print("\\n🛠️ Tool Class:") 107 | print("-" * 40) 108 | print(tool.tool_class_code) 109 | 110 | print("\\n📋 Registration Code:") 111 | print("-" * 40) 112 | print(tool.registration_code) 113 | 114 | print("\\n🧪 Test Code:") 115 | print("-" * 40) 116 | print(tool.test_code) 117 | 118 | if tool.dependencies: 119 | print("\\n📦 Required Dependencies:") 120 | print("-" * 40) 121 | for dep in tool.dependencies: 122 | print(f" - {dep}") 123 | 124 | print("\\n" + "="*80) 125 | 126 | def _create_tool_file(self, tool: GeneratedTool) -> str: 127 | """Create the actual tool file with all necessary code.""" 128 | tool_file = self.tools_directory / f"{tool.name.lower()}_tool.py" 129 | 130 | # Generate complete file content 131 | file_content = f'''""" 132 | Custom CrewAI Tool: {tool.name} 133 | Generated by agentforge Intelligent Tool Creator 134 | 135 | Description: {tool.description} 136 | Category: {tool.category} 137 | """ 138 | 139 | from crewai.tools import BaseTool 140 | from pydantic import BaseModel, Field 141 | from typing import Type 142 | 143 | {tool.input_schema_code} 144 | 145 | {tool.tool_class_code} 146 | 147 | {tool.registration_code} 148 | 149 | {tool.test_code} 150 | ''' 151 | 152 | # Write file 153 | with open(tool_file, 'w') as f: 154 | f.write(file_content) 155 | 156 | print(f"📁 Tool file created: {tool_file}") 157 | return str(tool_file) 158 | 159 | def _test_generated_tool(self, tool: GeneratedTool, tool_file_path: str) -> Dict[str, Any]: 160 | """Test the generated tool to ensure it works.""" 161 | try: 162 | # Import the generated module 163 | spec = importlib.util.spec_from_file_location( 164 | f"{tool.name.lower()}_tool", 165 | tool_file_path 166 | ) 167 | if spec is None or spec.loader is None: 168 | return {"success": False, "error": "Could not load tool module"} 169 | 170 | module = importlib.util.module_from_spec(spec) 171 | spec.loader.exec_module(module) 172 | 173 | # 
Get the tool class 174 | tool_class = getattr(module, tool.name, None) 175 | if tool_class is None: 176 | return {"success": False, "error": f"Tool class {tool.name} not found in module"} 177 | 178 | # Instantiate and test basic functionality 179 | tool_instance = tool_class() 180 | 181 | # Verify it's a proper CrewAI tool 182 | if not hasattr(tool_instance, '_run'): 183 | return {"success": False, "error": "Tool does not implement _run method"} 184 | 185 | if not hasattr(tool_instance, 'name'): 186 | return {"success": False, "error": "Tool does not have name attribute"} 187 | 188 | if not hasattr(tool_instance, 'description'): 189 | return {"success": False, "error": "Tool does not have description attribute"} 190 | 191 | print(f"✅ Tool structure validation passed") 192 | print(f" Name: {tool_instance.name}") 193 | print(f" Description: {tool_instance.description}") 194 | 195 | return {"success": True, "tool_instance": tool_instance} 196 | 197 | except Exception as e: 198 | return {"success": False, "error": f"Tool test failed: {str(e)}"} 199 | 200 | def _register_tool(self, tool: GeneratedTool, tool_file_path: str) -> Dict[str, Any]: 201 | """Register the tool with agentforge tool registry.""" 202 | try: 203 | # Import and get tool instance 204 | spec = importlib.util.spec_from_file_location( 205 | f"{tool.name.lower()}_tool", 206 | tool_file_path 207 | ) 208 | if spec is None or spec.loader is None: 209 | return {"success": False, "error": "Could not load tool for registration"} 210 | 211 | module = importlib.util.module_from_spec(spec) 212 | spec.loader.exec_module(module) 213 | 214 | tool_class = getattr(module, tool.name) 215 | tool_instance = tool_class() 216 | 217 | # Create a ToolBase wrapper for the CrewAI tool 218 | from ..tools.registry import ToolBase 219 | 220 | class CrewAIToolWrapper(ToolBase): 221 | def __init__(self, crewai_tool, category_val, description_val): 222 | self.crewai_tool = crewai_tool 223 | self._category = category_val 224 | self._description = description_val 225 | 226 | @property 227 | def name(self) -> str: 228 | return self.crewai_tool.name 229 | 230 | @property 231 | def description(self) -> str: 232 | return self._description 233 | 234 | @property 235 | def category(self) -> str: 236 | return self._category 237 | 238 | def get_instance(self, config: Optional[Dict[str, Any]] = None) -> Any: 239 | """Get tool instance.""" 240 | return self.crewai_tool 241 | 242 | def __call__(self, *args, **kwargs): 243 | return self.crewai_tool(*args, **kwargs) 244 | 245 | # Wrap the tool for registration 246 | wrapped_tool = CrewAIToolWrapper(tool_instance, tool.category, tool.description) 247 | 248 | # Register with the tool registry 249 | self.tool_registry.register_tool(wrapped_tool) 250 | 251 | print(f"✅ Tool {tool.name} registered successfully") 252 | return {"success": True} 253 | 254 | except Exception as e: 255 | return {"success": False, "error": f"Tool registration failed: {str(e)}"} 256 | 257 | def list_custom_tools(self) -> List[Dict[str, Any]]: 258 | """List all custom tools that have been created.""" 259 | tools = [] 260 | 261 | if not self.tools_directory.exists(): 262 | return tools 263 | 264 | for tool_file in self.tools_directory.glob("*_tool.py"): 265 | try: 266 | # Read file to extract metadata 267 | with open(tool_file, 'r') as f: 268 | content = f.read() 269 | 270 | # Extract tool name and description from file 271 | tool_info = { 272 | "file": str(tool_file), 273 | "name": tool_file.stem.replace("_tool", ""), 274 | "created": 
tool_file.stat().st_mtime, 275 | } 276 | 277 | # Try to extract description from file content 278 | if 'Description:' in content: 279 | desc_line = [line for line in content.split('\\n') if 'Description:' in line][0] 280 | tool_info["description"] = desc_line.split('Description:')[1].strip() 281 | else: 282 | tool_info["description"] = "Custom CrewAI tool" 283 | 284 | tools.append(tool_info) 285 | 286 | except Exception: 287 | continue 288 | 289 | return sorted(tools, key=lambda x: x['created'], reverse=True) 290 | 291 | def delete_custom_tool(self, tool_name: str) -> Dict[str, Any]: 292 | """Delete a custom tool.""" 293 | try: 294 | tool_file = self.tools_directory / f"{tool_name.lower()}_tool.py" 295 | 296 | if not tool_file.exists(): 297 | return {"success": False, "message": f"Tool file not found: {tool_name}"} 298 | 299 | tool_file.unlink() 300 | 301 | # Try to unregister from tool registry 302 | try: 303 | self.tool_registry.unregister_tool(tool_name.lower()) 304 | except: 305 | pass # Ignore if unregistration fails 306 | 307 | return {"success": True, "message": f"Tool {tool_name} deleted successfully"} 308 | 309 | except Exception as e: 310 | return {"success": False, "message": f"Failed to delete tool: {str(e)}"} -------------------------------------------------------------------------------- /agentforge/templates/template_manager.py: -------------------------------------------------------------------------------- 1 | """ 2 | Template Manager for agentforge. 3 | 4 | Handles template operations, customization, and integration with crew generation. 5 | """ 6 | 7 | from typing import Dict, List, Optional, Any 8 | from pathlib import Path 9 | import json 10 | import yaml 11 | from .crew_template_library import CrewTemplate, CrewTemplateLibrary, AgentTemplate, TaskTemplate 12 | 13 | 14 | class TemplateManager: 15 | """Manages crew templates and their customization.""" 16 | 17 | def __init__(self): 18 | self.library = CrewTemplateLibrary() 19 | self.custom_templates: Dict[str, CrewTemplate] = {} 20 | self.template_cache: Dict[str, Dict[str, Any]] = {} 21 | 22 | def get_template(self, template_name: str) -> Optional[CrewTemplate]: 23 | """Get a template by name (built-in or custom).""" 24 | # Check custom templates first 25 | if template_name in self.custom_templates: 26 | return self.custom_templates[template_name] 27 | 28 | # Then check built-in templates 29 | return self.library.get_template(template_name) 30 | 31 | def list_templates(self, include_custom: bool = True) -> List[str]: 32 | """List all available templates.""" 33 | templates = self.library.list_templates() 34 | if include_custom: 35 | templates.extend(self.custom_templates.keys()) 36 | return templates 37 | 38 | def search_templates(self, query: str) -> List[CrewTemplate]: 39 | """Search templates by query.""" 40 | results = self.library.search_templates(query) 41 | 42 | # Also search custom templates 43 | query_lower = query.lower() 44 | for template in self.custom_templates.values(): 45 | if (query_lower in template.name.lower() or 46 | query_lower in template.description.lower() or 47 | any(query_lower in use_case.lower() for use_case in template.use_cases)): 48 | results.append(template) 49 | 50 | return results 51 | 52 | def create_custom_template(self, template: CrewTemplate) -> bool: 53 | """Create a custom template.""" 54 | try: 55 | self.custom_templates[template.name.lower()] = template 56 | return True 57 | except Exception: 58 | return False 59 | 60 | def update_template(self, template_name: str, updates: 
Dict[str, Any]) -> bool: 61 | """Update an existing template.""" 62 | template = self.get_template(template_name) 63 | if not template: 64 | return False 65 | 66 | try: 67 | # Update template fields 68 | for key, value in updates.items(): 69 | if hasattr(template, key): 70 | setattr(template, key, value) 71 | 72 | # Save to custom templates if it's a built-in template 73 | if template_name in self.library.list_templates(): 74 | self.custom_templates[template_name.lower()] = template 75 | 76 | return True 77 | except Exception: 78 | return False 79 | 80 | def delete_custom_template(self, template_name: str) -> bool: 81 | """Delete a custom template.""" 82 | if template_name in self.custom_templates: 83 | del self.custom_templates[template_name] 84 | return True 85 | return False 86 | 87 | def export_template(self, template_name: str, file_path: str) -> bool: 88 | """Export a template to a file.""" 89 | template = self.get_template(template_name) 90 | if not template: 91 | return False 92 | 93 | try: 94 | template_data = self._template_to_dict(template) 95 | 96 | if file_path.endswith('.json'): 97 | with open(file_path, 'w') as f: 98 | json.dump(template_data, f, indent=2) 99 | elif file_path.endswith('.yaml') or file_path.endswith('.yml'): 100 | with open(file_path, 'w') as f: 101 | yaml.dump(template_data, f, default_flow_style=False) 102 | else: 103 | return False 104 | 105 | return True 106 | except Exception: 107 | return False 108 | 109 | def import_template(self, file_path: str) -> bool: 110 | """Import a template from a file.""" 111 | try: 112 | if file_path.endswith('.json'): 113 | with open(file_path, 'r') as f: 114 | template_data = json.load(f) 115 | elif file_path.endswith('.yaml') or file_path.endswith('.yml'): 116 | with open(file_path, 'r') as f: 117 | template_data = yaml.safe_load(f) 118 | else: 119 | return False 120 | 121 | template = self._dict_to_template(template_data) 122 | if template: 123 | self.custom_templates[template.name.lower()] = template 124 | return True 125 | 126 | return False 127 | except Exception: 128 | return False 129 | 130 | def customize_template(self, template_name: str, customizations: Dict[str, Any]) -> Optional[CrewTemplate]: 131 | """Create a customized version of a template.""" 132 | base_template = self.get_template(template_name) 133 | if not base_template: 134 | return None 135 | 136 | try: 137 | # Create a copy of the template 138 | template_data = self._template_to_dict(base_template) 139 | 140 | # Apply customizations 141 | self._apply_customizations(template_data, customizations) 142 | 143 | # Create new template 144 | customized_template = self._dict_to_template(template_data) 145 | if customized_template: 146 | # Add customization suffix to name 147 | customized_template.name = f"{base_template.name} (Customized)" 148 | return customized_template 149 | 150 | return None 151 | except Exception: 152 | return None 153 | 154 | def get_template_recommendations(self, task_description: str) -> List[CrewTemplate]: 155 | """Get template recommendations based on task description.""" 156 | recommendations = [] 157 | task_lower = task_description.lower() 158 | 159 | # Keywords for different template types 160 | keyword_mapping = { 161 | "data_analysis": ["data", "analysis", "analytics", "statistics", "metrics"], 162 | "web_scraping": ["scrape", "crawl", "extract", "web", "website"], 163 | "content_creation": ["content", "write", "article", "blog", "documentation"], 164 | "code_review": ["code", "review", "bug", "debug", "programming"], 165 | 
"research_crew": ["research", "study", "investigate", "academic"], 166 | "customer_support": ["support", "help", "ticket", "customer", "issue"], 167 | "marketing_automation": ["marketing", "campaign", "promotion", "advertising"], 168 | "financial_analysis": ["financial", "finance", "budget", "investment", "money"], 169 | "bug_triage": ["bug", "triage", "issue", "defect", "error"], 170 | "documentation": ["documentation", "docs", "manual", "guide", "tutorial"] 171 | } 172 | 173 | # Score templates based on keyword matches 174 | template_scores = {} 175 | for template_name, keywords in keyword_mapping.items(): 176 | score = sum(1 for keyword in keywords if keyword in task_lower) 177 | if score > 0: 178 | template = self.library.get_template(template_name) 179 | if template: 180 | template_scores[template] = score 181 | 182 | # Sort by score and return top recommendations 183 | sorted_templates = sorted(template_scores.items(), key=lambda x: x[1], reverse=True) 184 | recommendations = [template for template, score in sorted_templates[:5]] 185 | 186 | return recommendations 187 | 188 | def _template_to_dict(self, template: CrewTemplate) -> Dict[str, Any]: 189 | """Convert a template to dictionary format.""" 190 | return { 191 | "name": template.name, 192 | "description": template.description, 193 | "category": template.category, 194 | "workflow": template.workflow.value, 195 | "agents": [ 196 | { 197 | "name": agent.name, 198 | "role": agent.role, 199 | "goal": agent.goal, 200 | "backstory": agent.backstory, 201 | "tools": agent.tools, 202 | "memory_type": agent.memory_type, 203 | "max_iter": agent.max_iter, 204 | "verbose": agent.verbose 205 | } 206 | for agent in template.agents 207 | ], 208 | "tasks": [ 209 | { 210 | "name": task.name, 211 | "description": task.description, 212 | "expected_output": task.expected_output, 213 | "context": task.context, 214 | "tools": task.tools 215 | } 216 | for task in template.tasks 217 | ], 218 | "tools": template.tools, 219 | "estimated_duration": template.estimated_duration, 220 | "complexity": template.complexity, 221 | "use_cases": template.use_cases 222 | } 223 | 224 | def _dict_to_template(self, template_data: Dict[str, Any]) -> Optional[CrewTemplate]: 225 | """Convert dictionary to template object.""" 226 | try: 227 | from .crew_template_library import WorkflowType 228 | 229 | agents = [ 230 | AgentTemplate( 231 | name=agent_data["name"], 232 | role=agent_data["role"], 233 | goal=agent_data["goal"], 234 | backstory=agent_data["backstory"], 235 | tools=agent_data["tools"], 236 | memory_type=agent_data.get("memory_type", "conversation_buffer"), 237 | max_iter=agent_data.get("max_iter", 5), 238 | verbose=agent_data.get("verbose", True) 239 | ) 240 | for agent_data in template_data["agents"] 241 | ] 242 | 243 | tasks = [ 244 | TaskTemplate( 245 | name=task_data["name"], 246 | description=task_data["description"], 247 | expected_output=task_data["expected_output"], 248 | context=task_data.get("context"), 249 | tools=task_data.get("tools") 250 | ) 251 | for task_data in template_data["tasks"] 252 | ] 253 | 254 | workflow = WorkflowType(template_data["workflow"]) 255 | 256 | return CrewTemplate( 257 | name=template_data["name"], 258 | description=template_data["description"], 259 | category=template_data["category"], 260 | workflow=workflow, 261 | agents=agents, 262 | tasks=tasks, 263 | tools=template_data["tools"], 264 | estimated_duration=template_data["estimated_duration"], 265 | complexity=template_data["complexity"], 266 | 
use_cases=template_data["use_cases"] 267 | ) 268 | except Exception: 269 | return None 270 | 271 | def _apply_customizations(self, template_data: Dict[str, Any], customizations: Dict[str, Any]): 272 | """Apply customizations to template data.""" 273 | for key, value in customizations.items(): 274 | if key in template_data: 275 | if isinstance(value, dict) and isinstance(template_data[key], dict): 276 | template_data[key].update(value) 277 | else: 278 | template_data[key] = value 279 | elif key == "add_agent" and isinstance(value, dict): 280 | template_data["agents"].append(value) 281 | elif key == "add_task" and isinstance(value, dict): 282 | template_data["tasks"].append(value) 283 | elif key == "add_tool" and isinstance(value, str): 284 | if value not in template_data["tools"]: 285 | template_data["tools"].append(value) 286 | 287 | def get_template_statistics(self) -> Dict[str, Any]: 288 | """Get statistics about available templates.""" 289 | built_in_count = len(self.library.list_templates()) 290 | custom_count = len(self.custom_templates) 291 | 292 | # Count by category 293 | category_counts = {} 294 | for template in self.library._templates.values(): 295 | category = template.category 296 | category_counts[category] = category_counts.get(category, 0) + 1 297 | 298 | for template in self.custom_templates.values(): 299 | category = template.category 300 | category_counts[category] = category_counts.get(category, 0) + 1 301 | 302 | return { 303 | "total_templates": built_in_count + custom_count, 304 | "built_in_templates": built_in_count, 305 | "custom_templates": custom_count, 306 | "categories": category_counts 307 | } 308 | -------------------------------------------------------------------------------- /agentforge/logging/error_handler.py: -------------------------------------------------------------------------------- 1 | """ 2 | Enhanced error handling and recovery for agentforge. 
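Typical usage might look like the following sketch (illustrative only; the
component name, context values, and run_crew() call are hypothetical):

    from agentforge.logging.logger import get_logger

    handler = ErrorHandler(get_logger())
    try:
        run_crew()  # hypothetical operation that may fail
    except Exception as exc:
        ctx = handler.handle_error(
            exc,
            component="CREW_RUNNER",
            context={"crew": "simple_writer"},
            severity=ErrorSeverity.HIGH,
        )
        if not ctx.recovery_successful:
            raise

The @error_handler decorator and safe_execute() defined below provide the
same behaviour in a more compact form.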
3 | """ 4 | 5 | import traceback 6 | import sys 7 | from typing import Dict, Any, Optional, List, Callable, Type 8 | from dataclasses import dataclass 9 | from datetime import datetime, timedelta 10 | from enum import Enum 11 | import functools 12 | 13 | 14 | class ErrorSeverity(Enum): 15 | """Error severity levels.""" 16 | LOW = "low" 17 | MEDIUM = "medium" 18 | HIGH = "high" 19 | CRITICAL = "critical" 20 | 21 | 22 | class ErrorCategory(Enum): 23 | """Error categories for classification.""" 24 | CONFIGURATION = "configuration" 25 | LLM_API = "llm_api" 26 | TOOL_EXECUTION = "tool_execution" 27 | CREW_EXECUTION = "crew_execution" 28 | FILE_OPERATIONS = "file_operations" 29 | NETWORK = "network" 30 | VALIDATION = "validation" 31 | UNKNOWN = "unknown" 32 | 33 | 34 | @dataclass 35 | class ErrorContext: 36 | """Context information for error handling.""" 37 | error_id: str 38 | timestamp: datetime 39 | component: str 40 | function_name: str 41 | error_type: str 42 | error_message: str 43 | severity: ErrorSeverity 44 | category: ErrorCategory 45 | context: Dict[str, Any] 46 | stack_trace: str 47 | recovery_attempted: bool = False 48 | recovery_successful: bool = False 49 | recovery_action: Optional[str] = None 50 | 51 | 52 | class ErrorHandler: 53 | """Enhanced error handler with recovery mechanisms and context tracking.""" 54 | 55 | def __init__(self, logger=None): 56 | self.logger = logger 57 | self.error_history: List[ErrorContext] = [] 58 | self.recovery_strategies: Dict[ErrorCategory, List[Callable]] = {} 59 | self.error_counts: Dict[str, int] = {} 60 | self._setup_default_recovery_strategies() 61 | 62 | def _setup_default_recovery_strategies(self): 63 | """Setup default recovery strategies for common error types.""" 64 | self.recovery_strategies[ErrorCategory.CONFIGURATION] = [ 65 | self._recover_configuration_error 66 | ] 67 | self.recovery_strategies[ErrorCategory.LLM_API] = [ 68 | self._recover_llm_api_error 69 | ] 70 | self.recovery_strategies[ErrorCategory.TOOL_EXECUTION] = [ 71 | self._recover_tool_execution_error 72 | ] 73 | self.recovery_strategies[ErrorCategory.CREW_EXECUTION] = [ 74 | self._recover_crew_execution_error 75 | ] 76 | self.recovery_strategies[ErrorCategory.FILE_OPERATIONS] = [ 77 | self._recover_file_operation_error 78 | ] 79 | self.recovery_strategies[ErrorCategory.NETWORK] = [ 80 | self._recover_network_error 81 | ] 82 | 83 | def handle_error(self, error: Exception, component: str, context: Dict[str, Any] = None, 84 | severity: ErrorSeverity = ErrorSeverity.MEDIUM) -> ErrorContext: 85 | """Handle an error with context and recovery attempts.""" 86 | error_id = f"ERR_{datetime.now().strftime('%Y%m%d_%H%M%S')}_{id(error)}" 87 | 88 | # Classify error 89 | category = self._classify_error(error) 90 | 91 | # Create error context 92 | error_context = ErrorContext( 93 | error_id=error_id, 94 | timestamp=datetime.now(), 95 | component=component, 96 | function_name=error.__traceback__.tb_frame.f_code.co_name if error.__traceback__ else "unknown", 97 | error_type=type(error).__name__, 98 | error_message=str(error), 99 | severity=severity, 100 | category=category, 101 | context=context or {}, 102 | stack_trace=traceback.format_exc() 103 | ) 104 | 105 | # Log error 106 | if self.logger: 107 | self.logger.log_error_with_context(component, error, context) 108 | 109 | # Track error 110 | self.error_history.append(error_context) 111 | self.error_counts[error_context.error_type] = self.error_counts.get(error_context.error_type, 0) + 1 112 | 113 | # Attempt recovery 114 | if severity 
!= ErrorSeverity.CRITICAL: 115 | recovery_successful = self._attempt_recovery(error_context) 116 | error_context.recovery_attempted = True 117 | error_context.recovery_successful = recovery_successful 118 | 119 | return error_context 120 | 121 | def _classify_error(self, error: Exception) -> ErrorCategory: 122 | """Classify error into categories.""" 123 | error_type = type(error).__name__ 124 | error_message = str(error).lower() 125 | 126 | # Configuration errors 127 | if any(keyword in error_message for keyword in ['config', 'setting', 'parameter', 'validation']): 128 | return ErrorCategory.CONFIGURATION 129 | 130 | # LLM API errors 131 | if any(keyword in error_message for keyword in ['api', 'openai', 'anthropic', 'google', 'llm', 'model']): 132 | return ErrorCategory.LLM_API 133 | 134 | # Tool execution errors 135 | if any(keyword in error_message for keyword in ['tool', 'search', 'scrape', 'file']): 136 | return ErrorCategory.TOOL_EXECUTION 137 | 138 | # Crew execution errors 139 | if any(keyword in error_message for keyword in ['crew', 'agent', 'task', 'execution']): 140 | return ErrorCategory.CREW_EXECUTION 141 | 142 | # File operation errors 143 | if any(keyword in error_message for keyword in ['file', 'directory', 'path', 'permission']): 144 | return ErrorCategory.FILE_OPERATIONS 145 | 146 | # Network errors 147 | if any(keyword in error_message for keyword in ['network', 'connection', 'timeout', 'http']): 148 | return ErrorCategory.NETWORK 149 | 150 | return ErrorCategory.UNKNOWN 151 | 152 | def _attempt_recovery(self, error_context: ErrorContext) -> bool: 153 | """Attempt to recover from an error.""" 154 | strategies = self.recovery_strategies.get(error_context.category, []) 155 | 156 | for strategy in strategies: 157 | try: 158 | if strategy(error_context): 159 | error_context.recovery_action = strategy.__name__ 160 | return True 161 | except Exception as recovery_error: 162 | if self.logger: 163 | self.logger.warning("ERROR_HANDLER", 164 | f"Recovery strategy {strategy.__name__} failed: {str(recovery_error)}") 165 | 166 | return False 167 | 168 | def _recover_configuration_error(self, error_context: ErrorContext) -> bool: 169 | """Recover from configuration errors.""" 170 | # Try to use default configuration 171 | if self.logger: 172 | self.logger.info("ERROR_HANDLER", "Attempting to recover from configuration error using defaults") 173 | return True 174 | 175 | def _recover_llm_api_error(self, error_context: ErrorContext) -> bool: 176 | """Recover from LLM API errors.""" 177 | # Try to switch to a different provider or model 178 | if self.logger: 179 | self.logger.info("ERROR_HANDLER", "Attempting to recover from LLM API error by switching provider") 180 | return True 181 | 182 | def _recover_tool_execution_error(self, error_context: ErrorContext) -> bool: 183 | """Recover from tool execution errors.""" 184 | # Try to use alternative tools or mock tools 185 | if self.logger: 186 | self.logger.info("ERROR_HANDLER", "Attempting to recover from tool execution error using alternatives") 187 | return True 188 | 189 | def _recover_crew_execution_error(self, error_context: ErrorContext) -> bool: 190 | """Recover from crew execution errors.""" 191 | # Try to restart the crew or use fallback mode 192 | if self.logger: 193 | self.logger.info("ERROR_HANDLER", "Attempting to recover from crew execution error using fallback mode") 194 | return True 195 | 196 | def _recover_file_operation_error(self, error_context: ErrorContext) -> bool: 197 | """Recover from file operation errors.""" 198 | 
# Try to create directories or use alternative paths 199 | if self.logger: 200 | self.logger.info("ERROR_HANDLER", "Attempting to recover from file operation error by creating directories") 201 | return True 202 | 203 | def _recover_network_error(self, error_context: ErrorContext) -> bool: 204 | """Recover from network errors.""" 205 | # Try to retry with exponential backoff 206 | if self.logger: 207 | self.logger.info("ERROR_HANDLER", "Attempting to recover from network error with retry") 208 | return True 209 | 210 | def get_error_summary(self, hours: int = 24) -> Dict[str, Any]: 211 | """Get summary of errors in the specified time period.""" 212 | cutoff_time = datetime.now() - timedelta(hours=hours) 213 | recent_errors = [e for e in self.error_history if e.timestamp >= cutoff_time] 214 | 215 | error_counts_by_category = {} 216 | error_counts_by_severity = {} 217 | 218 | for error in recent_errors: 219 | category = error.category.value 220 | severity = error.severity.value 221 | 222 | error_counts_by_category[category] = error_counts_by_category.get(category, 0) + 1 223 | error_counts_by_severity[severity] = error_counts_by_severity.get(severity, 0) + 1 224 | 225 | return { 226 | "period_hours": hours, 227 | "total_errors": len(recent_errors), 228 | "errors_by_category": error_counts_by_category, 229 | "errors_by_severity": error_counts_by_severity, 230 | "recovery_success_rate": sum(1 for e in recent_errors if e.recovery_successful) / len(recent_errors) if recent_errors else 0, 231 | "most_common_errors": self._get_most_common_errors(recent_errors) 232 | } 233 | 234 | def _get_most_common_errors(self, errors: List[ErrorContext]) -> List[Dict[str, Any]]: 235 | """Get the most common error types.""" 236 | error_type_counts = {} 237 | for error in errors: 238 | error_type = error.error_type 239 | error_type_counts[error_type] = error_type_counts.get(error_type, 0) + 1 240 | 241 | sorted_errors = sorted(error_type_counts.items(), key=lambda x: x[1], reverse=True) 242 | return [{"error_type": error_type, "count": count} for error_type, count in sorted_errors[:5]] 243 | 244 | def add_recovery_strategy(self, category: ErrorCategory, strategy: Callable): 245 | """Add a custom recovery strategy for a specific error category.""" 246 | if category not in self.recovery_strategies: 247 | self.recovery_strategies[category] = [] 248 | self.recovery_strategies[category].append(strategy) 249 | 250 | def clear_error_history(self): 251 | """Clear error history.""" 252 | self.error_history.clear() 253 | self.error_counts.clear() 254 | 255 | 256 | def error_handler(component: str, severity: ErrorSeverity = ErrorSeverity.MEDIUM, 257 | reraise: bool = False, recovery: bool = True): 258 | """Decorator for automatic error handling.""" 259 | def decorator(func: Callable) -> Callable: 260 | @functools.wraps(func) 261 | def wrapper(*args, **kwargs): 262 | try: 263 | return func(*args, **kwargs) 264 | except Exception as e: 265 | # Get the global error handler 266 | from .logger import get_logger 267 | logger = get_logger() 268 | error_handler = ErrorHandler(logger) 269 | 270 | # Handle the error 271 | error_context = error_handler.handle_error( 272 | e, component, 273 | context={"function": func.__name__, "args": str(args), "kwargs": str(kwargs)}, 274 | severity=severity 275 | ) 276 | 277 | # Reraise if requested 278 | if reraise: 279 | raise 280 | 281 | # Return None or default value if recovery was attempted 282 | if recovery and error_context.recovery_attempted: 283 | return None 284 | 285 | raise 286 | 287 | 
return wrapper 288 | return decorator 289 | 290 | 291 | def safe_execute(func: Callable, *args, default_return=None, **kwargs) -> Any: 292 | """Safely execute a function with error handling.""" 293 | try: 294 | return func(*args, **kwargs) 295 | except Exception as e: 296 | from .logger import get_logger 297 | logger = get_logger() 298 | error_handler = ErrorHandler(logger) 299 | 300 | error_context = error_handler.handle_error( 301 | e, "SAFE_EXECUTE", 302 | context={"function": func.__name__, "args": str(args), "kwargs": str(kwargs)} 303 | ) 304 | 305 | if error_context.recovery_successful: 306 | return default_return 307 | 308 | raise 309 | 310 | 311 | # Global error handler instance 312 | _global_error_handler: Optional[ErrorHandler] = None 313 | 314 | 315 | def get_error_handler() -> ErrorHandler: 316 | """Get the global error handler instance.""" 317 | global _global_error_handler 318 | if _global_error_handler is None: 319 | from .logger import get_logger 320 | _global_error_handler = ErrorHandler(get_logger()) 321 | return _global_error_handler 322 | -------------------------------------------------------------------------------- /agentforge/logging/logger.py: -------------------------------------------------------------------------------- 1 | """ 2 | Enhanced logging system for agentforge. 3 | """ 4 | 5 | import logging 6 | import json 7 | import sys 8 | from datetime import datetime 9 | from pathlib import Path 10 | from typing import Dict, Any, Optional, List 11 | from enum import Enum 12 | from dataclasses import dataclass, asdict 13 | import traceback 14 | 15 | 16 | class LogLevel(Enum): 17 | """Log levels for agentforge.""" 18 | DEBUG = "DEBUG" 19 | INFO = "INFO" 20 | WARNING = "WARNING" 21 | ERROR = "ERROR" 22 | CRITICAL = "CRITICAL" 23 | 24 | 25 | @dataclass 26 | class LogEntry: 27 | """Structured log entry.""" 28 | timestamp: str 29 | level: str 30 | component: str 31 | message: str 32 | context: Dict[str, Any] 33 | trace_id: Optional[str] = None 34 | execution_id: Optional[str] = None 35 | crew_name: Optional[str] = None 36 | agent_name: Optional[str] = None 37 | task_name: Optional[str] = None 38 | 39 | 40 | class AgentForgeLogger: 41 | """Enhanced logger for AgentForge with structured logging and context tracking.""" 42 | 43 | def __init__(self, name: str = "agentforge", log_level: LogLevel = LogLevel.INFO, 44 | log_file: Optional[str] = None, enable_console: bool = True): 45 | self.name = name 46 | self.log_level = log_level 47 | self.log_file = log_file 48 | self.enable_console = enable_console 49 | 50 | # Initialize logger 51 | self.logger = logging.getLogger(name) 52 | self.logger.setLevel(getattr(logging, log_level.value)) 53 | 54 | # Clear existing handlers 55 | self.logger.handlers.clear() 56 | 57 | # Setup formatters 58 | self.console_formatter = logging.Formatter( 59 | '%(asctime)s | %(levelname)-8s | %(component)-15s | %(message)s', 60 | datefmt='%H:%M:%S' 61 | ) 62 | 63 | self.file_formatter = logging.Formatter( 64 | '%(asctime)s | %(levelname)-8s | %(component)-15s | %(trace_id)-8s | %(message)s', 65 | datefmt='%Y-%m-%d %H:%M:%S' 66 | ) 67 | 68 | # Setup console handler 69 | if enable_console: 70 | console_handler = logging.StreamHandler(sys.stdout) 71 | console_handler.setFormatter(self.console_formatter) 72 | self.logger.addHandler(console_handler) 73 | 74 | # Setup file handler 75 | if log_file: 76 | self._setup_file_handler(log_file) 77 | 78 | # Context tracking 79 | self._context_stack: List[Dict[str, Any]] = [] 80 | self._current_trace_id: 
Optional[str] = None 81 | self._current_execution_id: Optional[str] = None 82 | 83 | def _setup_file_handler(self, log_file: str): 84 | """Setup file handler for logging.""" 85 | log_path = Path(log_file) 86 | log_path.parent.mkdir(parents=True, exist_ok=True) 87 | 88 | file_handler = logging.FileHandler(log_file) 89 | file_handler.setFormatter(self.file_formatter) 90 | self.logger.addHandler(file_handler) 91 | 92 | def set_log_level(self, level: LogLevel): 93 | """Set the log level.""" 94 | self.log_level = level 95 | self.logger.setLevel(getattr(logging, level.value)) 96 | 97 | def set_trace_id(self, trace_id: str): 98 | """Set the current trace ID for request tracking.""" 99 | self._current_trace_id = trace_id 100 | 101 | def set_execution_id(self, execution_id: str): 102 | """Set the current execution ID for crew tracking.""" 103 | self._current_execution_id = execution_id 104 | 105 | def push_context(self, **context): 106 | """Push context information onto the stack.""" 107 | self._context_stack.append(context) 108 | 109 | def pop_context(self): 110 | """Pop the most recent context from the stack.""" 111 | if self._context_stack: 112 | return self._context_stack.pop() 113 | return {} 114 | 115 | def clear_context(self): 116 | """Clear all context information.""" 117 | self._context_stack.clear() 118 | 119 | def _create_log_entry(self, level: str, component: str, message: str, 120 | context: Dict[str, Any] = None) -> LogEntry: 121 | """Create a structured log entry.""" 122 | # Merge all context information 123 | merged_context = {} 124 | for ctx in self._context_stack: 125 | merged_context.update(ctx) 126 | if context: 127 | merged_context.update(context) 128 | 129 | return LogEntry( 130 | timestamp=datetime.now().isoformat(), 131 | level=level, 132 | component=component, 133 | message=message, 134 | context=merged_context, 135 | trace_id=self._current_trace_id, 136 | execution_id=self._current_execution_id, 137 | crew_name=merged_context.get('crew_name'), 138 | agent_name=merged_context.get('agent_name'), 139 | task_name=merged_context.get('task_name') 140 | ) 141 | 142 | def _log(self, level: str, component: str, message: str, context: Dict[str, Any] = None, 143 | exc_info: bool = False): 144 | """Internal logging method.""" 145 | log_entry = self._create_log_entry(level, component, message, context) 146 | 147 | # Create log record 148 | record = logging.LogRecord( 149 | name=self.name, 150 | level=getattr(logging, level), 151 | pathname="", 152 | lineno=0, 153 | msg=message, 154 | args=(), 155 | exc_info=exc_info 156 | ) 157 | 158 | # Add custom attributes 159 | record.component = component 160 | record.trace_id = log_entry.trace_id or "N/A" 161 | record.execution_id = log_entry.execution_id or "N/A" 162 | record.crew_name = log_entry.crew_name or "N/A" 163 | record.agent_name = log_entry.agent_name or "N/A" 164 | record.task_name = log_entry.task_name or "N/A" 165 | 166 | # Add context as JSON 167 | record.context = json.dumps(log_entry.context, default=str) 168 | 169 | # Log the record 170 | self.logger.handle(record) 171 | 172 | def debug(self, component: str, message: str, context: Dict[str, Any] = None): 173 | """Log debug message.""" 174 | self._log("DEBUG", component, message, context) 175 | 176 | def info(self, component: str, message: str, context: Dict[str, Any] = None): 177 | """Log info message.""" 178 | self._log("INFO", component, message, context) 179 | 180 | def warning(self, component: str, message: str, context: Dict[str, Any] = None): 181 | """Log warning 
message.""" 182 | self._log("WARNING", component, message, context) 183 | 184 | def error(self, component: str, message: str, context: Dict[str, Any] = None, 185 | exc_info: bool = False): 186 | """Log error message.""" 187 | self._log("ERROR", component, message, context, exc_info) 188 | 189 | def critical(self, component: str, message: str, context: Dict[str, Any] = None, 190 | exc_info: bool = False): 191 | """Log critical message.""" 192 | self._log("CRITICAL", component, message, context, exc_info) 193 | 194 | def log_ai_decision(self, agent: str, decision: str, reasoning: str, 195 | context: Dict[str, Any] = None): 196 | """Log AI decision-making process.""" 197 | decision_context = { 198 | "agent": agent, 199 | "decision": decision, 200 | "reasoning": reasoning, 201 | "event_type": "ai_decision" 202 | } 203 | if context: 204 | decision_context.update(context) 205 | 206 | self.info("AI_DECISION", f"Agent {agent} made decision: {decision}", decision_context) 207 | 208 | def log_crew_creation(self, crew_name: str, task: str, agents: List[str], 209 | context: Dict[str, Any] = None): 210 | """Log crew creation process.""" 211 | creation_context = { 212 | "crew_name": crew_name, 213 | "task": task, 214 | "agent_count": len(agents), 215 | "agents": agents, 216 | "event_type": "crew_creation" 217 | } 218 | if context: 219 | creation_context.update(context) 220 | 221 | self.info("CREW_CREATION", f"Created crew '{crew_name}' with {len(agents)} agents", creation_context) 222 | 223 | def log_crew_execution(self, crew_name: str, execution_id: str, status: str, 224 | context: Dict[str, Any] = None): 225 | """Log crew execution events.""" 226 | execution_context = { 227 | "crew_name": crew_name, 228 | "execution_id": execution_id, 229 | "status": status, 230 | "event_type": "crew_execution" 231 | } 232 | if context: 233 | execution_context.update(context) 234 | 235 | self.info("CREW_EXECUTION", f"Crew '{crew_name}' execution {status}", execution_context) 236 | 237 | def log_tool_usage(self, tool_name: str, agent: str, success: bool, 238 | duration: float = None, context: Dict[str, Any] = None): 239 | """Log tool usage events.""" 240 | tool_context = { 241 | "tool_name": tool_name, 242 | "agent": agent, 243 | "success": success, 244 | "duration": duration, 245 | "event_type": "tool_usage" 246 | } 247 | if context: 248 | tool_context.update(context) 249 | 250 | status = "successful" if success else "failed" 251 | self.info("TOOL_USAGE", f"Tool '{tool_name}' used by {agent} - {status}", tool_context) 252 | 253 | def log_llm_call(self, provider: str, model: str, tokens_used: int, cost: float, 254 | duration: float = None, context: Dict[str, Any] = None): 255 | """Log LLM API calls.""" 256 | llm_context = { 257 | "provider": provider, 258 | "model": model, 259 | "tokens_used": tokens_used, 260 | "cost": cost, 261 | "duration": duration, 262 | "event_type": "llm_call" 263 | } 264 | if context: 265 | llm_context.update(context) 266 | 267 | self.info("LLM_CALL", f"LLM call: {provider}/{model} - {tokens_used} tokens, ${cost:.4f}", llm_context) 268 | 269 | def log_performance_metric(self, metric_name: str, value: float, unit: str = None, 270 | context: Dict[str, Any] = None): 271 | """Log performance metrics.""" 272 | metric_context = { 273 | "metric_name": metric_name, 274 | "value": value, 275 | "unit": unit, 276 | "event_type": "performance_metric" 277 | } 278 | if context: 279 | metric_context.update(context) 280 | 281 | unit_str = f" {unit}" if unit else "" 282 | self.info("PERFORMANCE", f"Metric: 
{metric_name} = {value}{unit_str}", metric_context) 283 | 284 |     def log_error_with_context(self, component: str, error: Exception, 285 |                                context: Dict[str, Any] = None): 286 |         """Log error with full context and stack trace.""" 287 |         error_context = { 288 |             "error_type": type(error).__name__, 289 |             "error_message": str(error), 290 |             "event_type": "error" 291 |         } 292 |         if context: 293 |             error_context.update(context) 294 | 295 |         self.error(component, f"Error: {type(error).__name__}: {str(error)}", 296 |                    error_context, exc_info=True) 297 | 298 |     def get_log_summary(self, hours: int = 24) -> Dict[str, Any]: 299 |         """Get summary of logs for the specified time period.""" 300 |         # This would typically query a log database or parse log files 301 |         # For now, return a basic structure 302 |         return { 303 |             "period_hours": hours, 304 |             "total_entries": 0, 305 |             "error_count": 0, 306 |             "warning_count": 0, 307 |             "info_count": 0, 308 |             "debug_count": 0, 309 |             "most_active_components": [], 310 |             "error_rate": 0.0 311 |         } 312 | 313 |     def export_logs(self, output_file: str, hours: int = 24, level: str = "INFO"): 314 |         """Export logs to a file.""" 315 |         # This would typically export from a log database 316 |         # For now, create a placeholder 317 |         export_data = { 318 |             "export_timestamp": datetime.now().isoformat(), 319 |             "period_hours": hours, 320 |             "log_level": level, 321 |             "logs": [] 322 |         } 323 | 324 |         with open(output_file, 'w') as f: 325 |             json.dump(export_data, f, indent=2) 326 | 327 |         self.info("LOGGER", f"Exported logs to {output_file}", {"export_file": output_file}) 328 | 329 | 330 | # Global logger instance 331 | _global_logger: Optional[AgentForgeLogger] = None 332 | 333 | 334 | def get_logger(name: str = "agentforge") -> AgentForgeLogger: 335 |     """Get the global logger instance.""" 336 |     global _global_logger 337 |     if _global_logger is None: 338 |         _global_logger = AgentForgeLogger(name) 339 |     return _global_logger 340 | 341 | 342 | def setup_logging(log_level: LogLevel = LogLevel.INFO, log_file: Optional[str] = None): 343 |     """Setup global logging configuration.""" 344 |     global _global_logger 345 |     _global_logger = AgentForgeLogger( 346 |         name="agentforge", 347 |         log_level=log_level, 348 |         log_file=log_file, 349 |         enable_console=True 350 |     ) 351 |     return _global_logger 352 | -------------------------------------------------------------------------------- /agentforge/core/master_agent_tools.py: -------------------------------------------------------------------------------- 1 | """ 2 | Master Agent Tools for agentforge. 3 | 4 | These tools are used by the MasterAgent crew to analyze tasks, design agents, 5 | and orchestrate crew creation intelligently. 6 | """ 7 | 8 | import json 9 | from typing import Dict, List, Any, Optional 10 | from crewai.tools import tool 11 | from ..tools.registry import ToolRegistry 12 | 13 | 14 | @tool("Task Analysis Tool") 15 | def analyze_task_requirements(task_description: str) -> str: 16 |     """ 17 |     Analyze a task description to identify required agent roles, tools, and complexity. 
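    For example (illustrative), a description such as "Research competitor
    pricing and write a summary report" maps to researcher, analyst, and
    writer roles, their combined tool lists, and a "moderate" complexity
    estimate under the keyword heuristics below.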
18 | 19 | Args: 20 | task_description: The user's task description to analyze 21 | 22 | Returns: 23 | JSON string with analysis results including roles, tools, and complexity 24 | """ 25 | analysis = { 26 | "task": task_description, 27 | "suggested_roles": [], 28 | "required_tools": [], 29 | "complexity": "moderate", 30 | "estimated_agents": 2, 31 | "process_type": "sequential", 32 | "reasoning": "" 33 | } 34 | 35 | task_lower = task_description.lower() 36 | 37 | # Analyze for research needs 38 | if any(word in task_lower for word in ['research', 'find', 'investigate', 'analyze', 'competitor']): 39 | analysis["suggested_roles"].append({ 40 | "role": "researcher", 41 | "specialization": "market_research" if "competitor" in task_lower else "general_research", 42 | "tools": ["web_search", "web_scraping", "document_search"] 43 | }) 44 | 45 | # Analyze for data processing needs 46 | if any(word in task_lower for word in ['data', 'analyze', 'process', 'metrics', 'pricing']): 47 | analysis["suggested_roles"].append({ 48 | "role": "analyst", 49 | "specialization": "data_analysis", 50 | "tools": ["data_processing", "code_execution", "file_operations"] 51 | }) 52 | 53 | # Analyze for content creation needs 54 | if any(word in task_lower for word in ['write', 'create', 'report', 'document', 'summary']): 55 | analysis["suggested_roles"].append({ 56 | "role": "writer", 57 | "specialization": "technical_writing", 58 | "tools": ["file_operations", "document_search"] 59 | }) 60 | 61 | # Analyze for development needs 62 | if any(word in task_lower for word in ['build', 'develop', 'code', 'api', 'website']): 63 | analysis["suggested_roles"].append({ 64 | "role": "developer", 65 | "specialization": "software_development", 66 | "tools": ["code_execution", "github_search", "api_calls"] 67 | }) 68 | 69 | # Determine complexity 70 | operation_count = len([w for w in ['and', 'then', 'also', 'plus'] if w in task_lower]) 71 | if operation_count >= 2 or len(task_description.split()) > 25: 72 | analysis["complexity"] = "complex" 73 | analysis["estimated_agents"] = min(len(analysis["suggested_roles"]) + 1, 4) 74 | elif operation_count >= 1 or len(analysis["suggested_roles"]) > 1: 75 | analysis["complexity"] = "moderate" 76 | analysis["estimated_agents"] = len(analysis["suggested_roles"]) 77 | else: 78 | analysis["complexity"] = "simple" 79 | analysis["estimated_agents"] = 1 80 | 81 | # Ensure we have at least one role 82 | if not analysis["suggested_roles"]: 83 | analysis["suggested_roles"].append({ 84 | "role": "specialist", 85 | "specialization": "general_purpose", 86 | "tools": ["web_search", "file_operations"] 87 | }) 88 | 89 | # Collect all required tools 90 | for role_info in analysis["suggested_roles"]: 91 | analysis["required_tools"].extend(role_info["tools"]) 92 | analysis["required_tools"] = list(set(analysis["required_tools"])) # Remove duplicates 93 | 94 | analysis["reasoning"] = f"Identified {len(analysis['suggested_roles'])} roles based on task keywords and complexity analysis." 95 | 96 | return json.dumps(analysis, indent=2) 97 | 98 | 99 | @tool("Agent Design Tool") 100 | def design_agent_specification(role_info: str, task_context: str) -> str: 101 | """ 102 | Design a detailed agent specification based on role requirements and task context. 
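    For example (illustrative), a role_info payload of
    '{"role": "researcher", "specialization": "market_research", "tools": ["web_search", "web_scraping"]}'
    combined with the task context "Analyze competitor pricing for cloud storage"
    yields a spec named "competitor_pricing_researcher" with a research-oriented
    goal and backstory and the default memory/iteration settings defined below.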
103 | 104 | Args: 105 | role_info: JSON string with role information from task analysis 106 | task_context: The original task description for context 107 | 108 | Returns: 109 | JSON string with complete agent specification 110 | """ 111 | try: 112 | role_data = json.loads(role_info) 113 | except: 114 | role_data = {"role": "specialist", "specialization": "general", "tools": ["web_search"]} 115 | 116 | role = role_data.get("role", "specialist") 117 | specialization = role_data.get("specialization", "general") 118 | tools = role_data.get("tools", ["web_search"]) 119 | 120 | # Extract topic from task context for naming 121 | task_words = task_context.lower().split() 122 | topic_words = [w for w in task_words[:5] if w not in {'create', 'build', 'make', 'write', 'find', 'analyze', 'help', 'me', 'a', 'an', 'the'}] 123 | topic = "_".join(topic_words[:2]) if topic_words else "general" 124 | 125 | # Role-specific templates 126 | templates = { 127 | "researcher": { 128 | "goal": f"Research and gather comprehensive information about {topic} to support the team's objectives", 129 | "backstory": "You are an expert researcher with extensive experience in market analysis, data gathering, and information synthesis. You excel at finding reliable sources, extracting key insights, and presenting findings in a clear, actionable format." 130 | }, 131 | "analyst": { 132 | "goal": f"Analyze data and information related to {topic} to provide actionable insights and recommendations", 133 | "backstory": "You are a skilled data analyst with expertise in processing complex information, identifying patterns and trends, and translating data into strategic insights. You have strong analytical thinking and attention to detail." 134 | }, 135 | "writer": { 136 | "goal": f"Create clear, engaging, and well-structured content about {topic} based on research and analysis", 137 | "backstory": "You are a professional writer with expertise in technical and business communication. You excel at transforming complex information into accessible, compelling narratives that engage readers and drive action." 138 | }, 139 | "developer": { 140 | "goal": f"Develop and implement technical solutions related to {topic} with clean, efficient code", 141 | "backstory": "You are a skilled software developer with expertise in building robust, scalable solutions. You write clean code, follow best practices, and create well-documented technical implementations." 142 | }, 143 | "specialist": { 144 | "goal": f"Provide specialized expertise and solutions for {topic} using domain knowledge", 145 | "backstory": "You are a domain specialist with deep expertise in your field. You provide expert guidance, solve complex problems, and deliver high-quality specialized solutions." 146 | } 147 | } 148 | 149 | template = templates.get(role, templates["specialist"]) 150 | 151 | agent_spec = { 152 | "name": f"{topic}_{role}", 153 | "role": role, 154 | "goal": template["goal"], 155 | "backstory": template["backstory"], 156 | "tools": tools, 157 | "specialization": specialization, 158 | "memory_type": "short_term", 159 | "max_iter": 5, 160 | "allow_delegation": len(tools) > 2, 161 | "verbose": True 162 | } 163 | 164 | return json.dumps(agent_spec, indent=2) 165 | 166 | 167 | @tool("Crew Orchestration Tool") 168 | def design_crew_structure(agents_info: str, task_description: str) -> str: 169 | """ 170 | Design the optimal crew structure and task workflow for the given agents and task. 
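    For example (illustrative), two agents (a researcher and a writer) produce a
    sequential plan with one task per agent, each task depending on the previous
    agent's output, and an estimated run time of 25 minutes (5 + 10 per agent,
    capped at 60).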
171 | 172 |     Args: 173 |         agents_info: JSON string with information about all agents 174 |         task_description: The original task description 175 | 176 |     Returns: 177 |         JSON string with crew structure and task assignments 178 |     """ 179 |     try: 180 |         agents_data = json.loads(agents_info) if isinstance(agents_info, str) else agents_info 181 |         if not isinstance(agents_data, list): 182 |             agents_data = [agents_data] 183 |     except: 184 |         agents_data = [] 185 | 186 |     agent_count = len(agents_data) 187 | 188 |     # Determine process type 189 |     if agent_count == 1: 190 |         process_type = "sequential" 191 |     elif agent_count <= 3: 192 |         process_type = "sequential" 193 |     else: 194 |         process_type = "hierarchical" 195 | 196 |     # Create task assignments 197 |     tasks = [] 198 |     task_lower = task_description.lower() 199 | 200 |     for i, agent in enumerate(agents_data): 201 |         agent_role = agent.get("role", "specialist") 202 |         agent_name = agent.get("name", f"agent_{i}") 203 | 204 |         if agent_role == "researcher": 205 |             task_desc = f"Research and gather information needed for: {task_description}" 206 |             expected_output = "Comprehensive research findings with sources and key insights" 207 |         elif agent_role == "analyst": 208 |             task_desc = f"Analyze the research data and identify patterns, trends, and insights for: {task_description}" 209 |             expected_output = "Detailed analysis with actionable insights and recommendations" 210 |         elif agent_role == "writer": 211 |             task_desc = f"Create well-structured content based on research and analysis for: {task_description}" 212 |             expected_output = "Professional, well-written content that addresses all requirements" 213 |         elif agent_role == "developer": 214 |             task_desc = f"Implement technical solutions based on requirements for: {task_description}" 215 |             expected_output = "Working technical implementation with documentation" 216 |         else: 217 |             task_desc = f"Provide specialized expertise and solutions for: {task_description}" 218 |             expected_output = "Expert recommendations and solutions" 219 | 220 |         tasks.append({ 221 |             "description": task_desc, 222 |             "expected_output": expected_output, 223 |             "agent": agent_name, 224 |             "dependencies": [] if i == 0 else [tasks[i-1]["agent"]] 225 |         }) 226 | 227 |     crew_structure = { 228 |         "name": "_".join(task_description.lower().split()[:3]) + "_crew", 229 |         "process_type": process_type, 230 |         "agents": agents_data, 231 |         "tasks": tasks, 232 |         "expected_output": f"Complete solution for: {task_description}", 233 |         "estimated_time": min(5 + (agent_count * 10), 60), 234 |         "memory_enabled": False, 235 |         "verbose": True 236 |     } 237 | 238 |     return json.dumps(crew_structure, indent=2) 239 | 240 | 241 | @tool("Available Tools Registry") 242 | def get_available_tools() -> str: 243 |     """ 244 |     Get the list of all available tools in the agentforge system. 
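    The result always includes the built-in core tools in addition to any
    registry entries, e.g. (illustrative excerpt):

        {"web_search": {"name": "web_search", "category": "research",
                        "description": "Search the web for information",
                        "capabilities": ["internet_search", "information_gathering"]}}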
245 | 246 | Returns: 247 | JSON string with available tools and their descriptions 248 | """ 249 | tool_registry = ToolRegistry() 250 | 251 | available_tools = {} 252 | for tool_name, tool_instance in tool_registry.tools.items(): 253 | available_tools[tool_name] = { 254 | "name": tool_name, 255 | "category": tool_instance.category, 256 | "description": tool_instance.description, 257 | "capabilities": getattr(tool_instance, 'capabilities', []) 258 | } 259 | 260 | # Add core tools that are always available 261 | core_tools = { 262 | "web_search": { 263 | "name": "web_search", 264 | "category": "research", 265 | "description": "Search the web for information", 266 | "capabilities": ["internet_search", "information_gathering"] 267 | }, 268 | "file_operations": { 269 | "name": "file_operations", 270 | "category": "utility", 271 | "description": "Read, write, and manipulate files", 272 | "capabilities": ["file_io", "document_processing"] 273 | }, 274 | "data_processing": { 275 | "name": "data_processing", 276 | "category": "analysis", 277 | "description": "Process and analyze data", 278 | "capabilities": ["data_analysis", "statistics"] 279 | }, 280 | "code_execution": { 281 | "name": "code_execution", 282 | "category": "development", 283 | "description": "Execute Python code and scripts", 284 | "capabilities": ["programming", "automation"] 285 | } 286 | } 287 | 288 | available_tools.update(core_tools) 289 | 290 | return json.dumps(available_tools, indent=2) 291 | 292 | 293 | @tool("Crew Name Generator") 294 | def generate_crew_name(task_description: str) -> str: 295 | """ 296 | Generate a meaningful name for a crew based on the task description. 297 | 298 | Args: 299 | task_description: The task the crew will perform 300 | 301 | Returns: 302 | A suitable crew name 303 | """ 304 | # Extract key words from task description 305 | words = task_description.lower().split() 306 | 307 | # Remove common words 308 | stop_words = {'the', 'a', 'an', 'and', 'or', 'but', 'in', 'on', 'at', 'to', 'for', 'of', 'with', 'by', 'create', 'make', 'build', 'help', 'me'} 309 | meaningful_words = [w for w in words if w not in stop_words and len(w) > 2] 310 | 311 | # Take first 2-3 meaningful words 312 | if len(meaningful_words) >= 2: 313 | crew_name = "_".join(meaningful_words[:2]) + "_crew" 314 | elif meaningful_words: 315 | crew_name = meaningful_words[0] + "_crew" 316 | else: 317 | crew_name = "general_purpose_crew" 318 | 319 | return crew_name -------------------------------------------------------------------------------- /agentforge/tools/guardrails.py: -------------------------------------------------------------------------------- 1 | """ 2 | Guardrails system for agentforge. 3 | 4 | This module provides safety and quality controls for agent operations. 
5 | """ 6 | 7 | from typing import Dict, List, Any, Optional, Callable 8 | from abc import ABC, abstractmethod 9 | from enum import Enum 10 | import re 11 | 12 | class GuardrailSeverity(Enum): 13 | """Severity levels for guardrail violations.""" 14 | WARNING = "warning" 15 | BLOCK = "block" 16 | CRITICAL = "critical" 17 | 18 | class GuardrailResult: 19 | """Result of a guardrail check.""" 20 | 21 | def __init__(self, passed: bool, severity: GuardrailSeverity = GuardrailSeverity.WARNING, 22 | message: str = "", details: Optional[Dict[str, Any]] = None): 23 | self.passed = passed 24 | self.severity = severity 25 | self.message = message 26 | self.details = details or {} 27 | 28 | class GuardrailBase(ABC): 29 | """Base class for all guardrails.""" 30 | 31 | @property 32 | @abstractmethod 33 | def name(self) -> str: 34 | """Guardrail name.""" 35 | pass 36 | 37 | @property 38 | @abstractmethod 39 | def description(self) -> str: 40 | """Guardrail description.""" 41 | pass 42 | 43 | @property 44 | @abstractmethod 45 | def category(self) -> str: 46 | """Guardrail category.""" 47 | pass 48 | 49 | @abstractmethod 50 | def check(self, content: str, context: Optional[Dict[str, Any]] = None) -> GuardrailResult: 51 | """Check if content passes the guardrail.""" 52 | pass 53 | 54 | class PIIDetectionGuardrail(GuardrailBase): 55 | """Detects and blocks personally identifiable information.""" 56 | 57 | def __init__(self): 58 | """Initialize PII detection patterns.""" 59 | self.patterns = { 60 | 'email': r'\b[A-Za-z0-9._%+-]+@[A-Za-z0-9.-]+\.[A-Z|a-z]{2,}\b', 61 | 'phone': r'\b\d{3}[-.]?\d{3}[-.]?\d{4}\b', 62 | 'ssn': r'\b\d{3}[-]?\d{2}[-]?\d{4}\b', 63 | 'credit_card': r'\b\d{4}[-\s]?\d{4}[-\s]?\d{4}[-\s]?\d{4}\b', 64 | 'ip_address': r'\b\d{1,3}\.\d{1,3}\.\d{1,3}\.\d{1,3}\b' 65 | } 66 | 67 | @property 68 | def name(self) -> str: 69 | return "pii_detection" 70 | 71 | @property 72 | def description(self) -> str: 73 | return "Detects and blocks personally identifiable information" 74 | 75 | @property 76 | def category(self) -> str: 77 | return "security" 78 | 79 | def check(self, content: str, context: Optional[Dict[str, Any]] = None) -> GuardrailResult: 80 | """Check for PII in content.""" 81 | detected_pii = [] 82 | 83 | for pii_type, pattern in self.patterns.items(): 84 | matches = re.findall(pattern, content) 85 | if matches: 86 | detected_pii.append({ 87 | 'type': pii_type, 88 | 'count': len(matches), 89 | 'samples': matches[:3] # Show first 3 matches 90 | }) 91 | 92 | if detected_pii: 93 | return GuardrailResult( 94 | passed=False, 95 | severity=GuardrailSeverity.BLOCK, 96 | message=f"Detected PII: {', '.join([item['type'] for item in detected_pii])}", 97 | details={'detected_pii': detected_pii} 98 | ) 99 | 100 | return GuardrailResult(passed=True) 101 | 102 | class ToxicityDetectionGuardrail(GuardrailBase): 103 | """Detects toxic or harmful content.""" 104 | 105 | def __init__(self): 106 | """Initialize toxicity detection patterns.""" 107 | self.toxic_keywords = [ 108 | 'hate', 'violence', 'harm', 'kill', 'destroy', 'attack', 109 | 'discriminat', 'racist', 'sexist', 'toxic' 110 | ] 111 | 112 | @property 113 | def name(self) -> str: 114 | return "toxicity_detection" 115 | 116 | @property 117 | def description(self) -> str: 118 | return "Detects toxic or harmful content" 119 | 120 | @property 121 | def category(self) -> str: 122 | return "safety" 123 | 124 | def check(self, content: str, context: Optional[Dict[str, Any]] = None) -> GuardrailResult: 125 | """Check for toxic content.""" 126 | content_lower 
= content.lower() 127 | detected_toxic = [] 128 | 129 | for keyword in self.toxic_keywords: 130 | if keyword in content_lower: 131 | detected_toxic.append(keyword) 132 | 133 | if detected_toxic: 134 | return GuardrailResult( 135 | passed=False, 136 | severity=GuardrailSeverity.WARNING, 137 | message=f"Potential toxic content detected: {', '.join(detected_toxic)}", 138 | details={'toxic_keywords': detected_toxic} 139 | ) 140 | 141 | return GuardrailResult(passed=True) 142 | 143 | class OutputLengthGuardrail(GuardrailBase): 144 | """Enforces output length limits.""" 145 | 146 | def __init__(self, max_length: int = 10000, min_length: int = 10): 147 | """Initialize length limits.""" 148 | self.max_length = max_length 149 | self.min_length = min_length 150 | 151 | @property 152 | def name(self) -> str: 153 | return "output_length" 154 | 155 | @property 156 | def description(self) -> str: 157 | return f"Enforces output length between {self.min_length} and {self.max_length} characters" 158 | 159 | @property 160 | def category(self) -> str: 161 | return "quality" 162 | 163 | def check(self, content: str, context: Optional[Dict[str, Any]] = None) -> GuardrailResult: 164 | """Check content length.""" 165 | length = len(content) 166 | 167 | if length > self.max_length: 168 | return GuardrailResult( 169 | passed=False, 170 | severity=GuardrailSeverity.WARNING, 171 | message=f"Output too long: {length} characters (max: {self.max_length})", 172 | details={'length': length, 'max_length': self.max_length} 173 | ) 174 | 175 | if length < self.min_length: 176 | return GuardrailResult( 177 | passed=False, 178 | severity=GuardrailSeverity.WARNING, 179 | message=f"Output too short: {length} characters (min: {self.min_length})", 180 | details={'length': length, 'min_length': self.min_length} 181 | ) 182 | 183 | return GuardrailResult(passed=True) 184 | 185 | class CodeSafetyGuardrail(GuardrailBase): 186 | """Detects potentially unsafe code patterns.""" 187 | 188 | def __init__(self): 189 | """Initialize code safety patterns.""" 190 | self.dangerous_patterns = [ 191 | r'os\.system\(', 192 | r'subprocess\.call\(', 193 | r'eval\(', 194 | r'exec\(', 195 | r'__import__\(', 196 | r'open\(.+[\'"]w[\'"]', # File writing 197 | r'rm\s+-rf', 198 | r'sudo\s+', 199 | r'curl.*\|.*sh' 200 | ] 201 | 202 | @property 203 | def name(self) -> str: 204 | return "code_safety" 205 | 206 | @property 207 | def description(self) -> str: 208 | return "Detects potentially unsafe code patterns" 209 | 210 | @property 211 | def category(self) -> str: 212 | return "security" 213 | 214 | def check(self, content: str, context: Optional[Dict[str, Any]] = None) -> GuardrailResult: 215 | """Check for unsafe code patterns.""" 216 | detected_patterns = [] 217 | 218 | for pattern in self.dangerous_patterns: 219 | matches = re.findall(pattern, content) 220 | if matches: 221 | detected_patterns.append(pattern) 222 | 223 | if detected_patterns: 224 | return GuardrailResult( 225 | passed=False, 226 | severity=GuardrailSeverity.BLOCK, 227 | message=f"Dangerous code patterns detected: {len(detected_patterns)} patterns", 228 | details={'patterns': detected_patterns} 229 | ) 230 | 231 | return GuardrailResult(passed=True) 232 | 233 | class HallucinationDetectionGuardrail(GuardrailBase): 234 | """Detects potential hallucinations in AI-generated content.""" 235 | 236 | def __init__(self): 237 | """Initialize hallucination detection.""" 238 | self.suspicious_phrases = [ 239 | "according to my database", 240 | "in my files", 241 | "i remember", 242 | "i know for 
certain", 243 | "definitely true", 244 | "without a doubt" 245 | ] 246 | 247 | @property 248 | def name(self) -> str: 249 | return "hallucination_detection" 250 | 251 | @property 252 | def description(self) -> str: 253 | return "Detects potential hallucinations in AI-generated content" 254 | 255 | @property 256 | def category(self) -> str: 257 | return "quality" 258 | 259 | def check(self, content: str, context: Optional[Dict[str, Any]] = None) -> GuardrailResult: 260 | """Check for potential hallucinations.""" 261 | content_lower = content.lower() 262 | detected_phrases = [] 263 | 264 | for phrase in self.suspicious_phrases: 265 | if phrase in content_lower: 266 | detected_phrases.append(phrase) 267 | 268 | if detected_phrases: 269 | return GuardrailResult( 270 | passed=False, 271 | severity=GuardrailSeverity.WARNING, 272 | message=f"Potential hallucination indicators detected: {len(detected_phrases)}", 273 | details={'suspicious_phrases': detected_phrases} 274 | ) 275 | 276 | return GuardrailResult(passed=True) 277 | 278 | class GuardrailEngine: 279 | """Engine for managing and executing guardrails.""" 280 | 281 | def __init__(self): 282 | """Initialize the guardrail engine.""" 283 | self.guardrails: Dict[str, GuardrailBase] = {} 284 | self._register_default_guardrails() 285 | 286 | def _register_default_guardrails(self): 287 | """Register default guardrails.""" 288 | default_guardrails = [ 289 | PIIDetectionGuardrail(), 290 | ToxicityDetectionGuardrail(), 291 | OutputLengthGuardrail(), 292 | CodeSafetyGuardrail(), 293 | HallucinationDetectionGuardrail() 294 | ] 295 | 296 | for guardrail in default_guardrails: 297 | self.register_guardrail(guardrail) 298 | 299 | def register_guardrail(self, guardrail: GuardrailBase): 300 | """Register a new guardrail.""" 301 | self.guardrails[guardrail.name] = guardrail 302 | 303 | def unregister_guardrail(self, name: str): 304 | """Unregister a guardrail.""" 305 | if name in self.guardrails: 306 | del self.guardrails[name] 307 | 308 | def check_content(self, content: str, guardrail_names: Optional[List[str]] = None, 309 | context: Optional[Dict[str, Any]] = None) -> Dict[str, GuardrailResult]: 310 | """Check content against specified guardrails.""" 311 | if guardrail_names is None: 312 | guardrail_names = list(self.guardrails.keys()) 313 | 314 | results = {} 315 | 316 | for name in guardrail_names: 317 | if name in self.guardrails: 318 | guardrail = self.guardrails[name] 319 | result = guardrail.check(content, context) 320 | results[name] = result 321 | 322 | return results 323 | 324 | def should_block(self, results: Dict[str, GuardrailResult]) -> bool: 325 | """Determine if content should be blocked based on results.""" 326 | for result in results.values(): 327 | if not result.passed and result.severity in [GuardrailSeverity.BLOCK, GuardrailSeverity.CRITICAL]: 328 | return True 329 | return False 330 | 331 | def get_warnings(self, results: Dict[str, GuardrailResult]) -> List[str]: 332 | """Get warning messages from results.""" 333 | warnings = [] 334 | for result in results.values(): 335 | if not result.passed and result.severity == GuardrailSeverity.WARNING: 336 | warnings.append(result.message) 337 | return warnings 338 | 339 | def list_guardrails(self, category: Optional[str] = None) -> List[Dict[str, str]]: 340 | """List available guardrails.""" 341 | guardrails = [] 342 | 343 | for guardrail in self.guardrails.values(): 344 | if category is None or guardrail.category == category: 345 | guardrails.append({ 346 | 'name': guardrail.name, 347 | 
'description': guardrail.description, 348 | 'category': guardrail.category 349 | }) 350 | 351 | return guardrails 352 | 353 | def get_recommended_guardrails(self, task_description: str) -> List[str]: 354 | """Get recommended guardrails based on task description.""" 355 | task_lower = task_description.lower() 356 | recommended = [] 357 | 358 | # Always include basic safety guardrails 359 | recommended.extend(['pii_detection', 'toxicity_detection']) 360 | 361 | # Add specific guardrails based on task type 362 | if any(word in task_lower for word in ['code', 'script', 'programming', 'execute']): 363 | recommended.append('code_safety') 364 | 365 | if any(word in task_lower for word in ['research', 'information', 'facts', 'data']): 366 | recommended.append('hallucination_detection') 367 | 368 | if any(word in task_lower for word in ['long', 'detailed', 'comprehensive', 'summary']): 369 | recommended.append('output_length') 370 | 371 | return list(set(recommended)) # Remove duplicates 372 | 373 | def create_custom_guardrail(self, name: str, description: str, category: str, 374 | check_function: Callable[[str, Optional[Dict[str, Any]]], GuardrailResult]): 375 | """Create and register a custom guardrail.""" 376 | class CustomGuardrail(GuardrailBase): 377 | @property 378 | def name(self) -> str: 379 | return name 380 | 381 | @property 382 | def description(self) -> str: 383 | return description 384 | 385 | @property 386 | def category(self) -> str: 387 | return category 388 | 389 | def check(self, content: str, context: Optional[Dict[str, Any]] = None) -> GuardrailResult: 390 | return check_function(content, context) 391 | 392 | self.register_guardrail(CustomGuardrail()) -------------------------------------------------------------------------------- /agentforge/core/file_based_crew_designer.py: -------------------------------------------------------------------------------- 1 | """ 2 | File-based Crew Designer for agentforge. 3 | 4 | This module creates CrewAI crews as file-based projects with YAML configurations 5 | and Python modules, while storing minimal metadata in the database. 
6 | """ 7 | 8 | import os 9 | import importlib.util 10 | import sys 11 | from pathlib import Path 12 | from typing import List, Dict, Any, Optional 13 | from datetime import datetime 14 | 15 | from .task_analyzer import CrewSpec, AgentSpec 16 | from .config import Config 17 | from .file_generator import CrewFileGenerator 18 | 19 | 20 | class FileBasedCrewDesigner: 21 | """Creates and manages file-based CrewAI crews.""" 22 | 23 | def __init__(self, config: Config, crews_base_path: str = "crews"): 24 | """Initialize the file-based crew designer.""" 25 | self.config = config 26 | self.crews_base_path = Path(crews_base_path) 27 | self.crews_base_path.mkdir(exist_ok=True) 28 | 29 | # Initialize file generator 30 | self.file_generator = CrewFileGenerator(str(self.crews_base_path)) 31 | 32 | def create_crew_from_spec(self, spec: CrewSpec) -> Dict[str, Any]: 33 | """Create a new file-based crew from a crew specification.""" 34 | print(f"🔧 Creating file-based crew: {spec.name}") 35 | 36 | try: 37 | # Generate the crew project files 38 | crew_path = self.file_generator.generate_crew_project(spec) 39 | print(f"✅ Generated crew files at: {crew_path}") 40 | 41 | # Store minimal metadata in database 42 | metadata = self._create_crew_metadata(spec, crew_path) 43 | self._store_crew_metadata(metadata) 44 | 45 | return { 46 | 'name': spec.name, 47 | 'path': crew_path, 48 | 'status': 'created', 49 | 'agents_count': len(spec.agents), 50 | 'tools_used': self._extract_all_tools(spec), 51 | 'created_at': datetime.now().isoformat() 52 | } 53 | 54 | except Exception as e: 55 | print(f"❌ Failed to create crew: {str(e)}") 56 | raise 57 | 58 | def run_crew(self, crew_name: str, task_input: str = None) -> str: 59 | """Run a file-based crew by executing its Python module.""" 60 | crew_path = self.crews_base_path / crew_name 61 | 62 | if not crew_path.exists(): 63 | raise ValueError(f"Crew '{crew_name}' does not exist at {crew_path}") 64 | 65 | try: 66 | # Import and execute the crew module 67 | crew_module = self._import_crew_module(crew_name) 68 | 69 | # Create crew instance and run 70 | crew_class = getattr(crew_module, f"{self._to_class_name(crew_name)}") 71 | crew_instance = crew_class() 72 | 73 | print(f"🚀 Running crew: {crew_name}") 74 | result = crew_instance.run(task_input) 75 | 76 | # Update execution metadata 77 | self._update_execution_metadata(crew_name, 'completed') 78 | 79 | return result 80 | 81 | except Exception as e: 82 | self._update_execution_metadata(crew_name, 'failed', str(e)) 83 | raise Exception(f"Failed to run crew '{crew_name}': {str(e)}") 84 | 85 | def list_crews(self) -> List[Dict[str, Any]]: 86 | """List all available file-based crews.""" 87 | crews = [] 88 | 89 | for crew_dir in self.crews_base_path.iterdir(): 90 | if crew_dir.is_dir() and (crew_dir / "config" / "agents.yaml").exists(): 91 | crew_info = self._get_crew_info(crew_dir.name) 92 | crews.append(crew_info) 93 | 94 | return crews 95 | 96 | def get_crew_details(self, crew_name: str) -> Dict[str, Any]: 97 | """Get detailed information about a specific crew.""" 98 | crew_path = self.crews_base_path / crew_name 99 | 100 | if not crew_path.exists(): 101 | raise ValueError(f"Crew '{crew_name}' does not exist") 102 | 103 | return self._get_crew_info(crew_name, detailed=True) 104 | 105 | def export_crew(self, crew_name: str, output_path: str = None) -> str: 106 | """Export a crew as a ZIP file for sharing.""" 107 | try: 108 | zip_path = self.file_generator.export_crew_as_zip(crew_name, output_path) 109 | print(f"✅ Crew exported to: 
{zip_path}") 110 | return zip_path 111 | except Exception as e: 112 | print(f"❌ Failed to export crew: {str(e)}") 113 | raise 114 | 115 | def delete_crew(self, crew_name: str) -> bool: 116 | """Delete a file-based crew and its metadata.""" 117 | crew_path = self.crews_base_path / crew_name 118 | 119 | if not crew_path.exists(): 120 | return False 121 | 122 | try: 123 | import shutil 124 | shutil.rmtree(crew_path) 125 | 126 | # Remove metadata from database 127 | self._remove_crew_metadata(crew_name) 128 | 129 | print(f"✅ Deleted crew: {crew_name}") 130 | return True 131 | 132 | except Exception as e: 133 | print(f"❌ Failed to delete crew: {str(e)}") 134 | return False 135 | 136 | def update_crew_config(self, crew_name: str, config_updates: Dict[str, Any]) -> bool: 137 | """Update crew configuration files.""" 138 | crew_path = self.crews_base_path / crew_name 139 | 140 | if not crew_path.exists(): 141 | return False 142 | 143 | try: 144 | import yaml 145 | 146 | # Update agents.yaml if provided 147 | if 'agents' in config_updates: 148 | agents_file = crew_path / "config" / "agents.yaml" 149 | with open(agents_file, 'r') as f: 150 | agents_config = yaml.safe_load(f) 151 | 152 | # Update agent configurations 153 | for agent_name, updates in config_updates['agents'].items(): 154 | if agent_name in agents_config: 155 | agents_config[agent_name].update(updates) 156 | 157 | with open(agents_file, 'w') as f: 158 | yaml.dump(agents_config, f, default_flow_style=False, indent=2) 159 | 160 | # Update tasks.yaml if provided 161 | if 'tasks' in config_updates: 162 | tasks_file = crew_path / "config" / "tasks.yaml" 163 | with open(tasks_file, 'r') as f: 164 | tasks_config = yaml.safe_load(f) 165 | 166 | # Update task configurations 167 | for task_name, updates in config_updates['tasks'].items(): 168 | if task_name in tasks_config: 169 | tasks_config[task_name].update(updates) 170 | 171 | with open(tasks_file, 'w') as f: 172 | yaml.dump(tasks_config, f, default_flow_style=False, indent=2) 173 | 174 | print(f"✅ Updated crew configuration: {crew_name}") 175 | return True 176 | 177 | except Exception as e: 178 | print(f"❌ Failed to update crew config: {str(e)}") 179 | return False 180 | 181 | def _import_crew_module(self, crew_name: str): 182 | """Dynamically import a crew module.""" 183 | crew_path = self.crews_base_path / crew_name 184 | src_path = crew_path / "src" / crew_name 185 | 186 | # Add the src directory to Python path temporarily 187 | src_str = str(src_path.parent) 188 | if src_str not in sys.path: 189 | sys.path.insert(0, src_str) 190 | 191 | try: 192 | # Import the crew module 193 | module_name = f"{crew_name}.crew" 194 | spec = importlib.util.spec_from_file_location( 195 | module_name, 196 | src_path / "crew.py" 197 | ) 198 | module = importlib.util.module_from_spec(spec) 199 | spec.loader.exec_module(module) 200 | 201 | return module 202 | 203 | finally: 204 | # Remove from path to avoid conflicts 205 | if src_str in sys.path: 206 | sys.path.remove(src_str) 207 | 208 | def _create_crew_metadata(self, spec: CrewSpec, crew_path: str) -> Dict[str, Any]: 209 | """Create minimal metadata for database storage.""" 210 | return { 211 | 'name': spec.name, 212 | 'description': spec.description, 213 | 'task': spec.task, 214 | 'expected_output': spec.expected_output, 215 | 'process_type': spec.process_type, 216 | 'file_path': crew_path, 217 | 'agents_count': len(spec.agents), 218 | 'tools_used': self._extract_all_tools(spec), 219 | 'created_at': datetime.now(), 220 | 'execution_count': 0, 221 | 
'last_executed': None, 222 | 'status': 'created' 223 | } 224 | 225 | def _extract_all_tools(self, spec: CrewSpec) -> List[str]: 226 | """Extract all unique tools used across all agents.""" 227 | all_tools = set() 228 | for agent in spec.agents: 229 | all_tools.update(agent.required_tools) 230 | return list(all_tools) 231 | 232 | def _store_crew_metadata(self, metadata: Dict[str, Any]): 233 | """Store crew metadata in database.""" 234 | # For now, store in a simple file-based cache 235 | # TODO: Implement proper database storage with minimal schema 236 | cache_file = self.crews_base_path / ".crew_metadata.json" 237 | 238 | try: 239 | import json 240 | 241 | # Load existing metadata 242 | if cache_file.exists(): 243 | with open(cache_file, 'r') as f: 244 | all_metadata = json.load(f) 245 | else: 246 | all_metadata = {} 247 | 248 | # Add new metadata 249 | all_metadata[metadata['name']] = { 250 | **metadata, 251 | 'created_at': metadata['created_at'].isoformat(), 252 | 'last_executed': metadata['last_executed'].isoformat() if metadata['last_executed'] else None 253 | } 254 | 255 | # Save back to file 256 | with open(cache_file, 'w') as f: 257 | json.dump(all_metadata, f, indent=2) 258 | 259 | except Exception as e: 260 | print(f"⚠️ Failed to store metadata: {str(e)}") 261 | 262 | def _update_execution_metadata(self, crew_name: str, status: str, error_msg: str = None): 263 | """Update execution metadata.""" 264 | cache_file = self.crews_base_path / ".crew_metadata.json" 265 | 266 | try: 267 | import json 268 | 269 | if cache_file.exists(): 270 | with open(cache_file, 'r') as f: 271 | all_metadata = json.load(f) 272 | 273 | if crew_name in all_metadata: 274 | all_metadata[crew_name]['execution_count'] += 1 275 | all_metadata[crew_name]['last_executed'] = datetime.now().isoformat() 276 | all_metadata[crew_name]['status'] = status 277 | 278 | if error_msg: 279 | all_metadata[crew_name]['last_error'] = error_msg 280 | 281 | with open(cache_file, 'w') as f: 282 | json.dump(all_metadata, f, indent=2) 283 | 284 | except Exception as e: 285 | print(f"⚠️ Failed to update execution metadata: {str(e)}") 286 | 287 | def _remove_crew_metadata(self, crew_name: str): 288 | """Remove crew metadata from cache.""" 289 | cache_file = self.crews_base_path / ".crew_metadata.json" 290 | 291 | try: 292 | import json 293 | 294 | if cache_file.exists(): 295 | with open(cache_file, 'r') as f: 296 | all_metadata = json.load(f) 297 | 298 | if crew_name in all_metadata: 299 | del all_metadata[crew_name] 300 | 301 | with open(cache_file, 'w') as f: 302 | json.dump(all_metadata, f, indent=2) 303 | 304 | except Exception as e: 305 | print(f"⚠️ Failed to remove metadata: {str(e)}") 306 | 307 | def _get_crew_info(self, crew_name: str, detailed: bool = False) -> Dict[str, Any]: 308 | """Get crew information from files and metadata.""" 309 | crew_path = self.crews_base_path / crew_name 310 | 311 | # Basic info from directory 312 | info = { 313 | 'name': crew_name, 314 | 'path': str(crew_path), 315 | 'created_at': datetime.fromtimestamp(crew_path.stat().st_ctime).isoformat() 316 | } 317 | 318 | # Load metadata from cache 319 | cache_file = self.crews_base_path / ".crew_metadata.json" 320 | if cache_file.exists(): 321 | try: 322 | import json 323 | with open(cache_file, 'r') as f: 324 | all_metadata = json.load(f) 325 | 326 | if crew_name in all_metadata: 327 | info.update(all_metadata[crew_name]) 328 | except Exception: 329 | pass 330 | 331 | if detailed: 332 | # Load configuration details 333 | try: 334 | import yaml 335 | 336 | 
# Load agents config 337 | agents_file = crew_path / "config" / "agents.yaml" 338 | if agents_file.exists(): 339 | with open(agents_file, 'r') as f: 340 | info['agents'] = yaml.safe_load(f) 341 | 342 | # Load tasks config 343 | tasks_file = crew_path / "config" / "tasks.yaml" 344 | if tasks_file.exists(): 345 | with open(tasks_file, 'r') as f: 346 | info['tasks'] = yaml.safe_load(f) 347 | 348 | # Check for README 349 | readme_file = crew_path / "README.md" 350 | info['has_readme'] = readme_file.exists() 351 | 352 | # Check for requirements 353 | req_file = crew_path / "requirements.txt" 354 | info['has_requirements'] = req_file.exists() 355 | 356 | except Exception as e: 357 | info['config_error'] = str(e) 358 | 359 | return info 360 | 361 | def _to_class_name(self, name: str) -> str: 362 | """Convert crew name to Python class name.""" 363 | clean_name = ''.join(c for c in name if c.isalnum() or c == '_') 364 | words = clean_name.replace('_', ' ').split() 365 | return ''.join(word.capitalize() for word in words) + 'Crew' --------------------------------------------------------------------------------
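The guardrail engine defined in agentforge/tools/guardrails.py is self-contained, so its intended call pattern can be illustrated directly. The sketch below is a hypothetical usage example rather than repository code: the sample task, the draft string, and the no_placeholder_text check are made up for illustration, while the class names, method signatures, and import path follow the module and package layout above.

# Hypothetical usage sketch for GuardrailEngine (illustrative, not repository code).
from agentforge.tools.guardrails import (
    GuardrailEngine,
    GuardrailResult,
    GuardrailSeverity,
)

engine = GuardrailEngine()

# Pick the guardrails the engine itself recommends for a coding-oriented task,
# then screen a draft answer against them.
task = "Generate a Python script to automate the weekly report"  # example task
draft = "Email me at jane.doe@example.com and run os.system('rm -rf /tmp/out')."
names = engine.get_recommended_guardrails(task)
results = engine.check_content(draft, guardrail_names=names)

if engine.should_block(results):
    # The PII and code-safety checks return BLOCK severity, so this branch fires here.
    failures = [r.message for r in results.values() if not r.passed]
    print("Blocked:", failures)
else:
    for warning in engine.get_warnings(results):
        print("Warning:", warning)

# Registering an additional check through the provided factory helper.
def no_placeholder_text(content, context=None):
    passed = "lorem ipsum" not in content.lower()
    return GuardrailResult(
        passed=passed,
        severity=GuardrailSeverity.WARNING,
        message="" if passed else "Placeholder text detected",
    )

engine.create_custom_guardrail(
    name="no_placeholder_text",
    description="Flags leftover placeholder text in generated output",
    category="quality",
    check_function=no_placeholder_text,
)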