├── docs
│   ├── README.md
│   ├── api_reference.md
│   ├── memory_management.md
│   ├── custom_tools.md
│   └── flows.md
├── generated-icon.png
├── hawkins_agent
│   ├── storage
│   │   ├── __init__.py
│   │   ├── base.py
│   │   └── hawkindb.py
│   ├── __init__.py
│   ├── llm
│   │   ├── __init__.py
│   │   ├── base.py
│   │   ├── manager.py
│   │   └── lite_llm.py
│   ├── tools
│   │   ├── __init__.py
│   │   ├── rag.py
│   │   ├── base.py
│   │   ├── email.py
│   │   ├── summarize.py
│   │   ├── search.py
│   │   ├── code_interpreter.py
│   │   └── weather.py
│   ├── types.py
│   ├── llm.py
│   ├── flow.py
│   ├── memory.py
│   ├── mock
│   │   └── __init__.py
│   └── agent.py
├── .gitignore
├── pyproject.toml
├── setup.py
├── examples
│   ├── model_test.py
│   ├── test_rag.py
│   ├── simple_agent.py
│   ├── tool_test.py
│   ├── knowledge_base_demo.py
│   ├── multi_agent_flow.py
│   ├── maldives_trip_planner.py
│   ├── blog_writer_flow.py
│   └── multiagent-trip-planner.py
└── README.md

/docs/README.md:
--------------------------------------------------------------------------------
1 | pip install hawkins-agent
--------------------------------------------------------------------------------
/generated-icon.png:
--------------------------------------------------------------------------------
 https://raw.githubusercontent.com/harishsg993010/HawkinsAgent/HEAD/generated-icon.png
--------------------------------------------------------------------------------
/hawkins_agent/storage/__init__.py:
--------------------------------------------------------------------------------
1 | """
2 | Storage module for Hawkins Agent Framework
3 | 
4 | This module provides database integration and storage management capabilities,
5 | including memory storage using HawkinDB and base interfaces for custom storage implementations.
6 | """
7 | 
8 | from .base import BaseStorage, StorageConfig
9 | from .hawkindb import HawkinDBStorage
10 | 
11 | __all__ = ["BaseStorage", "StorageConfig", "HawkinDBStorage"]
--------------------------------------------------------------------------------
/hawkins_agent/__init__.py:
--------------------------------------------------------------------------------
1 | """
2 | Hawkins Agent Framework
3 | A simple yet powerful framework for building AI agents
4 | """
5 | 
6 | from .agent import Agent, AgentBuilder
7 | from .types import Message, AgentResponse
8 | from .tools.base import BaseTool
9 | from .flow import FlowManager, FlowStep
10 | 
11 | __version__ = "0.1.4"
12 | __all__ = ["Agent", "AgentBuilder", "Message", "AgentResponse", "BaseTool",
13 |            "FlowManager", "FlowStep"]
--------------------------------------------------------------------------------
/hawkins_agent/llm/__init__.py:
--------------------------------------------------------------------------------
1 | """
2 | LiteLLM integration module for Hawkins Agent Framework
3 | 
4 | This module provides integration with LiteLLM for language model interactions,
5 | including response parsing, error handling, and prompt management.
6 | """ 7 | 8 | from .base import BaseLLMProvider 9 | from .lite_llm import LiteLLMProvider 10 | from .manager import LLMManager 11 | 12 | __all__ = ["BaseLLMProvider", "LiteLLMProvider", "LLMManager"] 13 | -------------------------------------------------------------------------------- /hawkins_agent/tools/__init__.py: -------------------------------------------------------------------------------- 1 | """ 2 | Hawkins Agent Tools 3 | A collection of tools for use with Hawkins agents 4 | """ 5 | 6 | from .base import BaseTool 7 | from .email import EmailTool 8 | from .search import WebSearchTool 9 | from .rag import RAGTool 10 | from .summarize import SummarizationTool 11 | from .code_interpreter import CodeInterpreterTool 12 | from .weather import WeatherTool 13 | 14 | __all__ = [ 15 | "BaseTool", 16 | "EmailTool", 17 | "WebSearchTool", 18 | "RAGTool", 19 | "SummarizationTool", 20 | "CodeInterpreterTool", 21 | "WeatherTool" 22 | ] -------------------------------------------------------------------------------- /.gitignore: -------------------------------------------------------------------------------- 1 | # Python 2 | __pycache__/ 3 | *.py[cod] 4 | *$py.class 5 | *.so 6 | .Python 7 | build/ 8 | develop-eggs/ 9 | dist/ 10 | downloads/ 11 | eggs/ 12 | .eggs/ 13 | lib/ 14 | lib64/ 15 | parts/ 16 | sdist/ 17 | var/ 18 | wheels/ 19 | share/python-wheels/ 20 | *.egg-info/ 21 | .installed.cfg 22 | *.egg 23 | 24 | # Environment 25 | .env 26 | .venv 27 | env/ 28 | venv/ 29 | ENV/ 30 | env.bak/ 31 | venv.bak/ 32 | .env.local 33 | 34 | # IDE 35 | .idea/ 36 | .vscode/ 37 | *.swp 38 | *.swo 39 | 40 | # Database 41 | *.db 42 | *.sqlite3 43 | 44 | # Replit specific 45 | .pythonlibs/ 46 | .upm/ 47 | .breakpoints 48 | replit.nix 49 | poetry.lock 50 | 51 | # Node modules (if any) 52 | node_modules/ 53 | package-lock.json 54 | 55 | # Build artifacts 56 | *.log 57 | *.pot 58 | *.pyc 59 | 60 | # OS specific 61 | .DS_Store 62 | Thumbs.db -------------------------------------------------------------------------------- /hawkins_agent/tools/rag.py: -------------------------------------------------------------------------------- 1 | """RAG tool implementation using HawkinsRAG""" 2 | 3 | from typing import Dict, Any 4 | from hawkins_rag import HawkinsRAG 5 | from .base import BaseTool 6 | from ..types import ToolResponse 7 | 8 | class RAGTool(BaseTool): 9 | """Tool for retrieving information from knowledge base""" 10 | 11 | def __init__(self, knowledge_base: HawkinsRAG): 12 | super().__init__(name="RAGTool") 13 | self.kb = knowledge_base 14 | 15 | @property 16 | def description(self) -> str: 17 | return "Query the knowledge base for information" 18 | 19 | async def execute(self, **kwargs) -> ToolResponse: 20 | """Query the knowledge base""" 21 | try: 22 | query = kwargs.get('query', '') 23 | results = await self.kb.query(query) 24 | return ToolResponse( 25 | success=True, 26 | result=results, 27 | error=None 28 | ) 29 | except Exception as e: 30 | return ToolResponse( 31 | success=False, 32 | result=None, 33 | error=str(e) 34 | ) -------------------------------------------------------------------------------- /pyproject.toml: -------------------------------------------------------------------------------- 1 | [project] 2 | name = "hawkins-agent" 3 | version = "0.1.4" 4 | description = "A Python SDK for building AI agents with minimal code using Hawkins ecosystem with HawkinDB memory" 5 | requires-python = ">=3.11" 6 | readme = "README.md" 7 | license = { text = "MIT" } 8 | authors = [ 9 | { name = "Harish 
Santhanalakshmi Ganesan" } 10 | ] 11 | classifiers = [ 12 | "Development Status :: 3 - Alpha", 13 | "Intended Audience :: Developers", 14 | "License :: OSI Approved :: MIT License", 15 | "Programming Language :: Python :: 3", 16 | "Programming Language :: Python :: 3.11", 17 | "Topic :: Software Development :: Libraries :: Python Modules", 18 | "Topic :: Scientific/Engineering :: Artificial Intelligence", 19 | ] 20 | dependencies = [ 21 | "aiohttp>=3.8.0", 22 | "flask[async]>=3.1.0", 23 | "google-api-python-client>=2.156.0", 24 | "hawkins-rag>=0.1.0", 25 | "hawkinsdb>=1.0.1", 26 | "litellm>=1.0.0", 27 | "openai>=1.58.1", 28 | "python-dotenv>=0.19.0", 29 | "serpapi>=0.1.5", 30 | "tavily-python>=0.5.0", 31 | "trafilatura>=2.0.0", 32 | ] 33 | 34 | [project.optional-dependencies] 35 | dev = [ 36 | "pytest>=7.0.0", 37 | "black>=22.0.0", 38 | "mypy>=1.0.0" 39 | ] 40 | 41 | [build-system] 42 | requires = ["hatchling"] 43 | build-backend = "hatchling.build" 44 | 45 | [tool.hatch.metadata] 46 | allow-direct-references = true 47 | 48 | [tool.pytest.ini_options] 49 | asyncio_mode = "auto" 50 | 51 | [project.urls] 52 | Documentation = "https://github.com/hawkins-ai/hawkins-agent#readme" 53 | Source = "https://github.com/hawkins-ai/hawkins-agent" 54 | -------------------------------------------------------------------------------- /hawkins_agent/llm/base.py: -------------------------------------------------------------------------------- 1 | """Base classes for LLM integration""" 2 | 3 | from abc import ABC, abstractmethod 4 | from typing import List, Optional, Dict, Any 5 | from ..types import Message 6 | 7 | class BaseLLMProvider(ABC): 8 | """Abstract base class for LLM providers 9 | 10 | This class defines the interface that all LLM providers must implement. 11 | It handles the core functionality of interacting with language models. 
12 | """ 13 | 14 | def __init__(self, model: str, **kwargs): 15 | """Initialize the LLM provider 16 | 17 | Args: 18 | model: Name of the language model to use 19 | **kwargs: Additional provider-specific configuration 20 | """ 21 | self.model = model 22 | self.config = kwargs 23 | 24 | @abstractmethod 25 | async def generate(self, messages: List[Message]) -> str: 26 | """Generate a response from the language model 27 | 28 | Args: 29 | messages: List of conversation messages 30 | 31 | Returns: 32 | Generated response text 33 | 34 | Raises: 35 | LLMError: If there's an error during generation 36 | """ 37 | pass 38 | 39 | @abstractmethod 40 | async def validate_response(self, response: str) -> bool: 41 | """Validate a response from the language model 42 | 43 | Args: 44 | response: Response text to validate 45 | 46 | Returns: 47 | True if response is valid, False otherwise 48 | """ 49 | pass 50 | -------------------------------------------------------------------------------- /setup.py: -------------------------------------------------------------------------------- 1 | """Setup script for the Hawkins Agent Framework""" 2 | 3 | from setuptools import setup, find_packages 4 | 5 | with open("README.md", "r", encoding="utf-8") as fh: 6 | long_description = fh.read() 7 | 8 | setup( 9 | name="hawkins-agent", 10 | version="0.1.4", 11 | author="Harish Santhanalakshmi Ganesan", 12 | description="A Python SDK for building AI agents with minimal code using Hawkins ecosystem with HawkinDB memory", 13 | long_description=long_description, 14 | long_description_content_type="text/markdown", 15 | url="https://github.com/hawkins-ai/hawkins-agent", 16 | packages=find_packages(), 17 | classifiers=[ 18 | "Development Status :: 3 - Alpha", 19 | "Intended Audience :: Developers", 20 | "License :: OSI Approved :: MIT License", 21 | "Programming Language :: Python :: 3", 22 | "Programming Language :: Python :: 3.11", 23 | "Topic :: Software Development :: Libraries :: Python Modules", 24 | "Topic :: Scientific/Engineering :: Artificial Intelligence", 25 | ], 26 | python_requires=">=3.11", 27 | install_requires=[ 28 | "aiohttp>=3.8.0", 29 | "flask[async]>=3.1.0", 30 | "google-api-python-client>=2.156.0", 31 | "hawkins-rag>=0.1.0", 32 | "hawkinsdb>=1.0.1", 33 | "litellm>=1.0.0", 34 | "openai>=1.58.1", 35 | "python-dotenv>=0.19.0", 36 | "serpapi>=0.1.5", 37 | "tavily-python>=0.5.0", 38 | "trafilatura>=2.0.0", 39 | ], 40 | extras_require={ 41 | "dev": [ 42 | "pytest>=7.0.0", 43 | "black>=22.0.0", 44 | "mypy>=1.0.0" 45 | ] 46 | }, 47 | ) -------------------------------------------------------------------------------- /examples/model_test.py: -------------------------------------------------------------------------------- 1 | """Test different LLM models using LiteLLM integration""" 2 | 3 | from hawkins_agent import AgentBuilder 4 | from hawkins_agent.llm import LiteLLMProvider 5 | import asyncio 6 | import logging 7 | 8 | # Setup logging 9 | logging.basicConfig(level=logging.INFO) 10 | logger = logging.getLogger(__name__) 11 | 12 | async def main(): 13 | """Test different LLM models""" 14 | try: 15 | # Create an OpenAI agent 16 | logger.info("Creating agent with GPT-4o...") 17 | openai_agent = (AgentBuilder("openai_assistant") 18 | .with_model("openai/gpt-4o") # Latest OpenAI model 19 | .with_provider(LiteLLMProvider, temperature=0.7) 20 | .build()) 21 | 22 | # Create an Anthropic agent 23 | logger.info("Creating agent with Claude 3...") 24 | anthropic_agent = (AgentBuilder("anthropic_assistant") 25 | 
.with_model("anthropic/claude-3-sonnet-20240229") # Claude model 26 | .with_provider(LiteLLMProvider, temperature=0.5) 27 | .build()) 28 | 29 | # Test both agents 30 | test_message = "What is the capital of France?" 31 | 32 | logger.info("\nTesting OpenAI agent...") 33 | openai_response = await openai_agent.process(test_message) 34 | logger.info(f"OpenAI Response: {openai_response.message}") 35 | 36 | logger.info("\nTesting Anthropic agent...") 37 | anthropic_response = await anthropic_agent.process(test_message) 38 | logger.info(f"Anthropic Response: {anthropic_response.message}") 39 | 40 | except Exception as e: 41 | logger.error(f"Error testing models: {str(e)}", exc_info=True) 42 | raise 43 | 44 | if __name__ == "__main__": 45 | asyncio.run(main()) 46 | -------------------------------------------------------------------------------- /examples/test_rag.py: -------------------------------------------------------------------------------- 1 | """Test basic HawkinsRAG functionality""" 2 | 3 | from hawkins_rag import HawkinsRAG 4 | import asyncio 5 | import logging 6 | import tempfile 7 | import os 8 | 9 | # Set up logging 10 | logging.basicConfig(level=logging.INFO) 11 | logger = logging.getLogger(__name__) 12 | 13 | async def main(): 14 | """Test basic RAG functionality""" 15 | try: 16 | # Initialize RAG system 17 | logger.info("Initializing RAG system...") 18 | rag = HawkinsRAG() 19 | 20 | # Test document content 21 | test_content = """ 22 | Artificial Intelligence in 2024 has seen remarkable developments. 23 | Key trends include: 24 | 1. Advanced language models 25 | 2. Improved multimodal capabilities 26 | 3. Focus on AI safety and ethics 27 | """ 28 | 29 | # Create temporary file for test content 30 | logger.info("Creating test document...") 31 | with tempfile.NamedTemporaryFile(mode='w', suffix='.txt', delete=False) as temp_file: 32 | temp_file.write(test_content) 33 | temp_path = temp_file.name 34 | 35 | try: 36 | # Load document 37 | logger.info("Loading test document...") 38 | rag.load_document(temp_path, source_type="text") 39 | 40 | # Test query 41 | logger.info("Testing query...") 42 | query = "What are the key trends in AI?" 43 | response = rag.query(query) 44 | 45 | logger.info("\nQuery Results:") 46 | logger.info("-" * 40) 47 | logger.info(f"Query: {query}") 48 | logger.info(f"Response: {response}") 49 | 50 | finally: 51 | # Clean up temporary file 52 | os.unlink(temp_path) 53 | logger.info("Cleaned up temporary file") 54 | 55 | except Exception as e: 56 | logger.error(f"Error in RAG test: {str(e)}", exc_info=True) 57 | raise 58 | 59 | if __name__ == "__main__": 60 | asyncio.run(main()) -------------------------------------------------------------------------------- /hawkins_agent/types.py: -------------------------------------------------------------------------------- 1 | """Type definitions for the Hawkins Agent Framework 2 | 3 | This module contains the core type definitions used throughout the framework, 4 | including message types, agent responses, and tool responses. 
5 | """ 6 | 7 | from dataclasses import dataclass 8 | from typing import Dict, List, Optional, Any, Union 9 | from enum import Enum 10 | 11 | class MessageRole(str, Enum): 12 | """Defines the role of a message in a conversation 13 | 14 | Attributes: 15 | USER: Message from the user 16 | ASSISTANT: Message from the AI assistant 17 | SYSTEM: System-level message or instruction 18 | """ 19 | USER = "user" 20 | ASSISTANT = "assistant" 21 | SYSTEM = "system" 22 | 23 | @dataclass 24 | class Message: 25 | """Represents a message in the conversation 26 | 27 | Attributes: 28 | role: The role of the message sender (user, assistant, or system) 29 | content: The actual content of the message 30 | metadata: Optional metadata associated with the message 31 | """ 32 | role: MessageRole 33 | content: str 34 | metadata: Optional[Dict[str, Any]] = None 35 | 36 | @dataclass 37 | class AgentResponse: 38 | """Represents an agent's response to a user message 39 | 40 | Attributes: 41 | message: The text response from the agent 42 | tool_calls: List of tool calls made during response generation 43 | metadata: Additional metadata about the response, including tool results 44 | """ 45 | message: str 46 | tool_calls: List[Dict[str, Any]] 47 | metadata: Dict[str, Any] 48 | 49 | @dataclass 50 | class ToolResponse: 51 | """Represents a tool's response after execution 52 | 53 | Attributes: 54 | success: Whether the tool execution was successful 55 | result: The result of the tool execution (if successful) 56 | error: Error message if the execution failed 57 | 58 | Example: 59 | >>> tool_response = ToolResponse( 60 | ... success=True, 61 | ... result="Email sent successfully", 62 | ... error=None 63 | ... ) 64 | """ 65 | success: bool 66 | result: Any 67 | error: Optional[str] = None -------------------------------------------------------------------------------- /examples/simple_agent.py: -------------------------------------------------------------------------------- 1 | """Example of creating a simple agent""" 2 | 3 | from hawkins_agent import AgentBuilder 4 | from hawkins_agent.tools import WebSearchTool 5 | from hawkins_agent.mock import KnowledgeBase 6 | from hawkins_agent.llm import LiteLLMProvider 7 | import logging 8 | import os 9 | import asyncio 10 | 11 | # Set up logging 12 | logging.basicConfig(level=logging.INFO) 13 | logger = logging.getLogger(__name__) 14 | 15 | async def main(): 16 | """Demonstrate basic agent functionality""" 17 | try: 18 | # Create a knowledge base 19 | kb = KnowledgeBase() 20 | 21 | # Get Tavily API key 22 | tavily_api_key = os.getenv("TAVILY_API_KEY") 23 | if not tavily_api_key: 24 | logger.error("TAVILY_API_KEY environment variable not set") 25 | return 26 | 27 | # Configure search tool with Tavily 28 | search_tool = WebSearchTool(api_key=tavily_api_key) 29 | 30 | # Create an agent with GPT-4o and tools 31 | logger.info("Creating agent with GPT-4o...") 32 | agent = (AgentBuilder("assistant") 33 | .with_model("openai/gpt-4o") # Use latest OpenAI model 34 | .with_provider(LiteLLMProvider, temperature=0.7) 35 | .with_knowledge_base(kb) 36 | .with_tool(search_tool) 37 | .build()) 38 | 39 | logger.info("\nTesting agent with a search query...") 40 | 41 | # Test query 42 | query = "What are the latest developments in AI technology in 2024?" 
43 | logger.info(f"Query: {query}") 44 | 45 | response = await agent.process(query) 46 | 47 | # Print response details 48 | logger.info("\nAgent Response:") 49 | logger.info("-" * 40) 50 | logger.info(response.message) 51 | 52 | if response.tool_calls: 53 | logger.info("\nTool Calls Made:") 54 | logger.info("-" * 40) 55 | for call in response.tool_calls: 56 | logger.info(f"Tool: {call['name']}") 57 | logger.info(f"Parameters: {call['parameters']}") 58 | 59 | if "tool_results" in response.metadata: 60 | logger.info("\nTool Results:") 61 | logger.info("-" * 40) 62 | for result in response.metadata["tool_results"]: 63 | if result["success"]: 64 | logger.info(f"Success: {result['result']}") 65 | else: 66 | logger.error(f"Error: {result['error']}") 67 | 68 | except Exception as e: 69 | logger.error(f"Error running simple agent: {str(e)}", exc_info=True) 70 | raise 71 | 72 | if __name__ == "__main__": 73 | asyncio.run(main()) -------------------------------------------------------------------------------- /hawkins_agent/llm.py: -------------------------------------------------------------------------------- 1 | """LLM integration using lite_llm 2 | 3 | This module handles the interaction with language models through lite_llm, 4 | providing a consistent interface for model management and response parsing. 5 | """ 6 | 7 | from typing import Dict, Any, List, Optional 8 | import json 9 | import logging 10 | from .mock import LiteLLM 11 | from .types import Message, MessageRole 12 | 13 | logger = logging.getLogger(__name__) 14 | 15 | class LLMManager: 16 | """Manages LLM interactions and response parsing 17 | 18 | This class handles all interactions with the language model, including: 19 | - Message formatting and prompt construction 20 | - Response parsing and validation 21 | - Error handling and retry logic 22 | """ 23 | 24 | def __init__(self, model: str = "gpt-3.5-turbo"): 25 | """Initialize the LLM manager 26 | 27 | Args: 28 | model: The name of the LLM model to use 29 | """ 30 | self.model = model 31 | self.llm = LiteLLM(model=model) 32 | 33 | async def generate_response(self, 34 | messages: List[Message], 35 | system_prompt: Optional[str] = None) -> str: 36 | """Generate a response from the LLM 37 | 38 | Args: 39 | messages: List of conversation messages 40 | system_prompt: Optional system-level instructions 41 | 42 | Returns: 43 | The generated response text 44 | """ 45 | try: 46 | # Construct the complete message list 47 | prompt_messages = [] 48 | 49 | # Add system prompt if provided 50 | if system_prompt: 51 | prompt_messages.append(Message( 52 | role=MessageRole.SYSTEM, 53 | content=system_prompt 54 | )) 55 | 56 | # Add conversation messages 57 | prompt_messages.extend(messages) 58 | 59 | # Generate response 60 | response = await self.llm.generate( 61 | self._format_messages(prompt_messages) 62 | ) 63 | 64 | return response 65 | 66 | except Exception as e: 67 | logger.error(f"Error generating LLM response: {str(e)}") 68 | raise 69 | 70 | def _format_messages(self, messages: List[Message]) -> str: 71 | """Format messages for the LLM 72 | 73 | Args: 74 | messages: List of messages to format 75 | 76 | Returns: 77 | Formatted prompt string 78 | """ 79 | formatted = [] 80 | 81 | for msg in messages: 82 | formatted.append(f"{msg.role.value}: {msg.content}") 83 | 84 | return "\n".join(formatted) 85 | -------------------------------------------------------------------------------- /hawkins_agent/tools/base.py: -------------------------------------------------------------------------------- 1 | 
"""Base tool implementation for the Hawkins Agent Framework 2 | 3 | This module provides the base class for implementing tools that can be used by agents. 4 | Tools are the primary way to add capabilities to agents, such as sending emails, 5 | performing web searches, or accessing external APIs. 6 | 7 | Example: 8 | >>> class CustomTool(BaseTool): 9 | ... @property 10 | ... def description(self) -> str: 11 | ... return "Description of what the tool does" 12 | ... 13 | ... async def execute(self, **kwargs) -> ToolResponse: 14 | ... result = await self._perform_operation(**kwargs) 15 | ... return ToolResponse(success=True, result=result) 16 | """ 17 | 18 | from abc import ABC, abstractmethod 19 | from typing import Any, Dict, Optional 20 | from ..types import ToolResponse 21 | 22 | class BaseTool(ABC): 23 | """Abstract base class for all tools in the Hawkins Agent Framework 24 | 25 | All tools must inherit from this class and implement the required methods. 26 | The tool's name is automatically derived from the class name, but can be 27 | overridden if needed. 28 | 29 | Attributes: 30 | _name: Protected name attribute of the tool 31 | """ 32 | 33 | def __init__(self, name: Optional[str] = None): 34 | """Initialize the tool with an optional custom name 35 | 36 | Args: 37 | name: Optional custom name for the tool. If not provided, 38 | the class name will be used. 39 | """ 40 | self._name = name or self.__class__.__name__ 41 | 42 | @property 43 | def name(self) -> str: 44 | """Get the tool name""" 45 | return self._name 46 | 47 | @property 48 | @abstractmethod 49 | def description(self) -> str: 50 | """Tool description shown to the LLM 51 | 52 | This description should clearly explain what the tool does and how 53 | it should be used. The LLM will use this description to determine 54 | when to use the tool. 55 | 56 | Returns: 57 | A string describing the tool's functionality 58 | """ 59 | pass 60 | 61 | @abstractmethod 62 | async def execute(self, **kwargs) -> ToolResponse: 63 | """Execute the tool with the provided parameters 64 | 65 | This method should implement the tool's core functionality. It receives 66 | keyword arguments from the LLM's tool call and should return a ToolResponse 67 | indicating success or failure. 68 | 69 | Args: 70 | **kwargs: Keyword arguments passed by the LLM 71 | 72 | Returns: 73 | ToolResponse indicating success/failure and containing results 74 | """ 75 | pass 76 | 77 | def validate_params(self, params: Dict[str, Any]) -> bool: 78 | """Validate the parameters before execution 79 | 80 | Override this method to add custom parameter validation logic. 81 | The default implementation accepts all parameters. 82 | 83 | Args: 84 | params: Dictionary of parameters to validate 85 | 86 | Returns: 87 | True if parameters are valid, False otherwise 88 | """ 89 | return True -------------------------------------------------------------------------------- /hawkins_agent/tools/email.py: -------------------------------------------------------------------------------- 1 | """Email tool implementation""" 2 | 3 | import smtplib 4 | from email.mime.text import MIMEText 5 | from email.mime.multipart import MIMEMultipart 6 | from typing import Dict, Any 7 | import logging 8 | from .base import BaseTool 9 | from ..types import ToolResponse 10 | 11 | logger = logging.getLogger(__name__) 12 | 13 | class EmailTool(BaseTool): 14 | """Tool for sending emails 15 | 16 | This tool provides email sending capabilities to agents. 
It validates 17 | email addresses and handles common email sending errors gracefully. 18 | """ 19 | 20 | @property 21 | def description(self) -> str: 22 | """Get the tool description""" 23 | return "Send emails with specified subject and content" 24 | 25 | def validate_params(self, params: Dict[str, Any]) -> bool: 26 | """Validate email parameters 27 | 28 | Checks for required fields and basic email format validation. 29 | 30 | Args: 31 | params: Dictionary containing email parameters 32 | 33 | Returns: 34 | True if parameters are valid, False otherwise 35 | """ 36 | required_fields = {'to', 'subject', 'content'} 37 | if not all(field in params for field in required_fields): 38 | logger.error(f"Missing required email fields: {required_fields - set(params.keys())}") 39 | return False 40 | 41 | # Basic email validation 42 | email = params['to'] 43 | if '@' not in email or '.' not in email: 44 | logger.error(f"Invalid email format: {email}") 45 | return False 46 | 47 | return True 48 | 49 | async def execute(self, **kwargs) -> ToolResponse: 50 | """Send an email 51 | 52 | Args: 53 | **kwargs: Must include 'to', 'subject', and 'content' parameters 54 | 55 | Returns: 56 | ToolResponse indicating success/failure of email sending 57 | """ 58 | try: 59 | # Extract required parameters 60 | to = kwargs.get('to') 61 | subject = kwargs.get('subject') 62 | content = kwargs.get('content') 63 | 64 | # Validate parameters 65 | if not self.validate_params({'to': to, 'subject': subject, 'content': content}): 66 | return ToolResponse( 67 | success=False, 68 | result=None, 69 | error="Invalid email parameters" 70 | ) 71 | 72 | msg = MIMEMultipart() 73 | msg['To'] = to 74 | msg['Subject'] = subject 75 | msg.attach(MIMEText(content, 'plain')) 76 | 77 | # Implementation of email sending logic 78 | # For now, we're using the mock implementation 79 | logger.info(f"Sending email to {to}") 80 | 81 | return ToolResponse( 82 | success=True, 83 | result=f"Email sent to {to}" 84 | ) 85 | 86 | except Exception as e: 87 | logger.error(f"Error sending email: {str(e)}") 88 | return ToolResponse( 89 | success=False, 90 | result=None, 91 | error=f"Failed to send email: {str(e)}" 92 | ) -------------------------------------------------------------------------------- /hawkins_agent/storage/base.py: -------------------------------------------------------------------------------- 1 | """Base storage interface definitions""" 2 | 3 | from abc import ABC, abstractmethod 4 | from typing import Dict, List, Any, Optional 5 | from dataclasses import dataclass 6 | from datetime import datetime 7 | 8 | @dataclass 9 | class StorageConfig: 10 | """Configuration for storage providers 11 | 12 | Attributes: 13 | retention_days: Number of days to retain memories 14 | max_entries: Maximum number of entries to store 15 | importance_threshold: Minimum importance score for retention 16 | """ 17 | retention_days: Optional[int] = None 18 | max_entries: Optional[int] = None 19 | importance_threshold: float = 0.0 20 | 21 | class BaseStorage(ABC): 22 | """Abstract base class for storage implementations 23 | 24 | This class defines the interface that all storage providers must implement, 25 | providing basic CRUD operations and memory management functionality. 
26 | """ 27 | 28 | def __init__(self, config: Optional[StorageConfig] = None): 29 | """Initialize storage with optional configuration 30 | 31 | Args: 32 | config: Storage configuration parameters 33 | """ 34 | self.config = config or StorageConfig() 35 | 36 | @abstractmethod 37 | async def insert(self, data: Dict[str, Any]) -> str: 38 | """Insert data into storage 39 | 40 | Args: 41 | data: Data to store 42 | 43 | Returns: 44 | ID of the stored data 45 | """ 46 | pass 47 | 48 | @abstractmethod 49 | async def search(self, 50 | query: str, 51 | collection: str, 52 | limit: int = 10) -> List[Dict[str, Any]]: 53 | """Search for data in storage 54 | 55 | Args: 56 | query: Search query 57 | collection: Collection to search in 58 | limit: Maximum number of results 59 | 60 | Returns: 61 | List of matching entries 62 | """ 63 | pass 64 | 65 | @abstractmethod 66 | async def clear(self) -> None: 67 | """Clear all data from storage""" 68 | pass 69 | 70 | @abstractmethod 71 | def now(self) -> str: 72 | """Get current timestamp in ISO format 73 | 74 | Returns: 75 | Current timestamp string 76 | """ 77 | pass 78 | 79 | async def prune(self, collection: str) -> None: 80 | """Prune old or low-importance data 81 | 82 | Args: 83 | collection: Collection to prune 84 | """ 85 | if self.config.retention_days: 86 | await self._prune_by_age(collection) 87 | 88 | if self.config.importance_threshold > 0: 89 | await self._prune_by_importance(collection) 90 | 91 | @abstractmethod 92 | async def _prune_by_age(self, collection: str) -> None: 93 | """Remove entries older than retention period""" 94 | pass 95 | 96 | @abstractmethod 97 | async def _prune_by_importance(self, collection: str) -> None: 98 | """Remove entries below importance threshold""" 99 | pass 100 | -------------------------------------------------------------------------------- /examples/tool_test.py: -------------------------------------------------------------------------------- 1 | """Example demonstrating various tool capabilities""" 2 | 3 | from hawkins_agent import AgentBuilder 4 | from hawkins_agent.tools import WeatherTool 5 | from hawkins_agent.mock import KnowledgeBase 6 | from hawkins_agent.llm import LiteLLMProvider 7 | import logging 8 | import os 9 | import asyncio 10 | 11 | # Set up logging with more detailed format 12 | logging.basicConfig( 13 | level=logging.INFO, 14 | format='%(asctime)s - %(name)s - %(levelname)s - %(message)s' 15 | ) 16 | logger = logging.getLogger(__name__) 17 | 18 | async def main(): 19 | """Test weather tool functionality""" 20 | try: 21 | # Create a knowledge base 22 | kb = KnowledgeBase() 23 | 24 | # Configure weather tool with API key 25 | logger.info("Initializing weather tool...") 26 | weather_tool = WeatherTool(api_key="1b73fe8fc5a03431a43f83fa899d0a4d") 27 | 28 | # Create agent with weather tool 29 | logger.info("Creating agent with weather tool...") 30 | agent = (AgentBuilder("weather_tester") 31 | .with_model("openai/gpt-4o") 32 | .with_provider(LiteLLMProvider, temperature=0.7) 33 | .with_knowledge_base(kb) 34 | .with_tool(weather_tool) 35 | .build()) 36 | 37 | # Test queries 38 | queries = [ 39 | "What's the current weather in London,GB?", 40 | "Tell me the weather in Tokyo,JP", 41 | "What's the weather like in New York,US?" 
42 | ] 43 | 44 | for query in queries: 45 | logger.info(f"\nProcessing query: {query}") 46 | try: 47 | response = await agent.process(query) 48 | 49 | logger.info("\nResponse:") 50 | logger.info("-" * 40) 51 | logger.info(response.message) 52 | 53 | if response.tool_calls: 54 | logger.info("\nTool Calls Made:") 55 | logger.info("-" * 40) 56 | for call in response.tool_calls: 57 | logger.info(f"Tool: {call['name']}") 58 | logger.info(f"Parameters: {call['parameters']}") 59 | 60 | if "tool_results" in response.metadata: 61 | logger.info("\nTool Results:") 62 | logger.info("-" * 40) 63 | for result in response.metadata["tool_results"]: 64 | if result["success"]: 65 | weather_data = result["result"] 66 | logger.info("Weather Information:") 67 | logger.info(f"- Temperature: {weather_data['temperature']}°C") 68 | logger.info(f"- Feels like: {weather_data['feels_like']}°C") 69 | logger.info(f"- Description: {weather_data['description']}") 70 | logger.info(f"- Humidity: {weather_data['humidity']}%") 71 | logger.info(f"- Wind Speed: {weather_data['wind_speed']} m/s") 72 | logger.info(f"- Pressure: {weather_data['pressure']} hPa") 73 | else: 74 | logger.error(f"Error: {result['error']}") 75 | 76 | except Exception as e: 77 | logger.error(f"Error processing query '{query}': {str(e)}", exc_info=True) 78 | 79 | except Exception as e: 80 | logger.error(f"Error in weather tool demonstration: {str(e)}", exc_info=True) 81 | raise 82 | 83 | if __name__ == "__main__": 84 | asyncio.run(main()) -------------------------------------------------------------------------------- /hawkins_agent/flow.py: -------------------------------------------------------------------------------- 1 | """Flow control module for managing agent interactions""" 2 | 3 | from typing import Dict, List, Any, Optional, Callable, Awaitable 4 | from dataclasses import dataclass 5 | import logging 6 | from .agent import Agent 7 | from .types import AgentResponse 8 | 9 | logger = logging.getLogger(__name__) 10 | 11 | @dataclass 12 | class FlowStep: 13 | """Represents a step in the agent workflow 14 | 15 | Attributes: 16 | name: Name of the step 17 | agent: Agent responsible for this step 18 | process: Function to process the step's input 19 | requires: List of step names that must complete before this step 20 | """ 21 | name: str 22 | agent: Agent 23 | process: Callable[[Dict[str, Any]], Awaitable[Dict[str, Any]]] 24 | requires: List[str] = None 25 | 26 | class FlowManager: 27 | """Manages workflow between multiple agents 28 | 29 | This class handles: 30 | - Step dependencies and execution order 31 | - Data passing between steps 32 | - Error handling and recovery 33 | """ 34 | 35 | def __init__(self): 36 | """Initialize the flow manager""" 37 | self.steps: Dict[str, FlowStep] = {} 38 | self.results: Dict[str, Dict[str, Any]] = {} 39 | 40 | def add_step(self, step: FlowStep) -> "FlowManager": 41 | """Add a step to the workflow 42 | 43 | Args: 44 | step: Step configuration to add 45 | 46 | Returns: 47 | Self for chaining 48 | """ 49 | self.steps[step.name] = step 50 | return self 51 | 52 | async def execute(self, initial_data: Optional[Dict[str, Any]] = None) -> Dict[str, Any]: 53 | """Execute the complete workflow 54 | 55 | Args: 56 | initial_data: Initial data to pass to the first step 57 | 58 | Returns: 59 | Combined results from all steps 60 | """ 61 | try: 62 | self.results = {} 63 | data = initial_data or {} 64 | 65 | # Find steps with no dependencies 66 | available = [ 67 | name for name, step in self.steps.items() 68 | if not 
step.requires 69 | ] 70 | 71 | while available: 72 | # Execute available steps 73 | for step_name in available[:]: 74 | step = self.steps[step_name] 75 | 76 | try: 77 | logger.info(f"Executing step: {step_name}") 78 | result = await step.process(data) 79 | self.results[step_name] = result 80 | data.update(result) 81 | available.remove(step_name) 82 | 83 | except Exception as e: 84 | logger.error(f"Error in step {step_name}: {str(e)}") 85 | raise 86 | 87 | # Find newly available steps 88 | for name, step in self.steps.items(): 89 | if (name not in self.results and # Not completed 90 | name not in available and # Not already queued 91 | step.requires and # Has dependencies 92 | all(req in self.results for req in step.requires)): # All deps met 93 | available.append(name) 94 | 95 | return self.results 96 | 97 | except Exception as e: 98 | logger.error(f"Error executing workflow: {str(e)}") 99 | raise 100 | -------------------------------------------------------------------------------- /hawkins_agent/llm/manager.py: -------------------------------------------------------------------------------- 1 | """LLM Manager implementation""" 2 | 3 | from typing import List, Optional, Dict, Any 4 | import logging 5 | import json 6 | from .base import BaseLLMProvider 7 | from .lite_llm import LiteLLMProvider 8 | from ..types import Message, MessageRole 9 | 10 | logger = logging.getLogger(__name__) 11 | 12 | class LLMManager: 13 | """Manages LLM interactions and providers""" 14 | 15 | def __init__(self, 16 | model: str = "gpt-4o", 17 | provider_class: Optional[type] = None, 18 | **kwargs): 19 | """Initialize the LLM manager""" 20 | self.model = model 21 | provider_class = provider_class or LiteLLMProvider 22 | self.provider = provider_class(model=model, **kwargs) 23 | 24 | async def generate_response(self, 25 | messages: List[Message], 26 | tools: Optional[List[Dict[str, Any]]] = None) -> Dict[str, Any]: 27 | """Generate a response from the LLM with optional tool support""" 28 | try: 29 | logger.info("Starting response generation") 30 | logger.debug(f"Input messages: {messages}") 31 | logger.debug(f"Available tools: {tools}") 32 | 33 | # Format tools for OpenAI function calling format 34 | formatted_tools = None 35 | if tools: 36 | formatted_tools = [] 37 | for tool in tools: 38 | formatted_tool = { 39 | "name": tool["name"], 40 | "description": tool["description"], 41 | "parameters": { 42 | "type": "object", 43 | "properties": { 44 | "query": { 45 | "type": "string", 46 | "description": "The query to be processed by the tool" 47 | } 48 | }, 49 | "required": ["query"] 50 | } 51 | } 52 | formatted_tools.append(formatted_tool) 53 | logger.debug(f"Formatted tools: {json.dumps(formatted_tools, indent=2)}") 54 | 55 | # Add system prompt if tools are provided 56 | if formatted_tools: 57 | tool_descriptions = "\n".join( 58 | f"- {tool['name']}: {tool['description']}" 59 | for tool in formatted_tools 60 | ) 61 | system_content = f"""You have access to the following tools: 62 | {tool_descriptions} 63 | 64 | When you need to search for information or use a tool, choose the appropriate tool and provide a relevant query. 65 | First analyze what tool would be most appropriate, then use it with a well-formulated query. 66 | Always summarize the results in a clear and concise way. 
67 | To use a tool, include it in your response like this: {{"name": "tool_name", "parameters": {{"query": "your query"}}}}""" 68 | 69 | messages = [Message( 70 | role=MessageRole.SYSTEM, 71 | content=system_content 72 | )] + messages 73 | 74 | logger.info(f"Generating response with model: {self.model}") 75 | logger.debug(f"Final messages: {messages}") 76 | 77 | response = await self.provider.generate( 78 | messages=messages, 79 | tools=formatted_tools 80 | ) 81 | 82 | logger.info("Response generated successfully") 83 | logger.debug(f"Raw response: {json.dumps(response, indent=2)}") 84 | 85 | return response 86 | 87 | except Exception as e: 88 | logger.error(f"Error generating response: {str(e)}", exc_info=True) 89 | return { 90 | "content": f"Error generating response: {str(e)}", 91 | "tool_calls": [] 92 | } -------------------------------------------------------------------------------- /hawkins_agent/tools/summarize.py: -------------------------------------------------------------------------------- 1 | """Text summarization tool implementation""" 2 | 3 | from typing import Dict, Any 4 | import logging 5 | from .base import BaseTool 6 | from ..types import ToolResponse 7 | 8 | logger = logging.getLogger(__name__) 9 | 10 | class SummarizationTool(BaseTool): 11 | """Tool for summarizing long text content""" 12 | 13 | def __init__(self): 14 | """Initialize the summarization tool""" 15 | super().__init__(name="text_summarize") 16 | 17 | @property 18 | def description(self) -> str: 19 | """Get the tool description""" 20 | return "Summarize long text content into concise key points" 21 | 22 | def validate_params(self, params: Dict[str, Any]) -> bool: 23 | """Validate summarization parameters 24 | 25 | Args: 26 | params: Dictionary containing summarization parameters 27 | 28 | Returns: 29 | True if parameters are valid, False otherwise 30 | """ 31 | if 'query' not in params: 32 | logger.error("Missing required 'query' parameter") 33 | return False 34 | 35 | text = params.get('query') 36 | if not isinstance(text, str) or not text.strip(): 37 | logger.error("Text must be a non-empty string") 38 | return False 39 | 40 | return True 41 | 42 | async def execute(self, **kwargs) -> ToolResponse: 43 | """Execute the summarization 44 | 45 | Args: 46 | **kwargs: Must include 'query' parameter containing the text to summarize 47 | 48 | Returns: 49 | ToolResponse containing the summarized text or error 50 | """ 51 | try: 52 | # Extract and validate text 53 | text = kwargs.get("query", "") 54 | if not self.validate_params({"query": text}): 55 | return ToolResponse( 56 | success=False, 57 | error="Invalid or missing text parameter", 58 | result=None 59 | ) 60 | 61 | logger.info("Executing text summarization") 62 | logger.debug(f"Input text length: {len(text)}") 63 | 64 | # Handle empty or very short text 65 | if len(text.strip()) < 50: 66 | return ToolResponse( 67 | success=True, 68 | result=text.strip(), 69 | error=None 70 | ) 71 | 72 | # Split text into sentences (handling potential None case) 73 | sentences = [s.strip() for s in text.split('.') if s.strip()] 74 | total_sentences = len(sentences) 75 | 76 | # For longer texts, extract key sentences 77 | if total_sentences > 5: 78 | # Extract important sentences - first, middle, and last 79 | key_sentences = [ 80 | sentences[0], # Introduction 81 | sentences[total_sentences // 2], # Middle point 82 | sentences[-1] # Conclusion 83 | ] 84 | 85 | # Add additional important sentences based on length 86 | if total_sentences > 10: 87 | key_sentences.insert(1, 
sentences[total_sentences // 4]) 88 | key_sentences.insert(-1, sentences[3 * total_sentences // 4]) 89 | 90 | summary = '. '.join(sent for sent in key_sentences if sent) + '.' 91 | else: 92 | # For shorter texts, use all sentences 93 | summary = '. '.join(sentences) + '.' 94 | 95 | logger.info(f"Generated summary of length {len(summary)}") 96 | return ToolResponse( 97 | success=True, 98 | result=summary, 99 | error=None 100 | ) 101 | 102 | except Exception as e: 103 | error_msg = f"Summarization failed: {str(e)}" 104 | logger.error(error_msg) 105 | return ToolResponse( 106 | success=False, 107 | result=None, 108 | error=error_msg 109 | ) -------------------------------------------------------------------------------- /examples/knowledge_base_demo.py: -------------------------------------------------------------------------------- 1 | """ 2 | Demonstration of AI agent using a knowledge base to answer questions. 3 | Shows how to load documents and query them using RAG capabilities. 4 | """ 5 | from hawkins_agent import Agent, AgentConfig 6 | from hawkins_agent.tools import RAGTool 7 | import asyncio 8 | import logging 9 | 10 | # Set up logging 11 | logging.basicConfig(level=logging.INFO) 12 | logger = logging.getLogger(__name__) 13 | 14 | async def main(): 15 | # Configure the agent 16 | config = AgentConfig( 17 | name="Knowledge Assistant", 18 | role="Document analysis and query assistant", 19 | goal="Help users interact with documents and answer questions", 20 | tools=[RAGTool()], 21 | db_path="knowledge_demo.db", 22 | knowledge_base_path="knowledge_base.db" 23 | ) 24 | 25 | # Initialize agent 26 | agent = Agent(config) 27 | 28 | # Example documents 29 | documents = [ 30 | { 31 | "content": """ 32 | Artificial Intelligence (AI) is transforming how we live and work. 33 | Machine learning, a subset of AI, enables systems to learn from data. 34 | Natural Language Processing (NLP) helps computers understand human language. 35 | Reinforcement Learning allows AI systems to learn through trial and error. 36 | """, 37 | "metadata": {"topic": "AI Overview", "source": "introduction.txt"} 38 | }, 39 | { 40 | "content": """ 41 | Python is a popular programming language known for its simplicity and readability. 42 | It's widely used in AI, web development, and data science. 43 | Key features include dynamic typing, automatic memory management, and extensive libraries. 44 | Popular frameworks include TensorFlow, PyTorch, and Django. 45 | """, 46 | "metadata": {"topic": "Python Programming", "source": "python_guide.txt"} 47 | } 48 | ] 49 | 50 | print("\nTesting AI agent with knowledge base capabilities...") 51 | 52 | # Load documents 53 | for doc in documents: 54 | print(f"\nLoading document about {doc['metadata']['topic']}...") 55 | result = await agent.execute( 56 | task=f"Load document about {doc['metadata']['topic']}", 57 | context={ 58 | "knowledge_action": { 59 | "operation": "load", 60 | "content": doc["content"], 61 | "source_type": "text", 62 | "metadata": doc["metadata"] 63 | } 64 | } 65 | ) 66 | if result.get("success"): 67 | print(f"✓ Successfully loaded document about {doc['metadata']['topic']}") 68 | else: 69 | print(f"✗ Failed to load document: {result.get('error')}") 70 | 71 | # Test queries 72 | test_queries = [ 73 | "What is artificial intelligence and its subfields?", 74 | "What are the main features of Python?", 75 | "How is Python used in AI development?", 76 | "What is the relationship between Machine Learning and AI?" 
77 |     ]
78 | 
79 |     print("\nTesting knowledge base queries...")
80 | 
81 |     for query in test_queries:
82 |         print(f"\nQuery: {query}")
83 |         result = await agent.execute(
84 |             task=query,
85 |             context={
86 |                 "knowledge_action": {
87 |                     "operation": "query",
88 |                     "query": query
89 |                 }
90 |             }
91 |         )
92 | 
93 |         if result.get("success"):
94 |             print("Answer:", result.get("answer"))
95 |         else:
96 |             print("Error:", result.get("error"))
97 | 
98 |         # Show relevant context from memory
99 |         context = agent.memory.get_relevant_context(query)
100 |         if context["episodic_memories"]:
101 |             print("\nRelated previous interactions:")
102 |             for memory in context["episodic_memories"][:2]:  # Show top 2 related memories
103 |                 print(f"- Previous query: {memory['properties'].get('task')}")
104 |                 print(f"  Answer: {memory['properties'].get('final_answer')}")
105 | 
106 | if __name__ == "__main__":
107 |     asyncio.run(main())
108 | 
--------------------------------------------------------------------------------
/hawkins_agent/tools/search.py:
--------------------------------------------------------------------------------
1 | """Web search tool implementation using Tavily API"""
2 | 
3 | from typing import Dict, Any, Optional
4 | import logging
5 | import json
6 | from tavily import TavilyClient
7 | from .base import BaseTool
8 | from ..types import ToolResponse
9 | 
10 | logger = logging.getLogger(__name__)
11 | 
12 | class WebSearchTool(BaseTool):
13 |     """Tool for web searching using Tavily AI"""
14 | 
15 |     def __init__(self, api_key: str):
16 |         """Initialize the search tool
17 | 
18 |         Args:
19 |             api_key: Tavily API key for authentication
20 |         """
21 |         super().__init__(name="web_search")
22 |         self.client = TavilyClient(api_key=api_key)
23 | 
24 |     @property
25 |     def description(self) -> str:
26 |         """Get the tool description"""
27 |         return "Search the web for recent and accurate information using Tavily AI"
28 | 
29 |     def validate_params(self, params: Dict[str, Any]) -> bool:
30 |         """Validate search parameters
31 | 
32 |         Args:
33 |             params: Dictionary of parameters to validate
34 | 
35 |         Returns:
36 |             True if parameters are valid, False otherwise
37 |         """
38 |         if 'query' not in params:
39 |             logger.error("Missing required 'query' parameter")
40 |             return False
41 | 
42 |         query = params.get('query')
43 |         if not isinstance(query, str) or not query.strip():
44 |             logger.error("Query must be a non-empty string")
45 |             return False
46 | 
47 |         return True
48 | 
49 |     async def execute(self, **kwargs) -> ToolResponse:
50 |         """Execute the web search
51 | 
52 |         Args:
53 |             **kwargs: Must include 'query' parameter
54 | 
55 |         Returns:
56 |             ToolResponse containing search results or error
57 |         """
58 |         try:
59 |             # Extract and validate query
60 |             query = kwargs.get("query")
61 |             if not self.validate_params({"query": query}):
62 |                 return ToolResponse(
63 |                     success=False,
64 |                     error="Invalid or missing query parameter",
65 |                     result=None
66 |                 )
67 | 
68 |             logger.info(f"Executing Tavily search for query: {query}")
69 | 
70 |             # Execute search with Tavily
71 |             search_params = {
72 |                 "query": query,
73 |                 "search_depth": "advanced",
74 |                 "include_raw_content": False,
75 |                 "include_domains": [],
76 |                 "exclude_domains": [],
77 |                 "max_results": 3
78 |             }
79 | 
80 |             # Tavily's client is synchronous; call it directly with the configured parameters
81 |             response = self.client.search(**search_params)
82 | 
83 |             if not response or "results" not in response:
84 |                 logger.error("Invalid response from Tavily API")
85 |                 return ToolResponse(
86 |                     success=False,
87 |                     error="Invalid API response",
88 |
result=None 89 | ) 90 | 91 | # Format the results 92 | results = [] 93 | for result in response.get("results", [])[:3]: # Limit to top 3 results 94 | results.append({ 95 | "title": result.get("title", ""), 96 | "content": result.get("content", ""), 97 | "url": result.get("url", ""), 98 | "score": result.get("relevance_score", 0) 99 | }) 100 | 101 | # Create a concise summary 102 | summary = f"Found {len(results)} relevant results:\n\n" 103 | for result in results: 104 | summary += f"- {result['content'][:250]}...\n" 105 | summary += f" Source: {result['url']}\n\n" 106 | 107 | return ToolResponse( 108 | success=True, 109 | result=summary, 110 | error=None 111 | ) 112 | 113 | except Exception as e: 114 | error_msg = f"Search failed: {str(e)}" 115 | logger.error(error_msg) 116 | return ToolResponse( 117 | success=False, 118 | result=None, 119 | error=error_msg 120 | ) -------------------------------------------------------------------------------- /hawkins_agent/tools/code_interpreter.py: -------------------------------------------------------------------------------- 1 | """Code interpreter tool implementation using Open Interpreter""" 2 | 3 | import os 4 | from typing import Dict, Any 5 | import logging 6 | from interpreter import OpenInterpreter 7 | from .base import BaseTool 8 | from ..types import ToolResponse 9 | 10 | logger = logging.getLogger(__name__) 11 | 12 | class CodeInterpreterTool(BaseTool): 13 | """Tool for writing and running code based on problem descriptions""" 14 | 15 | def __init__(self, model: str = "gpt-4o", api_key: str = None, api_base: str = None): 16 | """Initialize the code interpreter tool 17 | 18 | Args: 19 | model: The model to use for code generation 20 | api_key: Optional OpenAI API key 21 | api_base: Optional OpenAI API base URL 22 | """ 23 | super().__init__(name="code_interpreter") 24 | self.interpreter = OpenInterpreter() 25 | self.interpreter.llm.model = model 26 | self.interpreter.llm.api_key = ( 27 | api_key if api_key else os.environ.get("OPENAI_API_KEY") 28 | ) 29 | if api_base: 30 | self.interpreter.llm.api_base = api_base 31 | elif "OPENAI_BASE_URL" in os.environ: 32 | self.interpreter.llm.api_base = os.environ["OPENAI_BASE_URL"] 33 | 34 | @property 35 | def description(self) -> str: 36 | """Get the tool description""" 37 | return "Order a programmer to write and run code based on the description of a problem" 38 | 39 | def validate_params(self, params: Dict[str, Any]) -> bool: 40 | """Validate the parameters 41 | 42 | Args: 43 | params: Parameters to validate 44 | 45 | Returns: 46 | True if parameters are valid 47 | """ 48 | if 'query' not in params: 49 | logger.error("Missing required 'query' parameter") 50 | return False 51 | 52 | query = params.get('query') 53 | if not isinstance(query, str) or not query.strip(): 54 | logger.error("Query must be a non-empty string") 55 | return False 56 | 57 | return True 58 | 59 | async def execute(self, **kwargs) -> ToolResponse: 60 | """Execute the code interpreter 61 | 62 | Args: 63 | **kwargs: Must include 'query' parameter 64 | 65 | Returns: 66 | ToolResponse containing the execution results 67 | """ 68 | try: 69 | # Extract and validate query 70 | query = kwargs.get("query", "") 71 | if not self.validate_params({"query": query}): 72 | return ToolResponse( 73 | success=False, 74 | error="Invalid or missing query parameter", 75 | result=None 76 | ) 77 | 78 | logger.info(f"Executing code interpreter for query: {query}") 79 | 80 | # Run the interpreter 81 | messages = self.interpreter.chat(query, 
display=False) 82 | 83 | # Process results 84 | code = [] 85 | console = [] 86 | content = "" 87 | 88 | for message in messages: 89 | if message["type"] == "code": 90 | code.append(message["content"]) 91 | elif message["type"] == "console": 92 | console.append(message["content"]) 93 | elif message["type"] == "message": 94 | content += message["content"] + "\n" 95 | 96 | result = { 97 | "messages": messages, 98 | "code": code, 99 | "console": console, 100 | "content": content.strip() 101 | } 102 | 103 | logger.info("Code interpreter execution completed successfully") 104 | return ToolResponse( 105 | success=True, 106 | result=result, 107 | error=None 108 | ) 109 | 110 | except Exception as e: 111 | error_msg = f"Code interpreter execution failed: {str(e)}" 112 | logger.error(error_msg) 113 | return ToolResponse( 114 | success=False, 115 | result=None, 116 | error=error_msg 117 | ) 118 | -------------------------------------------------------------------------------- /hawkins_agent/storage/hawkindb.py: -------------------------------------------------------------------------------- 1 | """HawkinDB storage implementation""" 2 | 3 | import logging 4 | from typing import Dict, List, Any, Optional 5 | from datetime import datetime, timedelta 6 | from hawkinsdb import HawkinsDB 7 | from .base import BaseStorage, StorageConfig 8 | 9 | logger = logging.getLogger(__name__) 10 | 11 | class HawkinDBStorage(BaseStorage): 12 | """Storage implementation using HawkinDB 13 | 14 | This class provides a concrete implementation of the BaseStorage 15 | interface using HawkinDB as the underlying storage engine. 16 | """ 17 | 18 | def __init__(self, config: Optional[StorageConfig] = None, **kwargs): 19 | """Initialize HawkinDB storage 20 | 21 | Args: 22 | config: Storage configuration 23 | **kwargs: Additional HawkinDB configuration 24 | """ 25 | super().__init__(config) 26 | self.db = HawkinsDB(storage_type='sqlite') 27 | 28 | async def insert(self, data: Dict[str, Any]) -> str: 29 | """Insert data into HawkinDB 30 | 31 | Args: 32 | data: Data to store 33 | 34 | Returns: 35 | ID of the stored data 36 | """ 37 | try: 38 | result = self.db.add_entity(data) 39 | return str(result.get('id', datetime.now().timestamp())) 40 | except Exception as e: 41 | logger.error(f"Error inserting data: {str(e)}") 42 | raise 43 | 44 | async def search(self, 45 | query: str, 46 | collection: str, 47 | limit: int = 10) -> List[Dict[str, Any]]: 48 | """Search for data in HawkinDB 49 | 50 | Args: 51 | query: Search query 52 | collection: Collection to search in 53 | limit: Maximum number of results 54 | 55 | Returns: 56 | List of matching entries 57 | """ 58 | try: 59 | frames = self.db.query_frames(query) 60 | return frames[:limit] if frames else [] 61 | except Exception as e: 62 | logger.error(f"Error searching data: {str(e)}") 63 | return [] 64 | 65 | async def clear(self) -> None: 66 | """Clear all data from HawkinDB""" 67 | try: 68 | # Reset the database 69 | self.db = HawkinsDB(storage_type='sqlite') 70 | logger.info("Cleared all data from HawkinDB") 71 | except Exception as e: 72 | logger.error(f"Error clearing data: {str(e)}") 73 | raise 74 | 75 | def now(self) -> str: 76 | """Get current timestamp 77 | 78 | Returns: 79 | ISO formatted timestamp string 80 | """ 81 | return datetime.now().isoformat() 82 | 83 | async def _prune_by_age(self, collection: str) -> None: 84 | """Remove old entries based on retention policy""" 85 | if not self.config.retention_days: 86 | return 87 | 88 | try: 89 | cutoff = datetime.now() - 
timedelta(days=self.config.retention_days) 90 | entities = self.db.list_entities() 91 | 92 | for entity in entities: 93 | frames = self.db.query_frames(entity) 94 | if frames and 'timestamp' in frames[0]: 95 | if datetime.fromisoformat(frames[0]['timestamp']) < cutoff: 96 | self.db.remove_entity(entity) 97 | 98 | logger.info(f"Pruned entries older than {cutoff}") 99 | except Exception as e: 100 | logger.error(f"Error pruning old data: {str(e)}") 101 | 102 | async def _prune_by_importance(self, collection: str) -> None: 103 | """Remove entries below importance threshold""" 104 | if not self.config.importance_threshold: 105 | return 106 | 107 | try: 108 | threshold = self.config.importance_threshold 109 | entities = self.db.list_entities() 110 | 111 | for entity in entities: 112 | frames = self.db.query_frames(entity) 113 | if frames and 'metadata' in frames[0]: 114 | importance = frames[0].get('metadata', {}).get('importance', 0) 115 | if importance < threshold: 116 | self.db.remove_entity(entity) 117 | 118 | logger.info(f"Pruned entries below importance {threshold}") 119 | except Exception as e: 120 | logger.error(f"Error pruning low importance data: {str(e)}") -------------------------------------------------------------------------------- /hawkins_agent/llm/lite_llm.py: -------------------------------------------------------------------------------- 1 | """LiteLLM provider implementation""" 2 | 3 | from typing import List, Optional, Dict, Any 4 | import json 5 | import logging 6 | from litellm import acompletion 7 | from .base import BaseLLMProvider 8 | from ..types import Message, MessageRole, ToolResponse 9 | 10 | logger = logging.getLogger(__name__) 11 | 12 | class LiteLLMProvider(BaseLLMProvider): 13 | """LiteLLM integration for language model access""" 14 | 15 | def __init__(self, model: str, **kwargs): 16 | """Initialize LiteLLM provider""" 17 | super().__init__(model, **kwargs) 18 | self.default_model = "openai/gpt-4o" 19 | self.config = kwargs 20 | self.supports_functions = not model.startswith("anthropic/") 21 | 22 | async def generate(self, messages: List[Message], tools: Optional[List[Dict[str, Any]]] = None) -> Dict[str, Any]: 23 | """Generate a response using litellm""" 24 | try: 25 | formatted_messages = self._format_messages_for_litellm(messages) 26 | logger.info(f"Sending request to LiteLLM with model: {self.model or self.default_model}") 27 | logger.debug(f"Using tools: {tools}") 28 | 29 | request_params = { 30 | "model": self.model or self.default_model, 31 | "messages": formatted_messages, 32 | "temperature": self.config.get('temperature', 0.7) 33 | } 34 | 35 | # Only add function calling for supported models 36 | if tools and self.supports_functions: 37 | request_params["functions"] = tools 38 | request_params["function_call"] = "auto" 39 | 40 | logger.debug(f"Request parameters: {json.dumps(request_params, indent=2)}") 41 | 42 | # Use acompletion for async support 43 | response = await acompletion(**request_params) 44 | 45 | if not response or not hasattr(response, 'choices') or not response.choices: 46 | logger.error("Invalid response format from LiteLLM") 47 | return {"content": "Error: Invalid response format", "tool_calls": []} 48 | 49 | first_choice = response.choices[0] 50 | if not hasattr(first_choice, 'message'): 51 | logger.error("Response choice missing message attribute") 52 | return {"content": "Error: Invalid response format", "tool_calls": []} 53 | 54 | message = first_choice.message 55 | result = { 56 | "content": message.content if hasattr(message, 
'content') else "", 57 | "tool_calls": [] 58 | } 59 | 60 | # Handle function calls for supported models 61 | if self.supports_functions: 62 | if hasattr(message, 'function_call') and message.function_call: 63 | try: 64 | result["tool_calls"] = [{ 65 | "name": message.function_call.name, 66 | "parameters": json.loads(message.function_call.arguments) 67 | }] 68 | except (AttributeError, json.JSONDecodeError) as e: 69 | logger.error(f"Error parsing function call: {e}") 70 | 71 | elif hasattr(message, 'tool_calls') and message.tool_calls: 72 | try: 73 | result["tool_calls"] = [ 74 | { 75 | "name": tool_call.function.name, 76 | "parameters": json.loads(tool_call.function.arguments) 77 | } 78 | for tool_call in message.tool_calls 79 | if hasattr(tool_call, 'function') 80 | ] 81 | except (AttributeError, json.JSONDecodeError) as e: 82 | logger.error(f"Error parsing tool calls: {e}") 83 | 84 | logger.info("Successfully generated response from LiteLLM") 85 | logger.debug(f"Response: {json.dumps(result, indent=2)}") 86 | return result 87 | 88 | except Exception as e: 89 | logger.error(f"Error generating response: {str(e)}") 90 | return { 91 | "content": f"Error generating response: {str(e)}", 92 | "tool_calls": [] 93 | } 94 | 95 | async def validate_response(self, response: str) -> bool: 96 | """Validate response format""" 97 | if not response or not isinstance(response, str): 98 | return False 99 | return True 100 | 101 | def _format_messages_for_litellm(self, messages: List[Message]) -> List[Dict[str, str]]: 102 | """Format messages for litellm""" 103 | try: 104 | formatted = [] 105 | for msg in messages: 106 | formatted.append({ 107 | "role": msg.role.value, 108 | "content": msg.content 109 | }) 110 | logger.debug(f"Formatted {len(formatted)} messages for LiteLLM") 111 | return formatted 112 | except Exception as e: 113 | logger.error(f"Error formatting messages: {e}") 114 | return [{"role": "user", "content": "Error formatting messages"}] -------------------------------------------------------------------------------- /hawkins_agent/memory.py: -------------------------------------------------------------------------------- 1 | """Memory management using HawkinDB""" 2 | 3 | from typing import Dict, List, Any, Optional 4 | from datetime import datetime, timedelta 5 | import logging 6 | from .storage import HawkinDBStorage, StorageConfig 7 | 8 | logger = logging.getLogger(__name__) 9 | 10 | class MemoryManager: 11 | """Manages agent memory using HawkinDB 12 | 13 | This class provides sophisticated memory management including: 14 | - Short-term and long-term memory storage 15 | - Contextual memory retrieval 16 | - Memory pruning and organization 17 | """ 18 | 19 | def __init__(self, config: Optional[Dict[str, Any]] = None): 20 | """Initialize memory manager 21 | 22 | Args: 23 | config: Optional configuration for memory management 24 | """ 25 | config = config or {} 26 | storage_config = StorageConfig( 27 | retention_days=config.get('retention_days'), 28 | max_entries=config.get('max_entries'), 29 | importance_threshold=config.get('importance_threshold', 0.0) 30 | ) 31 | self.storage = HawkinDBStorage(config=storage_config) 32 | 33 | async def add_interaction(self, user_message: str, agent_response: str): 34 | """Add an interaction to memory 35 | 36 | Args: 37 | user_message: The user's message 38 | agent_response: The agent's response 39 | """ 40 | try: 41 | # Convert interaction to HawkinsDB compatible format 42 | memory_data = { 43 | "column": "memory_type", 44 | "name": 
f"interaction_{datetime.now().timestamp()}", 45 | "properties": { 46 | "user_message": user_message, 47 | "agent_response": agent_response, 48 | "timestamp": self.storage.now() 49 | }, 50 | "metadata": { 51 | "importance": self._calculate_importance(user_message) 52 | } 53 | } 54 | 55 | await self.storage.insert(memory_data) 56 | logger.info(f"Added interaction to memory: {user_message[:50]}...") 57 | 58 | except Exception as e: 59 | logger.error(f"Error adding interaction to memory: {str(e)}") 60 | 61 | async def get_relevant_memories( 62 | self, 63 | query: str, 64 | limit: int = 5, 65 | time_window: Optional[timedelta] = None 66 | ) -> List[Dict[str, Any]]: 67 | """Retrieve relevant memories based on the query 68 | 69 | Args: 70 | query: The query to search for relevant memories 71 | limit: Maximum number of memories to retrieve 72 | time_window: Optional time window to restrict search 73 | 74 | Returns: 75 | List of relevant memory entries 76 | """ 77 | try: 78 | memories = await self.storage.search( 79 | query=query, 80 | collection="memories", 81 | limit=limit 82 | ) 83 | 84 | # Filter by time window if specified 85 | if time_window and memories: 86 | current_time = datetime.fromisoformat(self.storage.now()) 87 | memories = [ 88 | m for m in memories 89 | if (current_time - datetime.fromisoformat(m.get('properties', {}).get('timestamp', ''))) <= time_window 90 | ] 91 | 92 | return memories 93 | 94 | except Exception as e: 95 | logger.error(f"Error retrieving memories: {str(e)}") 96 | return [] 97 | 98 | def _calculate_importance(self, message: str) -> float: 99 | """Calculate the importance score of a message 100 | 101 | This implementation uses a simple length-based scoring system, 102 | but could be enhanced with more sophisticated importance calculation. 103 | 104 | Args: 105 | message: The message to evaluate 106 | 107 | Returns: 108 | Importance score between 0 and 1 109 | """ 110 | # Simple length-based importance 111 | return min(len(message) / 1000, 1.0) 112 | 113 | async def add_knowledge(self, knowledge: Dict[str, Any]): 114 | """Add permanent knowledge to memory 115 | 116 | Args: 117 | knowledge: Knowledge to store 118 | """ 119 | try: 120 | # Convert knowledge to HawkinsDB compatible format 121 | knowledge_data = { 122 | "column": "memory_type", 123 | "name": f"knowledge_{datetime.now().timestamp()}", 124 | "properties": { 125 | "content": knowledge, 126 | "timestamp": self.storage.now() 127 | } 128 | } 129 | 130 | await self.storage.insert(knowledge_data) 131 | 132 | except Exception as e: 133 | logger.error(f"Error adding knowledge to memory: {str(e)}") 134 | 135 | async def clear(self): 136 | """Clear all memories 137 | 138 | This should be used with caution as it removes all stored memories. 139 | """ 140 | try: 141 | await self.storage.clear() 142 | logger.info("Cleared all memories") 143 | 144 | except Exception as e: 145 | logger.error(f"Error clearing memories: {str(e)}") -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | # Hawkins Agent Framework 2 | 3 | A Python SDK for building AI agents with minimal code using This framework integrates key tools and services for building functional AI agents. 
--------------------------------------------------------------------------------
/README.md:
--------------------------------------------------------------------------------

1 | # Hawkins Agent Framework
2 | 
3 | A Python SDK for building AI agents with minimal code using the Hawkins ecosystem. This framework integrates key tools and services for building functional AI agents.
4 | 
5 | ![Version](https://img.shields.io/pypi/v/hawkins-agent)
6 | ![Python](https://img.shields.io/pypi/pyversions/hawkins-agent)
7 | ![License](https://img.shields.io/pypi/l/hawkins-agent)
8 | 
9 | ## Features
10 | 
11 | - **Seamless LLM Integration**: Built-in support for LiteLLM, enabling easy integration with various language models
12 | - **Web Search Capabilities**: Integrated Tavily search functionality for real-time information retrieval
13 | - **Memory Management**: HawkinDB integration for efficient agent memory storage and retrieval
14 | - **Multi-Agent Orchestration**: Advanced flow control system for coordinating multiple agents
15 | - **Tool Integration**: Extensible tool system with pre-built tools for common tasks
16 | - **Email Functionality**: Built-in email capabilities for agent communication
17 | - **Asynchronous Design**: Built with modern async/await patterns for optimal performance
18 | 
19 | ## Installation
20 | 
21 | ```bash
22 | pip install hawkins-agent
23 | ```
24 | 
25 | Requires Python 3.11 or higher.
26 | 
27 | ## Quick Start
28 | 
29 | Here's a simple example to get you started:
30 | 
31 | ```python
32 | import os
33 | 
34 | from hawkins_agent import AgentBuilder
35 | from hawkins_agent.tools import WebSearchTool
36 | from hawkins_agent.mock import KnowledgeBase
37 | 
38 | search_tool = WebSearchTool(api_key=os.environ.get("TAVILY_API_KEY"))
39 | 
40 | async def main():
41 |     # Create a knowledge base
42 |     kb = KnowledgeBase()
43 | 
44 |     # Create agent with web search capabilities
45 |     agent = (AgentBuilder("researcher")
46 |              .with_model("gpt-4o")
47 |              .with_knowledge_base(kb)
48 |              .with_tool(search_tool)
49 |              .build())
50 | 
51 |     # Process a query
52 |     response = await agent.process("What are the latest developments in AI?")
53 |     print(response.message)
54 | 
55 | if __name__ == "__main__":
56 |     import asyncio
57 |     asyncio.run(main())
58 | ```
59 | 
60 | ## Advanced Usage
61 | 
62 | ### Multi-Agent Workflow
63 | 
64 | Create complex workflows with multiple specialized agents:
65 | 
66 | ```python
67 | from hawkins_agent import AgentBuilder, FlowManager, FlowStep
68 | from hawkins_agent.tools import WebSearchTool, WeatherTool
69 | 
70 | # Create specialized agents
71 | research_agent = (AgentBuilder("researcher")
72 |                   .with_model("gpt-4o")
73 |                   .with_tool(WebSearchTool())
74 |                   .build())
75 | 
76 | writer_agent = (AgentBuilder("writer")
77 |                 .with_model("gpt-4o")
78 |                 .build())
79 | 
80 | # Create flow manager
81 | flow = FlowManager()
82 | 
83 | # Define workflow steps
84 | async def research_step(input_data, context):
85 |     query = input_data.get("topic")
86 |     result = await research_agent.process(f"Research this topic: {query}")
87 |     return {"research": result.message}
88 | 
89 | async def writing_step(input_data, context):
90 |     research = context.get("research", {}).get("research")
91 |     result = await writer_agent.process(f"Write an article based on: {research}")
92 |     return {"article": result.message}
93 | 
94 | # Add steps to flow
95 | flow.add_step(FlowStep(
96 |     name="research",
97 |     agent=research_agent,
98 |     process=research_step
99 | ))
100 | 
101 | flow.add_step(FlowStep(
102 |     name="writing",
103 |     agent=writer_agent,
104 |     process=writing_step,
105 |     requires=["research"]
106 | ))
107 | 
108 | # Execute flow
109 | results = await flow.execute({"topic": "AI trends in 2024"})
110 | ```
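As written, the workflow snippet ends with a module-level `await`, so it needs to run inside an async entry point. A minimal wrapper, mirroring the Quick Start pattern (the printed keys follow from the step return values above):

```python
import asyncio

async def main():
    results = await flow.execute({"topic": "AI trends in 2024"})
    print(results["writing"]["article"])

if __name__ == "__main__":
    asyncio.run(main())
```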
111 | 
112 | ### Using Custom Tools
113 | 
114 | Create your own tools by extending the BaseTool class:
115 | 
116 | ```python
117 | from hawkins_agent.tools.base import BaseTool
118 | from hawkins_agent.types import ToolResponse
119 | 
120 | class CustomTool(BaseTool):
121 |     name = "custom_tool"
122 |     description = "A custom tool for specific tasks"
123 | 
124 |     async def execute(self, query: str) -> ToolResponse:
125 |         try:
126 |             # Tool implementation here
127 |             result = await self._process(query)
128 |             return ToolResponse(success=True, result=result)
129 |         except Exception as e:
130 |             return ToolResponse(success=False, error=str(e))
131 | ```
132 | 
133 | ## Documentation
134 | 
135 | For more detailed documentation, see:
136 | - [Flow System Documentation](docs/flows.md)
137 | - [Custom Tools Guide](docs/custom_tools.md)
138 | - [Memory Management](docs/memory_management.md)
139 | - [API Reference](docs/api_reference.md)
140 | 
141 | ## Examples
142 | 
143 | The `examples/` directory contains several example implementations:
144 | - `simple_agent.py`: Basic agent usage
145 | - `multi_agent_flow.py`: Complex multi-agent workflow
146 | - `tool_test.py`: Tool integration examples
147 | - `blog_writer_flow.py`: Content generation workflow
148 | - `maldives_trip_planner.py`: Travel planning agent system
149 | 
150 | ## Development
151 | 
152 | To contribute to the project:
153 | 
154 | 1. Clone the repository
155 | 2. Install development dependencies:
156 | ```bash
157 | pip install -e .[dev]
158 | ```
159 | 3. Run tests:
160 | ```bash
161 | pytest
162 | ```
163 | 
164 | ## License
165 | 
166 | MIT License - see the [LICENSE](LICENSE) file for details.
167 | 
168 | ## Credits
169 | 
170 | Built with ❤️ by Harish and AI agents
171 | 

--------------------------------------------------------------------------------
/docs/api_reference.md:
--------------------------------------------------------------------------------

1 | # Hawkins Agent Framework API Reference
2 | 
3 | ## Core Components
4 | 
5 | ### AgentBuilder
6 | 
7 | The main class for creating AI agents.
8 | 
9 | ```python
10 | class AgentBuilder:
11 |     def __init__(self, name: str)
12 |     def with_model(self, model: str) -> AgentBuilder
13 |     def with_provider(self, provider_class: type, **kwargs) -> AgentBuilder
14 |     def with_tool(self, tool: BaseTool) -> AgentBuilder
15 |     def with_knowledge_base(self, kb: Any) -> AgentBuilder
16 |     def build(self) -> Agent
17 | ```
18 | 
19 | #### Methods
20 | 
21 | - `with_model(model: str)`: Set the LLM model
22 | - `with_provider(provider_class: type, **kwargs)`: Set the LLM provider
23 | - `with_tool(tool: BaseTool)`: Add a tool to the agent
24 | - `with_knowledge_base(kb: Any)`: Set the knowledge base
25 | - `build()`: Create the agent instance
26 | 
27 | ### Agent
28 | 
29 | The main agent class that processes queries and manages tools.
30 | 
31 | ```python
32 | class Agent:
33 |     async def process(self, query: str) -> AgentResponse
34 |     async def execute_tool(self, tool_name: str, **params) -> ToolResponse
35 | ```
36 | 
37 | ### Types
38 | 
39 | #### Message
40 | 
41 | ```python
42 | @dataclass
43 | class Message:
44 |     role: MessageRole
45 |     content: str
46 |     metadata: Optional[Dict[str, Any]] = None
47 | ```
48 | 
49 | #### AgentResponse
50 | 
51 | ```python
52 | @dataclass
53 | class AgentResponse:
54 |     message: str
55 |     tool_calls: List[Dict[str, Any]]
56 |     metadata: Dict[str, Any]
57 | ```
58 | 
59 | #### ToolResponse
60 | 
61 | ```python
62 | @dataclass
63 | class ToolResponse:
64 |     success: bool
65 |     result: Any
66 |     error: Optional[str] = None
67 | ```
68 | 
69 | ## Tools
70 | 
71 | ### BaseTool
72 | 
73 | Base class for all tools.
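All custom tools subclass it. A minimal sketch (the `echo` name and behaviour are illustrative, following the constructor and `ToolResponse` conventions above):

```python
from hawkins_agent.tools import BaseTool
from hawkins_agent.types import ToolResponse

class EchoTool(BaseTool):
    def __init__(self):
        super().__init__(name="echo")

    @property
    def description(self) -> str:
        return "Echo the supplied query back to the caller"

    async def execute(self, **kwargs) -> ToolResponse:
        # Return the query unchanged
        return ToolResponse(success=True, result=kwargs.get("query", ""), error=None)
```

The full interface: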
74 | 75 | ```python 76 | class BaseTool: 77 | def __init__(self, name: str) 78 | 79 | @property 80 | def description(self) -> str 81 | 82 | def validate_params(self, params: Dict[str, Any]) -> bool 83 | 84 | async def execute(self, **kwargs) -> ToolResponse 85 | ``` 86 | 87 | ### Built-in Tools 88 | 89 | #### WebSearchTool 90 | 91 | ```python 92 | class WebSearchTool(BaseTool): 93 | def __init__(self, api_key: str) 94 | ``` 95 | 96 | #### EmailTool 97 | 98 | ```python 99 | class EmailTool(BaseTool): 100 | def __init__(self, smtp_config: Dict[str, Any]) 101 | ``` 102 | 103 | #### WeatherTool 104 | 105 | ```python 106 | class WeatherTool(BaseTool): 107 | def __init__(self, api_key: Optional[str] = None) 108 | ``` 109 | 110 | #### RAGTool 111 | 112 | ```python 113 | class RAGTool(BaseTool): 114 | def __init__(self, knowledge_base: Any) 115 | ``` 116 | 117 | ## LLM Integration 118 | 119 | ### LLMManager 120 | 121 | Manages LLM interactions and providers. 122 | 123 | ```python 124 | class LLMManager: 125 | def __init__(self, model: str = "gpt-4o", 126 | provider_class: Optional[type] = None, 127 | **kwargs) 128 | 129 | async def generate_response(self, 130 | messages: List[Message], 131 | tools: Optional[List[Dict[str, Any]]] = None) -> Dict[str, Any] 132 | ``` 133 | 134 | ### LiteLLMProvider 135 | 136 | Default LLM provider implementation. 137 | 138 | ```python 139 | class LiteLLMProvider: 140 | def __init__(self, model: str, **kwargs) 141 | 142 | async def generate(self, 143 | messages: List[Message], 144 | tools: Optional[List[Dict[str, Any]]] = None) -> Dict[str, Any] 145 | ``` 146 | 147 | ## Multi-Agent Flows 148 | 149 | ### FlowManager 150 | 151 | Manages multi-agent workflows. 152 | 153 | ```python 154 | class FlowManager: 155 | def add_step(self, step: FlowStep) 156 | async def execute(self, input_data: Dict[str, Any]) -> Dict[str, Any] 157 | ``` 158 | 159 | ### FlowStep 160 | 161 | Represents a step in a multi-agent workflow. 
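A step pairs an optional agent with an async `process` callable; `requires` names the steps whose results must be available first. A small sketch (the step names and process function are illustrative):

```python
from hawkins_agent import FlowStep

async def summarize_step(input_data, context):
    # Read the "research" step's output from the shared context
    research = context.get("research", {})
    return {"summary": f"Summary of: {research}"}

step = FlowStep(
    name="summarize",
    agent=None,               # plain-function steps take no agent
    process=summarize_step,
    requires=["research"],    # run only after the research step
)
```

The constructor signature: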
162 | 163 | ```python 164 | class FlowStep: 165 | def __init__(self, 166 | name: str, 167 | agent: Optional[Agent], 168 | process: Callable, 169 | requires: Optional[List[str]] = None) 170 | ``` 171 | 172 | ## Environment Variables 173 | 174 | Required environment variables for various features: 175 | 176 | ```python 177 | # Core 178 | OPENAI_API_KEY: str # OpenAI API key for LLM integration 179 | 180 | # Tool-specific 181 | TAVILY_API_KEY: str # For WebSearchTool 182 | OPENWEATHERMAP_API_KEY: str # For WeatherTool 183 | 184 | # Optional 185 | OPENAI_BASE_URL: str # Custom OpenAI API endpoint 186 | ``` 187 | 188 | ## Error Handling 189 | 190 | Common exceptions and their meanings: 191 | 192 | ```python 193 | class ToolExecutionError(Exception): 194 | """Raised when a tool execution fails""" 195 | 196 | class InvalidParameterError(Exception): 197 | """Raised when invalid parameters are provided""" 198 | 199 | class LLMError(Exception): 200 | """Raised when LLM interaction fails""" 201 | ``` 202 | 203 | ## Logging 204 | 205 | The framework uses Python's standard logging module: 206 | 207 | ```python 208 | import logging 209 | 210 | # Configure logging 211 | logging.basicConfig( 212 | level=logging.INFO, 213 | format='%(asctime)s - %(name)s - %(levelname)s - %(message)s' 214 | ) 215 | ``` 216 | 217 | ## Configuration 218 | 219 | Example configuration structure: 220 | 221 | ```python 222 | config = { 223 | "model": "openai/gpt-4o", 224 | "temperature": 0.7, 225 | "max_tokens": 1000, 226 | "tools": { 227 | "web_search": { 228 | "api_key": "your-api-key" 229 | }, 230 | "weather": { 231 | "api_key": "your-api-key" 232 | } 233 | } 234 | } 235 | ``` 236 | -------------------------------------------------------------------------------- /hawkins_agent/mock/__init__.py: -------------------------------------------------------------------------------- 1 | """Mock implementations of external dependencies for development""" 2 | 3 | from typing import List, Dict, Any 4 | 5 | class LiteLLM: 6 | def __init__(self, model: str): 7 | self.model = model 8 | self.supports_functions = not model.startswith("anthropic/") 9 | 10 | async def generate(self, messages: List[Dict[str, str]]) -> Dict[str, Any]: 11 | """Generate a mock response that demonstrates tool usage""" 12 | prompt = messages[-1]["content"].lower() 13 | 14 | # GPT-4 responses are more detailed and use more tools 15 | if self.model.startswith("openai/"): 16 | if "trends" in prompt or "developments" in prompt: 17 | return { 18 | "content": "Let me search for the latest information.\n", 19 | "tool_calls": [{ 20 | "name": "web_search", 21 | "parameters": { 22 | "query": "latest AI trends and developments 2024" 23 | } 24 | }] 25 | } 26 | 27 | # Knowledge base query example 28 | if "context" in prompt or "previous" in prompt: 29 | return { 30 | "content": "Let me check our knowledge base.\n", 31 | "tool_calls": [{ 32 | "name": "RAGTool", 33 | "parameters": { 34 | "query": "AI trends and developments" 35 | } 36 | }] 37 | } 38 | 39 | # Anthropic models use text-based tool calls 40 | elif self.model.startswith("anthropic/"): 41 | if "trends" in prompt or "developments" in prompt: 42 | return { 43 | "content": """Let me search for the latest information. 44 | 45 | 46 | {"name": "web_search", "parameters": {"query": "latest AI trends and developments 2024"}} 47 | 48 | 49 | Based on the search results: 50 | 1. Large Language Models are becoming more accessible 51 | 2. Focus on AI governance and ethics 52 | 3. 
Increased enterprise adoption""" 53 | } 54 | 55 | if "context" in prompt or "previous" in prompt: 56 | return { 57 | "content": """I'll check our knowledge base for relevant information. 58 | 59 | 60 | {"name": "RAGTool", "parameters": {"query": "AI trends and developments"}} 61 | 62 | 63 | The knowledge base shows several key developments in AI technology.""" 64 | } 65 | 66 | # Default response for other cases 67 | return { 68 | "content": "I understand your request and will help you with that. What specific information would you like to know?" 69 | } 70 | 71 | class Document: 72 | """Mock document class for development""" 73 | def __init__(self, content: str): 74 | """Initialize document with content 75 | 76 | Args: 77 | content: The document content 78 | """ 79 | self.content = content 80 | 81 | class KnowledgeBase: 82 | """Mock knowledge base for development""" 83 | def __init__(self): 84 | """Initialize the knowledge base""" 85 | self.documents = [] 86 | 87 | async def add_document(self, document: Document): 88 | """Add a document to the knowledge base 89 | 90 | Args: 91 | document: Document object to add 92 | """ 93 | self.documents.append(document) 94 | 95 | async def query(self, query: str) -> list[str]: 96 | """Query the knowledge base 97 | 98 | Args: 99 | query: Query string 100 | 101 | Returns: 102 | List of relevant document contents 103 | """ 104 | # Simple mock implementation - return content containing query terms 105 | results = [] 106 | query_terms = query.lower().split() 107 | 108 | for doc in self.documents: 109 | content = doc.content.lower() 110 | if any(term in content for term in query_terms): 111 | results.append(doc.content) 112 | 113 | # If no exact matches, return some default insights 114 | if not results: 115 | if "enterprise" in query.lower(): 116 | return [ 117 | "Enterprise AI adoption increased significantly", 118 | "Major focus on AI governance frameworks", 119 | "Efficiency improvements with AI automation" 120 | ] 121 | elif "ai" in query.lower(): 122 | return [ 123 | "AI models becoming more sophisticated", 124 | "Focus on responsible AI development", 125 | "Increased adoption in various sectors" 126 | ] 127 | 128 | return results[:3] # Limit results 129 | 130 | class HawkinDB: 131 | def __init__(self, **kwargs): 132 | self.kwargs = kwargs 133 | self.storage = {} 134 | 135 | async def insert(self, data: dict): 136 | self.storage[data.get('name', str(len(self.storage)))] = data 137 | 138 | async def search(self, collection: str, query: str, limit: int): 139 | if "ai" in query.lower(): 140 | return [{ 141 | "type": "memory", 142 | "content": "Previous discussion about AI trends in enterprise", 143 | "timestamp": self.now(), 144 | "metadata": { 145 | "importance": 0.8, 146 | "source": "research_agent" 147 | } 148 | }] 149 | return [] 150 | 151 | async def clear(self): 152 | self.storage.clear() 153 | 154 | def now(self): 155 | from datetime import datetime 156 | return datetime.now().isoformat() -------------------------------------------------------------------------------- /hawkins_agent/tools/weather.py: -------------------------------------------------------------------------------- 1 | """Weather data tool implementation using OpenWeatherMap API""" 2 | 3 | import requests 4 | from typing import Dict, Any, Optional 5 | from datetime import datetime, timedelta 6 | import logging 7 | import os 8 | from .base import BaseTool 9 | from ..types import ToolResponse 10 | 11 | logger = logging.getLogger(__name__) 12 | 13 | class WeatherTool(BaseTool): 14 | 
"""Tool for fetching weather data using OpenWeatherMap API""" 15 | 16 | def __init__(self, api_key: Optional[str] = None): 17 | """Initialize the weather tool 18 | 19 | Args: 20 | api_key: OpenWeatherMap API key. If not provided, will try to get from environment. 21 | """ 22 | super().__init__(name="weather") 23 | self.api_key = api_key or os.environ.get("OPENWEATHERMAP_API_KEY") 24 | if not self.api_key: 25 | logger.warning("No OpenWeatherMap API key provided") 26 | self.BASE_URL = "https://api.openweathermap.org/data/2.5/weather" 27 | 28 | @property 29 | def description(self) -> str: 30 | """Get the tool description""" 31 | return "Get current weather data for specified city" 32 | 33 | def validate_params(self, params: Dict[str, Any]) -> bool: 34 | """Validate weather query parameters 35 | 36 | Args: 37 | params: Dictionary containing query parameters 38 | 39 | Returns: 40 | True if parameters are valid, False otherwise 41 | """ 42 | if not isinstance(params.get('query'), str): 43 | logger.error("Query must be a string") 44 | return False 45 | 46 | # Query should be in format: "city_name,country_code" 47 | parts = params['query'].split(',') 48 | if len(parts) != 2: 49 | logger.error("Query must be in format: city_name,country_code") 50 | return False 51 | 52 | if not self.api_key: 53 | logger.error("OpenWeatherMap API key not provided") 54 | return False 55 | 56 | return True 57 | 58 | async def execute(self, **kwargs) -> ToolResponse: 59 | """Execute the weather query 60 | 61 | Args: 62 | **kwargs: Must include query parameter with format: 63 | "city_name,country_code" 64 | Example: "London,GB" or "Paris,FR" 65 | 66 | Returns: 67 | ToolResponse containing weather data or error 68 | """ 69 | try: 70 | # Extract and validate parameters 71 | query = kwargs.get("query", "") 72 | logger.info(f"Processing weather query: {query}") 73 | 74 | if not self.validate_params({"query": query}): 75 | return ToolResponse( 76 | success=False, 77 | error="Invalid parameters or missing API key. Required format: city_name,country_code (e.g. 
London,GB)", 78 | result=None 79 | ) 80 | 81 | # Parse query parameters 82 | city_name, country_code = [part.strip() for part in query.split(',')] 83 | 84 | logger.info(f"Fetching weather data for {city_name}, {country_code}") 85 | logger.debug(f"Using API key: {'*' * 4}{self.api_key[-4:]}") 86 | 87 | # Make API request 88 | try: 89 | response = requests.get( 90 | self.BASE_URL, 91 | params={ 92 | "q": f"{city_name},{country_code}", 93 | "units": "metric", # Use metric units 94 | "appid": self.api_key 95 | }, 96 | timeout=10 # Add timeout 97 | ) 98 | 99 | response.raise_for_status() # Raise exception for bad status codes 100 | 101 | except requests.exceptions.RequestException as e: 102 | error_msg = f"Weather API request failed: {str(e)}" 103 | logger.error(error_msg) 104 | return ToolResponse( 105 | success=False, 106 | error=error_msg, 107 | result=None 108 | ) 109 | 110 | # Parse response 111 | data = response.json() 112 | logger.debug(f"Received weather data: {data}") 113 | 114 | # Extract relevant information 115 | try: 116 | weather_info = { 117 | "temperature": round(data["main"]["temp"], 1), # Celsius 118 | "humidity": data["main"]["humidity"], # Percentage 119 | "description": data["weather"][0]["description"], 120 | "wind_speed": round(data["wind"]["speed"], 1), # meters/sec 121 | "feels_like": round(data["main"]["feels_like"], 1), # Celsius 122 | "pressure": data["main"]["pressure"], # hPa 123 | } 124 | 125 | logger.info(f"Successfully retrieved weather data for {city_name}") 126 | logger.debug(f"Processed weather info: {weather_info}") 127 | 128 | return ToolResponse( 129 | success=True, 130 | result=weather_info, 131 | error=None 132 | ) 133 | 134 | except KeyError as e: 135 | error_msg = f"Invalid response format from weather API: {str(e)}" 136 | logger.error(error_msg) 137 | return ToolResponse( 138 | success=False, 139 | result=None, 140 | error=error_msg 141 | ) 142 | 143 | except Exception as e: 144 | error_msg = f"Weather query failed: {str(e)}" 145 | logger.error(error_msg) 146 | return ToolResponse( 147 | success=False, 148 | result=None, 149 | error=error_msg 150 | ) -------------------------------------------------------------------------------- /examples/multi_agent_flow.py: -------------------------------------------------------------------------------- 1 | """Example of creating multiple agents with flow control""" 2 | 3 | from hawkins_agent import AgentBuilder 4 | from hawkins_agent.tools import RAGTool, WebSearchTool 5 | from hawkins_agent.mock import KnowledgeBase, Document 6 | from hawkins_agent.flow import FlowManager, FlowStep 7 | from hawkins_agent.llm import LiteLLMProvider 8 | import logging 9 | import os 10 | import asyncio 11 | 12 | # Setup logging 13 | logging.basicConfig(level=logging.INFO) 14 | logger = logging.getLogger(__name__) 15 | 16 | async def main(): 17 | """Demonstrate multi-agent workflow with flow control""" 18 | try: 19 | # Set up logging 20 | logging.basicConfig( 21 | level=logging.INFO, 22 | format='%(asctime)s - %(name)s - %(levelname)s - %(message)s' 23 | ) 24 | 25 | logger.info("Initializing knowledge bases...") 26 | research_kb = KnowledgeBase() 27 | support_kb = KnowledgeBase() 28 | 29 | # Create mock data instead of loading files 30 | logger.info("Creating mock knowledge base data...") 31 | 32 | # Create Document objects with content 33 | research_docs = [ 34 | Document("AI is rapidly evolving with focus on multimodal models and efficient training"), 35 | Document("Enterprises are adopting AI for automation and decision support"), 
36 | Document("Latest research focuses on making AI more reliable and explainable") 37 | ] 38 | 39 | support_docs = [ 40 | Document("Follow industry standards for AI implementation"), 41 | Document("Ensure ethical AI usage and proper documentation"), 42 | Document("Provide comprehensive support for AI integration") 43 | ] 44 | 45 | # Add documents to knowledge bases 46 | for doc in research_docs: 47 | await research_kb.add_document(doc) 48 | for doc in support_docs: 49 | await support_kb.add_document(doc) 50 | 51 | # Get Tavily API key for web search 52 | tavily_api_key = os.getenv("TAVILY_API_KEY") 53 | if not tavily_api_key: 54 | logger.error("TAVILY_API_KEY environment variable not set") 55 | return 56 | 57 | # Create research agent with GPT-4o for complex analysis 58 | logger.info("Creating research agent...") 59 | researcher = (AgentBuilder("researcher") 60 | .with_model("openai/gpt-4o") # Latest OpenAI model 61 | .with_provider(LiteLLMProvider, temperature=0.7) 62 | .with_knowledge_base(research_kb) 63 | .with_tool(WebSearchTool(api_key=tavily_api_key)) 64 | .with_memory({"retention_days": 7}) 65 | .build()) 66 | 67 | # Create support agent with Claude 3 Sonnet for summarization 68 | logger.info("Creating support agent...") 69 | support = (AgentBuilder("support") 70 | .with_model("anthropic/claude-3-sonnet-20240229") # Claude 3 for summaries 71 | .with_provider(LiteLLMProvider, temperature=0.5) 72 | .with_knowledge_base(support_kb) 73 | .with_tool(RAGTool(support_kb)) 74 | .with_memory({"retention_days": 30}) 75 | .build()) 76 | 77 | # Create flow steps 78 | async def research_step(data: dict) -> dict: 79 | """Execute research phase""" 80 | response = await researcher.process( 81 | "Analyze current AI trends and their impact on enterprise applications", 82 | context={"focus": data.get("focus", "enterprise applications")} 83 | ) 84 | return { 85 | "research_findings": response.message, 86 | "tool_calls": response.tool_calls 87 | } 88 | 89 | async def summary_step(data: dict) -> dict: 90 | """Execute summary phase""" 91 | response = await support.process( 92 | f"Create a summary of: {data['research_findings']}", 93 | context={"format": "bullet points"} 94 | ) 95 | return { 96 | "summary": response.message, 97 | "tool_calls": response.tool_calls 98 | } 99 | 100 | # Configure workflow 101 | flow = FlowManager() 102 | flow.add_step(FlowStep( 103 | name="research", 104 | agent=researcher, 105 | process=research_step 106 | )) 107 | flow.add_step(FlowStep( 108 | name="summarize", 109 | agent=support, 110 | process=summary_step, 111 | requires=["research"] # Must wait for research to complete 112 | )) 113 | 114 | # Execute workflow 115 | logger.info("Executing workflow...") 116 | results = await flow.execute({ 117 | "focus": "enterprise applications", 118 | "format": "concise bullet points" 119 | }) 120 | 121 | # Display results 122 | logger.info("\n" + "="*50) 123 | logger.info("Research Findings:") 124 | logger.info("="*50) 125 | logger.info(results["research"]["research_findings"]) 126 | 127 | logger.info("\n" + "="*50) 128 | logger.info("Summarized Insights:") 129 | logger.info("="*50) 130 | logger.info(results["summarize"]["summary"]) 131 | 132 | # Log tool usage 133 | for step, data in results.items(): 134 | if data.get("tool_calls"): 135 | logger.info(f"\nTools used in {step} phase:") 136 | for call in data["tool_calls"]: 137 | logger.info(f"- {call['name']}: {call['parameters']}") 138 | 139 | except Exception as e: 140 | logger.error(f"Error in multi-agent workflow: {str(e)}", 
exc_info=True)
141 |         raise
142 | 
143 | if __name__ == "__main__":
144 |     asyncio.run(main())

--------------------------------------------------------------------------------
/docs/memory_management.md:
--------------------------------------------------------------------------------

1 | # Memory Management in Hawkins AI Framework
2 | 
3 | The Hawkins AI Framework uses HawkinsDB for persistent memory management, allowing agents to maintain context and learn from past interactions. This document explains the memory system architecture and usage.
4 | 
5 | ## Overview
6 | 
7 | HawkinsDB provides a SQLite-based storage system for maintaining agent memory across sessions. The memory system enables:
8 | - Storage of past interactions
9 | - Context retention
10 | - Pattern recognition across conversations
11 | - Long-term learning capabilities
12 | 
13 | ## Memory Architecture
14 | 
15 | ### Core Components
16 | 
17 | 1. **Memory Storage**
18 | ```python
19 | from hawkinsdb import HawkinsDB
20 | 
21 | db = HawkinsDB(
22 |     storage_type="sqlite",
23 |     db_path="hawkins_memory.db"
24 | )
25 | ```
26 | 
27 | 2. **Memory Types**
28 |    - Short-term memory (conversation context)
29 |    - Long-term memory (learned patterns)
30 |    - Episodic memory (specific interaction records)
31 | 
32 | ## Using Memory in Agents
33 | 
34 | ### Basic Memory Integration
35 | 
36 | ```python
37 | from hawkins_agent import AgentBuilder
38 | from hawkinsdb import HawkinsDB, LLMInterface
39 | import asyncio
40 | import os
41 | 
42 | # Set your OpenAI API key before running
43 | os.environ["OPENAI_API_KEY"] = "<your-openai-api-key>"
44 | # Initialize memory
45 | memory_db = HawkinsDB()
46 | llm = LLMInterface(memory_db)
47 | 
48 | 
49 | # Create agent with memory
50 | agent = (AgentBuilder("assistant")
51 |          .with_model("gpt-4o")
52 |          .with_memory(memory_db)
53 |          # a retention config dict is also accepted, e.g.
54 |          # .with_memory({"retention_days": 7, "max_entries": 1000});
55 |          # see "Memory Configuration" below
56 |          .build())
57 | 
58 | 
59 | async def main():
60 |     response = await agent.process("Define AI")
61 |     similar_memories = llm.query(
62 |         "What is AI"
63 |     )
64 |     print(similar_memories)
65 |     print(response)
66 | 
67 | if __name__ == "__main__":
68 |     asyncio.run(main())
69 | ```
70 | ### Memory Operations
71 | 
72 | 1. **Storing Interactions**
73 | ```python
74 | # Automatically handled during agent.process()
75 | response = await agent.process("What's the weather?")
76 | # Interaction stored in memory with metadata
77 | ```
78 | 
79 | 2. **Retrieving Context**
80 | ```python
81 | from hawkinsdb import HawkinsDB, LLMInterface
82 | memory_db = HawkinsDB()
83 | llm = LLMInterface(memory_db)
84 | 
85 | # Recent interactions are automatically included in context
86 | similar_memories = llm.query(
87 |     "What is AI"
88 | )
89 | ```
90 | 
91 | 3. 
**Memory Configuration** 92 | ```python 93 | # Configure memory retention 94 | agent = (AgentBuilder("assistant") 95 | .with_memory({ 96 | "retention_days": 7, # Keep memory for 7 days 97 | "max_entries": 1000 # Maximum memory entries 98 | }) 99 | .build()) 100 | ``` 101 | 102 | ## Memory Schemas 103 | 104 | HawkinsDB uses the following schema for storing memories: 105 | 106 | ```sql 107 | CREATE TABLE memories ( 108 | id TEXT PRIMARY KEY, 109 | content TEXT NOT NULL, 110 | timestamp DATETIME DEFAULT CURRENT_TIMESTAMP, 111 | type TEXT NOT NULL, 112 | metadata JSON 113 | ); 114 | ``` 115 | 116 | Fields: 117 | - `id`: Unique identifier for the memory 118 | - `content`: The actual content of the interaction 119 | - `timestamp`: When the memory was created 120 | - `type`: Type of memory (conversation, learning, etc.) 121 | - `metadata`: Additional structured data about the memory 122 | 123 | ## Example: Advanced Memory Usage 124 | 125 | ```python 126 | from hawkins_agent import AgentBuilder 127 | from hawkins_agent.tools import WebSearchTool 128 | from hawkinsdb import HawkinsDB 129 | 130 | # Initialize memory with custom configuration 131 | memory_db = HawkinsDB( 132 | storage_type="sqlite", 133 | db_path="hawkins_memory.db", 134 | config={ 135 | "retention_days": 30, 136 | "max_entries": 5000, 137 | "index_type": "semantic" # Enable semantic search 138 | } 139 | ) 140 | 141 | # Create agent with memory and tools 142 | agent = (AgentBuilder("research_assistant") 143 | .with_model("gpt-4o") 144 | .with_memory(memory_db) 145 | .with_tool(WebSearchTool()) 146 | .build()) 147 | 148 | # Memory will automatically store: 149 | # - User queries 150 | # - Agent responses 151 | # - Tool usage and results 152 | # - Context and metadata 153 | ``` 154 | 155 | ## Best Practices 156 | 157 | 1. **Memory Maintenance** 158 | - Regularly clean up old memories using retention policies 159 | - Index frequently accessed memories for faster retrieval 160 | - Monitor memory storage size 161 | 162 | 2. **Context Management** 163 | - Use relevant memory retrieval for maintaining conversation context 164 | - Balance between too little and too much context 165 | - Prioritize recent and relevant memories 166 | 167 | 3. **Performance Optimization** 168 | - Use appropriate indexing strategies 169 | - Implement caching for frequently accessed memories 170 | - Configure retention policies based on use case 171 | 172 | ## Memory Limitations 173 | 174 | 1. **Storage Limits** 175 | - Default SQLite database size limits apply 176 | - Consider cleanup strategies for long-running agents 177 | 178 | 2. **Search Performance** 179 | - Large memory stores may impact search performance 180 | - Use appropriate indexing and limiting in queries 181 | 182 | 3. **Context Windows** 183 | - LLM token limits affect how much memory can be included in context 184 | - Implement smart context selection strategies 185 | 186 | ## Security Considerations 187 | 188 | 1. **Data Privacy** 189 | - Memory stores may contain sensitive information 190 | - Implement appropriate access controls 191 | - Consider data encryption for sensitive memories 192 | 193 | 2. 
**Data Retention** 194 | - Follow data retention policies 195 | - Implement secure deletion mechanisms 196 | - Handle user data according to privacy requirements 197 | -------------------------------------------------------------------------------- /examples/maldives_trip_planner.py: -------------------------------------------------------------------------------- 1 | """Multi-agent system for planning a Maldives trip""" 2 | 3 | from hawkins_agent import AgentBuilder 4 | from hawkins_agent.tools import WebSearchTool, WeatherTool 5 | from hawkins_agent.llm import LiteLLMProvider 6 | import logging 7 | import os 8 | import asyncio 9 | import json 10 | from datetime import datetime, timedelta 11 | 12 | # Setup logging 13 | logging.basicConfig( 14 | level=logging.INFO, 15 | format='%(asctime)s - %(name)s - %(levelname)s - %(message)s' 16 | ) 17 | logger = logging.getLogger(__name__) 18 | 19 | class TripFlow: 20 | """Simple flow manager for trip planning""" 21 | 22 | def __init__(self): 23 | self.steps = [] 24 | 25 | def add_step(self, name, func, requires=None): 26 | self.steps.append({ 27 | 'name': name, 28 | 'func': func, 29 | 'requires': requires or [] 30 | }) 31 | 32 | async def execute(self, input_data): 33 | results = {} 34 | for step in self.steps: 35 | try: 36 | # Check requirements 37 | for req in step['requires']: 38 | if req not in results: 39 | raise Exception(f"Required step {req} not completed") 40 | 41 | # Execute step 42 | logger.info(f"Executing step: {step['name']}") 43 | result = await step['func'](input_data, results) 44 | results[step['name']] = result 45 | 46 | except Exception as e: 47 | logger.error(f"Error in step {step['name']}: {str(e)}") 48 | results[step['name']] = {'error': str(e)} 49 | 50 | return results 51 | 52 | async def main(): 53 | """Plan a 5-day Maldives trip using multiple specialized agents""" 54 | try: 55 | # Initialize tools 56 | logger.info("Initializing tools...") 57 | weather_tool = WeatherTool() 58 | search_tool = WebSearchTool(api_key=os.environ.get("TAVILY_API_KEY")) 59 | 60 | # Create research agent for destination info 61 | logger.info("Creating agents...") 62 | researcher = (AgentBuilder("destination_researcher") 63 | .with_model("gpt-4o") 64 | .with_provider(LiteLLMProvider, temperature=0.7) 65 | .with_tool(search_tool) 66 | .build()) 67 | 68 | # Create activity planner agent 69 | activity_planner = (AgentBuilder("activity_planner") 70 | .with_model("gpt-4o") 71 | .with_provider(LiteLLMProvider, temperature=0.8) 72 | .build()) 73 | 74 | # Create logistics agent 75 | logistics_agent = (AgentBuilder("logistics_planner") 76 | .with_model("gpt-4o") 77 | .with_provider(LiteLLMProvider, temperature=0.6) 78 | .with_tool(weather_tool) 79 | .build()) 80 | 81 | async def research_step(input_data, previous_results): 82 | """Research Maldives destinations and key information""" 83 | logger.info("Researching Maldives destinations...") 84 | 85 | response = await researcher.process( 86 | "Research the best areas to stay in Maldives for a 5-day trip, " 87 | "including popular resorts, must-visit locations, and travel tips. " 88 | "Focus on practical information for trip planning." 
89 | ) 90 | 91 | return { 92 | 'content': response.message, 93 | 'destinations': response.metadata.get('destinations', []) 94 | } 95 | 96 | async def plan_activities(input_data, previous_results): 97 | """Plan daily activities for 5 days""" 98 | logger.info("Planning daily activities...") 99 | 100 | research = previous_results['research']['content'] 101 | response = await activity_planner.process( 102 | f"Based on this research: {research}\n" 103 | "Create a detailed 5-day itinerary for the Maldives with specific " 104 | "activities for each day. Include water sports, relaxation time, " 105 | "and cultural experiences. Format as a day-by-day schedule." 106 | ) 107 | 108 | return { 109 | 'content': response.message, 110 | 'itinerary': response.metadata.get('itinerary', {}) 111 | } 112 | 113 | async def plan_logistics(input_data, previous_results): 114 | """Plan accommodation and transportation""" 115 | logger.info("Planning logistics...") 116 | 117 | research = previous_results['research']['content'] 118 | activities = previous_results['activities']['content'] 119 | 120 | # Check weather for trip dates 121 | start_date = datetime.now() + timedelta(days=30) # Plan for next month 122 | weather_query = f"Male,MV" # Capital city as reference 123 | 124 | response = await logistics_agent.process( 125 | f"Based on the research: {research}\n" 126 | f"And planned activities: {activities}\n" 127 | "Provide detailed logistics planning including:\n" 128 | "1. Recommended resorts/hotels\n" 129 | "2. Transportation between islands\n" 130 | "3. Estimated costs\n" 131 | "4. Booking tips" 132 | ) 133 | 134 | return { 135 | 'content': response.message, 136 | 'logistics': response.metadata.get('logistics', {}) 137 | } 138 | 139 | # Configure flow 140 | flow = TripFlow() 141 | flow.add_step('research', research_step) 142 | flow.add_step('activities', plan_activities, ['research']) 143 | flow.add_step('logistics', plan_logistics, ['research', 'activities']) 144 | 145 | # Execute flow 146 | logger.info("\nStarting Maldives trip planning...") 147 | logger.info("=" * 50) 148 | 149 | results = await flow.execute({}) 150 | 151 | # Display results 152 | logger.info("\nTrip Planning Results:") 153 | logger.info("=" * 50) 154 | 155 | for step_name, result in results.items(): 156 | logger.info(f"\n{step_name.upper()}:") 157 | logger.info("-" * 40) 158 | if 'error' in result: 159 | logger.error(f"Error in {step_name}: {result['error']}") 160 | else: 161 | logger.info(result['content']) 162 | 163 | except Exception as e: 164 | logger.error(f"Error in trip planning: {str(e)}", exc_info=True) 165 | raise 166 | 167 | if __name__ == "__main__": 168 | asyncio.run(main()) 169 | -------------------------------------------------------------------------------- /docs/custom_tools.md: -------------------------------------------------------------------------------- 1 | # Creating Custom Tools for Hawkins Agents 2 | 3 | This guide explains how to create custom tools for your Hawkins agents. Custom tools allow you to extend agent capabilities with your own functionality. 4 | 5 | ## Tool Architecture 6 | 7 | Tools in Hawkins follow a simple but powerful architecture: 8 | 9 | 1. Inherit from `BaseTool` 10 | 2. Implement required methods 11 | 3. 
Register with an agent 12 | 13 | ## Basic Structure 14 | 15 | ```python 16 | from typing import Dict, Any 17 | from hawkins_agent.tools import BaseTool 18 | from hawkins_agent.types import ToolResponse 19 | 20 | class CustomTool(BaseTool): 21 | """Your custom tool implementation""" 22 | 23 | def __init__(self): 24 | """Initialize your tool""" 25 | super().__init__(name="custom_tool_name") 26 | 27 | @property 28 | def description(self) -> str: 29 | """Tool description used by the agent""" 30 | return "Description of what your tool does" 31 | 32 | def validate_params(self, params: Dict[str, Any]) -> bool: 33 | """Validate input parameters""" 34 | return True 35 | 36 | async def execute(self, **kwargs) -> ToolResponse: 37 | """Execute the tool's functionality""" 38 | try: 39 | # Your tool logic here 40 | result = "Tool execution result" 41 | return ToolResponse( 42 | success=True, 43 | result=result, 44 | error=None 45 | ) 46 | except Exception as e: 47 | return ToolResponse( 48 | success=False, 49 | result=None, 50 | error=str(e) 51 | ) 52 | ``` 53 | 54 | ## Step-by-Step Guide 55 | 56 | ### 1. Create Tool Class 57 | 58 | Create a new class inheriting from `BaseTool`: 59 | 60 | ```python 61 | from hawkins_agent.tools import BaseTool 62 | 63 | class WeatherTool(BaseTool): 64 | def __init__(self, api_key: str): 65 | super().__init__(name="weather") 66 | self.api_key = api_key 67 | ``` 68 | 69 | ### 2. Add Description 70 | 71 | Implement the `description` property: 72 | 73 | ```python 74 | @property 75 | def description(self) -> str: 76 | return "Get weather information for a specified location" 77 | ``` 78 | 79 | ### 3. Implement Parameter Validation 80 | 81 | Add validation logic: 82 | 83 | ```python 84 | def validate_params(self, params: Dict[str, Any]) -> bool: 85 | if 'query' not in params: 86 | return False 87 | if not isinstance(params['query'], str): 88 | return False 89 | return True 90 | ``` 91 | 92 | ### 4. Implement Execute Method 93 | 94 | Add the main tool functionality: 95 | 96 | ```python 97 | async def execute(self, **kwargs) -> ToolResponse: 98 | try: 99 | query = kwargs.get('query', '') 100 | # Your tool logic here 101 | result = await self._fetch_weather(query) 102 | return ToolResponse( 103 | success=True, 104 | result=result, 105 | error=None 106 | ) 107 | except Exception as e: 108 | return ToolResponse( 109 | success=False, 110 | result=None, 111 | error=str(e) 112 | ) 113 | ``` 114 | 115 | ## Using Custom Tools 116 | 117 | Register your tool with an agent: 118 | 119 | ```python 120 | from hawkins_agent import AgentBuilder 121 | 122 | # Create your custom tool 123 | custom_tool = CustomTool() 124 | 125 | # Add to agent 126 | agent = (AgentBuilder("assistant") 127 | .with_model("openai/gpt-4o") 128 | .with_tool(custom_tool) 129 | .build()) 130 | ``` 131 | 132 | ## Best Practices 133 | 134 | 1. **Error Handling** 135 | - Always use try-catch blocks 136 | - Return clear error messages 137 | - Log errors appropriately 138 | 139 | 2. **Parameter Validation** 140 | - Validate all required parameters 141 | - Check parameter types 142 | - Provide clear validation feedback 143 | 144 | 3. **Documentation** 145 | - Add docstrings to your tool class 146 | - Document parameters and return values 147 | - Include usage examples 148 | 149 | 4. 
**Async Support** 150 | - Use async/await for I/O operations 151 | - Handle async errors appropriately 152 | - Don't block the event loop 153 | 154 | ## Example: Custom Database Tool 155 | 156 | Here's a complete example of a custom database query tool: 157 | 158 | ```python 159 | from hawkins_agent.tools import BaseTool 160 | from hawkins_agent.types import ToolResponse 161 | import asyncpg 162 | 163 | class DatabaseTool(BaseTool): 164 | """Tool for executing database queries""" 165 | 166 | def __init__(self, connection_string: str): 167 | super().__init__(name="database") 168 | self.conn_string = connection_string 169 | 170 | @property 171 | def description(self) -> str: 172 | return "Execute database queries and return results" 173 | 174 | def validate_params(self, params: Dict[str, Any]) -> bool: 175 | if 'query' not in params: 176 | return False 177 | if not isinstance(params['query'], str): 178 | return False 179 | return True 180 | 181 | async def execute(self, **kwargs) -> ToolResponse: 182 | try: 183 | query = kwargs.get('query', '') 184 | conn = await asyncpg.connect(self.conn_string) 185 | result = await conn.fetch(query) 186 | await conn.close() 187 | 188 | return ToolResponse( 189 | success=True, 190 | result=result, 191 | error=None 192 | ) 193 | except Exception as e: 194 | return ToolResponse( 195 | success=False, 196 | result=None, 197 | error=str(e) 198 | ) 199 | ``` 200 | 201 | ## Testing Custom Tools 202 | 203 | Always test your tools thoroughly: 204 | 205 | ```python 206 | async def test_custom_tool(): 207 | tool = CustomTool() 208 | response = await tool.execute(query="test input") 209 | assert response.success 210 | assert response.result is not None 211 | ``` 212 | 213 | ## Common Patterns 214 | 215 | 1. **API Integration** 216 | - Handle API authentication 217 | - Implement rate limiting 218 | - Cache responses when appropriate 219 | 220 | 2. **Resource Management** 221 | - Clean up resources in try-finally blocks 222 | - Use context managers 223 | - Handle connection pooling 224 | 225 | 3. **Input Processing** 226 | - Sanitize inputs 227 | - Convert data types 228 | - Handle missing parameters 229 | 230 | ## Troubleshooting 231 | 232 | Common issues and solutions: 233 | 234 | 1. **Tool Not Recognized** 235 | - Check tool name registration 236 | - Verify tool is properly added to agent 237 | 238 | 2. **Execution Errors** 239 | - Check parameter validation 240 | - Verify async/await usage 241 | - Review error handling 242 | 243 | 3. 
**Performance Issues** 244 | - Implement caching 245 | - Use connection pooling 246 | - Optimize resource usage 247 | -------------------------------------------------------------------------------- /examples/blog_writer_flow.py: -------------------------------------------------------------------------------- 1 | """Example of multi-agent blog writing system""" 2 | 3 | from hawkins_agent import AgentBuilder 4 | from hawkins_agent.tools import RAGTool, WebSearchTool, SummarizationTool 5 | from hawkins_rag import HawkinsRAG 6 | import logging 7 | import os 8 | import asyncio 9 | import tempfile 10 | import shutil 11 | 12 | # Setup logging with more detailed format 13 | logging.basicConfig( 14 | level=logging.INFO, 15 | format='%(asctime)s - %(name)s - %(levelname)s - %(message)s' 16 | ) 17 | logger = logging.getLogger(__name__) 18 | 19 | class SimpleFlow: 20 | """A simplified flow manager for RAG operations""" 21 | 22 | def __init__(self): 23 | self.steps = [] 24 | 25 | def add_step(self, name, func, requires=None): 26 | self.steps.append({ 27 | 'name': name, 28 | 'func': func, 29 | 'requires': requires or [] 30 | }) 31 | 32 | async def execute(self, input_data): 33 | results = {} 34 | for step in self.steps: 35 | try: 36 | # Wait for required steps 37 | for req in step['requires']: 38 | if req not in results: 39 | raise Exception(f"Required step {req} not completed") 40 | 41 | # Execute step 42 | logger.info(f"Executing step: {step['name']}") 43 | result = await step['func'](input_data, results) 44 | results[step['name']] = result 45 | 46 | except Exception as e: 47 | logger.error(f"Error in step {step['name']}: {str(e)}") 48 | results[step['name']] = {'error': str(e)} 49 | 50 | return results 51 | 52 | async def main(): 53 | """Demonstrate blog writing with RAG system""" 54 | try: 55 | # Initialize knowledge bases 56 | logger.info("Initializing RAG systems...") 57 | research_rag = HawkinsRAG() 58 | writer_rag = HawkinsRAG() 59 | editor_rag = HawkinsRAG() 60 | 61 | # Create temp directory for document storage 62 | temp_dir = tempfile.mkdtemp() 63 | logger.info(f"Created temporary directory: {temp_dir}") 64 | 65 | try: 66 | async def research_step(input_data, previous_results): 67 | """Execute research phase""" 68 | topic = input_data.get("topic", "AI trends") 69 | logger.info(f"Researching topic: {topic}") 70 | 71 | try: 72 | # Store topic 73 | topic_file = os.path.join(temp_dir, "topic.txt") 74 | with open(topic_file, 'w') as f: 75 | f.write(f"Research topic: {topic}") 76 | research_rag.load_document(topic_file, source_type="text") 77 | 78 | # Query RAG 79 | response = research_rag.query( 80 | f"Research this topic thoroughly and gather key information: {topic}" 81 | ) 82 | research_text = str(response) 83 | 84 | logger.info("Research completed successfully") 85 | return {'content': research_text} 86 | 87 | except Exception as e: 88 | logger.error(f"Research error: {str(e)}") 89 | return {'error': str(e)} 90 | 91 | async def writing_step(input_data, previous_results): 92 | """Execute writing phase""" 93 | try: 94 | research = previous_results['research']['content'] 95 | style = input_data.get("style", "informative") 96 | 97 | logger.info("Writing draft based on research...") 98 | 99 | # Store research for writer 100 | research_file = os.path.join(temp_dir, "research.txt") 101 | with open(research_file, 'w') as f: 102 | f.write(research) 103 | writer_rag.load_document(research_file, source_type="text") 104 | 105 | # Generate draft 106 | response = writer_rag.query( 107 | f"Write a {style} 
blog post based on this research: {research}" 108 | ) 109 | draft_text = str(response) 110 | 111 | logger.info("Draft completed successfully") 112 | return {'content': draft_text} 113 | 114 | except Exception as e: 115 | logger.error(f"Writing error: {str(e)}") 116 | return {'error': str(e)} 117 | 118 | async def editing_step(input_data, previous_results): 119 | """Execute editing phase""" 120 | try: 121 | draft = previous_results['writing']['content'] 122 | logger.info("Editing draft...") 123 | 124 | # Store draft for editor 125 | draft_file = os.path.join(temp_dir, "draft.txt") 126 | with open(draft_file, 'w') as f: 127 | f.write(draft) 128 | editor_rag.load_document(draft_file, source_type="text") 129 | 130 | # Edit draft 131 | response = editor_rag.query( 132 | "Edit and improve this blog post draft for clarity, engagement and professionalism: " + draft 133 | ) 134 | final_text = str(response) 135 | 136 | logger.info("Editing completed successfully") 137 | return {'content': final_text} 138 | 139 | except Exception as e: 140 | logger.error(f"Editing error: {str(e)}") 141 | return {'error': str(e)} 142 | 143 | # Configure flow 144 | flow = SimpleFlow() 145 | flow.add_step('research', research_step) 146 | flow.add_step('writing', writing_step, ['research']) 147 | flow.add_step('editing', editing_step, ['writing']) 148 | 149 | # Execute flow 150 | logger.info("\nExecuting blog writing workflow...") 151 | logger.info("=" * 50) 152 | 153 | input_data = { 154 | "topic": "The Impact of AI on Software Development in 2024", 155 | "style": "informative and engaging" 156 | } 157 | 158 | logger.info(f"Input: {input_data}") 159 | results = await flow.execute(input_data) 160 | 161 | # Display results 162 | logger.info("\nWorkflow Results:") 163 | logger.info("=" * 50) 164 | 165 | for step_name, result in results.items(): 166 | logger.info(f"\n{step_name.upper()} OUTPUT:") 167 | logger.info("-" * 40) 168 | if 'error' in result: 169 | logger.error(f"Error in {step_name}: {result['error']}") 170 | else: 171 | logger.info(result['content']) 172 | 173 | finally: 174 | # Clean up temp directory 175 | shutil.rmtree(temp_dir) 176 | logger.info("Cleaned up temporary directory") 177 | 178 | except Exception as e: 179 | logger.error(f"Error in blog writing workflow: {str(e)}", exc_info=True) 180 | raise 181 | 182 | if __name__ == "__main__": 183 | asyncio.run(main()) -------------------------------------------------------------------------------- /docs/flows.md: -------------------------------------------------------------------------------- 1 | # Flow System in Hawkins Agent Framework 2 | 3 | The Flow system in Hawkins Agent Framework enables orchestration of complex multi-agent workflows. This guide explains how to create, manage, and optimize flows in your applications. 4 | 5 | ## Overview 6 | 7 | Flows allow you to: 8 | - Chain multiple agents together 9 | - Coordinate complex tasks 10 | - Share context between agents 11 | - Handle dependencies between steps 12 | - Manage state across the workflow 13 | 14 | ## Architecture 15 | 16 | ### Core Components 17 | 18 | 1. **FlowManager** 19 | ```python 20 | class FlowManager: 21 | def add_step(self, step: FlowStep) 22 | async def execute(self, input_data: Dict[str, Any]) -> Dict[str, Any] 23 | ``` 24 | 25 | 2. 
**FlowStep**
26 | ```python
27 | class FlowStep:
28 |     def __init__(self,
29 |                  name: str,
30 |                  agent: Optional[Agent],
31 |                  process: Callable,
32 |                  requires: Optional[List[str]] = None)
33 | ```
34 | 
35 | ## Creating Flows
36 | 
37 | ### Basic Flow Example
38 | 
39 | ```python
40 | from hawkins_agent import AgentBuilder, FlowManager, FlowStep
41 | from hawkins_agent.tools import WebSearchTool
42 | 
43 | # Create agents
44 | research_agent = (AgentBuilder("researcher")
45 |                   .with_model("gpt-4o")
46 |                   .with_tool(WebSearchTool())
47 |                   .build())
48 | 
49 | writer_agent = (AgentBuilder("writer")
50 |                 .with_model("gpt-4o")
51 |                 .build())
52 | 
53 | # Create flow manager
54 | flow = FlowManager()
55 | 
56 | # Define steps
57 | async def research_step(input_data: Dict[str, Any], context: Dict[str, Any]):
58 |     query = input_data.get("topic")
59 |     result = await research_agent.process(f"Research this topic: {query}")
60 |     return {"research": result.message}
61 | 
62 | async def writing_step(input_data: Dict[str, Any], context: Dict[str, Any]):
63 |     research = context.get("research", {}).get("research")
64 |     result = await writer_agent.process(f"Write an article based on: {research}")
65 |     return {"article": result.message}
66 | 
67 | # Add steps to flow
68 | flow.add_step(FlowStep(
69 |     name="research",
70 |     agent=research_agent,
71 |     process=research_step
72 | ))
73 | 
74 | flow.add_step(FlowStep(
75 |     name="writing",
76 |     agent=writer_agent,
77 |     process=writing_step,
78 |     requires=["research"]  # This step requires research to complete first
79 | ))
80 | 
81 | # Execute flow
82 | results = await flow.execute({"topic": "AI trends in 2024"})
83 | ```
84 | 
85 | ## Advanced Features
86 | 
87 | ### 1. Parallel Execution
88 | 
89 | Steps without dependencies can run in parallel:
90 | 
91 | ```python
92 | # These steps will run concurrently
93 | flow.add_step(FlowStep("market_research", market_agent, market_research))
94 | flow.add_step(FlowStep("competitor_analysis", analysis_agent, analyze_competitors))
95 | 
96 | # This step waits for both above steps
97 | flow.add_step(FlowStep(
98 |     "strategy",
99 |     strategy_agent,
100 |     create_strategy,
101 |     requires=["market_research", "competitor_analysis"]
102 | ))
103 | ```
104 | 
105 | ### 2. Error Handling
106 | 
107 | ```python
108 | async def safe_step(input_data, context):
109 |     try:
110 |         result = await process_data(input_data)
111 |         return {"status": "success", "data": result}
112 |     except Exception as e:
113 |         return {"status": "error", "error": str(e)}
114 | 
115 | flow.add_step(FlowStep(
116 |     "safe_operation",
117 |     agent,
118 |     safe_step
119 | ))
120 | ```
121 | 
122 | ### 3. Context Sharing
123 | 
124 | ```python
125 | async def step_with_context(input_data, context):
126 |     # Access results from previous steps
127 |     previous_result = context.get("previous_step", {}).get("data")
128 | 
129 |     # Process with context
130 |     result = await process_with_context(previous_result)
131 | 
132 |     return {"data": result}
133 | ```
134 | 
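135 | ### 4. Step Timeouts
136 | 
137 | Long-running steps can stall an entire flow. One lightweight pattern is to wrap a
138 | step's process function with `asyncio.wait_for` before registering it. This is a
139 | sketch of the pattern, not a built-in framework feature; it reuses `safe_step` from the example above:
140 | 
141 | ```python
142 | import asyncio
143 | 
144 | def with_timeout(process, seconds: float):
145 |     """Wrap a step function so it fails cleanly after `seconds`."""
146 |     async def wrapped(input_data, context):
147 |         try:
148 |             return await asyncio.wait_for(process(input_data, context), seconds)
149 |         except asyncio.TimeoutError:
150 |             return {"status": "error", "error": f"step timed out after {seconds}s"}
151 |     return wrapped
152 | 
153 | flow.add_step(FlowStep("slow_operation", agent, with_timeout(safe_step, 30.0)))
154 | ```
155 | 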
156 | ## Best Practices
157 | 
158 | 1. **Step Design**
159 |    - Keep steps focused and single-purpose
160 |    - Use clear, descriptive step names
161 |    - Document dependencies explicitly
162 |    - Handle errors gracefully
163 | 
164 | 2. **Flow Structure**
165 |    - Organize steps logically
166 |    - Minimize dependencies where possible
167 |    - Consider parallel execution opportunities
168 |    - Use meaningful step names
169 | 
170 | 3. **Context Management**
171 |    - Pass only necessary data between steps
172 |    - Clean up temporary data after use
173 |    - Document context requirements
174 |    - Handle missing context gracefully
175 | 
176 | 4. **Error Handling**
177 |    - Implement error handlers for critical steps
178 |    - Log errors appropriately
179 |    - Provide meaningful error messages
180 |    - Consider recovery strategies
181 | 
182 | ## Example: Document Processing Flow
183 | 
184 | ```python
185 | from hawkins_agent import AgentBuilder, FlowManager, FlowStep
186 | from hawkins_agent.tools import RAGTool, SummarizationTool
187 | 
188 | async def extract_text(input_data, context):
189 |     document = input_data["document"]
190 |     text = await document_processor.extract_text(document)
191 |     return {"text": text}
192 | 
193 | async def summarize_content(input_data, context):
194 |     text = context["extract"]["text"]
195 |     summary = await summarizer_agent.process(f"Summarize: {text}")
196 |     return {"summary": summary.message}
197 | 
198 | async def generate_insights(input_data, context):
199 |     summary = context["summarize"]["summary"]
200 |     insights = await analyst_agent.process(f"Generate insights from: {summary}")
201 |     return {"insights": insights.message}
202 | 
203 | # Create flow
204 | doc_flow = FlowManager()
205 | 
206 | # Add steps
207 | doc_flow.add_step(FlowStep("extract", None, extract_text))
208 | doc_flow.add_step(FlowStep("summarize", summarizer_agent, summarize_content, ["extract"]))
209 | doc_flow.add_step(FlowStep("analyze", analyst_agent, generate_insights, ["summarize"]))
210 | 
211 | # Execute
212 | results = await doc_flow.execute({"document": document_path})
213 | ```
214 | 
215 | ## Performance Considerations
216 | 
217 | 1. **Memory Usage**
218 |    - Monitor context size
219 |    - Clean up large objects after use
220 |    - Use streaming for large data
221 | 
222 | 2. **Execution Time**
223 |    - Optimize step order
224 |    - Use parallel execution
225 |    - Implement timeouts
226 |    - Cache repeated operations
227 | 
228 | 3. **Resource Management**
229 |    - Close connections properly
230 |    - Release resources after use
231 |    - Implement proper cleanup
232 | 
233 | ## Debugging Flows
234 | 
235 | 1. **Logging**
236 | ```python
237 | import logging
238 | 
239 | logging.basicConfig(level=logging.INFO)
240 | logger = logging.getLogger(__name__)
241 | 
242 | async def debug_step(input_data, context):
243 |     logger.info(f"Starting step with input: {input_data}")
244 |     try:
245 |         result = await process_data(input_data)
246 |         logger.info(f"Step completed: {result}")
247 |         return result
248 |     except Exception as e:
249 |         logger.error(f"Step failed: {e}")
250 |         raise
251 | ```
252 | 
253 | 2. **Step Visualization**
254 | ```python
255 | def visualize_flow(flow: FlowManager):
256 |     """Generate a visualization of the flow structure"""
257 |     steps = flow.get_steps()
258 |     for step in steps:
259 |         print(f"Step: {step.name}")
260 |         print(f"Dependencies: {step.requires or []}")
261 |         print("---")
262 | ```
263 | 
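264 | 3. **Dependency Validation**
265 | 
266 |    Catching a bad `requires` reference before a run is cheaper than a failed
267 |    execution. A minimal sketch, assuming the same `get_steps()` accessor used
268 |    in the visualization example above:
269 | 
270 | ```python
271 | def validate_flow(flow: FlowManager) -> list[str]:
272 |     """Return a list of problems (unknown dependencies) found in the flow."""
273 |     problems = []
274 |     names = {step.name for step in flow.get_steps()}
275 |     for step in flow.get_steps():
276 |         for dep in step.requires or []:
277 |             if dep not in names:
278 |                 problems.append(f"step '{step.name}' requires unknown step '{dep}'")
279 |     return problems
280 | 
281 | assert not validate_flow(doc_flow), "flow has unsatisfiable dependencies"
282 | ```
283 | 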
284 | ## Limitations and Considerations
285 | 
286 | 1. **Memory Constraints**
287 |    - Large context objects can impact performance
288 |    - Consider implementing cleanup strategies
289 | 
290 | 2. **Error Propagation**
291 |    - Failed steps can affect dependent steps
292 |    - Implement appropriate fallback mechanisms
293 | 
294 | 3. **Scalability**
295 |    - Complex flows may require additional monitoring
296 |    - Consider breaking large flows into smaller sub-flows
297 | 
298 | 4. **Testing**
299 |    - Test steps individually
300 |    - Validate flow execution paths
301 |    - Mock long-running operations
302 |    - Test error scenarios
303 | 
304 | ## Conclusion
305 | 
306 | The Flow system in Hawkins Agent Framework provides a powerful way to orchestrate complex multi-agent workflows. By following these guidelines and best practices, you can create robust and efficient flows that handle complex tasks while maintaining code quality and performance.
307 | 
--------------------------------------------------------------------------------
/examples/multiagent-trip-planner.py:
--------------------------------------------------------------------------------
1 | """Multi-agent system for planning a 4-day Chennai-Golden Triangle-Chennai trip"""
2 | 
3 | from hawkins_agent import AgentBuilder
4 | from hawkins_agent.tools import WebSearchTool, WeatherTool
5 | from hawkins_agent.llm import LiteLLMProvider
6 | import logging
7 | import os
8 | import asyncio
9 | import json
10 | from datetime import datetime, timedelta
11 | 
12 | # API keys setup (placeholders -- supply your own keys before running)
13 | os.environ["OPENAI_API_KEY"] = ""
14 | os.environ["TAVILY_API_KEY"] = ""
15 | 
16 | # Setup logging
17 | logging.basicConfig(
18 |     level=logging.INFO,
19 |     format='%(asctime)s - %(name)s - %(levelname)s - %(message)s'
20 | )
21 | logger = logging.getLogger(__name__)
22 | 
23 | class TripFlow:
24 |     """Flow manager for 4-day Chennai-Golden Triangle trip planning"""
25 | 
26 |     def __init__(self):
27 |         self.steps = []
28 | 
29 |     def add_step(self, name, func, requires=None):
30 |         self.steps.append({
31 |             'name': name,
32 |             'func': func,
33 |             'requires': requires or []
34 |         })
35 | 
36 |     async def execute(self, input_data):
37 |         results = {}
38 |         for step in self.steps:
39 |             try:
40 |                 # Check requirements
41 |                 for req in step['requires']:
42 |                     if req not in results:
43 |                         raise Exception(f"Required step {req} not completed")
44 | 
45 |                 # Execute step
46 |                 logger.info(f"Executing step: {step['name']}")
47 |                 result = await step['func'](input_data, results)
48 |                 results[step['name']] = result
49 | 
50 |             except Exception as e:
51 |                 logger.error(f"Error in step {step['name']}: {str(e)}")
52 |                 results[step['name']] = {'error': str(e)}
53 | 
54 |         return results
55 | 
56 | async def main():
57 |     """Plan a 4-day trip (3 nights) from Chennai covering Golden Triangle within ₹50,000 for 4 people"""
58 |     try:
59 |         # Initialize tools
60 |         logger.info("Initializing tools...")
61 |         weather_tool = WeatherTool()
62 |         search_tool = WebSearchTool(api_key=os.environ.get("TAVILY_API_KEY"))
63 | 
64 |         # Create travel agent for flight bookings
65 |         travel_agent = (AgentBuilder("travel_agent")
66 |                         .with_model("gpt-4o")
67 |                         .with_provider(LiteLLMProvider, temperature=0.6)
68 |                         .with_tool(search_tool)
69 |                         .build())
70 | 
71 |         # Create research agent for destination info
72 |         researcher = (AgentBuilder("destination_researcher")
73 |                       .with_model("gpt-4o")
74 |                       .with_provider(LiteLLMProvider, temperature=0.7)
75 |                       .with_tool(search_tool)
76 |                       .build())
77 | 
78 |         # Create activity planner agent with budget constraints
79 |         activity_planner = (AgentBuilder("activity_planner")
80 |                             .with_model("gpt-4o")
81 |                             .with_provider(LiteLLMProvider, temperature=0.8)
82 |                             .build())
83 | 
84 |         # Create logistics agent with focus on budget management
85 |         logistics_agent = (AgentBuilder("logistics_planner")
86 |                            .with_model("gpt-4o")
87 |                            .with_provider(LiteLLMProvider, temperature=0.6)
88 |                            .with_tool(weather_tool)
89 |                            .build())
90 | 
91 |         async def plan_travel(input_data, previous_results):
92 |             """Plan 
Chennai-Delhi-Chennai travel""" 93 | logger.info("Planning Chennai-Delhi travel arrangements...") 94 | 95 | response = await travel_agent.process( 96 | "Research and recommend flight options for 4 people:\n" 97 | "1. Chennai to Delhi (Day 1 early morning)\n" 98 | "2. Delhi to Chennai (Day 4 evening)\n" 99 | "Consider:\n" 100 | "- Budget airlines with best rates\n" 101 | "- Early morning arrival in Delhi on Day 1\n" 102 | "- Late evening departure from Delhi on Day 4\n" 103 | "- Airport transfers in both cities\n" 104 | "Total trip budget: ₹50,000 for 4 people" 105 | ) 106 | 107 | return { 108 | 'content': response.message, 109 | 'travel_plan': response.metadata.get('travel_plan', {}) 110 | } 111 | 112 | async def research_step(input_data, previous_results): 113 | """Research Golden Triangle destinations and key information""" 114 | logger.info("Researching Delhi, Agra, and Jaipur...") 115 | 116 | travel_info = previous_results['travel']['content'] 117 | response = await researcher.process( 118 | f"Based on travel arrangements: {travel_info}\n" 119 | "Research for 3-night Golden Triangle tour for 4 people with rental cab:\n" 120 | "1. Must-visit monuments and attractions\n" 121 | "2. Car rental services in Delhi for Golden Triangle circuit\n" 122 | "3. Budget accommodation for 3 nights\n" 123 | "4. Local food recommendations\n" 124 | "5. Best driving routes: Delhi-Agra-Jaipur-Delhi\n" 125 | "6. Parking availability at hotels and attractions\n" 126 | "7. Toll charges and fuel costs estimation\n" 127 | "Consider remaining budget after flight bookings from total ₹50,000" 128 | ) 129 | 130 | return { 131 | 'content': response.message, 132 | 'destinations': response.metadata.get('destinations', []) 133 | } 134 | 135 | async def plan_activities(input_data, previous_results): 136 | """Plan activities for 4 days within budget""" 137 | logger.info("Planning daily activities...") 138 | 139 | research = previous_results['research']['content'] 140 | travel_info = previous_results['travel']['content'] 141 | response = await activity_planner.process( 142 | f"Based on travel arrangements: {travel_info}\n" 143 | f"And research: {research}\n" 144 | "Create a detailed 4-day Golden Triangle itinerary with rental car:\n" 145 | "Day 1: - Early morning flight from Chennai to Delhi\n" 146 | " - Pick up rental car from Delhi airport\n" 147 | " - Delhi sightseeing by car (optimized route)\n" 148 | " - Night in Delhi\n" 149 | "Day 2: - Early morning drive to Agra (via Yamuna Expressway)\n" 150 | " - Taj Mahal and Agra Fort visits\n" 151 | " - Optional evening visit to Mehtab Bagh\n" 152 | " - Night in Agra\n" 153 | "Day 3: - Morning drive to Jaipur (via state highway)\n" 154 | " - En-route stop at Fatehpur Sikri (optional)\n" 155 | " - Afternoon/evening Jaipur sightseeing\n" 156 | " - Night in Jaipur\n" 157 | "Day 4: - Morning sightseeing in Jaipur\n" 158 | " - Post-lunch drive to Delhi Airport\n" 159 | " - Return rental car\n" 160 | " - Evening flight to Chennai\n" 161 | "Include:\n" 162 | "1. Optimal driving routes\n" 163 | "2. Parking locations\n" 164 | "3. Major toll points\n" 165 | "4. Fuel stops\n" 166 | "5. 
Budget-friendly activities and costs"
167 |             )
168 | 
169 |             return {
170 |                 'content': response.message,
171 |                 'itinerary': response.metadata.get('itinerary', {})
172 |             }
173 | 
174 |         async def plan_logistics(input_data, previous_results):
175 |             """Plan accommodation, transportation, and budget allocation"""
176 |             logger.info("Planning logistics and budget...")
177 | 
178 |             travel_info = previous_results['travel']['content']
179 |             research = previous_results['research']['content']
180 |             activities = previous_results['activities']['content']
181 | 
182 |             # Trip assumed to start ~30 days out; the date feeds the weather question below
183 |             start_date = datetime.now() + timedelta(days=30)
184 | 
185 |             response = await logistics_agent.process(
186 |                 f"Based on travel arrangements: {travel_info}\n"
187 |                 f"Research information: {research}\n"
188 |                 f"And planned activities: {activities}\n"
189 |                 f"Provide detailed logistics for 4 people, 4 days (3 nights) starting {start_date:%d %b %Y}, with rental cab:\n"
190 |                 "1. Budget breakdown for:\n"
191 |                 "   - Flights (Chennai-Delhi-Chennai)\n"
192 |                 "   - Rental car (4-day SUV/MPV rental)\n"
193 |                 "   - Fuel costs for entire circuit\n"
194 |                 "   - Toll charges\n"
195 |                 "   - 3 nights accommodation (Delhi, Agra, Jaipur)\n"
196 |                 "   - Food and drinks\n"
197 |                 "   - Entry tickets\n"
198 |                 "   - Driver allowance if required\n"
199 |                 "2. Recommended rental car services in Delhi\n"
200 |                 "3. Recommended budget hotels with parking\n"
201 |                 "4. Estimated driving times:\n"
202 |                 "   - Delhi Airport to hotel\n"
203 |                 "   - Delhi to Agra\n"
204 |                 "   - Agra to Jaipur\n"
205 |                 "   - Jaipur to Delhi Airport\n"
206 |                 "5. Money-saving strategies\n"
207 |                 "6. Essential packing list\n"
208 |                 "7. Parking and toll information\n"
209 |                 f"8. Expected weather around {start_date:%d %B} and what to pack\n"
210 |                 "Total budget: ₹50,000"
211 |             )
212 | 
213 |             return {
214 |                 'content': response.message,
215 |                 'logistics': response.metadata.get('logistics', {})
216 |             }
217 | 
218 |         # Configure flow
219 |         flow = TripFlow()
220 |         flow.add_step('travel', plan_travel)
221 |         flow.add_step('research', research_step, ['travel'])
222 |         flow.add_step('activities', plan_activities, ['travel', 'research'])
223 |         flow.add_step('logistics', plan_logistics, ['travel', 'research', 'activities'])
224 | 
225 |         # Execute flow
226 |         logger.info("\nStarting Chennai-Golden Triangle trip planning...")
227 |         logger.info("=" * 50)
228 | 
229 |         results = await flow.execute({})
230 | 
231 |         # Display results
232 |         logger.info("\nTrip Planning Results:")
233 |         logger.info("=" * 50)
234 | 
235 |         for step_name, result in results.items():
236 |             logger.info(f"\n{step_name.upper()}:")
237 |             logger.info("-" * 40)
238 |             if 'error' in result:
239 |                 logger.error(f"Error in {step_name}: {result['error']}")
240 |             else:
241 |                 logger.info(result['content'])
242 | 
243 |     except Exception as e:
244 |         logger.error(f"Error in trip planning: {str(e)}", exc_info=True)
245 |         raise
246 | 
247 | if __name__ == "__main__":
248 |     asyncio.run(main())
--------------------------------------------------------------------------------
/hawkins_agent/agent.py:
--------------------------------------------------------------------------------
1 | """Core Agent implementation"""
2 | 
3 | from typing import List, Optional, Dict, Any, Type, Union
4 | from .llm import LLMManager, BaseLLMProvider, LiteLLMProvider
5 | from .mock import Document, KnowledgeBase
6 | from .memory import MemoryManager
7 | from .tools.base import BaseTool
8 | from .types import Message, AgentResponse, MessageRole, ToolResponse
9 | import json
10 | import re
11 | import logging
12 | from dataclasses import asdict
13 | 
14 | logger = 
logging.getLogger(__name__) 15 | 16 | class Agent: 17 | """Main Agent class that handles interactions and tool usage""" 18 | 19 | def __init__( 20 | self, 21 | name: str, 22 | llm_model: str = "gpt-4o", 23 | llm_provider_class: Type[BaseLLMProvider] = LiteLLMProvider, 24 | llm_config: Optional[Dict[str, Any]] = None, 25 | knowledge_base: Optional[KnowledgeBase] = None, 26 | tools: Optional[List[BaseTool]] = None, 27 | memory_config: Optional[Dict[str, Any]] = None, 28 | system_prompt: Optional[str] = None 29 | ): 30 | self.name = name 31 | self.llm = LLMManager( 32 | model=llm_model, 33 | provider_class=llm_provider_class, 34 | **llm_config or {} 35 | ) 36 | self.knowledge_base = knowledge_base 37 | self.tools = tools or [] 38 | self.memory = MemoryManager(config=memory_config) 39 | self.system_prompt = system_prompt or self._get_default_system_prompt() 40 | 41 | async def _handle_tool_results( 42 | self, 43 | results: List[Dict[str, Any]], 44 | original_message: str 45 | ) -> Optional[str]: 46 | """Handle tool execution results""" 47 | try: 48 | # Create prompt with results 49 | result_prompt = "Based on the tool results:\n" 50 | for result in results: 51 | if result.get("success", False): 52 | result_prompt += f"\n- {result.get('result', '')}" 53 | else: 54 | result_prompt += f"\n- Error: {result.get('error', 'Unknown error')}" 55 | 56 | result_prompt += "\n\nPlease provide a concise summary of these findings." 57 | 58 | # Get follow-up response 59 | response = await self.llm.generate_response( 60 | messages=[Message( 61 | role=MessageRole.USER, 62 | content=result_prompt 63 | )] 64 | ) 65 | 66 | return response.get("content", "").strip() if response else "" 67 | 68 | except Exception as e: 69 | logger.error(f"Error handling tool results: {str(e)}") 70 | return None 71 | 72 | async def process(self, message: str, context: Optional[Dict[str, Any]] = None) -> AgentResponse: 73 | """Process a user message""" 74 | try: 75 | # Get context and construct messages 76 | combined_context = await self._gather_context(message) 77 | if context: 78 | combined_context.update(context) 79 | 80 | # Format messages list with system prompt and context 81 | messages = [Message(role=MessageRole.SYSTEM, content=self.system_prompt)] 82 | 83 | # Add context if available 84 | if combined_context: 85 | context_msg = "Context:\n" + "\n".join([ 86 | f"- {k}: {v}" for k, v in combined_context.items() 87 | ]) 88 | messages.append(Message( 89 | role=MessageRole.SYSTEM, 90 | content=context_msg 91 | )) 92 | 93 | messages.append(Message(role=MessageRole.USER, content=message)) 94 | 95 | # Format tools for LLM 96 | formatted_tools = [] 97 | if self.tools: 98 | for tool in self.tools: 99 | formatted_tools.append({ 100 | "name": tool.name, 101 | "description": tool.description, 102 | "parameters": { 103 | "type": "object", 104 | "properties": { 105 | "query": { 106 | "type": "string", 107 | "description": "The query or parameters for the tool" 108 | } 109 | }, 110 | "required": ["query"] 111 | } 112 | }) 113 | 114 | # Get LLM response 115 | response = await self.llm.generate_response( 116 | messages=messages, 117 | tools=formatted_tools if self.tools else None 118 | ) 119 | 120 | # Parse response and handle tool calls 121 | result = await self._process_response(response, message) 122 | 123 | # Update memory if we have a valid message 124 | if result and result.message: 125 | await self.memory.add_interaction(message, result.message) 126 | 127 | return result or AgentResponse( 128 | message="Error processing response", 129 | 
129 |                 tool_calls=[],
130 |                 metadata={"error": "Failed to process response"}
131 |             )
132 | 
133 |         except Exception as e:
134 |             logger.error(f"Error processing message: {str(e)}")
135 |             return AgentResponse(
136 |                 message=f"I encountered an error processing your message: {str(e)}",
137 |                 tool_calls=[],
138 |                 metadata={"error": str(e)}
139 |             )
140 | 
141 |     async def _process_response(self, response: Dict[str, Any], original_message: str) -> AgentResponse:
142 |         """Process the LLM response and handle tool calls"""
143 |         try:
144 |             message = response.get("content", "") or ""
145 |             tool_calls = []
146 |             metadata = {}
147 | 
148 |             # Extract <tool_call> blocks (JSON, one nesting level) for non-function-calling models
149 |             if not self.llm.provider.supports_functions:
150 |                 tool_call_pattern = r'<tool_call>\s*(\{(?:[^{}]|\{[^{}]*\})*\})\s*</tool_call>'
151 |                 matches = re.finditer(tool_call_pattern, message)
152 | 
153 |                 for match in matches:
154 |                     try:
155 |                         tool_call = json.loads(match.group(1))
156 |                         tool_calls.append(tool_call)
157 |                         # Remove the tool call from the message
158 |                         message = message.replace(match.group(0), "")
159 |                     except json.JSONDecodeError as e:
160 |                         logger.error(f"Error parsing tool call JSON: {e}")
161 |             else:
162 |                 # Use standard function calling response format
163 |                 tool_calls = response.get("tool_calls", [])
164 | 
165 |             # Execute tools and get results if any tool calls present
166 |             if tool_calls:
167 |                 tool_results = await self._execute_tools(tool_calls)
168 |                 metadata["tool_results"] = tool_results
169 | 
170 |                 # Generate follow-up based on tool results
171 |                 if any(result.get("success", False) for result in tool_results):
172 |                     follow_up = await self._handle_tool_results(
173 |                         tool_results,
174 |                         message
175 |                     )
176 |                     if follow_up:
177 |                         message = (message or "").strip() + "\n\n" + follow_up
178 | 
179 |             return AgentResponse(
180 |                 message=message.strip(),
181 |                 tool_calls=tool_calls,
182 |                 metadata=metadata
183 |             )
184 | 
185 |         except Exception as e:
186 |             logger.error(f"Error processing response: {str(e)}")
187 |             return AgentResponse(
188 |                 message=str(response.get("content", "")),
189 |                 tool_calls=[],
190 |                 metadata={"error": str(e)}
191 |             )
192 | 
193 |     async def _gather_context(self, message: str) -> Dict[str, Any]:
194 |         """Gather context from memory and knowledge base"""
195 |         context = {}
196 | 
197 |         try:
198 |             # Get relevant memories if available
199 |             memories = await self.memory.get_relevant_memories(message)
200 |             if memories:
201 |                 context["memory"] = memories
202 | 
203 |             # Query knowledge base if available
204 |             if self.knowledge_base:
205 |                 kb_results = await self.knowledge_base.query(message)
206 |                 if kb_results:
207 |                     context["knowledge"] = kb_results
208 | 
209 |         except Exception as e:
210 |             logger.error(f"Error gathering context: {str(e)}")
211 | 
212 |         return context
213 | 
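214 |     # Tool calls are dicts of the form
215 |     #   {"name": "search", "parameters": {"query": "..."}}
216 |     # whether they come from native function calling or from parsed
217 |     # <tool_call> blocks ("search" is an illustrative tool name).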
type") 240 | results.append({ 241 | "tool": tool_name, 242 | "success": False, 243 | "result": None, 244 | "error": "Invalid tool response format" 245 | }) 246 | except Exception as e: 247 | logger.error(f"Error executing tool {tool_name}: {str(e)}") 248 | results.append({ 249 | "tool": tool_name, 250 | "success": False, 251 | "result": None, 252 | "error": str(e) 253 | }) 254 | 255 | return results 256 | 257 | def _get_default_system_prompt(self) -> str: 258 | """Get the default system prompt for the agent""" 259 | base_prompt = f"""You are {self.name}, an AI assistant that helps users with their tasks.""" 260 | 261 | if not self.tools: 262 | return base_prompt 263 | 264 | tool_descriptions = "\n".join( 265 | f"- {tool.name}: {tool.description}" 266 | for tool in self.tools 267 | ) 268 | 269 | # Adjust prompt based on whether the model supports function calling 270 | if self.llm.provider.supports_functions: 271 | return f"{base_prompt}\n\nYou have access to the following tools:\n\n{tool_descriptions}" 272 | else: 273 | return f"""{base_prompt} 274 | 275 | You have access to the following tools: 276 | {tool_descriptions} 277 | 278 | When you need to use a tool, please use the exact format defined by each tool's schema: 279 | 280 | 281 | {"name": "tool_name", "parameters": {"parameter1": "value1", "parameter2": "value2"}} 282 | 283 | 284 | Important guidelines: 285 | - Use only the parameter names specified in the tool_descriptions 286 | - Include all required parameters as defined in each tool's schema 287 | - Format parameter values according to their expected types (string, number, boolean, etc.) 288 | - For complex parameters like arrays or objects, use proper JSON formatting 289 | - Wait for the tool's response before proceeding with your analysis 290 | 291 | If a tool description does not specify any parameters, use this fallback format: 292 | 293 | {"name": "tool_name", "parameters": {"query": "your query"}} 294 | 295 | 296 | After receiving results from a tool, interpret the output and incorporate it into your response. 
317 | class AgentBuilder:
318 |     """Builder class for creating agents with a fluent interface"""
319 | 
320 |     def __init__(self, name: str):
321 |         self.name = name
322 |         self.llm_model = "gpt-4o"  # Default to latest model
323 |         self.llm_provider_class = LiteLLMProvider
324 |         self.knowledge_base = None
325 |         self.tools = []
326 |         self.memory_config = {}
327 |         self.llm_config = {}
328 |         self.system_prompt = None
329 | 
330 |     def with_model(self, model: str) -> "AgentBuilder":
331 |         """Set the LLM model"""
332 |         self.llm_model = model
333 |         return self
334 | 
335 |     def with_provider(self, provider_class: Type[BaseLLMProvider], **config) -> "AgentBuilder":
336 |         """Set custom LLM provider with configuration"""
337 |         self.llm_provider_class = provider_class
338 |         self.llm_config = config
339 |         return self
340 | 
341 |     def with_knowledge_base(self, kb: KnowledgeBase) -> "AgentBuilder":
342 |         """Add a knowledge base"""
343 |         self.knowledge_base = kb
344 |         return self
345 | 
346 |     def with_tool(self, tool: BaseTool) -> "AgentBuilder":
347 |         """Add a tool"""
348 |         self.tools.append(tool)
349 |         return self
350 | 
351 |     def with_memory(self, config: Dict[str, Any]) -> "AgentBuilder":
352 |         """Configure memory"""
353 |         self.memory_config = config
354 |         return self
355 | 
356 |     def with_system_prompt(self, prompt: str) -> "AgentBuilder":
357 |         """Override the default system prompt (Agent falls back to its default when unset)"""
358 |         self.system_prompt = prompt
359 |         return self
360 | 
361 |     def build(self) -> Agent:
362 |         """Create the agent instance"""
363 |         return Agent(
364 |             name=self.name,
365 |             llm_model=self.llm_model,
366 |             llm_provider_class=self.llm_provider_class,
367 |             llm_config=self.llm_config,
368 |             knowledge_base=self.knowledge_base,
369 |             tools=self.tools,
370 |             memory_config=self.memory_config,
371 |             system_prompt=self.system_prompt
372 |         )
373 | 
--------------------------------------------------------------------------------