├── .gitignore ├── .pre-commit-config.yaml ├── LICENSE ├── README.md ├── examples ├── all_design_patterns │ └── patterns_example.py ├── documentation_generator │ ├── documentation.py │ ├── flow_config.yaml │ └── flow_diagram.md ├── redditchatbot │ ├── flask_app.py │ └── tframex_config.py └── website_designer │ └── designer.py ├── pyproject.toml ├── requirements.txt └── tframex ├── __init__.py ├── agents ├── __init__.py ├── base.py ├── llm_agent.py └── tool_agent.py ├── app.py ├── flows ├── __init__.py ├── flow_context.py └── flows.py ├── models ├── __init__.py └── primitives.py ├── patterns ├── __init__.py └── patterns.py └── util ├── __init__.py ├── engine.py ├── llms.py ├── logging ├── __init__.py └── logging_config.py ├── memory.py └── tools.py /.gitignore: -------------------------------------------------------------------------------- 1 | .env 2 | .repomixignore 3 | repomix-output.xml 4 | # Byte-compiled / optimized / DLL files 5 | __pycache__/ 6 | *.py[cod] 7 | *$py.class 8 | 9 | # Virtual environments 10 | venv/ 11 | .env/ 12 | .env.bak/ 13 | .venv/ 14 | *.env 15 | 16 | # Installer logs 17 | pip-log.txt 18 | pip-delete-this-directory.txt 19 | 20 | # Distribution / packaging 21 | build/ 22 | dist/ 23 | *.egg-info/ 24 | .eggs/ 25 | *.egg 26 | MANIFEST 27 | 28 | # Test and coverage reports 29 | htmlcov/ 30 | .tox/ 31 | .nox/ 32 | .coverage 33 | coverage.xml 34 | .cache 35 | .pytest_cache/ 36 | nosetests.xml 37 | test-results/ 38 | 39 | # PyBuilder 40 | target/ 41 | 42 | # Jupyter Notebook checkpoints 43 | .ipynb_checkpoints 44 | 45 | # MyPy, Pyre, pytype 46 | .mypy_cache/ 47 | .pyre/ 48 | .pytype/ 49 | 50 | # Cython debug symbols 51 | cython_debug/ 52 | 53 | # VS Code / IDE configs 54 | .vscode/ 55 | .idea/ 56 | *.swp 57 | 58 | # PyInstaller 59 | *.spec 60 | 61 | # Output folders 62 | example_outputs/ 63 | generated/ 64 | logs/ 65 | 66 | # System files 67 | .DS_Store 68 | Thumbs.db 69 | 
-------------------------------------------------------------------------------- /.pre-commit-config.yaml: -------------------------------------------------------------------------------- 1 | repos: 2 | - repo: https://github.com/psf/black 3 | rev: 24.3.0 # replace with latest version 4 | hooks: 5 | - id: black 6 | 7 | - repo: https://github.com/PyCQA/isort 8 | rev: 5.13.2 9 | hooks: 10 | - id: isort 11 | -------------------------------------------------------------------------------- /LICENSE: -------------------------------------------------------------------------------- 1 | MIT License 2 | 3 | Copyright (c) [2025] [Tesslate AI] 4 | 5 | Permission is hereby granted, free of charge, to any person obtaining a copy 6 | of this software and associated documentation files (the "Software"), to deal 7 | in the Software without restriction, including without limitation the rights 8 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell 9 | copies of the Software, and to permit persons to whom the Software is 10 | furnished to do so, subject to the following conditions: 11 | 12 | The above copyright notice and this permission notice shall be included in all 13 | copies or substantial portions of the Software. 14 | 15 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 16 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 17 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE 18 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 19 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, 20 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE 21 | SOFTWARE. 
-------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | 2 | 3 | 4 | # TFrameX: The Extensible Task & Flow Orchestration Framework for LLMs 5 | 6 | 7 | 8 | ![image](https://github.com/user-attachments/assets/031f3b09-34da-4725-bb05-d064f55eec9e) 9 | 10 | Please join our discord for support: [Discord](https://discord.gg/DkzMzwBTaw) 11 | 12 | [TframeX Documentation Website](https://tframex.tesslate.com/) 13 | 14 | [![PyPI version](https://badge.fury.io/py/tframex.svg)](https://badge.fury.io/py/tframex) 15 | 16 | [![License: MIT](https://img.shields.io/badge/License-MIT-yellow.svg)](https://opensource.org/licenses/MIT) 17 | 18 | 19 | 20 | **TFrameX** empowers you to build sophisticated, multi-agent LLM applications with unparalleled ease and flexibility. Move beyond simple prompt-response interactions and construct complex, dynamic workflows where intelligent agents collaborate, use tools, and adapt to intricate tasks. 21 | 22 | 23 | 24 | 25 | **Find our Agent Builder Framework Here**: [Tesslate Studio Agent Builder](https://github.com/TesslateAI/Studio) 26 | 27 | ![image](https://github.com/user-attachments/assets/8e5b0689-38e6-4832-8de5-07ea03ed1c25) 28 | 29 | 30 | 31 | --- 32 | 33 | 34 | 35 | ## ✨ Why TFrameX? 36 | 37 | 38 | 39 | * 🧠 **Intelligent Agents, Simplified:** Define specialized agents with unique system prompts, tools, and even dedicated LLM models. 40 | 41 | * 🛠️ **Seamless Tool Integration:** Equip your agents with custom tools using a simple decorator. Let them interact with APIs, databases, or any Python function. 42 | 43 | * 🌊 **Powerful Flow Orchestration:** Design complex workflows by chaining agents and predefined patterns (Sequential, Parallel, Router, Discussion) using an intuitive `Flow` API. 
44 | 45 | * 🧩 **Composable & Modular:** Build reusable components (agents, tools, flows) that can be combined to create increasingly complex applications. 46 | 47 | * 🚀 **Agent-as-Tool Paradigm:** Elevate your architecture by enabling agents to call other agents as tools, creating hierarchical and supervised agent structures. 48 | 49 | * 🎨 **Fine-Grained Control:** Customize agent behavior with features like per-agent LLMs and `` tag stripping for cleaner outputs. 50 | 51 | * 💬 **Interactive Debugging:** Quickly test your flows and agents with the built-in interactive chat. 52 | 53 | * 🔌 **Pluggable LLMs:** Start with `OpenAIChatLLM` (compatible with OpenAI API and many local server UIs like Ollama) and extend to other models easily. 54 | 55 | 56 | 57 | --- 58 | 59 | ## 💡 Core Concepts 60 | 61 | 62 | [TframeX Documentation Website](https://tframex.tesslate.com/) 63 | 64 | 65 | **TFrameX** is designed to orchestrate complex agent interactions using its powerful `Flow` system, which controls the sequence and logic of operations. 66 | 67 | 68 | 69 | Within a `Flow`, you define reusable collaboration structures called **Patterns**—such as `SequentialPattern`, `RouterPattern`, or `DiscussionPattern`. These patterns can be **nested** inside one another. For example, a `ParallelPattern` may contain several `SequentialPattern`s, enabling hierarchical task breakdowns. 70 | 71 | 72 | 73 | TFrameX also supports the **agent-as-tool** paradigm: `LLMAgent`s can directly call other registered agents. This enables supervisor-worker relationships and task delegation between agents. 74 | 75 | 76 | 77 | Together, **nested patterns** and **inter-agent calling** allow for sophisticated designs—including recursive or cyclical flows. For example, a `DiscussionPattern` creates a controlled loop of interaction. However, to avoid infinite loops, flows must be carefully structured with clear **termination conditions** or managed by **moderator agents**. 
78 | 79 | 80 | 81 | These entire `Flows`—along with their patterns and agent configurations—can be defined declaratively in **YAML** files. This makes it easy to version, share, and modify agent behaviors programmatically, giving you maximum flexibility in building adaptive, interconnected agent systems. 82 | 83 | 84 | 85 | 86 | TFrameX revolves around a few key concepts: 87 | 88 | 89 | 90 | 1. 🌟 **Agents (`BaseAgent`, `LLMAgent`, `ToolAgent`)**: 91 | 92 | * The core actors in your system. 93 | 94 | * **`LLMAgent`**: Leverages an LLM to reason, respond, and decide when to use tools or call other agents. 95 | 96 | * **`ToolAgent`**: A stateless agent that directly executes a specific tool (useful for simpler, direct tool invocations within a flow). 97 | 98 | * Can have their own memory, system prompts, and a dedicated LLM instance. 99 | 100 | * Support for `strip_think_tags`: Automatically remove internal "thinking" steps (e.g., `...`) from the final output for cleaner user-facing responses. 101 | 102 | 103 | 104 | 2. 🔧 **Tools (`@app.tool`)**: 105 | 106 | * Python functions (sync or async) that agents can call to perform actions or retrieve information from the outside world (APIs, databases, file systems, etc.). 107 | 108 | * Schemas are automatically inferred from type hints or can be explicitly defined. 109 | 110 | 111 | 112 | 3. 🌊 **Flows (`Flow`)**: 113 | 114 | * Define the sequence or graph of operations. 115 | 116 | * A flow consists of steps, where each step can be an agent or a **Pattern**. 117 | 118 | * Orchestrate how data (as `Message` objects) and control pass between agents. 119 | 120 | 121 | 122 | 4. 🧩 **Patterns (`SequentialPattern`, `ParallelPattern`, `RouterPattern`, `DiscussionPattern`)**: 123 | 124 | * Reusable templates for common multi-agent interaction structures: 125 | 126 | * **`SequentialPattern`**: Executes a series of agents/patterns one after another. 
127 | 128 | * **`ParallelPattern`**: Executes multiple agents/patterns concurrently on the same input. 129 | 130 | * **`RouterPattern`**: Uses a "router" agent to decide which subsequent agent/pattern to execute. 131 | 132 | * **`DiscussionPattern`**: Facilitates a multi-round discussion between several agents, optionally moderated. 133 | 134 | 135 | 136 | 5. 🤝 **Agent-as-Tool (Supervisor Agents)**: 137 | 138 | * A powerful feature where one `LLMAgent` can be configured to call other registered agents as if they were tools. This allows for creating supervisor agents that delegate tasks to specialized sub-agents. 139 | 140 | 141 | 142 | 6. 🤖 **LLMs (`BaseLLMWrapper`, `OpenAIChatLLM`)**: 143 | 144 | * Pluggable wrappers for LLM APIs. `OpenAIChatLLM` provides out-of-the-box support for OpenAI-compatible APIs (including many local model servers like Ollama or LiteLLM). 145 | 146 | * Agents can use a default LLM provided by the app, or have a specific LLM instance assigned for specialized tasks. 147 | 148 | 149 | 150 | 7. 💾 **Memory (`InMemoryMemoryStore`)**: 151 | 152 | * Provides agents with conversation history. `InMemoryMemoryStore` is available by default, and you can implement custom stores by inheriting from `BaseMemoryStore`. 153 | 154 | 155 | 156 | 157 | ## Getting Started 158 | 159 | 1. **Installation:** 160 | 161 | To use TFrameX in your project, install it via pip: 162 | ```bash 163 | pip install tframex 164 | ``` 165 | Core dependencies (like `httpx`, `pydantic`, `PyYAML`, `python-dotenv`, `openai`) are listed in `pyproject.toml` and should be installed automatically. If you plan to run specific examples from the TFrameX repository, you might need additional packages like `aiohttp` (for the Reddit tool example) or `Flask` (for the web app example). You can install these separately: `pip install aiohttp Flask`. 
166 | 167 | **For Developers (Contributing to TFrameX or running all examples from source):** 168 | 169 | If you're developing TFrameX itself or want to run examples directly from a cloned repository, we recommend setting up a dedicated virtual environment. You can use [`uv`](https://github.com/astral-sh/uv) for its speed, or `pip` with `venv`. 170 | 171 | **Setting up with `uv` (Recommended):** 172 | First, install `uv` by following the instructions at [astral.sh/uv](https://astral.sh/uv). 173 | Then, in your cloned TFrameX repository: 174 | ```bash 175 | # Clone the repository (if you haven't already) 176 | # git clone https://github.com/TesslateAI/TFrameX.git 177 | # cd TFrameX 178 | 179 | # Create and activate a virtual environment 180 | uv venv 181 | source .venv/bin/activate # On macOS/Linux 182 | # For Windows PowerShell: .venv\Scripts\Activate.ps1 183 | 184 | # Install TFrameX in editable mode with optional dependencies 185 | # For core development and running most examples: 186 | uv pip install -e ".[examples]" 187 | # To include development tools (linters, formatters): 188 | # uv pip install -e ".[examples,dev]" 189 | ``` 190 | This approach uses the `pyproject.toml` file for precise dependency management. 191 | 192 | **Setting up with `pip` and `venv` (Alternative):** 193 | In your cloned TFrameX repository: 194 | ```bash 195 | # Clone the repository (if you haven't already) 196 | # git clone https://github.com/TesslateAI/TFrameX.git 197 | # cd TFrameX 198 | 199 | # Create and activate a virtual environment 200 | python -m venv .venv 201 | source .venv/bin/activate # On macOS/Linux 202 | # For Windows Command Prompt: .venv\Scripts\activate.bat 203 | 204 | # Install TFrameX in editable mode with optional dependencies 205 | # For core development and running most examples: 206 | pip install -e ".[examples]" 207 | # To include development tools: 208 | # pip install -e ".[examples,dev]" 209 | ``` 210 | 211 | 2. 
**Set up your LLM Environment:** 212 | Ensure your environment variables for your LLM API are set (e.g., `OPENAI_API_KEY`, `OPENAI_API_BASE`). Create a `.env` file in your project root (TFrameX uses `python-dotenv` to load this): 213 | ```env 214 | # Example for Ollama (running locally) 215 | OPENAI_API_BASE="http://localhost:11434/v1" 216 | OPENAI_API_KEY="ollama" # Placeholder, as Ollama doesn't require a key by default 217 | OPENAI_MODEL_NAME="llama3" # Or your preferred model served by Ollama 218 | 219 | # Example for OpenAI API 220 | # OPENAI_API_KEY="your_openai_api_key" 221 | # OPENAI_MODEL_NAME="gpt-3.5-turbo" 222 | # OPENAI_API_BASE="https://api.openai.com/v1" # (Usually default if not set) 223 | ``` 224 | 225 | 3. **Your First TFrameX App:** 226 | 227 | 228 | ```python 229 | import asyncio 230 | import os 231 | from dotenv import load_dotenv 232 | from tframex import TFrameXApp, OpenAIChatLLM, Message 233 | 234 | load_dotenv() # Load .env file 235 | 236 | # 1. Configure your LLM 237 | # TFrameX will use environment variables for OPENAI_API_BASE, OPENAI_API_KEY, OPENAI_MODEL_NAME by default if available 238 | # You can explicitly pass them too: 239 | 240 | my_llm = OpenAIChatLLM( 241 | model_name=os.getenv("OPENAI_MODEL_NAME", "gpt-3.5-turbo"), 242 | api_base_url=os.getenv("OPENAI_API_BASE"), # Can be http://localhost:11434/v1 for Ollama 243 | api_key=os.getenv("OPENAI_API_KEY") # Can be "ollama" for Ollama 244 | ) 245 | 246 | # 2. Initialize TFrameXApp 247 | app = TFrameXApp(default_llm=my_llm) 248 | 249 | # 3. Define a simple agent 250 | @app.agent( 251 | name="GreeterAgent", 252 | system_prompt="You are a friendly greeter. Greet the user and mention their name: {user_name}." 253 | ) 254 | async def greeter_agent_func(): # The function body can be pass; TFrameX handles logic for LLMAgent 255 | pass 256 | 257 | 258 | # 4. 
Run the agent 259 | async def main(): 260 | async with app.run_context() as rt: # Creates a runtime context 261 | user_input = Message(role="user", content="Hello there!") 262 | 263 | response = await rt.call_agent( 264 | "GreeterAgent", 265 | user_input, 266 | 267 | # You can pass template variables to the system prompt 268 | 269 | template_vars={"user_name": "Alex"} 270 | ) 271 | 272 | print(f"GreeterAgent says: {response.content}") 273 | 274 | if __name__ == "__main__": 275 | # Basic check for LLM configuration 276 | if not my_llm.api_base_url: 277 | print("Error: LLM API base URL not configured. Check .env or OpenAIChatLLM instantiation.") 278 | else: 279 | asyncio.run(main()) 280 | ``` 281 | 282 | 283 | --- 284 | 285 | 286 | 287 | ## 🛠️ Building with TFrameX: Code In Action 288 | 289 | 290 | 291 | Let's explore how to use TFrameX's features with concrete examples. 292 | 293 | 294 | 295 | ### 🤖 Defining Agents 296 | 297 | 298 | 299 | Agents are the heart of TFrameX. Use the `@app.agent` decorator. 300 | 301 | 302 | 303 | ```python 304 | # In your app setup (app = TFrameXApp(...)) 305 | 306 | 307 | @app.agent( 308 | name="EchoAgent", 309 | description="A simple agent that echoes the user's input.", 310 | system_prompt="Repeat the user's message verbatim." 311 | ) 312 | async def echo_agent_placeholder(): # Function body is a placeholder for LLMAgents 313 | pass 314 | 315 | 316 | # An agent that uses a specific, more powerful LLM and strips tags 317 | special_llm_config = OpenAIChatLLM( 318 | model_name=os.getenv("SPECIAL_MODEL_NAME", "gpt-4-turbo"), # A different model 319 | api_base_url=os.getenv("OPENAI_API_BASE_SPECIAL", os.getenv("OPENAI_API_BASE")), 320 | api_key=os.getenv("OPENAI_API_KEY_SPECIAL", os.getenv("OPENAI_API_KEY")) 321 | ) 322 | 323 | 324 | @app.agent( 325 | name="CreativeWriterAgent", 326 | description="A creative writer using a specialized LLM.", 327 | system_prompt=( 328 | "You are a highly creative writer. 
Generate a short, imaginative story based on the user's prompt. " 329 | "You might use ... tags for your internal monologue before the final story. " 330 | "The final story should be engaging and whimsical." 331 | ), 332 | llm=special_llm_config, # Assign a specific LLM instance 333 | strip_think_tags=True # Remove content from final output 334 | ) 335 | async def creative_writer_placeholder(): 336 | pass 337 | ``` 338 | 339 | * **System Prompts:** Guide the LLM's behavior and persona. You can use f-string like template variables (e.g., `{user_name}`) that are filled at runtime. 340 | 341 | * **Per-Agent LLM:** Assign `llm=your_llm_instance` to give an agent a specific model, different from the app's default. 342 | 343 | * **`strip_think_tags=True`:** If your agent's system prompt encourages it to "think out loud" using `...` tags (a common technique for complex reasoning), setting this to `True` will remove those blocks before the final response is returned, keeping the output clean for the end-user. 344 | 345 | 346 | 347 | ### 🔧 Defining Tools 348 | 349 | 350 | 351 | Equip your agents with tools to interact with the world. 352 | 353 | 354 | 355 | ```python 356 | @app.tool(description="Gets the current weather for a specific location.") 357 | async def get_current_weather(location: str, unit: str = "celsius") -> str: 358 | # In a real app, this would call a weather API 359 | if "tokyo" in location.lower(): 360 | return f"The current weather in Tokyo is 25°{unit.upper()[0]} and sunny." 361 | if "paris" in location.lower(): 362 | return f"The current weather in Paris is 18°{unit.upper()[0]} and cloudy." 363 | return f"Weather data for {location} is currently unavailable." 364 | 365 | 366 | # Agent that uses the tool 367 | @app.agent( 368 | name="WeatherAgent", 369 | description="Provides weather information using the 'get_current_weather' tool.", 370 | system_prompt=( 371 | "You are a Weather Assistant. Use your 'get_current_weather' tool to find the weather. 
" 372 | "If the user asks about something other than weather, politely state your purpose. " 373 | "Tool details: {available_tools_descriptions}" # TFrameX injects this 374 | ), 375 | tools=["get_current_weather"] # List tool names available to this agent 376 | ) 377 | async def weather_agent_placeholder(): 378 | pass 379 | ``` 380 | 381 | TFrameX automatically generates the necessary schema for the LLM to understand how to call your tools based on function signatures and type hints. The `{available_tools_descriptions}` placeholder in the system prompt will be dynamically replaced with the names and descriptions of the tools available to that specific agent. 382 | 383 | 384 | 385 | ### 🌊 Orchestrating with Flows 386 | 387 | 388 | 389 | Flows define how agents and patterns are connected to achieve complex tasks. 390 | 391 | 392 | 393 | ```python 394 | from tframex import Flow, SequentialPattern, ParallelPattern, RouterPattern, DiscussionPattern 395 | 396 | # --- Assume Agents are defined (e.g., EchoAgent, UpperCaseAgent, WeatherAgent, CityInfoAgent, SummarizerAgent) --- 397 | 398 | # 1. Sequential Flow: Steps execute one after another 399 | sequential_flow = Flow( 400 | flow_name="SequentialEchoUpper", 401 | description="Echoes, then uppercases input." 402 | ) 403 | sequential_flow.add_step("EchoAgent").add_step("UpperCaseAgent") 404 | app.register_flow(sequential_flow) 405 | 406 | # 2. Parallel Flow: Tasks run concurrently, results are aggregated 407 | @app.agent(name="SummarizerAgent", description="Summarizes input text.", system_prompt="Provide a concise summary of the input text.") 408 | async def summarizer_agent_placeholder(): pass 409 | 410 | parallel_flow = Flow( 411 | flow_name="ParallelInfoSummarize", 412 | description="Gets weather & city info in parallel, then summarizes." 
413 | ) 414 | parallel_flow.add_step( 415 | ParallelPattern( 416 | pattern_name="GetInfoInParallel", 417 | tasks=["WeatherAgent", "CityInfoAgent"] # Agent names to run in parallel 418 | ) 419 | ) 420 | parallel_flow.add_step("SummarizerAgent") # Summarizes the combined output 421 | app.register_flow(parallel_flow) 422 | 423 | # 3. Router Flow: An agent decides the next step 424 | # First, define a router agent: 425 | @app.agent( 426 | name="TaskRouterAgent", 427 | description="Classifies query for RouterPattern: 'weather', 'city_info', or 'general'.", 428 | system_prompt="Analyze user query. Respond with ONE route key: 'weather', 'city_info', or 'general'. NO OTHER TEXT." 429 | ) 430 | async def task_router_placeholder(): pass 431 | 432 | @app.agent(name="GeneralQA_Agent", system_prompt="You are a helpful assistant. Answer general questions to the best of your ability.") 433 | async def general_qa_placeholder(): pass 434 | 435 | router_flow = Flow(flow_name="SmartRouterFlow", description="Routes task using TaskRouterAgent.") 436 | router_flow.add_step( 437 | RouterPattern( 438 | pattern_name="MainTaskRouter", 439 | router_agent_name="TaskRouterAgent", # This agent's output (e.g., "weather") is the route key 440 | routes={ 441 | "weather": "WeatherAgent", 442 | "city_info": "CityInfoAgent", # Assuming CityInfoAgent is defined 443 | "general": "GeneralQA_Agent" 444 | }, 445 | default_route="GeneralQA_Agent" # Fallback if route key doesn't match 446 | ) 447 | ) 448 | app.register_flow(router_flow) 449 | 450 | # 4. Discussion Flow: Multiple agents discuss a topic 451 | @app.agent(name="OptimistAgent", system_prompt="You are the Optimist. Find positive aspects.") 452 | async def optimist_placeholder(): pass 453 | @app.agent(name="PessimistAgent", system_prompt="You are the Pessimist. 
Point out downsides.") 454 | async def pessimist_placeholder(): pass 455 | @app.agent(name="DiscussionModeratorAgent", system_prompt="Summarize the discussion round, identify key themes, and pose a follow-up question to keep the discussion going.") 456 | async def moderator_placeholder(): pass 457 | 458 | discussion_flow = Flow(flow_name="TeamDebateFlow", description="Agents debate a topic.") 459 | discussion_flow.add_step( 460 | DiscussionPattern( 461 | pattern_name="TechDebate", 462 | participant_agent_names=["OptimistAgent", "PessimistAgent"], 463 | discussion_rounds=2, 464 | moderator_agent_name="DiscussionModeratorAgent" # Optional moderator 465 | ) 466 | ) 467 | app.register_flow(discussion_flow) 468 | 469 | # --- Running a Flow --- 470 | async def main_flow_runner(): 471 | async with app.run_context() as rt: 472 | # Example: Run the sequential flow 473 | initial_msg = Message(role="user", content="hello world") 474 | flow_context = await rt.run_flow("SequentialEchoUpper", initial_msg) 475 | print(f"Sequential Flow Output: {flow_context.current_message.content}") 476 | # Expected: Something like "HELLO WORLD" (after echo then uppercase, if agents are so defined) 477 | 478 | # Example: Run the router flow with a weather query 479 | weather_query = Message(role="user", content="What's the weather in Tokyo?") 480 | flow_context_route = await rt.run_flow("SmartRouterFlow", weather_query) 481 | print(f"Router Flow (Weather) Output: {flow_context_route.current_message.content}") 482 | # Expected: WeatherAgent's response for Tokyo 483 | 484 | # Example: Run discussion flow 485 | topic = Message(role="user", content="Let's discuss the future of remote work.") 486 | discussion_context = await rt.run_flow("TeamDebateFlow", topic) 487 | print(f"Discussion Flow Output:\n{discussion_context.current_message.content}") 488 | 489 | if __name__ == "__main__": 490 | # Ensure app and all agents (EchoAgent, UpperCaseAgent, WeatherAgent, CityInfoAgent, 491 | # SummarizerAgent, 
TaskRouterAgent, GeneralQA_Agent, OptimistAgent, PessimistAgent, 492 | # DiscussionModeratorAgent) are defined and registered with 'app' before running. 493 | # Also ensure 'my_llm' and 'special_llm_config' are initialized. 494 | # This is a simplified main guard; a full example would have all definitions. 495 | import asyncio # Assuming asyncio is needed for async main 496 | if 'app' in globals() and hasattr(app, 'default_llm') and app.default_llm: # Basic check 497 | asyncio.run(main_flow_runner()) 498 | else: 499 | print("Please ensure 'app' and its 'default_llm' are initialized, and all agents are defined.") 500 | ``` 501 | 502 | 503 | 504 | ### 🤝 Agent-as-Tool: Building Supervisor Agents 505 | 506 | 507 | 508 | One of TFrameX's most powerful features is allowing an `LLMAgent` to call *other registered agents* as if they were tools. This enables hierarchical agent structures where a "supervisor" agent can delegate sub-tasks to specialized "worker" agents. 509 | 510 | 511 | 512 | ```python 513 | # Assuming WeatherAgent and CityInfoAgent are already defined and registered... 514 | 515 | 516 | @app.agent( 517 | name="SmartQueryDelegateAgent", 518 | description="Supervises WeatherAgent and CityInfoAgent, calling them as tools.", 519 | system_prompt=( 520 | "You are a Smart Query Supervisor. Your goal is to understand the user's complete request and " 521 | "delegate tasks to specialist agents. You have the following specialist agents available:\n" 522 | "{available_agents_descriptions}\n\n" # TFrameX populates this! 523 | "When the user asks a question, first determine if it requires information from one or more specialists. " 524 | "For each required piece of information, call the appropriate specialist agent. The input to the specialist " 525 | "agent should be the specific part of the user's query relevant to that agent, passed as 'input_message'. 
" 526 | "After gathering all necessary information from the specialists, synthesize their responses into a single, " 527 | "comprehensive answer for the user. If the user's query is simple and doesn't need a specialist, " 528 | "answer it directly." 529 | ), 530 | callable_agents=["WeatherAgent", "CityInfoAgent"] # List names of agents this agent can call 531 | ) 532 | async def smart_query_delegate_placeholder(): 533 | pass 534 | 535 | 536 | # A flow that uses this supervisor agent 537 | smart_delegate_flow = Flow(flow_name="SmartDelegateFlow", description="Uses SmartQueryDelegateAgent for complex queries.") 538 | smart_delegate_flow.add_step("SmartQueryDelegateAgent") 539 | app.register_flow(smart_delegate_flow) 540 | 541 | 542 | # --- Running this flow --- 543 | async def main_supervisor(): 544 | async with app.run_context() as rt: 545 | # This query might require both CityInfoAgent and WeatherAgent 546 | query = Message(role="user", content="Tell me about the attractions in Paris and what the weather is like there today.") 547 | flow_context = await rt.run_flow("SmartDelegateFlow", query) 548 | print(f"Supervisor Agent Output:\n{flow_context.current_message.content}") 549 | 550 | 551 | if __name__ == "__main__": 552 | # ... (ensure app, WeatherAgent, CityInfoAgent, SmartQueryDelegateAgent are defined and registered) 553 | # This is a simplified main guard. 554 | if 'app' in globals() and app.default_llm: 555 | import asyncio 556 | asyncio.run(main_supervisor()) 557 | else: 558 | print("Please ensure 'app', its 'default_llm', and relevant agents are initialized.") 559 | ``` 560 | 561 | When you specify `callable_agents`, TFrameX makes these agents available to the `SmartQueryDelegateAgent` as functions it can invoke (via the LLM's tool/function calling mechanism). 
The `{available_agents_descriptions}` template variable in the system prompt will automatically be populated with the names and descriptions of these callable agents, guiding the supervisor LLM on how and when to use them. The supervisor agent will then receive their responses as tool results and can synthesize a final answer. 562 | 563 | 564 | 565 | ### 💬 Interactive Chat 566 | 567 | 568 | 569 | Test your flows quickly using the built-in interactive chat mode. 570 | 571 | 572 | 573 | ```python 574 | async def main_interactive(): 575 | async with app.run_context() as rt: 576 | # If you have multiple flows, it will ask you to choose one. 577 | # Or, you can specify a default flow to start with: 578 | # await rt.interactive_chat(default_flow_name="SmartDelegateFlow") 579 | await rt.interactive_chat() 580 | 581 | 582 | if __name__ == "__main__": 583 | # ... (ensure app, agents, and flows are defined and registered before running) 584 | # This is a simplified main guard. 585 | if 'app' in globals() and app.default_llm: 586 | import asyncio 587 | asyncio.run(main_interactive()) 588 | else: 589 | print("Please ensure 'app' and its 'default_llm' are initialized for interactive chat.") 590 | ``` 591 | 592 | 593 | --- 594 | 595 | 596 | 597 | ## 🌟 Use Cases 598 | 599 | 600 | 601 | TFrameX is ideal for a wide range of applications: 602 | 603 | 604 | 605 | * **Complex Task Decomposition:** Break down large tasks (e.g., "research the impact of AI on healthcare, find three key papers, summarize them, and draft a blog post") into smaller, manageable sub-tasks handled by specialized agents coordinated by a supervisor. 606 | 607 | * **Multi-Agent Collaboration:** 608 | 609 | * Simulate debates (Optimist vs. Pessimist vs. Realist). 610 | 611 | * Collaborative problem-solving teams (e.g., Developer Agent, QA Agent, ProductManager Agent working on a feature). 612 | 613 | * Creative writing ensembles where different agents contribute different parts of a story. 
614 | 615 | * **Tool-Augmented LLM Applications:** 616 | 617 | * Customer support bots that can query databases, CRM systems, or knowledge bases. 618 | 619 | * Data analysis agents that can execute Python code (via a tool) or fetch real-time financial data. 620 | 621 | * Personal assistants that manage calendars, send emails, or control smart home devices. 622 | 623 | * A Reddit chatbot that can fetch top posts, analyze sentiment, and engage in discussions (see `examples/redditchatbot` for inspiration). 624 | 625 | * **Dynamic Chatbots:** Create chatbots that can intelligently route user queries to the most appropriate agent or tool based on context and conversation history. 626 | 627 | * **Automated Content Generation Pipelines:** Chain agents for drafting, revising, fact-checking, and formatting content for various platforms. 628 | 629 | * **Educational Tutors:** Agents specializing in different subjects collaborating to provide comprehensive explanations. 630 | 631 | 632 | 633 | --- 634 | 635 | 636 | 637 | ## 🔬 Advanced Concepts 638 | 639 | 640 | 641 | * **`TFrameXRuntimeContext` (`rt`):** 642 | 643 | * Created when you use `async with app.run_context() as rt:`. 644 | 645 | * Manages the lifecycle of agent instances and LLM clients for a given execution scope. 646 | 647 | * Provides methods like `rt.call_agent()`, `rt.run_flow()`, `rt.call_tool()`. 648 | 649 | * Can have its own LLM override, distinct from the app's default or agent-specific LLMs, useful for setting a context-wide LLM for all operations within that `with` block unless an agent has its own override. 650 | 651 | 652 | 653 | * **`FlowContext`:** 654 | 655 | * Passed between steps in a `Flow`. 656 | 657 | * Holds `current_message` (the output of the last step), `history` (all messages exchanged in the current flow execution, including intermediate agent calls), and `shared_data` (a dictionary for patterns/steps to pass arbitrary data or control signals like `STOP_FLOW`). 
658 | 659 | 660 | 661 | * **Template Variables in Prompts & Flows:** 662 | 663 | * System prompts for agents can include placeholders like `{variable_name}`. 664 | 665 | * When calling an agent directly or running a flow, you can pass a `template_vars` (for `call_agent`) or `flow_template_vars` (for `run_flow`) dictionary: 666 | 667 | ```python 668 | # For call_agent 669 | await rt.call_agent( 670 | "MyAgentWithTemplates", 671 | input_msg, 672 | template_vars={"user_name": "Alice", "current_date": "2024-07-15"} 673 | 674 | ) 675 | 676 | # For run_flow 677 | await rt.run_flow( 678 | "MyFlowWithTemplates", 679 | initial_msg, 680 | flow_template_vars={"project_id": "XYZ123", "target_audience": "developers"} 681 | ) 682 | ``` 683 | 684 | * These variables are made available during system prompt rendering for all agents invoked within that specific call or flow execution, enhancing dynamic behavior and context-awareness. The `system_prompt_template` in an agent's definition (e.g., `@app.agent(system_prompt="Hello {user_name}")`) will be formatted using these variables. 685 | 686 | 687 | 688 | --- 689 | 690 | 691 | 692 | ## 🤝 Contributing 693 | 694 | 695 | 696 | Contributions are welcome! We're excited to see how the community extends and builds upon TFrameX. Please feel free to open an issue for discussions, bug reports, or feature requests, or submit a pull request. 697 | 698 | 699 | 700 | --- 701 | 702 | 703 | [TframeX Documentation Website](https://tframex.tesslate.com/) 704 | 705 | ## 📜 License 706 | 707 | 708 | 709 | This project is licensed under the MIT License. See the `LICENSE` file for details. 
710 | -------------------------------------------------------------------------------- /examples/all_design_patterns/patterns_example.py: -------------------------------------------------------------------------------- 1 | import asyncio 2 | import logging 3 | import os 4 | from typing import Any, Dict, List, Optional 5 | 6 | from dotenv import load_dotenv 7 | 8 | from tframex import ( 9 | DiscussionPattern, 10 | Flow, 11 | InMemoryMemoryStore, 12 | Message, 13 | OpenAIChatLLM, 14 | ParallelPattern, 15 | RouterPattern, 16 | SequentialPattern, 17 | TFrameXApp, 18 | TFrameXRuntimeContext, 19 | ) 20 | 21 | # --- Environment and Logging Setup --- 22 | load_dotenv() 23 | logging.basicConfig( 24 | level=logging.INFO, 25 | format="%(asctime)s - %(levelname)s - %(name)s - [%(funcName)s] - %(message)s", 26 | ) 27 | logging.getLogger("tframex").setLevel(logging.INFO) 28 | # For more detailed logs: 29 | logging.getLogger("tframex.agents.llm_agent").setLevel(logging.DEBUG) 30 | # logging.getLogger("tframex.agents.base").setLevel(logging.DEBUG) 31 | # logging.getLogger("tframex.app").setLevel(logging.DEBUG) 32 | 33 | # --- LLM Configurations --- 34 | # Default LLM (e.g., a local, faster model for general tasks) 35 | default_llm_config = OpenAIChatLLM( 36 | model_name=os.getenv("OPENAI_MODEL_NAME", "gpt-3.5-turbo"), # Your default model 37 | api_base_url=os.getenv("OPENAI_API_BASE", "http://localhost:11434"), 38 | api_key=os.getenv("OPENAI_API_KEY", "ollama"), 39 | ) 40 | 41 | # A more powerful/specialized LLM for specific agents 42 | # Ensure you have another model configured if you use this (e.g., OPENAI_GPT4_MODEL_NAME) 43 | # For this example, we'll just use a different name but point to the same base URL. 44 | # In a real scenario, api_base_url or model_name would differ. 
special_llm_config = OpenAIChatLLM(
    model_name=os.getenv("SPECIAL_MODEL_NAME", "gpt-4"),  # A different model
    api_base_url=os.getenv(
        "OPENAI_API_BASE_SPECIAL",
        # Falls back to the same base URL as the default LLM when no special one is set.
        os.getenv("OPENAI_API_BASE", "http://localhost:11434"),
    ),
    api_key=os.getenv("OPENAI_API_KEY_SPECIAL", os.getenv("OPENAI_API_KEY", "ollama")),
)

# Fail fast if the default LLM endpoint is unconfigured; the special LLM only
# warns, because it transparently falls back to the default base URL above.
if not default_llm_config.api_base_url:
    print("Error: OPENAI_API_BASE not set for default LLM.")
    exit(1)
if not special_llm_config.api_base_url:  # Check if you intend to use a special LLM
    print(
        "Warning: OPENAI_API_BASE_SPECIAL not set for special LLM. It will use the default base if not overridden."
    )


# --- Initialize TFrameX Application ---
app = TFrameXApp(default_llm=default_llm_config)


# --- Tool Definitions (Unchanged) ---
@app.tool(description="Gets the current weather for a specific location.")
async def get_current_weather(location: str, unit: str = "celsius") -> str:
    """Return a canned weather report for the given location.

    Only Tokyo and Paris have data; any other location gets an
    "unavailable" message. `unit` is reduced to its first letter
    (C/F) purely for display.
    """
    logging.info(
        f"TOOL EXECUTED: get_current_weather(location='{location}', unit='{unit}')"
    )
    if "tokyo" in location.lower():
        return f"The current weather in Tokyo is 25°{unit.upper()[0]} and sunny."
    if "paris" in location.lower():
        return f"The current weather in Paris is 18°{unit.upper()[0]} and cloudy."
    return f"Weather data for {location} is currently unavailable."


@app.tool(description="Retrieves general information about a city.")
async def get_city_info(city_name: str, info_type: str = "population") -> str:
    """Return canned facts (population or attractions) for Paris/Tokyo."""
    logging.info(
        f"TOOL EXECUTED: get_city_info(city_name='{city_name}', info_type='{info_type}')"
    )
    if "paris" in city_name.lower():
        if info_type == "population":
            return "Population of Paris is approximately 2.1 million."
        if info_type == "attractions":
            return "Main attractions: Eiffel Tower, Louvre Museum."
90 | if "tokyo" in city_name.lower(): 91 | if info_type == "population": 92 | return "Population of Tokyo is approximately 14 million." 93 | if info_type == "attractions": 94 | return "Main attractions: Tokyo Skytree, Senso-ji Temple." 95 | return f"Information of type '{info_type}' for {city_name} not found." 96 | 97 | 98 | # --- Agent Definitions --- 99 | 100 | 101 | # Basic Agents 102 | @app.agent( 103 | name="EchoAgent", 104 | description="Repeats the input.", 105 | system_prompt="Repeat the user's message verbatim.", 106 | ) 107 | async def echo_agent_placeholder(): 108 | pass 109 | 110 | 111 | @app.agent( 112 | name="UpperCaseAgent", 113 | description="Converts input to uppercase.", 114 | system_prompt="Convert user's message to uppercase. ONLY respond with uppercased text.", 115 | ) 116 | async def uppercase_agent_placeholder(): 117 | pass 118 | 119 | 120 | @app.agent( 121 | name="ReverseAgent", 122 | description="Reverses the input text.", 123 | system_prompt="Reverse the text of user's message. ONLY respond with reversed text.", 124 | ) 125 | async def reverse_agent_placeholder(): 126 | pass 127 | 128 | 129 | # Tool-using Agents 130 | @app.agent( 131 | name="WeatherAgent", 132 | description="Provides weather information using the 'get_current_weather' tool.", 133 | system_prompt="You are a Weather Assistant. Use 'get_current_weather' for the location. If not about weather, state your purpose.", 134 | tools=["get_current_weather"], 135 | ) 136 | async def weather_agent_placeholder(): 137 | pass 138 | 139 | 140 | @app.agent( 141 | name="CityInfoAgent", 142 | description="Provides city details using 'get_city_info' tool.", 143 | system_prompt="You are a City Information Provider. Use 'get_city_info'. Infer 'info_type' or default to 'attractions'. 
If not about city info, state purpose.", 144 | tools=["get_city_info"], 145 | ) 146 | async def city_info_agent_placeholder(): 147 | pass 148 | 149 | 150 | # Summarizer Agent 151 | @app.agent( 152 | name="SummarizerAgent", 153 | description="Summarizes input text.", 154 | system_prompt="Provide a concise summary of the input text.", 155 | ) 156 | async def summarizer_agent_placeholder(): 157 | pass 158 | 159 | 160 | # Agent for Router Pattern (Old way) 161 | @app.agent( 162 | name="TaskRouterAgent", 163 | description="Classifies query for RouterPattern: 'weather', 'city_info', or 'echo'.", 164 | system_prompt="Analyze user query. Respond with ONE route key: 'weather', 'city_info', or 'echo'. NO OTHER TEXT.", 165 | ) 166 | async def task_router_placeholder(): 167 | pass 168 | 169 | 170 | # NEW: Supervisor Agent using "Agent as Tool" 171 | @app.agent( 172 | name="SmartQueryDelegateAgent", 173 | description="Supervises WeatherAgent and CityInfoAgent, calling them as tools.", 174 | system_prompt=( 175 | "You are a Smart Query Supervisor. Delegate to specialist agents based on user's request.\n" 176 | "Available specialist agents (call as functions):\n{available_agents_descriptions}\n\n" 177 | "Call the appropriate agent with user's query as 'input_message'. Present their response." 178 | ), 179 | callable_agents=["WeatherAgent", "CityInfoAgent"], 180 | ) 181 | async def smart_query_delegate_placeholder(): 182 | pass 183 | 184 | 185 | # NEW: Agent with a specific LLM and think tag stripping 186 | @app.agent( 187 | name="CreativeWriterAgent", 188 | description="A creative writer that uses a specialized LLM and might have thinking steps.", 189 | system_prompt=( 190 | "You are a highly creative writer. Generate a short, imaginative story based on the user's prompt. " 191 | "You might use ... tags for your internal monologue before the final story. " 192 | "The final story should be engaging and whimsical." 
193 | ), 194 | llm=special_llm_config, # Uses the special_llm_config 195 | ) 196 | async def creative_writer_placeholder(): 197 | pass 198 | 199 | 200 | # Agents for Discussion Pattern 201 | @app.agent( 202 | name="OptimistAgent", 203 | description="Optimistic discussant.", 204 | system_prompt="You are the Optimist. Find positive aspects. Start with 'As an optimist, I see that...'", 205 | ) 206 | async def optimist_placeholder(): 207 | pass 208 | 209 | 210 | @app.agent( 211 | name="PessimistAgent", 212 | description="Pessimistic discussant.", 213 | system_prompt="You are the Pessimist. Point out downsides. Start with 'However, from a pessimistic view, ...'", 214 | ) 215 | async def pessimist_placeholder(): 216 | pass 217 | 218 | 219 | @app.agent( 220 | name="RealistAgent", 221 | description="Realistic discussant.", 222 | system_prompt="You are the Realist. Provide a balanced view. Start with 'Realistically speaking, ...'", 223 | ) 224 | async def realist_placeholder(): 225 | pass 226 | 227 | 228 | @app.agent( 229 | name="DiscussionModeratorAgent", 230 | description="Moderates discussions.", 231 | system_prompt="Summarize discussion round, identify themes, pose follow-up question.", 232 | ) 233 | async def discussion_moderator_placeholder(): 234 | pass 235 | 236 | 237 | # --- Flow Definitions --- 238 | 239 | # 1. Sequential Flow 240 | sequential_flow = Flow( 241 | flow_name="SequentialEchoUpperReverse", 242 | description="Echoes, uppercases, then reverses input.", 243 | ) 244 | sequential_flow.add_step("EchoAgent").add_step("UpperCaseAgent").add_step( 245 | "ReverseAgent" 246 | ) 247 | app.register_flow(sequential_flow) 248 | 249 | # 2. 
# Parallel Flow
parallel_flow = Flow(
    flow_name="ParallelWeatherCityInfoSummarize",
    description="Gets weather & city info in parallel, then summarizes.",
)
# Fan the same input out to both specialist agents at once, then feed the
# combined results into the summarizer as a single sequential step.
parallel_flow.add_step(
    ParallelPattern(
        pattern_name="GetInfoInParallel", tasks=["WeatherAgent", "CityInfoAgent"]
    )
)
parallel_flow.add_step("SummarizerAgent")
app.register_flow(parallel_flow)

# 3. Router Flow (Old method)
# TaskRouterAgent emits a bare route key ('weather' / 'city_info' / 'echo');
# RouterPattern maps that key to the agent that handles the request.
router_flow_old = Flow(
    flow_name="RouterFlow_OldMethod", description="Routes task using TaskRouterAgent."
)
router_flow_old.add_step(
    RouterPattern(
        pattern_name="MainTaskRouter",
        router_agent_name="TaskRouterAgent",
        routes={
            "weather": "WeatherAgent",
            "city_info": "CityInfoAgent",
            "echo": "EchoAgent",
        },
        default_route="EchoAgent",  # used when the router's output matches no route key
    )
)
app.register_flow(router_flow_old)

# 4. Discussion Flow
# Three persona agents debate for two rounds; the moderator summarizes each round.
discussion_flow_example = Flow(
    flow_name="TeamDebateFlow",
    description="Optimist, Pessimist, Realist discuss a topic.",
)
discussion_flow_example.add_step(
    DiscussionPattern(
        pattern_name="TeamDebateOnTopic",
        participant_agent_names=["OptimistAgent", "PessimistAgent", "RealistAgent"],
        discussion_rounds=2,
        moderator_agent_name="DiscussionModeratorAgent",
        stop_phrase="end discussion now",  # a participant saying this ends the debate early
    )
)
app.register_flow(discussion_flow_example)

# 5. NEW: Flow using Agent-as-Tool (Supervisor Agent)
# Single-step flow: the supervisor agent calls WeatherAgent/CityInfoAgent as tools.
smart_delegate_flow = Flow(
    flow_name="SmartDelegateFlow_NewMethod", description="Uses SmartQueryDelegateAgent."
)
smart_delegate_flow.add_step("SmartQueryDelegateAgent")
app.register_flow(smart_delegate_flow)

# 6.
NEW: Flow demonstrating per-agent LLM and think tag stripping 304 | creative_writing_flow = Flow( 305 | flow_name="CreativeWriterFlow", 306 | description="Demonstrates specialized LLM and think tag stripping.", 307 | ) 308 | creative_writing_flow.add_step( 309 | "CreativeWriterAgent" 310 | ) # This agent has its own LLM and strip_think_tags=True 311 | app.register_flow(creative_writing_flow) 312 | 313 | 314 | # 7. Flow demonstrating template variables (No changes from before, just for completeness) 315 | @app.agent( 316 | name="GreetingAgent", 317 | description="Greets a user by name.", 318 | system_prompt="User's name: {user_name}. Greet them for query: '{user_query}'.", 319 | ) 320 | async def greeting_agent_placeholder(): 321 | pass 322 | 323 | 324 | templated_flow_example = Flow( 325 | flow_name="TemplatedGreetingFlow", description="Greets a user by name via template." 326 | ) 327 | templated_flow_example.add_step("GreetingAgent") 328 | app.register_flow(templated_flow_example) 329 | 330 | 331 | # --- Main Application CLI --- 332 | async def main(): 333 | async with app.run_context( 334 | llm_override=None 335 | ) as rt: # Can override context LLM here if needed 336 | # Example of running a specific flow directly (e.g., creative writer) 337 | # print("\n--- Testing CreativeWriterFlow ---") 338 | # creative_input = Message(role="user", content="Tell me a story about a mischievous cloud.") 339 | # creative_context = await rt.run_flow("CreativeWriterFlow", creative_input) 340 | # print("Creative Writer Output:", creative_context.current_message.content) 341 | # print("---------------------------------") 342 | 343 | # Example of running templated flow 344 | # print("\n--- Testing TemplatedGreetingFlow ---") 345 | # greet_input = Message(role="user", content="I want to know about LLMs.") 346 | # greet_context = await rt.run_flow( 347 | # "TemplatedGreetingFlow", 348 | # greet_input, 349 | # flow_template_vars={"user_name": "Valued Customer", "user_query": 
greet_input.content} 350 | # ) 351 | # print("Templated Greeting Output:", greet_context.current_message.content) 352 | # print("---------------------------------") 353 | 354 | await rt.interactive_chat() 355 | 356 | 357 | if __name__ == "__main__": 358 | if not default_llm_config.api_base_url: # Check the default LLM used by TFrameXApp 359 | print( 360 | "FATAL: OPENAI_API_BASE environment variable is not set for the default LLM." 361 | ) 362 | else: 363 | asyncio.run(main()) 364 | -------------------------------------------------------------------------------- /examples/documentation_generator/documentation.py: -------------------------------------------------------------------------------- 1 | import asyncio 2 | import logging 3 | import os 4 | 5 | from dotenv import load_dotenv 6 | 7 | # Assuming tframex is installed and your environment is set up 8 | from tframex import ParallelPattern # Import patterns if you use them 9 | from tframex import Flow, OpenAIChatLLM, SequentialPattern, TFrameXApp 10 | 11 | # --- Environment and Logging Setup (Optional for just docs, but good practice) --- 12 | load_dotenv() 13 | logging.basicConfig(level=logging.INFO) 14 | logging.getLogger("tframex").setLevel(logging.INFO) 15 | 16 | # --- LLM Configuration (Dummy for this example if not running flows) --- 17 | # If you're only generating documentation, the LLM config might not be strictly necessary 18 | # unless agent definitions themselves depend on it during app setup. 19 | # For robustness, provide a minimal configuration. 
20 | default_llm_config = OpenAIChatLLM( 21 | model_name=os.getenv("OPENAI_MODEL_NAME", "gpt-3.5-turbo"), 22 | api_base_url=os.getenv( 23 | "OPENAI_API_BASE", "http://localhost:11434/v1" 24 | ), # Ollama example 25 | api_key=os.getenv("OPENAI_API_KEY", "ollama"), 26 | ) 27 | 28 | # --- Initialize TFrameX Application --- 29 | app = TFrameXApp(default_llm=default_llm_config) 30 | 31 | 32 | # --- Define some Tools --- 33 | @app.tool(description="A simple echo tool.") 34 | async def echo_tool(text: str) -> str: 35 | return f"Tool echoed: {text}" 36 | 37 | 38 | @app.tool(description="Adds two numbers.") 39 | async def add_numbers_tool(a: int, b: int) -> int: 40 | return a + b 41 | 42 | 43 | # --- Define some Agents --- 44 | @app.agent( 45 | name="GreeterAgent", 46 | description="Greets the user.", 47 | system_prompt="You are a friendly greeter.", 48 | ) 49 | async def greeter_agent_placeholder(): 50 | pass 51 | 52 | 53 | @app.agent( 54 | name="CalculatorAgent", 55 | description="Uses tools to perform calculations.", 56 | system_prompt="You are a calculator. Use your tools. 
Available tools: {available_tools_descriptions}",
    tools=["add_numbers_tool"],
)
async def calculator_agent_placeholder():
    pass


@app.agent(
    name="EchoerAgent",
    description="Echoes input using echo_tool.",
    system_prompt="Use the echo_tool.",
    tools=["echo_tool"],
    # Example of an agent calling another agent (if SupervisorAgent was defined and callable)
    # callable_agents=["GreeterAgent"] # For demonstration, let's assume GreeterAgent could be called
)
async def echoer_agent_placeholder():
    pass


@app.agent(
    name="FarewellAgent",
    description="Says goodbye.",
    system_prompt="Bid the user farewell.",
)
async def farewell_agent_placeholder():
    pass


# --- Create a Flow ---
my_complex_flow = Flow(
    flow_name="GreetingAndCalculationFlow",
    description="A flow that greets, uses a tool via an agent, and then says goodbye.",
)

# Add steps to the flow: a plain agent step, a nested sequential pattern,
# then a closing agent step.
my_complex_flow.add_step("GreeterAgent")
my_complex_flow.add_step(
    SequentialPattern(
        pattern_name="CalculationSequence", steps=["EchoerAgent", "CalculatorAgent"]
    )
)
my_complex_flow.add_step("FarewellAgent")

# Register the flow with the app (good practice, though not strictly needed for docs if flow is passed directly)
app.register_flow(my_complex_flow)


# --- Generate Documentation ---
def generate_and_save_documentation():
    """Render the registered flow as a Mermaid diagram plus a YAML config.

    Looks the flow up by name on `app`, calls the flow's
    `generate_documentation(app)` (synchronous), and prints both outputs.
    """
    flow_to_document = app.get_flow(
        "GreetingAndCalculationFlow"
    )  # Or use my_complex_flow directly

    if not flow_to_document:
        print("Flow not found!")
        return

    print(f"Generating documentation for flow: '{flow_to_document.flow_name}'...")

    # The core call: returns (mermaid_source, yaml_source) as strings.
    mermaid_diagram_string, yaml_config_string = (
        flow_to_document.generate_documentation(app)
    )

    # --- Output or Save the Documentation ---
121 | 122 | # Print to console 123 | print("\n--- Mermaid Diagram ---") 124 | print(mermaid_diagram_string) 125 | print("\n--- YAML Configuration ---") 126 | print(yaml_config_string) 127 | 128 | # Save to files 129 | try: 130 | with open("flow_diagram.md", "w") as f: 131 | f.write("```mermaid\n") 132 | f.write(mermaid_diagram_string) 133 | f.write("\n```") 134 | print("\nMermaid diagram saved to flow_diagram.md") 135 | 136 | with open("flow_config.yaml", "w") as f: 137 | f.write(yaml_config_string) 138 | print("YAML configuration saved to flow_config.yaml") 139 | except IOError as e: 140 | print(f"Error saving files: {e}") 141 | 142 | 143 | # Run the documentation generation 144 | if __name__ == "__main__": 145 | # No async needed if you're *only* generating docs and not running flows/agents. 146 | # If your agent/tool definitions had async setup, you might need an event loop. 147 | # For this specific generate_documentation method, it's synchronous. 148 | generate_and_save_documentation() 149 | -------------------------------------------------------------------------------- /examples/documentation_generator/flow_config.yaml: -------------------------------------------------------------------------------- 1 | flow: 2 | name: GreetingAndCalculationFlow 3 | description: A flow that greets, uses a tool via an agent, and then says goodbye. 4 | steps: 5 | - type: LLMAgent 6 | name: GreeterAgent 7 | description: Greets the user. 8 | system_prompt: You are a friendly greeter. 9 | strip_think_tags: false 10 | llm: Uses context/app default LLM 11 | - type: pattern 12 | pattern_type: SequentialPattern 13 | name: CalculationSequence 14 | steps: 15 | - type: LLMAgent 16 | name: EchoerAgent 17 | description: Echoes input using echo_tool. 18 | system_prompt: Use the echo_tool. 19 | strip_think_tags: false 20 | llm: Uses context/app default LLM 21 | tools: 22 | - name: echo_tool 23 | description: A simple echo tool. 
24 | parameters: 25 | properties: 26 | text: 27 | type: string 28 | description: Parameter 'text' 29 | required: 30 | - text 31 | - type: LLMAgent 32 | name: CalculatorAgent 33 | description: Uses tools to perform calculations. 34 | system_prompt: 'You are a calculator. Use your tools. Available tools: {available_tools_descriptions}' 35 | strip_think_tags: false 36 | llm: Uses context/app default LLM 37 | tools: 38 | - name: add_numbers_tool 39 | description: Adds two numbers. 40 | parameters: 41 | properties: 42 | a: 43 | type: integer 44 | description: Parameter 'a' 45 | b: 46 | type: integer 47 | description: Parameter 'b' 48 | required: 49 | - a 50 | - b 51 | - type: LLMAgent 52 | name: FarewellAgent 53 | description: Says goodbye. 54 | system_prompt: Bid the user farewell. 55 | strip_think_tags: false 56 | llm: Uses context/app default LLM 57 | -------------------------------------------------------------------------------- /examples/documentation_generator/flow_diagram.md: -------------------------------------------------------------------------------- 1 | ```mermaid 2 | graph TD 3 | subgraph GreetingAndCalculationFlow_overall ["Flow: GreetingAndCalculationFlow"] 4 | direction TD 5 | GreetingAndCalculationFlow_FlowStart(("Start")) 6 | GreetingAndCalculationFlow_FlowEnd(("End")) 7 | GreetingAndCalculationFlow_GreeterAgent_0["Unknown: GreeterAgent"] 8 | GreetingAndCalculationFlow_FlowStart --> GreetingAndCalculationFlow_GreeterAgent_0 9 | subgraph GreetingAndCalculationFlow_SequentialPattern_CalculationSequence_1_sub ["Pattern: SequentialPattern\n(CalculationSequence)"] 10 | direction LR 11 | GreetingAndCalculationFlow_SequentialPattern_CalculationSequence_1_start((:)) 12 | GreetingAndCalculationFlow_SequentialPattern_CalculationSequence_1_end((:)) 13 | GreetingAndCalculationFlow_GreeterAgent_0 --> GreetingAndCalculationFlow_SequentialPattern_CalculationSequence_1_start 14 | 
GreetingAndCalculationFlow_SequentialPattern_CalculationSequence_1_EchoerAgent_2["Unknown: EchoerAgent"] 15 | GreetingAndCalculationFlow_SequentialPattern_CalculationSequence_1_start --> GreetingAndCalculationFlow_SequentialPattern_CalculationSequence_1_EchoerAgent_2 16 | GreetingAndCalculationFlow_SequentialPattern_CalculationSequence_1_CalculatorAgent_3["Unknown: CalculatorAgent"] 17 | GreetingAndCalculationFlow_SequentialPattern_CalculationSequence_1_EchoerAgent_2 --> GreetingAndCalculationFlow_SequentialPattern_CalculationSequence_1_CalculatorAgent_3 18 | GreetingAndCalculationFlow_SequentialPattern_CalculationSequence_1_CalculatorAgent_3 --> GreetingAndCalculationFlow_SequentialPattern_CalculationSequence_1_end 19 | end 20 | GreetingAndCalculationFlow_FarewellAgent_4["Unknown: FarewellAgent"] 21 | GreetingAndCalculationFlow_SequentialPattern_CalculationSequence_1_end --> GreetingAndCalculationFlow_FarewellAgent_4 22 | GreetingAndCalculationFlow_FarewellAgent_4 --> GreetingAndCalculationFlow_FlowEnd 23 | end 24 | ``` -------------------------------------------------------------------------------- /examples/redditchatbot/flask_app.py: -------------------------------------------------------------------------------- 1 | # flask_app.py 2 | import asyncio 3 | import logging 4 | 5 | from dotenv import load_dotenv 6 | from flask import Flask, jsonify, render_template_string, request 7 | 8 | # Import the TFrameX app instance and Message primitive 9 | from tframex_config import get_tframex_app 10 | 11 | from tframex import Message # Crucial for history management 12 | 13 | load_dotenv() 14 | 15 | flask_app = Flask(__name__) 16 | tframex_app_instance = get_tframex_app() # Get the configured TFrameXApp instance 17 | 18 | # Configure Flask logging 19 | flask_app.logger.setLevel(logging.INFO) 20 | 21 | # In-memory session store for conversation history 22 | # Maps session_id (str) to a list of serialized Message objects (dicts) 23 | conversation_history_store = {} 24 | 25 | 
# HTML template for the chat interface 26 | CHAT_HTML_TEMPLATE = """ 27 | 28 | 29 | 30 | 31 | 32 | TFrameX Chatbot 33 | 48 | 49 | 50 |
51 |

TFrameX Multi-Tool Chatbot

52 |
53 | 54 |
55 |
56 | 57 | 58 |
59 |
60 | 61 | 141 | 142 | 143 | """ 144 | 145 | 146 | @flask_app.route("/") 147 | def index(): 148 | return render_template_string(CHAT_HTML_TEMPLATE) 149 | 150 | 151 | @flask_app.route("/chat", methods=["POST"]) 152 | async def chat(): 153 | try: 154 | data = request.get_json() 155 | user_message_content = data.get("message") 156 | session_id = data.get( 157 | "session_id", "default_session" 158 | ) # Get session ID from client 159 | 160 | if not user_message_content: 161 | return jsonify({"error": "No message provided"}), 400 162 | 163 | flask_app.logger.info( 164 | f"Received message for session '{session_id}': \"{user_message_content}\"" 165 | ) 166 | 167 | async with tframex_app_instance.run_context() as rt: 168 | # Get the agent instance. 169 | # The agent name here MUST match the name defined in tframex_config.py 170 | agent_name_to_use = "RedditAnalystAgent" # MODIFIED HERE 171 | chatbot_agent = rt._get_agent_instance(agent_name_to_use) 172 | 173 | # 1. Load history for the current session into the agent's memory 174 | if chatbot_agent.memory: # Ensure agent has a memory store 175 | session_history_data = conversation_history_store.get(session_id, []) 176 | if session_history_data: 177 | flask_app.logger.info( 178 | f"Loading {len(session_history_data)} messages from history for session '{session_id}' into agent '{agent_name_to_use}' memory." 179 | ) 180 | for msg_data in session_history_data: 181 | try: 182 | message_obj = Message.model_validate(msg_data) 183 | await chatbot_agent.memory.add_message(message_obj) 184 | except Exception as e: 185 | flask_app.logger.error( 186 | f"Error rehydrating message for session '{session_id}': {msg_data}, error: {e}" 187 | ) 188 | else: 189 | flask_app.logger.info( 190 | f"No prior history found for session '{session_id}'. Starting fresh for agent '{agent_name_to_use}'." 191 | ) 192 | else: 193 | flask_app.logger.warning( 194 | f"Agent '{agent_name_to_use}' does not have a memory store. 
History will not be maintained across calls." 195 | ) 196 | 197 | # 2. Call the agent with the new user message. 198 | bot_response_message = await rt.call_agent( 199 | agent_name_to_use, user_message_content # MODIFIED HERE 200 | ) 201 | 202 | # 3. Save updated history 203 | if chatbot_agent.memory: 204 | updated_full_history = await chatbot_agent.memory.get_history() 205 | conversation_history_store[session_id] = [ 206 | msg.model_dump(exclude_none=True) for msg in updated_full_history 207 | ] 208 | flask_app.logger.info( 209 | f"Saved {len(updated_full_history)} total messages to history for session '{session_id}' (Agent: {agent_name_to_use})." 210 | ) 211 | 212 | bot_reply_content = ( 213 | bot_response_message.content 214 | if bot_response_message 215 | else "Sorry, I couldn't process that." 216 | ) 217 | 218 | if bot_response_message and bot_response_message.tool_calls: 219 | flask_app.logger.warning( 220 | f"Bot response for session '{session_id}' unexpectedly included tool calls: {bot_response_message.tool_calls}" 221 | ) 222 | 223 | flask_app.logger.info( 224 | f"Bot reply for session '{session_id}': \"{bot_reply_content}\"" 225 | ) 226 | return jsonify({"reply": bot_reply_content}) 227 | 228 | except Exception as e: 229 | flask_app.logger.error(f"Error in /chat endpoint: {e}", exc_info=True) 230 | return ( 231 | jsonify( 232 | {"error": f"An internal server error occurred: {type(e).__name__}"} 233 | ), 234 | 500, 235 | ) 236 | 237 | 238 | if __name__ == "__main__": 239 | # For robust async, use an ASGI server like Uvicorn: 240 | # import uvicorn 241 | # uvicorn.run(flask_app, host="0.0.0.0", port=5001, log_level="info", reload=True) 242 | # Using Flask's built-in server for development simplicity (may have limitations with async): 243 | flask_app.run(debug=True, port=5001, use_reloader=True) 244 | -------------------------------------------------------------------------------- /examples/redditchatbot/tframex_config.py: 
--------------------------------------------------------------------------------
# tframex_config.py
import logging
import os

import aiohttp  # For async HTTP requests
from dotenv import load_dotenv

from tframex import (  # Message # Not strictly needed if the tool just returns a string of titles
    OpenAIChatLLM,
    TFrameXApp,
)

# --- Environment and Logging Setup ---
load_dotenv()
logging.basicConfig(
    level=logging.INFO,
    format="%(asctime)s - %(levelname)s - %(name)s - [%(funcName)s] - %(message)s",
)
logging.getLogger("tframex").setLevel(logging.INFO)

# --- LLM Configuration ---
# Defaults target a local Ollama instance through its OpenAI-compatible endpoint.
default_llm_config = OpenAIChatLLM(
    model_name=os.getenv("OPENAI_MODEL_NAME", "gpt-3.5-turbo"),
    api_base_url=os.getenv("OPENAI_API_BASE", "http://localhost:11434/v1"),
    api_key=os.getenv("OPENAI_API_KEY", "ollama"),
)

# Fail fast at import time if the LLM endpoint is misconfigured, since every
# agent in this module depends on it.
if not default_llm_config.api_base_url:
    raise ValueError("Error: OPENAI_API_BASE not set for default LLM configuration.")
if not default_llm_config.api_key:
    raise ValueError("Error: OPENAI_API_KEY not set for default LLM configuration.")

# --- Initialize TFrameX Application ---
tframex_app = TFrameXApp(default_llm=default_llm_config)


# --- Tool Definitions ---


@tframex_app.tool(
    description="Retrieves the titles of the top (up to 10) hot posts from a given Reddit community. Requires 'community_name'."
)
async def get_reddit_top_post_titles(community_name: str, limit: int = 10) -> str:
    """Fetches the titles of the top N hot posts from a subreddit."""
    logging.info(
        f"TOOL EXECUTED: get_reddit_top_post_titles(community_name='{community_name}', limit={limit})"
    )
    # All failure modes return human-readable strings (not exceptions) so the
    # LLM can relay the problem to the user.
    if not community_name:
        return "Error: Please provide a Reddit community name (subreddit)."
    # Clamp out-of-range limits to a safe default instead of failing the call.
    if (
        not 1 <= limit <= 25
    ):  # Reddit API usually has a max of 100, but 25 is reasonable for this use.
        logging.warning(
            f"get_reddit_top_post_titles: Invalid limit '{limit}'. Clamping to 10."
        )
        limit = 10  # Sensible default if invalid

    # Accept both "python" and "r/python" forms of the community name.
    if community_name.lower().startswith("r/"):
        community_name = community_name[2:]

    # Reddit's public JSON listing endpoint; a descriptive User-Agent is required
    # to avoid aggressive rate limiting of default client UAs.
    url = f"https://www.reddit.com/r/{community_name}/hot/.json?limit={limit}"
    headers = {"User-Agent": "TFrameX Reddit Titles Tool v0.1 (ExampleApp)"}

    try:
        async with aiohttp.ClientSession() as session:
            async with session.get(url, headers=headers) as response:
                # Translate the common "not there" / "not allowed" statuses into
                # user-readable messages instead of raising.
                if response.status == 404:
                    logging.warning(
                        f"get_reddit_top_post_titles: Subreddit r/{community_name} not found. Status: {response.status}"
                    )
                    return f"Error: Could not find the subreddit r/{community_name}. It might be private, banned, or non-existent."
                if response.status == 403:
                    logging.warning(
                        f"get_reddit_top_post_titles: Access forbidden to r/{community_name}. Status: {response.status}"
                    )
                    return f"Error: Access to r/{community_name} is forbidden. It might be a private community."
                response.raise_for_status()  # For other errors like 5xx or 429
                data = await response.json()

        # Listing children of kind "t3" are posts; guard every lookup so a
        # malformed entry is skipped rather than raising KeyError.
        titles = [
            post["data"]["title"]
            for post in data.get("data", {}).get("children", [])
            if post.get("kind") == "t3"
            and "data" in post
            and "title" in post["data"]
            and post["data"]["title"]
        ]

        if not titles:
            logging.info(
                f"get_reddit_top_post_titles: No posts found in r/{community_name}."
            )
            return f"No posts found in r/{community_name}, or titles could not be extracted. The community might be empty or have no recent posts with titles."
94 | 95 | titles_str = ( 96 | f"Top {len(titles)} post titles from r/{community_name}:\n" 97 | + "\n".join([f"- {title}" for title in titles]) 98 | ) 99 | logging.info( 100 | f"get_reddit_top_post_titles: Successfully retrieved {len(titles)} titles for r/{community_name}." 101 | ) 102 | return titles_str 103 | 104 | except aiohttp.ClientConnectorError as e: 105 | logging.error( 106 | f"get_reddit_top_post_titles: Network error for r/{community_name}: {e}" 107 | ) 108 | return f"Error: Could not connect to Reddit to fetch titles for r/{community_name}." 109 | except aiohttp.ClientResponseError as e: 110 | logging.error( 111 | f"get_reddit_top_post_titles: HTTP error for r/{community_name}. Status: {e.status}, Message: {e.message}" 112 | ) 113 | if e.status == 429: # Too Many Requests 114 | return f"Error: Requests to Reddit are temporarily rate-limited for r/{community_name}. Please try again later." 115 | return f"Error: API error fetching titles for r/{community_name} (HTTP {e.status})." 116 | except Exception as e: 117 | logging.exception( 118 | f"get_reddit_top_post_titles: An unexpected error occurred for r/{community_name}" 119 | ) 120 | return f"Error: An unexpected error occurred while fetching titles for r/{community_name}." 121 | 122 | 123 | # --- Agent Definition --- 124 | 125 | 126 | @tframex_app.agent( 127 | name="RedditAnalystAgent", 128 | description="Analyzes the current topics and sentiment of a Reddit community based on its top post titles.", 129 | system_prompt=( 130 | "You are a Reddit Analyst. Your goal is to understand what's currently being discussed and the general sentiment in a given subreddit. " 131 | "You have a tool called 'get_reddit_top_post_titles' that provides you with a list of recent post titles from a community.\n" 132 | "{available_tools_descriptions}\n\n" # TFrameX will populate this 133 | "When a user asks about a subreddit (e.g., 'What's happening in r/LocalLLaMA?' or 'What's the vibe in r/python?'):\n" 134 | "1. 
Use the 'get_reddit_top_post_titles' tool to fetch the titles for the specified 'community_name'.\n" 135 | "2. Once you receive the list of titles from the tool, analyze them to determine the main topics of discussion and the overall sentiment (e.g., positive, negative, neutral, excited, concerned, mixed, etc.).\n" 136 | "3. Provide a concise summary (2-3 sentences) of your analysis. If the tool returns an error or no titles, report that problem clearly.\n" 137 | "Example user query: 'What's up with r/futurology?'\n" 138 | "Example your thought process after getting titles: 'Okay, I have the titles. Looks like there's a lot of talk about AI ethics and space exploration. Sentiment seems cautiously optimistic.'\n" 139 | "Example your response to user: 'In r/futurology, recent discussions seem to focus on AI ethics and new space exploration initiatives. The general sentiment appears to be one of cautious optimism and active debate.'\n" 140 | "Be direct and informative in your analysis." 141 | ), 142 | tools=["get_reddit_top_post_titles"], 143 | strip_think_tags=True, 144 | ) 145 | async def reddit_analyst_agent_placeholder(): 146 | pass 147 | 148 | 149 | logging.info("TFrameX application, Reddit tool, and RedditAnalystAgent configured.") 150 | logging.info( 151 | "Reminder: This script requires 'aiohttp'. Install with 'pip install aiohttp'." 
152 | ) 153 | 154 | 155 | # To make the app instance easily importable by flask_app.py 156 | def get_tframex_app(): 157 | return tframex_app 158 | -------------------------------------------------------------------------------- /examples/website_designer/designer.py: -------------------------------------------------------------------------------- 1 | import asyncio 2 | import logging 3 | import os 4 | from typing import Any, Dict, List, Optional 5 | 6 | from dotenv import load_dotenv 7 | 8 | from tframex import ( 9 | DiscussionPattern, 10 | Flow, 11 | InMemoryMemoryStore, 12 | Message, 13 | OpenAIChatLLM, 14 | ParallelPattern, 15 | RouterPattern, 16 | SequentialPattern, 17 | TFrameXApp, 18 | TFrameXRuntimeContext, 19 | ) 20 | 21 | # --- Environment and Logging Setup --- 22 | load_dotenv() 23 | logging.basicConfig( 24 | level=logging.INFO, 25 | format="%(asctime)s - %(levelname)s - %(name)s - [%(funcName)s] - %(message)s", 26 | ) 27 | logging.getLogger("tframex").setLevel(logging.INFO) 28 | # For more detailed logs: 29 | logging.getLogger("tframex.agents.llm_agent").setLevel(logging.DEBUG) 30 | # logging.getLogger("tframex.agents.base").setLevel(logging.DEBUG) 31 | # logging.getLogger("tframex.app").setLevel(logging.DEBUG) 32 | 33 | # --- LLM Configurations --- 34 | # Default LLM (e.g., a local, faster model for general tasks) 35 | default_llm_config = OpenAIChatLLM( 36 | model_name=os.getenv("OPENAI_MODEL_NAME", "gpt-3.5-turbo"), # Your default model 37 | api_base_url=os.getenv("OPENAI_API_BASE", "http://localhost:11434"), 38 | api_key=os.getenv("OPENAI_API_KEY", "ollama"), 39 | ) 40 | 41 | if not default_llm_config.api_base_url: 42 | print("Error: OPENAI_API_BASE not set for default LLM.") 43 | exit(1) 44 | 45 | # --- Initialize TFrameX Application --- 46 | app = TFrameXApp(default_llm=default_llm_config) 47 | 48 | 49 | @app.tool(description="Writes file to file system.") 50 | async def write_file(file_path: str, content: str): 51 | with open(file_path, "w") as f: 
52 | f.write(content) 53 | -------------------------------------------------------------------------------- /pyproject.toml: -------------------------------------------------------------------------------- 1 | # tframex/pyproject.toml 2 | 3 | [build-system] 4 | requires = ["setuptools>=61.0"] 5 | build-backend = "setuptools.build_meta" 6 | 7 | [project] 8 | name = "tframex" 9 | version = "0.1.3" 10 | authors = [ 11 | { name = "Manav Majumdar", email = "manav@tesslate.com" }, 12 | { name = "Sanjit Verma", email = "sanjitverma@tesslate.com" }, 13 | { name = "Prajwal Moharana", email = "pmoharana@tesslate.com" }, 14 | { name = "TesslateAI" } 15 | ] 16 | description = "A framework for building agentic systems with large language models." 17 | readme = "README.md" 18 | requires-python = ">=3.8" 19 | license = { text = "MIT" } # Updated license specification format 20 | classifiers = [ 21 | "Programming Language :: Python :: 3", 22 | "Programming Language :: Python :: 3.8", 23 | "Programming Language :: Python :: 3.9", 24 | "Programming Language :: Python :: 3.10", 25 | "Programming Language :: Python :: 3.11", 26 | "Operating System :: OS Independent", 27 | "Intended Audience :: Developers", 28 | "Topic :: Software Development :: Libraries :: Python Modules", 29 | "Topic :: Scientific/Engineering :: Artificial Intelligence", 30 | ] 31 | dependencies = [ 32 | "httpx>=0.25.0", # For OpenAIChatLLM and general HTTP 33 | "python-dotenv>=1.0.0", # For .env loading in examples 34 | "PyYAML>=6.0.1", # For YAML parsing (e.g., flow_config.yaml) 35 | "openai>=1.0.0", # Core for OpenAIChatLLM interacting with OpenAI spec 36 | "pydantic>=2.0.0", # For data models (Message, ToolDefinition, etc.) 37 | # aiohttp is used by the Reddit example tool. 38 | # If you want all examples to work out-of-the-box, include it here. 39 | # Otherwise, consider making it an optional dependency. 
40 | "aiohttp>=3.0.0", 41 | ] 42 | 43 | [project.optional-dependencies] 44 | # Dependencies needed to run all provided examples 45 | examples = [ 46 | "Flask>=3.0.0" # For the Reddit chatbot web example 47 | ] 48 | # Dependencies for development (linting, formatting, pre-commit hooks) 49 | dev = [ 50 | "black>=24.3.0", # From .pre-commit-config.yaml 51 | "isort>=5.13.2", # From .pre-commit-config.yaml 52 | "pre-commit>=3.3.0" 53 | # If you add pytest or other testing tools, list them here 54 | # "pytest>=7.0.0", 55 | ] 56 | 57 | [project.urls] 58 | Homepage = "https://github.com/TesslateAI/TFrameX" 59 | Repository = "https://github.com/TesslateAI/TFrameX" 60 | 61 | [tool.setuptools.packages.find] 62 | where = ["."] 63 | include = ["tframex*"] 64 | exclude = ["examples*", "tests*"] 65 | 66 | [tool.black] 67 | line-length = 88 68 | target-version = ['py38'] 69 | exclude = ''' 70 | /( 71 | \.git 72 | | \.mypy_cache 73 | | \.venv 74 | | venv 75 | | build 76 | | dist 77 | )/ 78 | ''' 79 | 80 | [tool.isort] 81 | profile = "black" 82 | line_length = 88 83 | known_first_party = ["tframex"] -------------------------------------------------------------------------------- /requirements.txt: -------------------------------------------------------------------------------- 1 | aiohappyeyeballs==2.6.1 2 | aiohttp==3.11.18 3 | aiosignal==1.3.2 4 | annotated-types==0.7.0 5 | anyio==4.9.0 6 | asgiref==3.8.1 7 | attrs==25.3.0 8 | blinker==1.9.0 9 | certifi==2025.4.26 10 | click==8.1.8 11 | colorama==0.4.6 12 | Flask==3.1.0 13 | flask-cors==5.0.1 14 | frozenlist==1.6.0 15 | h11==0.16.0 16 | httpcore==1.0.9 17 | httpx==0.28.1 18 | idna==3.10 19 | itsdangerous==2.2.0 20 | Jinja2==3.1.6 21 | MarkupSafe==3.0.2 22 | multidict==6.4.3 23 | openai>=1.0.0 24 | propcache==0.3.1 25 | pydantic==2.11.4 26 | pydantic_core==2.33.2 27 | python-dotenv==1.1.0 28 | python-json-logger>=2.0.0 29 | PyYAML==6.0.2 30 | sniffio==1.3.1 31 | tframex 32 | typing-inspection==0.4.0 33 | typing_extensions==4.13.2 
34 | Werkzeug==3.1.3 35 | yarl==1.20.0 36 | 37 | # Development dependencies 38 | black>=23.0.0 39 | isort>=5.12.0 40 | pre-commit>=3.3.0 -------------------------------------------------------------------------------- /tframex/__init__.py: -------------------------------------------------------------------------------- 1 | # tframex/__init__.py (NEW VERSION) 2 | import os 3 | from dotenv import load_dotenv 4 | 5 | # It's generally better for applications to handle dotenv loading. 6 | # load_dotenv() 7 | 8 | # Import from subpackages 9 | from .agents import BaseAgent, LLMAgent, ToolAgent 10 | from .app import TFrameXApp, TFrameXRuntimeContext # TFrameXRuntimeContext is now defined in app.py 11 | from .flows import FlowContext, Flow 12 | from .models.primitives import ( # Note the .models path 13 | FunctionCall, 14 | Message, 15 | MessageChunk, 16 | ToolCall, 17 | ToolDefinition, 18 | ToolParameterProperty, 19 | ToolParameters, 20 | ) 21 | from .patterns import ( # Note the .patterns path 22 | BasePattern, 23 | DiscussionPattern, 24 | ParallelPattern, 25 | RouterPattern, 26 | SequentialPattern, 27 | ) 28 | from .util.engine import Engine # Engine is now directly under util 29 | from .util.llms import BaseLLMWrapper, OpenAIChatLLM 30 | from .util.memory import BaseMemoryStore, InMemoryMemoryStore 31 | from .util.tools import Tool 32 | # setup_logging might be called by TFrameXApp itself, not typically part of public API to re-export 33 | # from .util.logging import setup_logging 34 | 35 | 36 | __all__ = [ 37 | # Agents 38 | "BaseAgent", 39 | "LLMAgent", 40 | "ToolAgent", 41 | # App & Runtime 42 | "TFrameXApp", 43 | "TFrameXRuntimeContext", # This was TFrameXRuntimeContext in the old __init__ 44 | "Engine", # New public component 45 | # Flows 46 | "FlowContext", 47 | "Flow", 48 | # Models (Primitives) 49 | "FunctionCall", 50 | "Message", 51 | "MessageChunk", 52 | "ToolCall", 53 | "ToolDefinition", 54 | "ToolParameterProperty", 55 | "ToolParameters", 56 | # Patterns 57 
| "BasePattern", 58 | "DiscussionPattern", 59 | "ParallelPattern", 60 | "RouterPattern", 61 | "SequentialPattern", 62 | # Utilities 63 | "BaseLLMWrapper", 64 | "OpenAIChatLLM", 65 | "BaseMemoryStore", 66 | "InMemoryMemoryStore", 67 | "Tool", 68 | # "setup_logging", # Decide if this should be public 69 | ] -------------------------------------------------------------------------------- /tframex/agents/__init__.py: -------------------------------------------------------------------------------- 1 | from .base import BaseAgent 2 | from .llm_agent import LLMAgent 3 | from .tool_agent import ToolAgent 4 | 5 | __all__ = ["BaseAgent", "LLMAgent", "ToolAgent"] -------------------------------------------------------------------------------- /tframex/agents/base.py: -------------------------------------------------------------------------------- 1 | import logging 2 | import re # For stripping think tags 3 | from abc import ABC, abstractmethod 4 | from typing import Any, Dict, List, Optional, Union 5 | 6 | from tframex.models.primitives import Message 7 | from tframex.util.llms import BaseLLMWrapper 8 | from tframex.util.memory import BaseMemoryStore, InMemoryMemoryStore 9 | from tframex.util.tools import Tool, ToolDefinition 10 | 11 | logger = logging.getLogger(__name__) 12 | agent_internal_debug_logger = logging.getLogger("tframex.agent_internal_debug") 13 | agent_internal_debug_logger.setLevel(logging.DEBUG) 14 | 15 | 16 | class BaseAgent(ABC): 17 | def __init__( 18 | self, 19 | agent_id: str, 20 | description: Optional[str] = None, 21 | llm: Optional[ 22 | BaseLLMWrapper 23 | ] = None, # This will be the resolved LLM for this agent instance 24 | tools: Optional[List[Tool]] = None, 25 | memory: Optional[BaseMemoryStore] = None, 26 | system_prompt_template: Optional[str] = None, 27 | callable_agent_definitions: Optional[List[ToolDefinition]] = None, 28 | strip_think_tags: bool = False, # True to remove, False to keep. 
(Default: False means keep) 29 | **config: Any, 30 | ): 31 | self.agent_id = agent_id 32 | self.description = ( 33 | description 34 | or f"Agent performing its designated role: {agent_id.split('_ctx')[0]}" 35 | ) 36 | self.llm = llm # The specific LLM instance this agent will use 37 | self.tools: Dict[str, Tool] = ( 38 | {tool.name: tool for tool in tools} if tools else {} 39 | ) 40 | self.memory: BaseMemoryStore = memory or InMemoryMemoryStore() 41 | self.system_prompt_template = system_prompt_template 42 | self.callable_agent_definitions: List[ToolDefinition] = ( 43 | callable_agent_definitions or [] 44 | ) 45 | self.strip_think_tags = strip_think_tags 46 | self.config = config 47 | 48 | agent_internal_debug_logger.debug( 49 | f"[{self.agent_id}] BaseAgent.__init__ called. Description: '{self.description}'. " 50 | f"LLM: {self.llm.model_id if self.llm else 'None'}. Tools: {list(self.tools.keys())}. " 51 | f"Callable Agents: {[cad.function['name'] for cad in self.callable_agent_definitions]}. " 52 | f"Strip Think Tags: {self.strip_think_tags}. " 53 | f"System Prompt: {bool(system_prompt_template)}. Config: {self.config}" 54 | ) 55 | logger.info( 56 | f"Agent '{agent_id}' initialized. LLM: {self.llm.model_id if self.llm else 'None'}. " 57 | f"Tools: {list(self.tools.keys())}. " 58 | f"Callable Agents: {[cad.function['name'] for cad in self.callable_agent_definitions]}. " 59 | f"Strip Think Tags: {self.strip_think_tags}." 60 | ) 61 | 62 | def _render_system_prompt(self, **kwargs_for_template: Any) -> Optional[Message]: 63 | agent_internal_debug_logger.debug( 64 | f"[{self.agent_id}] _render_system_prompt called. Template: '{self.system_prompt_template}', Args: {kwargs_for_template}" 65 | ) 66 | if not self.system_prompt_template: 67 | agent_internal_debug_logger.debug( 68 | f"[{self.agent_id}] No system_prompt_template defined." 
69 | ) 70 | return None 71 | try: 72 | prompt_format_args = kwargs_for_template.copy() 73 | tool_descriptions = "\n".join( 74 | [f"- {name}: {tool.description}" for name, tool in self.tools.items()] 75 | ) 76 | prompt_format_args["available_tools_descriptions"] = ( 77 | tool_descriptions or "No tools available." 78 | ) 79 | callable_agent_tool_descs = "\n".join( 80 | [ 81 | f"- {cad.function['name']}: {cad.function['description']}" 82 | for cad in self.callable_agent_definitions 83 | ] 84 | ) 85 | prompt_format_args["available_agents_descriptions"] = ( 86 | callable_agent_tool_descs or "No callable agents available." 87 | ) 88 | 89 | content = self.system_prompt_template.format(**prompt_format_args) 90 | msg = Message(role="system", content=content) 91 | agent_internal_debug_logger.debug( 92 | f"[{self.agent_id}] Rendered system prompt: {msg}" 93 | ) 94 | return msg 95 | except KeyError as e: 96 | agent_internal_debug_logger.warning( 97 | f"[{self.agent_id}] Missing key '{e}' for system_prompt_template. Template: '{self.system_prompt_template}'" 98 | ) 99 | logger.warning( 100 | f"Agent '{self.agent_id}': Missing key '{e}' for system_prompt_template. Template: '{self.system_prompt_template}'" 101 | ) 102 | try: 103 | content = self.system_prompt_template.format(**kwargs_for_template) 104 | return Message(role="system", content=content) 105 | except KeyError: 106 | return Message(role="system", content=self.system_prompt_template) 107 | 108 | def _post_process_llm_response(self, message: Message) -> Message: 109 | """Applies post-processing to the LLM response, like stripping think tags.""" 110 | if self.strip_think_tags and message.content: 111 | # Using regex to remove ... blocks, including newlines within them 112 | # re.DOTALL makes . match newlines as well 113 | # Non-greedy match .*? 
is important 114 | original_content = message.content 115 | processed_content = re.sub( 116 | r".*?\s*", "", original_content, flags=re.DOTALL 117 | ).strip() 118 | if processed_content != original_content: 119 | agent_internal_debug_logger.debug( 120 | f"[{self.agent_id}] Stripped think tags. Original length: {len(original_content)}, Processed length: {len(processed_content)}" 121 | ) 122 | logger.debug( 123 | f"Agent '{self.agent_id}': Stripped think tags. Original: '{original_content[:100]}...', Processed: '{processed_content[:100]}...'" 124 | ) 125 | message.content = processed_content 126 | return message 127 | 128 | @abstractmethod 129 | async def run(self, input_message: Union[str, Message], **kwargs: Any) -> Message: 130 | """ 131 | Primary execution method. Takes input, returns a single Message from the assistant. 132 | kwargs can be used for runtime overrides or additional context, like 'template_vars'. 133 | """ 134 | agent_internal_debug_logger.debug( 135 | f"[{self.agent_id}] Abstract run method invoked with input: {input_message}, kwargs: {kwargs}. (Implementation specific logs will follow)" 136 | ) 137 | pass 138 | 139 | def add_tool(self, tool: Tool): 140 | agent_internal_debug_logger.debug( 141 | f"[{self.agent_id}] add_tool called. Tool: {tool.name}" 142 | ) 143 | if tool.name in self.tools: 144 | agent_internal_debug_logger.warning( 145 | f"[{self.agent_id}] Tool '{tool.name}' already exists. Overwriting." 146 | ) 147 | logger.warning( 148 | f"Tool '{tool.name}' already exists in agent '{self.agent_id}'. Overwriting." 
149 | ) 150 | self.tools[tool.name] = tool 151 | logger.info(f"Tool '{tool.name}' added to agent '{self.agent_id}'.") 152 | 153 | @classmethod 154 | def get_agent_type_id(cls) -> str: 155 | return f"tframex.agents.{cls.__name__}" 156 | 157 | @classmethod 158 | def get_display_name(cls) -> str: 159 | return cls.__name__ 160 | -------------------------------------------------------------------------------- /tframex/agents/llm_agent.py: -------------------------------------------------------------------------------- 1 | import json 2 | import logging 3 | from typing import TYPE_CHECKING, Any, Dict, List, Optional, Union 4 | 5 | from tframex.models.primitives import FunctionCall, Message, ToolCall 6 | from tframex.util.llms import BaseLLMWrapper 7 | from tframex.util.memory import BaseMemoryStore 8 | from tframex.util.tools import Tool, ToolDefinition 9 | 10 | from .base import BaseAgent 11 | 12 | if TYPE_CHECKING: 13 | from tframex.util.engine import Engine 14 | 15 | logger = logging.getLogger(__name__) 16 | 17 | 18 | class LLMAgent(BaseAgent): 19 | """ 20 | An agent that uses an LLM to decide actions, potentially using tools, callable sub-agents, and memory. 
21 | """ 22 | 23 | def __init__( 24 | self, 25 | agent_id: str, 26 | llm: BaseLLMWrapper, # This is the actual resolved LLM the agent will use 27 | engine: "Engine", 28 | description: Optional[str] = None, 29 | tools: Optional[List[Tool]] = None, 30 | memory: Optional[BaseMemoryStore] = None, 31 | system_prompt_template: Optional[str] = "You are a helpful assistant.", 32 | callable_agent_definitions: Optional[List[ToolDefinition]] = None, 33 | strip_think_tags: bool = False, # NEW: Passed to BaseAgent 34 | max_tool_iterations: int = 5, 35 | **config: Any, 36 | ): 37 | super().__init__( 38 | agent_id, 39 | description=description, 40 | llm=llm, # Pass the resolved LLM 41 | tools=tools, 42 | memory=memory, 43 | system_prompt_template=system_prompt_template, 44 | callable_agent_definitions=callable_agent_definitions, 45 | strip_think_tags=strip_think_tags, # NEW: Pass to BaseAgent 46 | **config, # Pass other configs from decorator 47 | ) 48 | self.engine = engine 49 | self.max_tool_iterations = max_tool_iterations 50 | if not self.llm: # self.llm is inherited from BaseAgent and set by super() 51 | raise ValueError(f"LLMAgent '{self.agent_id}' requires an LLM instance.") 52 | 53 | async def run(self, input_message: Union[str, Message], **kwargs: Any) -> Message: 54 | if isinstance(input_message, str): 55 | current_user_message = Message(role="user", content=input_message) 56 | else: 57 | current_user_message = input_message 58 | 59 | await self.memory.add_message(current_user_message) 60 | 61 | template_vars_for_prompt = kwargs.get("template_vars", {}) 62 | 63 | for iteration_count in range(self.max_tool_iterations + 1): 64 | history = await self.memory.get_history( 65 | limit=self.config.get("history_limit", 10) 66 | ) 67 | messages_for_llm: List[Message] = [] 68 | 69 | system_message = self._render_system_prompt(**template_vars_for_prompt) 70 | if system_message: 71 | messages_for_llm.append(system_message) 72 | 73 | messages_for_llm.extend(history) 74 | 75 | 
llm_call_kwargs = {k: v for k, v in kwargs.items() if k != "template_vars"} 76 | 77 | all_tool_definitions_for_llm: List[Dict[str, Any]] = [] 78 | if self.tools: 79 | all_tool_definitions_for_llm.extend( 80 | [ 81 | tool.get_openai_tool_definition().model_dump() 82 | for tool in self.tools.values() 83 | ] 84 | ) 85 | 86 | if self.callable_agent_definitions: 87 | all_tool_definitions_for_llm.extend( 88 | [cad.model_dump() for cad in self.callable_agent_definitions] 89 | ) 90 | 91 | if all_tool_definitions_for_llm: 92 | llm_call_kwargs["tools"] = all_tool_definitions_for_llm 93 | llm_call_kwargs["tool_choice"] = self.config.get("tool_choice", "auto") 94 | 95 | logger.debug( 96 | f"Agent '{self.agent_id}' (LLM: {self.llm.model_id}) calling LLM " # UPDATED LOG 97 | f"(Iter {iteration_count+1}/{self.max_tool_iterations+1}). " 98 | f"History depth: {len(history)}. " 99 | f"Regular Tools defined: {len(self.tools)}. " 100 | f"Callable Agents as Tools defined: {len(self.callable_agent_definitions)}." 101 | ) 102 | 103 | assistant_response_message = await self.llm.chat_completion( 104 | messages_for_llm, stream=False, **llm_call_kwargs 105 | ) 106 | await self.memory.add_message(assistant_response_message) 107 | 108 | if ( 109 | not assistant_response_message.tool_calls 110 | or iteration_count >= self.max_tool_iterations 111 | ): 112 | logger.info( 113 | f"Agent '{self.agent_id}' concluding with textual response. Iter: {iteration_count+1}." 
114 | ) 115 | # NEW: Post-process before returning 116 | return self._post_process_llm_response(assistant_response_message) 117 | 118 | logger.info( 119 | f"Agent '{self.agent_id}' LLM requested tool_calls: {len(assistant_response_message.tool_calls)}" 120 | ) 121 | 122 | tool_response_messages: List[Message] = [] 123 | for tool_call in assistant_response_message.tool_calls: 124 | tool_name = tool_call.function.name 125 | tool_call_id = tool_call.id 126 | tool_args_json_str = tool_call.function.arguments 127 | 128 | is_sub_agent_call = any( 129 | cad.function["name"] == tool_name 130 | for cad in self.callable_agent_definitions 131 | ) 132 | 133 | if tool_name in self.tools: 134 | logger.info( 135 | f"Agent '{self.agent_id}' executing regular tool '{tool_name}'." 136 | ) 137 | tool_to_execute = self.tools[tool_name] 138 | tool_result_content = str( 139 | await tool_to_execute.execute(tool_args_json_str) 140 | ) 141 | elif is_sub_agent_call: 142 | logger.info( 143 | f"Agent '{self.agent_id}' calling sub-agent '{tool_name}' as a tool." 
144 | ) 145 | try: 146 | sub_agent_args = json.loads(tool_args_json_str) 147 | sub_agent_input_content = sub_agent_args.get( 148 | "input_message", "" 149 | ) 150 | if not sub_agent_input_content and isinstance( 151 | sub_agent_args, str 152 | ): 153 | sub_agent_input_content = sub_agent_args 154 | elif not sub_agent_input_content and tool_args_json_str: 155 | sub_agent_input_content = tool_args_json_str 156 | 157 | sub_agent_input_msg = Message( 158 | role="user", content=str(sub_agent_input_content) 159 | ) 160 | sub_agent_call_kwargs = { 161 | "template_vars": template_vars_for_prompt 162 | } 163 | 164 | sub_agent_response = await self.engine.call_agent( 165 | agent_name=tool_name, 166 | input_message=sub_agent_input_msg, 167 | **sub_agent_call_kwargs, 168 | ) 169 | # Sub-agent's response is already post-processed by its own .run() method if it's an LLMAgent 170 | tool_result_content = ( 171 | sub_agent_response.content 172 | or "[Sub-agent produced no content]" 173 | ) 174 | if sub_agent_response.tool_calls: 175 | tc_summary = json.dumps( 176 | [ 177 | tc.model_dump(exclude_none=True) 178 | for tc in sub_agent_response.tool_calls 179 | ] 180 | ) 181 | tool_result_content += f"\n[Sub-agent '{tool_name}' also made tool calls: {tc_summary}]" 182 | logger.debug( 183 | f"Agent '{self.agent_id}': Sub-agent '{tool_name}' response: {tool_result_content[:200]}" 184 | ) 185 | 186 | except json.JSONDecodeError as e: 187 | logger.error( 188 | f"Agent '{self.agent_id}': Invalid JSON arguments for sub-agent '{tool_name}': {tool_args_json_str}. Error: {e}" 189 | ) 190 | tool_result_content = f"Error: Invalid JSON arguments for sub-agent '{tool_name}'." 
191 | except Exception as e: 192 | logger.error( 193 | f"Agent '{self.agent_id}': Error calling sub-agent '{tool_name}': {e}", 194 | exc_info=True, 195 | ) 196 | tool_result_content = f"Error: Failed to execute sub-agent '{tool_name}': {str(e)}" 197 | else: 198 | logger.warning( 199 | f"Agent '{self.agent_id}': LLM requested unknown tool/agent '{tool_name}'. Available tools: {list(self.tools.keys())}, Callable agents: {[cad.function['name'] for cad in self.callable_agent_definitions]}" 200 | ) 201 | tool_result_content = ( 202 | f"Error: Tool or agent '{tool_name}' is not available to me." 203 | ) 204 | 205 | tool_response_messages.append( 206 | Message( 207 | role="tool", 208 | tool_call_id=tool_call_id, 209 | name=tool_name, 210 | content=tool_result_content, 211 | ) 212 | ) 213 | 214 | for tr_msg in tool_response_messages: 215 | await self.memory.add_message(tr_msg) 216 | 217 | logger.error( 218 | f"Agent '{self.agent_id}' exceeded max_tool_iterations ({self.max_tool_iterations}). Returning error message." 
219 | ) 220 | # NEW: Post-process error message too 221 | error_message = Message( 222 | role="assistant", 223 | content=f"Error: Agent {self.agent_id} exceeded maximum tool processing iterations.", 224 | ) 225 | return self._post_process_llm_response(error_message) 226 | -------------------------------------------------------------------------------- /tframex/agents/tool_agent.py: -------------------------------------------------------------------------------- 1 | import json # Added 2 | import logging 3 | from typing import Any, Dict, List, Optional, Union # Added List, Optional, Dict 4 | 5 | from tframex.models.primitives import Message 6 | from tframex.util.llms import BaseLLMWrapper # Added 7 | from tframex.util.memory import BaseMemoryStore # Added 8 | from tframex.util.tools import Tool 9 | 10 | from .base import BaseAgent 11 | 12 | logger = logging.getLogger(__name__) 13 | 14 | 15 | class ToolAgent(BaseAgent): 16 | """ 17 | A stateless agent that wraps a single Tool for direct execution. 18 | It does not use an LLM for decision-making itself. 19 | The agent expects to be configured with exactly one tool. 20 | Input to its `run` method should be a dictionary of arguments for the tool, 21 | a JSON string of those arguments, or a Message whose content is that JSON string. 22 | """ 23 | 24 | def __init__( 25 | self, 26 | agent_id: str, 27 | tools: List[ 28 | Tool 29 | ], # Will be populated by TFrameXApp based on @app.agent(tools=["tool_name"]) 30 | llm: Optional[BaseLLMWrapper] = None, # For signature compatibility 31 | memory: Optional[BaseMemoryStore] = None, # For signature compatibility 32 | system_prompt_template: Optional[str] = None, # For signature compatibility 33 | **config: Any, 34 | ): 35 | 36 | # Determine the single tool this agent will run 37 | # Option 1: Agent is configured with exactly one tool in the 'tools' list. 38 | # Option 2: Agent is configured with 'target_tool_name' in its **config, if multiple tools are somehow passed. 
39 | 40 | self.tool_to_run: Optional[Tool] = None 41 | effective_tools_for_base: List[Tool] = [] 42 | 43 | target_tool_name_from_config = config.get("target_tool_name") 44 | 45 | if target_tool_name_from_config: 46 | if not tools: 47 | raise ValueError( 48 | f"ToolAgent '{agent_id}' configured with target_tool_name '{target_tool_name_from_config}' but no tools list was provided." 49 | ) 50 | for t in tools: 51 | if t.name == target_tool_name_from_config: 52 | self.tool_to_run = t 53 | break 54 | if not self.tool_to_run: 55 | raise ValueError( 56 | f"ToolAgent '{agent_id}': Tool '{target_tool_name_from_config}' specified in config not found in the agent's tool list: {[t.name for t in tools]}." 57 | ) 58 | effective_tools_for_base = [self.tool_to_run] 59 | elif tools and len(tools) == 1: 60 | self.tool_to_run = tools[0] 61 | effective_tools_for_base = tools 62 | elif tools and len(tools) > 1: 63 | raise ValueError( 64 | f"ToolAgent '{agent_id}' was provided with multiple tools ({[t.name for t in tools]}) " 65 | f"but no 'target_tool_name' was specified in its configuration to select one." 66 | ) 67 | else: # No tools provided and no target_tool_name 68 | raise ValueError( 69 | f"ToolAgent '{agent_id}' must be configured with exactly one tool. " 70 | f"Provide one tool in the 'tools' list via @app.agent(tools=['my_tool_name']) " 71 | f"or specify 'target_tool_name' in agent_config if disambiguation is needed." 72 | ) 73 | 74 | # ToolAgent doesn't use an LLM, its own memory store, or a system prompt in the typical agent sense. 75 | super().__init__( 76 | agent_id, 77 | llm=None, 78 | tools=effective_tools_for_base, 79 | memory=None, 80 | system_prompt_template=None, 81 | **config, 82 | ) 83 | # self.tools dict in BaseAgent will contain the single tool. self.tool_to_run is also set. 
84 | 85 | async def run( 86 | self, input_message: Union[str, Message, Dict[str, Any]], **kwargs: Any 87 | ) -> Message: 88 | input_args: Dict[str, Any] 89 | 90 | if isinstance(input_message, dict): 91 | input_args = input_message 92 | elif isinstance(input_message, Message): 93 | if input_message.content is None: 94 | logger.error( 95 | f"ToolAgent '{self.agent_id}': Received Message with None content." 96 | ) 97 | return Message( 98 | role="assistant", 99 | name=self.tool_to_run.name, 100 | content="Error: Input Message has no content.", 101 | ) 102 | try: 103 | input_args = json.loads(input_message.content) 104 | except json.JSONDecodeError as e: 105 | logger.error( 106 | f"ToolAgent '{self.agent_id}': Invalid JSON in Message content: '{input_message.content}'. Error: {e}" 107 | ) 108 | return Message( 109 | role="assistant", 110 | name=self.tool_to_run.name, 111 | content=f"Error: Invalid JSON input for ToolAgent '{self.agent_id}'. Content was not valid JSON.", 112 | ) 113 | elif isinstance(input_message, str): 114 | try: 115 | input_args = json.loads(input_message) 116 | except json.JSONDecodeError as e: 117 | logger.error( 118 | f"ToolAgent '{self.agent_id}': Invalid JSON string input: '{input_message}'. Error: {e}" 119 | ) 120 | return Message( 121 | role="assistant", 122 | name=self.tool_to_run.name, 123 | content=f"Error: Invalid JSON string input for ToolAgent '{self.agent_id}'. Expected a JSON string of arguments.", 124 | ) 125 | else: 126 | return Message( 127 | role="assistant", 128 | name=self.tool_to_run.name, 129 | content=f"Error: Invalid input type for ToolAgent '{self.agent_id}'. Expected JSON string, dict, or Message with JSON content.", 130 | ) 131 | 132 | # Tool.execute expects a JSON string of arguments. 
133 | args_json_str_for_tool = json.dumps(input_args) 134 | 135 | logger.info( 136 | f"ToolAgent '{self.agent_id}' executing tool '{self.tool_to_run.name}' with JSON args: {args_json_str_for_tool[:200]}" 137 | ) 138 | try: 139 | tool_result = await self.tool_to_run.execute(args_json_str_for_tool) 140 | 141 | result_content: str 142 | if isinstance(tool_result, dict) and "error" in tool_result: 143 | result_content = f"Tool Error: {tool_result['error']}" 144 | elif isinstance( 145 | tool_result, (dict, list) 146 | ): # If result is complex, stringify as JSON 147 | result_content = json.dumps(tool_result) 148 | else: # Otherwise, simple string conversion 149 | result_content = str(tool_result) 150 | 151 | return Message( 152 | role="assistant", name=self.tool_to_run.name, content=result_content 153 | ) 154 | except Exception as e: 155 | logger.error( 156 | f"ToolAgent '{self.agent_id}' error running tool '{self.tool_to_run.name}': {e}", 157 | exc_info=True, 158 | ) 159 | return Message( 160 | role="assistant", 161 | name=self.tool_to_run.name, 162 | content=f"Error executing tool '{self.tool_to_run.name}': {str(e)}", 163 | ) 164 | -------------------------------------------------------------------------------- /tframex/app.py: -------------------------------------------------------------------------------- 1 | import asyncio 2 | import inspect 3 | import logging 4 | import os # Already present, used in __init__ 5 | from typing import Any, Callable, Coroutine, Dict, List, Optional, Type, Union 6 | 7 | from .agents.base import BaseAgent 8 | from .agents.llm_agent import LLMAgent 9 | from .agents.tool_agent import ToolAgent 10 | from .flows.flow_context import FlowContext 11 | from .flows.flows import Flow 12 | from .models.primitives import Message, MessageChunk # MessageChunk already present 13 | from .patterns.patterns import BasePattern 14 | from .util.engine import Engine 15 | from .util.llms import BaseLLMWrapper 16 | from .util.logging.logging_config import 
from .util.memory import BaseMemoryStore, InMemoryMemoryStore
from .util.tools import (  # ToolDefinition already present
    Tool,
    ToolDefinition,
    ToolParameterProperty,
    ToolParameters,
)

# Configure colored logging once at import time.
setup_logging(level=logging.INFO)

logger = logging.getLogger("tframex.app")


class TFrameXApp:
    """Central registry for tools, agents and flows.

    Holds application-wide defaults (LLM, memory-store factory) and exposes
    the decorators used to register tools and agents.
    """

    def __init__(
        self,
        default_llm: Optional[BaseLLMWrapper] = None,
        default_memory_store_factory: Callable[
            [], BaseMemoryStore
        ] = InMemoryMemoryStore,
    ):
        # Name-keyed registries.
        self._tools: Dict[str, Tool] = {}
        # Agent registration info: type, ref (class/function) and config dict.
        self._agents: Dict[str, Dict[str, Any]] = {}
        self._flows: Dict[str, Flow] = {}

        self.default_llm = default_llm
        self.default_memory_store_factory = default_memory_store_factory

        # Warn early when no default LLM is configured, unless explicitly allowed.
        if not default_llm and not os.getenv("TFRAMEX_ALLOW_NO_DEFAULT_LLM"):
            logger.warning(
                "TFrameXApp initialized without a default LLM. LLM must be provided to run_context or agent if they don't have an override."
            )

    def tool(
        self,
        name: Optional[str] = None,
        description: Optional[str] = None,
        parameters_schema: Optional[Dict[str, Dict[str, Any]]] = None,
    ) -> Callable:
        """Decorator registering a callable as a Tool.

        The tool name defaults to the function's __name__; an optional
        JSON-schema-like ``parameters_schema`` describes its arguments.
        """

        def decorator(func: Callable[..., Any]) -> Callable:
            tool_name = name or func.__name__
            if tool_name in self._tools:
                raise ValueError(f"Tool '{tool_name}' already registered.")

            parsed_params_schema = None
            if parameters_schema:
                properties = {
                    key: ToolParameterProperty(**definition)
                    for key, definition in parameters_schema.get(
                        "properties", {}
                    ).items()
                }
                # Ignore a malformed "required" entry that is not a list.
                required = parameters_schema.get("required")
                if not isinstance(required, list):
                    required = None
                parsed_params_schema = ToolParameters(
                    properties=properties, required=required
                )

            self._tools[tool_name] = Tool(
                name=tool_name,
                func=func,
                description=description,
                parameters_schema=parsed_params_schema,
            )
            logger.debug(f"Registered tool: '{tool_name}'")
            return func

        return decorator

    def agent(
        self,
        name: Optional[str] = None,
        description: Optional[str] = None,
        callable_agents: Optional[List[str]] = None,
        system_prompt: Optional[str] = None,
        tools: Optional[List[str]] = None,
        llm: Optional[BaseLLMWrapper] = None,  # agent-specific LLM override
        memory_store: Optional[BaseMemoryStore] = None,
        agent_class: type[BaseAgent] = LLMAgent,
        strip_think_tags: bool = True,  # agent-specific setting
        **agent_config: Any,
    ) -> Callable:
        """Decorator registering a function or BaseAgent subclass as an agent."""

        def decorator(target: Union[Callable, type]) -> Union[Callable, type]:
            agent_name = name or getattr(target, "__name__", str(target))
            if agent_name in self._agents:
                raise ValueError(f"Agent '{agent_name}' already registered.")

            registration_config = {
                "description": description,
                "callable_agent_names": callable_agents or [],
                "system_prompt_template": system_prompt,
                "tool_names": tools or [],
                "llm_instance_override": llm,
                "memory_override": memory_store,
                "agent_class_ref": agent_class,
                "strip_think_tags": strip_think_tags,
                **agent_config,
            }

            # A BaseAgent subclass is registered as-is; anything else is wrapped
            # by the framework using ``agent_class``.
            is_custom_class = inspect.isclass(target) and issubclass(
                target, BaseAgent
            )
            logged_class_name = (
                target.__name__ if is_custom_class else agent_class.__name__
            )

            self._agents[agent_name] = {
                "type": (
                    "custom_class_agent"
                    if is_custom_class
                    else "framework_managed_agent"
                ),
                "ref": target,
                "config": registration_config,
            }
            logger.debug(
                f"Registered agent: '{agent_name}' (Description: '{description}', "
                f"Class: {logged_class_name}, "
                f"LLM Override: {llm.model_id if llm else 'None'}, "
                f"Callable Agents: {callable_agents or []}, "
                f"Strip Think Tags: {strip_think_tags})"
            )
            return target

        return decorator

    def get_tool(self, name: str) -> Optional[Tool]:
        """Look up a registered tool by name (None when absent)."""
        return self._tools.get(name)

    def register_flow(self, flow_instance: Flow) -> None:
        """Register a Flow instance under its flow_name."""
        if not isinstance(flow_instance, Flow):
            raise TypeError("Can only register an instance of the Flow class.")
        if flow_instance.flow_name in self._flows:
            raise ValueError(
                f"Flow with name '{flow_instance.flow_name}' already registered."
            )
        self._flows[flow_instance.flow_name] = flow_instance
        logger.debug(
            f"Registered flow: '{flow_instance.flow_name}' with {len(flow_instance.steps)} steps."
        )
159 | ) 160 | 161 | def get_flow(self, name: str) -> Optional[Flow]: 162 | return self._flows.get(name) 163 | 164 | def run_context( 165 | self, 166 | llm_override: Optional[BaseLLMWrapper] = None, 167 | context_memory_override: Optional[BaseMemoryStore] = None, 168 | ) -> "TFrameXRuntimeContext": 169 | ctx_llm = llm_override or self.default_llm 170 | ctx_memory = context_memory_override 171 | return TFrameXRuntimeContext(self, llm=ctx_llm, context_memory=ctx_memory) 172 | 173 | 174 | class TFrameXRuntimeContext: 175 | def __init__( 176 | self, 177 | app: TFrameXApp, 178 | llm: Optional[BaseLLMWrapper], 179 | context_memory: Optional[BaseMemoryStore] = None, 180 | ): 181 | self._app = app 182 | self.llm = llm # Context-level LLM 183 | self.context_memory = context_memory 184 | self.engine = Engine(app, self) 185 | 186 | async def __aenter__(self) -> "TFrameXRuntimeContext": 187 | llm_id = self.llm.model_id if self.llm else "None" 188 | ctx_mem_type = ( 189 | type(self.context_memory).__name__ if self.context_memory else "None" 190 | ) 191 | logger.info( 192 | f"TFrameXRuntimeContext entered. LLM: {llm_id}. Context Memory: {ctx_mem_type}" 193 | ) 194 | return self 195 | 196 | async def __aexit__(self, exc_type: Any, exc_val: Any, exc_tb: Any) -> None: 197 | # Close own LLM if it has a close method (some LLM wrappers might need this) 198 | if ( 199 | self.llm 200 | and hasattr(self.llm, "close") 201 | and inspect.iscoroutinefunction(self.llm.close) 202 | ): 203 | await self.llm.close() 204 | logger.info( 205 | f"TFrameXRuntimeContext exited. Context LLM client closed for {self.llm.model_id}." 206 | ) 207 | elif self.llm: 208 | logger.info( 209 | f"TFrameXRuntimeContext exited. Context LLM {self.llm.model_id} did not require async close." 210 | ) 211 | else: 212 | logger.info("TFrameXRuntimeContext exited. 
No context LLM client to close.") 213 | # Note: Agent-specific LLMs are not closed here; they are managed by the agent or assumed to share lifetime with context/app LLM. 214 | # If agent LLMs need explicit closing, agent's __del__ or a specific cleanup phase would handle it. 215 | 216 | async def run_flow( 217 | self, 218 | flow_ref: Union[str, Flow], 219 | initial_input: Message, 220 | initial_shared_data: Optional[Dict[str, Any]] = None, 221 | flow_template_vars: Optional[Dict[str, Any]] = None, 222 | ) -> FlowContext: 223 | flow_to_run: Optional[Flow] = None 224 | if isinstance(flow_ref, str): 225 | flow_to_run = self._app.get_flow(flow_ref) 226 | if not flow_to_run: 227 | raise ValueError(f"Flow with name '{flow_ref}' not found.") 228 | elif isinstance(flow_ref, Flow): 229 | flow_to_run = flow_ref 230 | else: 231 | raise TypeError("flow_ref must be a flow name (str) or a Flow instance.") 232 | return await flow_to_run.execute( 233 | initial_input, 234 | self.engine, 235 | initial_shared_data=initial_shared_data, 236 | flow_template_vars=flow_template_vars, 237 | ) 238 | 239 | async def interactive_chat(self, default_flow_name: Optional[str] = None) -> None: 240 | print("\n--- TFrameX Interactive Flow Chat ---") 241 | 242 | flow_to_use: Optional[Flow] = None 243 | if default_flow_name: 244 | flow_to_use = self._app.get_flow(default_flow_name) 245 | if flow_to_use: 246 | print(f"Default flow: '{default_flow_name}'") 247 | else: 248 | print(f"Warning: Default flow '{default_flow_name}' not found.") 249 | 250 | if not flow_to_use: 251 | if not self._app._flows: 252 | print( 253 | "No flows registered in the application. Exiting interactive chat." 254 | ) 255 | return 256 | 257 | print("Available flows:") 258 | flow_names_list = list(self._app._flows.keys()) 259 | for i, name in enumerate(flow_names_list): 260 | print(f" {i + 1}. 
{name}") 261 | 262 | while True: 263 | try: 264 | choice_str = await asyncio.to_thread( 265 | input, 266 | "Select a flow to chat with (number or name, or 'exit'): ", 267 | ) 268 | if choice_str.lower() == "exit": 269 | return 270 | 271 | selected_flow_name: Optional[str] = None 272 | if choice_str.isdigit(): 273 | choice_idx = int(choice_str) - 1 274 | if 0 <= choice_idx < len(flow_names_list): 275 | selected_flow_name = flow_names_list[choice_idx] 276 | else: 277 | if choice_str in self._app._flows: 278 | selected_flow_name = choice_str 279 | 280 | if selected_flow_name: 281 | flow_to_use = self._app.get_flow(selected_flow_name) 282 | break 283 | else: 284 | print("Invalid selection. Please try again.") 285 | except ValueError: 286 | print("Invalid input. Please enter a number or flow name.") 287 | except KeyboardInterrupt: 288 | print("\nExiting.") 289 | return 290 | 291 | if not flow_to_use: 292 | print("No flow selected. Exiting.") 293 | return 294 | 295 | print(f"\n--- Chatting with Flow: '{flow_to_use.flow_name}' ---") 296 | print(f"Description: {flow_to_use.description or 'No description'}") 297 | print("Type 'exit' or 'quit' to end this chat session.") 298 | 299 | while True: 300 | try: 301 | user_input_str = await asyncio.to_thread(input, "\nYou: ") 302 | if user_input_str.lower() in ["exit", "quit"]: 303 | break 304 | if not user_input_str.strip(): 305 | continue 306 | 307 | initial_message = Message(role="user", content=user_input_str) 308 | 309 | logger.info( 310 | f"CLI: Running flow '{flow_to_use.flow_name}' with input: '{user_input_str}'" 311 | ) 312 | final_flow_context: FlowContext = await self.run_flow( 313 | flow_to_use, initial_message 314 | ) 315 | 316 | final_output_message = final_flow_context.current_message 317 | 318 | print(f"\nFlow Output ({final_output_message.role}):") 319 | if final_output_message.content: 320 | print(f" Content: {final_output_message.content}") 321 | 322 | if final_output_message.tool_calls: 323 | print( 324 | f" 
Final Message Tool Calls (Unprocessed by Flow): {final_output_message.tool_calls}" 325 | ) 326 | 327 | if final_flow_context.shared_data: 328 | print(" Flow Shared Data (at end of execution):") 329 | for key, value in final_flow_context.shared_data.items(): 330 | value_str = str(value) 331 | print( 332 | f" {key}: {value_str[:200]}{'...' if len(value_str) > 200 else ''}" 333 | ) 334 | 335 | except KeyboardInterrupt: 336 | print("\nExiting chat session.") 337 | break 338 | except Exception as e: 339 | print( 340 | f"Error during interactive chat with flow '{flow_to_use.flow_name}': {e}" 341 | ) 342 | logger.error( 343 | f"Error in interactive_chat with flow '{flow_to_use.flow_name}'", 344 | exc_info=True, 345 | ) 346 | 347 | print(f"--- Ended chat with Flow: '{flow_to_use.flow_name}' ---") 348 | -------------------------------------------------------------------------------- /tframex/flows/__init__.py: -------------------------------------------------------------------------------- 1 | # tframex/flows/__init__.py 2 | from .flow_context import FlowContext 3 | from .flows import Flow 4 | 5 | __all__ = ["FlowContext", "Flow"] -------------------------------------------------------------------------------- /tframex/flows/flow_context.py: -------------------------------------------------------------------------------- 1 | from typing import Any, Dict, Optional 2 | 3 | from ..models.primitives import Message 4 | 5 | 6 | class FlowContext: 7 | """ 8 | Holds the current state and data being processed within a single execution of a Flow. 
9 | """ 10 | 11 | def __init__( 12 | self, initial_input: Message, shared_data: Optional[Dict[str, Any]] = None 13 | ): 14 | self.current_message: Message = initial_input 15 | self.history: list[Message] = [ 16 | initial_input 17 | ] # History of messages within this flow execution 18 | self.shared_data: Dict[str, Any] = ( 19 | shared_data or {} 20 | ) # For patterns/steps to share data 21 | 22 | def update_current_message(self, message: Message): 23 | self.current_message = message 24 | self.history.append(message) 25 | 26 | def __str__(self): 27 | return ( 28 | f"FlowContext(current_message='{str(self.current_message.content)[:50]}...', " 29 | f"history_len={len(self.history)}, shared_data_keys={list(self.shared_data.keys())})" 30 | ) 31 | -------------------------------------------------------------------------------- /tframex/flows/flows.py: -------------------------------------------------------------------------------- 1 | import inspect 2 | import logging 3 | from typing import TYPE_CHECKING, Any, Dict, List, Optional, Tuple, Union 4 | 5 | import yaml # NEW IMPORT 6 | 7 | from ..models.primitives import Message 8 | from ..patterns.patterns import ( 9 | BasePattern, 10 | DiscussionPattern, 11 | ParallelPattern, 12 | RouterPattern, 13 | SequentialPattern, 14 | ) 15 | from ..util.engine import Engine 16 | from .flow_context import FlowContext 17 | 18 | logger = logging.getLogger(__name__) 19 | 20 | 21 | class Flow: 22 | """ 23 | Represents a defined sequence of operations (agents or patterns) to be executed. 24 | """ 25 | 26 | def __init__(self, flow_name: str, description: Optional[str] = None): 27 | self.flow_name = flow_name 28 | self.description = description 29 | self.steps: List[Union[str, BasePattern]] = ( 30 | [] 31 | ) # str for agent_name, or BasePattern instance 32 | logger.debug(f"Flow '{self.flow_name}' initialized.") 33 | 34 | def add_step(self, step: Union[str, BasePattern]): 35 | """Adds a step to the flow. 
A step can be an agent name or a Pattern instance.""" 36 | if not isinstance(step, (str, BasePattern)): 37 | raise TypeError( 38 | "Flow step must be an agent name (str) or a BasePattern instance." 39 | ) 40 | self.steps.append(step) 41 | logger.debug( 42 | f"Flow '{self.flow_name}': Added step '{str(step)}'. Total steps: {len(self.steps)}." 43 | ) 44 | return self 45 | 46 | async def execute( 47 | self, 48 | initial_input: Message, 49 | engine: Engine, 50 | initial_shared_data: Optional[Dict[str, Any]] = None, 51 | flow_template_vars: Optional[Dict[str, Any]] = None, 52 | ) -> FlowContext: 53 | """ 54 | Executes the flow with the given initial input and runtime context. 55 | Returns the final FlowContext after all steps. 56 | """ 57 | logger.info( 58 | f"Executing Flow '{self.flow_name}' with {len(self.steps)} steps. Initial input: {str(initial_input.content)[:50]}..." 59 | ) 60 | 61 | flow_ctx = FlowContext( 62 | initial_input=initial_input, shared_data=initial_shared_data 63 | ) 64 | agent_call_kwargs = ( 65 | {"template_vars": flow_template_vars} if flow_template_vars else {} 66 | ) 67 | 68 | for i, step in enumerate(self.steps): 69 | step_name = str(step) if isinstance(step, BasePattern) else step 70 | logger.info( 71 | f"Flow '{self.flow_name}' - Step {i+1}/{len(self.steps)}: Executing '{step_name}'. Current input: {str(flow_ctx.current_message.content)[:50]}..." 
72 | ) 73 | 74 | try: 75 | if isinstance(step, str): 76 | output_message = await engine.call_agent( 77 | step, flow_ctx.current_message, **agent_call_kwargs 78 | ) 79 | flow_ctx.update_current_message(output_message) 80 | elif isinstance(step, BasePattern): 81 | # Patterns need to be aware of flow_template_vars if they call agents directly 82 | # Pass agent_call_kwargs to pattern's execute method 83 | flow_ctx = await step.execute( 84 | flow_ctx, engine, agent_call_kwargs=agent_call_kwargs 85 | ) 86 | else: 87 | raise TypeError( 88 | f"Invalid step type in flow '{self.flow_name}': {type(step)}" 89 | ) 90 | 91 | logger.info( 92 | f"Flow '{self.flow_name}' - Step {i+1} ('{step_name}') completed. Output: {str(flow_ctx.current_message.content)[:50]}..." 93 | ) 94 | 95 | if flow_ctx.shared_data.get("STOP_FLOW", False): 96 | logger.info( 97 | f"Flow '{self.flow_name}' - STOP_FLOW signal received. Halting execution." 98 | ) 99 | break 100 | 101 | except Exception as e: 102 | logger.error( 103 | f"Error during Flow '{self.flow_name}' at step '{step_name}': {e}", 104 | exc_info=True, 105 | ) 106 | error_msg = Message( 107 | role="assistant", 108 | content=f"Error in flow '{self.flow_name}' at step '{step_name}': {e}", 109 | ) 110 | flow_ctx.update_current_message(error_msg) 111 | return flow_ctx 112 | 113 | logger.info( 114 | f"Flow '{self.flow_name}' completed. Final output: {str(flow_ctx.current_message.content)[:50]}..." 115 | ) 116 | return flow_ctx 117 | 118 | # --- Documentation Generation Methods --- 119 | 120 | def generate_documentation(self, app: "TFrameXApp") -> Tuple[str, str]: 121 | """ 122 | Generates a Mermaid diagram string and a YAML representation for the entire flow. 123 | Requires a TFrameXApp instance to look up agent and tool details. 
124 | """ 125 | if not TYPE_CHECKING: # Runtime imports if not already loaded by type checker 126 | from ..agents.llm_agent import LLMAgent 127 | from ..models.primitives import ToolDefinition 128 | from ..util.tools import ToolParameterProperty, ToolParameters 129 | 130 | yaml_data = self._generate_yaml_data(app) 131 | # Pass yaml_data to Mermaid generation to reuse processed structure and IDs if needed, 132 | # though current Mermaid generator re-traverses for simplicity. 133 | mermaid_string = self._generate_mermaid_string(app, yaml_data) 134 | 135 | yaml_string = yaml.dump( 136 | yaml_data, sort_keys=False, indent=2, width=120, default_flow_style=False 137 | ) 138 | return mermaid_string, yaml_string 139 | 140 | def _get_tool_details_for_yaml( 141 | self, tool_name: str, app: "TFrameXApp" 142 | ) -> Dict[str, Any]: 143 | from ..util.tools import ( # Ensure available at runtime 144 | ToolParameterProperty, 145 | ToolParameters, 146 | ) 147 | 148 | tool_obj = app.get_tool(tool_name) 149 | if not tool_obj: 150 | return {"name": tool_name, "error": "Tool not found in app registry"} 151 | 152 | params_repr = {} 153 | if tool_obj.parameters and tool_obj.parameters.properties: 154 | for p_name, p_prop_model in tool_obj.parameters.properties.items(): 155 | # p_prop_model is ToolParameterProperty instance 156 | params_repr[p_name] = { 157 | "type": p_prop_model.type, 158 | "description": p_prop_model.description, 159 | } 160 | if p_prop_model.enum: 161 | params_repr[p_name]["enum"] = p_prop_model.enum 162 | 163 | required_params = [] 164 | if tool_obj.parameters and tool_obj.parameters.required: 165 | required_params = tool_obj.parameters.required 166 | 167 | return { 168 | "name": tool_obj.name, 169 | "description": tool_obj.description, 170 | "parameters": ( 171 | {"properties": params_repr, "required": required_params} 172 | if params_repr 173 | else "No parameters defined" 174 | ), 175 | } 176 | 177 | def _get_agent_details_for_yaml( 178 | self, agent_name: str, 
app: "TFrameXApp" 179 | ) -> Dict[str, Any]: 180 | from ..agents.llm_agent import LLMAgent # For default type 181 | from ..models.primitives import ToolDefinition 182 | from ..util.tools import ToolParameterProperty, ToolParameters 183 | 184 | if agent_name not in app._agents: 185 | return {"name": agent_name, "error": "Agent not registered in app"} 186 | 187 | reg_info = app._agents[agent_name] 188 | config = reg_info["config"] 189 | 190 | # Determine agent class, defaulting to LLMAgent if not specified or not a class 191 | agent_class_ref = config.get("agent_class_ref", LLMAgent) 192 | agent_type_name = ( 193 | agent_class_ref.__name__ 194 | if inspect.isclass(agent_class_ref) 195 | else LLMAgent.__name__ 196 | ) 197 | 198 | agent_details = { 199 | "name": agent_name, 200 | "type": agent_type_name, 201 | "description": config.get("description"), 202 | "system_prompt": config.get("system_prompt_template"), 203 | "strip_think_tags": config.get("strip_think_tags", False), 204 | } 205 | 206 | llm_override = config.get("llm_instance_override") 207 | if llm_override: 208 | agent_details["llm"] = { 209 | "model_id": llm_override.model_id, 210 | "api_base_url": llm_override.api_base_url, 211 | } 212 | else: 213 | agent_details["llm"] = "Uses context/app default LLM" 214 | 215 | tool_names = config.get("tool_names", []) 216 | if tool_names: 217 | agent_details["tools"] = [ 218 | self._get_tool_details_for_yaml(tn, app) for tn in tool_names 219 | ] 220 | 221 | callable_agent_names_for_this_agent = config.get("callable_agent_names", []) 222 | if callable_agent_names_for_this_agent: 223 | callable_agents_tool_defs = [] 224 | for ca_name_to_call in callable_agent_names_for_this_agent: 225 | if ca_name_to_call in app._agents: 226 | called_agent_reg_info = app._agents[ca_name_to_call] 227 | called_agent_desc = ( 228 | called_agent_reg_info["config"].get("description") 229 | or f"Agent '{ca_name_to_call}' performing its designated role." 
230 | ) 231 | 232 | agent_tool_params_dict = ToolParameters( 233 | properties={ 234 | "input_message": ToolParameterProperty( 235 | type="string", 236 | description=f"The specific query, task, or input content to pass to the '{ca_name_to_call}' agent.", 237 | ), 238 | }, 239 | required=["input_message"], 240 | ).model_dump(exclude_none=True) 241 | 242 | tool_def_for_ca = ToolDefinition( 243 | type="function", 244 | function={ 245 | "name": ca_name_to_call, 246 | "description": called_agent_desc, 247 | "parameters": agent_tool_params_dict, 248 | }, 249 | ) 250 | callable_agents_tool_defs.append(tool_def_for_ca.model_dump()) 251 | else: 252 | callable_agents_tool_defs.append( 253 | { 254 | "name": ca_name_to_call, 255 | "error": "Callable agent not registered", 256 | } 257 | ) 258 | if callable_agents_tool_defs: # Only add if not empty 259 | agent_details["callable_agents_as_tools"] = callable_agents_tool_defs 260 | return agent_details 261 | 262 | def _generate_yaml_data_recursive( 263 | self, step_or_task: Union[str, BasePattern], app: "TFrameXApp" 264 | ) -> Dict[str, Any]: 265 | if isinstance(step_or_task, str): 266 | return { 267 | "type": "agent", 268 | **self._get_agent_details_for_yaml(step_or_task, app), 269 | } 270 | elif isinstance(step_or_task, BasePattern): 271 | pattern_data: Dict[str, Any] = { 272 | "type": "pattern", 273 | "pattern_type": step_or_task.__class__.__name__, 274 | "name": step_or_task.pattern_name, 275 | } 276 | if isinstance(step_or_task, SequentialPattern): 277 | pattern_data["steps"] = [ 278 | self._generate_yaml_data_recursive(s, app) 279 | for s in step_or_task.steps 280 | ] 281 | elif isinstance(step_or_task, ParallelPattern): 282 | pattern_data["tasks"] = [ 283 | self._generate_yaml_data_recursive(t, app) 284 | for t in step_or_task.tasks 285 | ] 286 | elif isinstance(step_or_task, RouterPattern): 287 | pattern_data["router_agent"] = self._get_agent_details_for_yaml( 288 | step_or_task.router_agent_name, app 289 | ) 290 | 
routes_yaml = {} 291 | for key, target_step in step_or_task.routes.items(): 292 | routes_yaml[key] = self._generate_yaml_data_recursive( 293 | target_step, app 294 | ) 295 | pattern_data["routes"] = routes_yaml 296 | if step_or_task.default_route: 297 | pattern_data["default_route"] = self._generate_yaml_data_recursive( 298 | step_or_task.default_route, app 299 | ) 300 | elif isinstance(step_or_task, DiscussionPattern): 301 | pattern_data["participants"] = [ 302 | self._get_agent_details_for_yaml(p_name, app) 303 | for p_name in step_or_task.participant_agent_names 304 | ] 305 | if step_or_task.moderator_agent_name: 306 | pattern_data["moderator"] = self._get_agent_details_for_yaml( 307 | step_or_task.moderator_agent_name, app 308 | ) 309 | pattern_data["rounds"] = step_or_task.discussion_rounds 310 | pattern_data["stop_phrase"] = step_or_task.stop_phrase 311 | return pattern_data 312 | else: 313 | return {"error": f"Unknown step type: {type(step_or_task)}"} 314 | 315 | def _generate_yaml_data(self, app: "TFrameXApp") -> Dict[str, Any]: 316 | flow_data = { 317 | "flow": { 318 | "name": self.flow_name, 319 | "description": self.description, 320 | "steps": [ 321 | self._generate_yaml_data_recursive(step, app) for step in self.steps 322 | ], 323 | } 324 | } 325 | return flow_data 326 | 327 | def _generate_mermaid_string( 328 | self, app: "TFrameXApp", yaml_data_for_ids: Dict[str, Any] 329 | ) -> str: 330 | # yaml_data_for_ids is passed but not explicitly used to re-fetch IDs in this version. 331 | # The traversal logic for Mermaid is self-contained here. 
332 | 333 | mermaid_lines = ["graph TD"] 334 | node_counter = 0 # Used for unique node IDs 335 | 336 | def escape_mermaid_label(label: str) -> str: 337 | """Escapes characters in labels and wraps in quotes.""" 338 | if not label: 339 | return '""' 340 | # Replace quotes with HTML entity, backticks with spaces (or other entity if preferred) 341 | escaped = ( 342 | label.replace('"', "#quot;").replace("`", "`").replace("\n", "\\n") 343 | ) 344 | return f'"{escaped}"' 345 | 346 | def get_item_name(item_data_dict_or_str: Union[str, Dict[str, Any]]) -> str: 347 | if isinstance(item_data_dict_or_str, str): # Agent name string 348 | return item_data_dict_or_str 349 | # For dicts (parsed YAML structure for an item) 350 | name = item_data_dict_or_str.get("name", "UnnamedItem") 351 | if item_data_dict_or_str.get("type") == "pattern": 352 | name = f"{item_data_dict_or_str.get('pattern_type', 'Pattern')}_{name}" 353 | return name 354 | 355 | # Main recursive function to add elements to Mermaid 356 | def add_mermaid_element( 357 | element_data: Union[ 358 | str, Dict[str, Any] 359 | ], # Can be agent name string or dict from YAML parse 360 | parent_id_prefix: str, 361 | prev_node_id_in_sequence: str, 362 | edge_label: Optional[str] = None, 363 | ) -> str: 364 | nonlocal node_counter 365 | 366 | # If element_data is a string, it's an agent name. Fetch its details. 367 | if isinstance(element_data, str): 368 | element_name_for_id = element_data 369 | # This agent's details are not yet fetched in this path, but we need them for the label. 370 | # For Mermaid, we might not need full YAML details, just name and type. 371 | # This implies _generate_mermaid_string should probably traverse the *original* flow steps, 372 | # not the YAML data, to have access to BasePattern instances etc. 373 | # Let's adjust: _generate_mermaid_string should call a new recursive helper that works on flow.steps directly. 
374 | # For now, I will adapt to use the YAML structure that `yaml_data_for_ids` provides. 375 | # The `element_data` will be the dict from the YAML structure. 376 | if not isinstance( 377 | element_data, dict 378 | ): # Should not happen if called with YAML structure 379 | logger.error( 380 | f"Mermaid generator expected dict, got {type(element_data)}: {element_data}" 381 | ) 382 | return prev_node_id_in_sequence 383 | else: # It's a dict 384 | element_name_for_id = get_item_name(element_data) 385 | 386 | current_node_id = f"{parent_id_prefix}_{element_name_for_id.replace(' ', '_')}_{node_counter}" 387 | node_counter += 1 388 | 389 | label_text = "" 390 | if element_data.get("type") == "agent": 391 | label_text = f"Agent: {element_data.get('name', 'UnknownAgent')}" 392 | if element_data.get("tools"): 393 | label_text += f"\\nTools: {len(element_data['tools'])}" 394 | if element_data.get("callable_agents_as_tools"): 395 | label_text += ( 396 | f"\\nCalls: {len(element_data['callable_agents_as_tools'])}" 397 | ) 398 | mermaid_lines.append( 399 | f" {current_node_id}[{escape_mermaid_label(label_text)}]" 400 | ) 401 | 402 | elif element_data.get("type") == "pattern": 403 | pattern_type = element_data.get("pattern_type", "UnknownPattern") 404 | pattern_name = element_data.get("name", "UnnamedPattern") 405 | label_text = f"Pattern: {pattern_type}\\n({pattern_name})" 406 | 407 | # Define subgraph for the pattern 408 | mermaid_lines.append( 409 | f" subgraph {current_node_id}_sub [{escape_mermaid_label(label_text)}]" 410 | ) 411 | mermaid_lines.append(" direction LR") # Default for patterns 412 | 413 | pattern_internal_start_id = f"{current_node_id}_start" 414 | pattern_internal_end_id = f"{current_node_id}_end" 415 | mermaid_lines.append( 416 | f" {pattern_internal_start_id}((:))" 417 | ) # Smaller start/end for pattern internals 418 | mermaid_lines.append(f" {pattern_internal_end_id}((:))") 419 | 420 | # Link from previous sequence node to pattern's internal start 421 | 
connection_str = ( 422 | f" -->|{escape_mermaid_label(edge_label)}|" 423 | if edge_label 424 | else " -->" 425 | ) 426 | mermaid_lines.append( 427 | f" {prev_node_id_in_sequence}{connection_str} {pattern_internal_start_id}" 428 | ) 429 | 430 | # Process pattern-specific content 431 | if pattern_type == "SequentialPattern": 432 | prev_in_pattern = pattern_internal_start_id 433 | for sub_step_data in element_data.get("steps", []): 434 | prev_in_pattern = add_mermaid_element( 435 | sub_step_data, current_node_id, prev_in_pattern 436 | ) 437 | mermaid_lines.append( 438 | f" {prev_in_pattern} --> {pattern_internal_end_id}" 439 | ) 440 | 441 | elif pattern_type == "ParallelPattern": 442 | for task_data in element_data.get("tasks", []): 443 | task_output_node = add_mermaid_element( 444 | task_data, current_node_id, pattern_internal_start_id 445 | ) 446 | mermaid_lines.append( 447 | f" {task_output_node} --> {pattern_internal_end_id}" 448 | ) 449 | 450 | elif pattern_type == "RouterPattern": 451 | router_agent_data = element_data.get("router_agent", {}) 452 | # router_agent_node_id = add_mermaid_element(router_agent_data, current_node_id, pattern_internal_start_id) 453 | # Simplified router agent node definition: 454 | router_agent_name = router_agent_data.get("name", "RouterAgent") 455 | router_agent_node_id = ( 456 | f"{current_node_id}_{router_agent_name.replace(' ','_')}" 457 | ) 458 | mermaid_lines.append( 459 | f" {router_agent_node_id}[{escape_mermaid_label(f'Router: {router_agent_name}')}]" 460 | ) 461 | mermaid_lines.append( 462 | f" {pattern_internal_start_id} --> {router_agent_node_id}" 463 | ) 464 | 465 | for route_key, target_data in element_data.get( 466 | "routes", {} 467 | ).items(): 468 | target_output_node = add_mermaid_element( 469 | target_data, 470 | current_node_id, 471 | router_agent_node_id, 472 | edge_label=route_key, 473 | ) 474 | mermaid_lines.append( 475 | f" {target_output_node} --> {pattern_internal_end_id}" 476 | ) 477 | if 
element_data.get("default_route"): 478 | default_target_output = add_mermaid_element( 479 | element_data["default_route"], 480 | current_node_id, 481 | router_agent_node_id, 482 | edge_label="Default", 483 | ) 484 | mermaid_lines.append( 485 | f" {default_target_output} --> {pattern_internal_end_id}" 486 | ) 487 | 488 | elif pattern_type == "DiscussionPattern": 489 | # Simplified: Just list participants and moderator if any 490 | if element_data.get("moderator"): 491 | mod_data = element_data.get("moderator") 492 | mod_output = add_mermaid_element( 493 | mod_data, 494 | current_node_id, 495 | pattern_internal_start_id, 496 | edge_label="Moderates", 497 | ) 498 | mermaid_lines.append( 499 | f" {mod_output} --> {pattern_internal_end_id}" 500 | ) # Moderator leads to end 501 | for p_data in element_data.get("participants", []): 502 | p_output = add_mermaid_element( 503 | p_data, current_node_id, pattern_internal_start_id 504 | ) # All participants start from beginning 505 | mermaid_lines.append( 506 | f" {p_output} --> {pattern_internal_end_id}" 507 | ) # And contribute to end 508 | 509 | mermaid_lines.append(" end") # End subgraph 510 | # The "output" of a pattern subgraph for sequential linking is its internal end node 511 | current_node_id = pattern_internal_end_id 512 | # However, the connection from prev_node_id_in_sequence was already made to pattern_internal_start_id. 513 | # The current_node_id to be returned should be the one that the *next* sequential step connects *from*. 514 | # So, if this was a pattern, the next step connects from its internal_end_id. 515 | # This is correct. 
516 | 517 | else: # Should not happen if YAML structure is correct 518 | mermaid_lines.append( 519 | f" {current_node_id}[{escape_mermaid_label(f'Unknown: {element_name_for_id}')}]" 520 | ) 521 | 522 | # Connect previous step to current step (if not a pattern, where connection is handled differently) 523 | if element_data.get("type") != "pattern": 524 | connection_str = ( 525 | f" -->|{escape_mermaid_label(edge_label)}|" 526 | if edge_label 527 | else " -->" 528 | ) 529 | mermaid_lines.append( 530 | f" {prev_node_id_in_sequence}{connection_str} {current_node_id}" 531 | ) 532 | 533 | return ( 534 | current_node_id # Return the ID of the last main node of this element 535 | ) 536 | 537 | # --- Start Mermaid generation --- 538 | flow_id_main = self.flow_name.replace(" ", "_") 539 | mermaid_lines.append( 540 | f"subgraph {flow_id_main}_overall [{escape_mermaid_label(f'Flow: {self.flow_name}')}]" 541 | ) 542 | mermaid_lines.append(" direction TD") 543 | 544 | flow_start_node = f"{flow_id_main}_FlowStart" 545 | flow_end_node = f"{flow_id_main}_FlowEnd" 546 | mermaid_lines.append(f' {flow_start_node}(("Start"))') 547 | mermaid_lines.append(f' {flow_end_node}(("End"))') 548 | 549 | last_node_in_flow_sequence = flow_start_node 550 | flow_steps_data = yaml_data_for_ids.get("flow", {}).get("steps", []) 551 | 552 | for step_data_item in flow_steps_data: 553 | last_node_in_flow_sequence = add_mermaid_element( 554 | step_data_item, flow_id_main, last_node_in_flow_sequence 555 | ) 556 | 557 | mermaid_lines.append(f" {last_node_in_flow_sequence} --> {flow_end_node}") 558 | mermaid_lines.append("end") # End main flow subgraph 559 | 560 | return "\n".join(mermaid_lines) 561 | -------------------------------------------------------------------------------- /tframex/models/__init__.py: -------------------------------------------------------------------------------- 1 | # tframex/models/__init__.py 2 | from .primitives import ( 3 | FunctionCall, 4 | Message, 5 | MessageChunk, 6 | 
ToolCall, 7 | ToolDefinition, 8 | ToolParameterProperty, 9 | ToolParameters, 10 | ) 11 | 12 | __all__ = [ 13 | "FunctionCall", 14 | "Message", 15 | "MessageChunk", 16 | "ToolCall", 17 | "ToolDefinition", 18 | "ToolParameterProperty", 19 | "ToolParameters", 20 | ] -------------------------------------------------------------------------------- /tframex/models/primitives.py: -------------------------------------------------------------------------------- 1 | from typing import Any, Dict, List, Literal, Optional 2 | 3 | from pydantic import BaseModel, Field, field_validator 4 | 5 | 6 | class FunctionCall(BaseModel): 7 | name: str 8 | arguments: str # Should be a JSON string 9 | 10 | 11 | class ToolCall(BaseModel): 12 | id: str # Tool call ID, generated by the model 13 | type: Literal["function"] = "function" 14 | function: FunctionCall 15 | 16 | 17 | class Message(BaseModel): 18 | role: Literal["system", "user", "assistant", "tool"] 19 | content: Optional[str] = None 20 | tool_calls: Optional[List[ToolCall]] = None 21 | tool_call_id: Optional[str] = None # For role="tool", linking to a ToolCall id 22 | name: Optional[str] = ( 23 | None # For role="tool", the name of the function that was called 24 | ) 25 | 26 | model_config = { 27 | "exclude_none": True, 28 | "extra": "ignore", # Ignore extra fields during parsing if model adds them 29 | } 30 | 31 | 32 | class MessageChunk(Message): 33 | """Represents a chunk of a message, typically used in streaming LLM responses.""" 34 | 35 | pass # Inherits fields, used for typing 36 | 37 | 38 | class ToolParameterProperty(BaseModel): 39 | type: str # e.g., "string", "number", "integer", "boolean", "array", "object" 40 | description: Optional[str] = None 41 | enum: Optional[List[Any]] = None 42 | # For array items or object properties, nested schema can be defined if needed 43 | 44 | 45 | class ToolParameters(BaseModel): 46 | type: Literal["object"] = "object" 47 | properties: Dict[str, ToolParameterProperty] = 
Field(default_factory=dict) 48 | required: Optional[List[str]] = Field(default_factory=list) 49 | 50 | 51 | class ToolDefinition(BaseModel): # For OpenAI tool format 52 | type: Literal["function"] = "function" 53 | function: Dict[str, Any] # name, description, parameters (ToolParameters schema) 54 | -------------------------------------------------------------------------------- /tframex/patterns/__init__.py: -------------------------------------------------------------------------------- 1 | # tframex/patterns/__init__.py 2 | from .patterns import ( 3 | BasePattern, 4 | SequentialPattern, 5 | ParallelPattern, 6 | RouterPattern, 7 | DiscussionPattern, 8 | ) 9 | 10 | __all__ = [ 11 | "BasePattern", 12 | "SequentialPattern", 13 | "ParallelPattern", 14 | "RouterPattern", 15 | "DiscussionPattern", 16 | ] -------------------------------------------------------------------------------- /tframex/patterns/patterns.py: -------------------------------------------------------------------------------- 1 | import asyncio 2 | import logging 3 | from abc import ABC, abstractmethod 4 | from typing import Any, Dict, List, Optional, Tuple, Union 5 | 6 | from ..flows.flow_context import FlowContext 7 | from ..models.primitives import Message, ToolCall 8 | from ..util.engine import Engine 9 | 10 | logger = logging.getLogger(__name__) 11 | 12 | 13 | class BasePattern(ABC): 14 | def __init__(self, pattern_name: str): 15 | self.pattern_name = pattern_name 16 | logger.debug(f"Pattern '{self.pattern_name}' initialized.") 17 | 18 | @abstractmethod 19 | async def execute( 20 | self, 21 | flow_ctx: FlowContext, 22 | engine: Engine, 23 | agent_call_kwargs: Optional[Dict[str, Any]] = None, 24 | ) -> FlowContext: # NEW agent_call_kwargs 25 | pass 26 | 27 | def __str__(self): 28 | return f"{self.__class__.__name__}(name='{self.pattern_name}')" 29 | 30 | 31 | class SequentialPattern(BasePattern): 32 | def __init__(self, pattern_name: str, steps: List[Union[str, BasePattern]]): 33 | 
super().__init__(pattern_name) 34 | self.steps = steps 35 | 36 | async def execute( 37 | self, 38 | flow_ctx: FlowContext, 39 | engine: Engine, 40 | agent_call_kwargs: Optional[Dict[str, Any]] = None, 41 | ) -> FlowContext: # NEW 42 | logger.info( 43 | f"Executing SequentialPattern '{self.pattern_name}' with {len(self.steps)} steps. Input: {str(flow_ctx.current_message.content)[:50]}..." 44 | ) 45 | effective_agent_call_kwargs = agent_call_kwargs or {} 46 | 47 | for i, step in enumerate(self.steps): 48 | step_name = str(step) if isinstance(step, BasePattern) else step 49 | logger.info( 50 | f"SequentialPattern '{self.pattern_name}' - Step {i + 1}/{len(self.steps)}: Executing '{step_name}'." 51 | ) 52 | 53 | if isinstance(step, str): # Agent name 54 | try: 55 | output_message = await engine.call_agent( 56 | step, flow_ctx.current_message, **effective_agent_call_kwargs 57 | ) 58 | flow_ctx.update_current_message(output_message) 59 | except Exception as e: # ... error handling ... 60 | logger.error( 61 | f"Error in SequentialPattern '{self.pattern_name}' calling agent '{step}': {e}", 62 | exc_info=True, 63 | ) 64 | error_msg = Message( 65 | role="assistant", 66 | content=f"Error executing agent '{step}' in sequence '{self.pattern_name}': {e}", 67 | ) 68 | flow_ctx.update_current_message(error_msg) 69 | return flow_ctx 70 | elif isinstance(step, BasePattern): # Nested pattern 71 | try: 72 | flow_ctx = await step.execute( 73 | flow_ctx, engine, agent_call_kwargs=effective_agent_call_kwargs 74 | ) # Pass kwargs 75 | except Exception as e: # ... error handling ... 76 | logger.error( 77 | f"Error in SequentialPattern '{self.pattern_name}' executing nested pattern '{step.pattern_name}': {e}", 78 | exc_info=True, 79 | ) 80 | error_msg = Message( 81 | role="assistant", 82 | content=f"Error executing nested pattern '{step.pattern_name}' in sequence '{self.pattern_name}': {e}", 83 | ) 84 | flow_ctx.update_current_message(error_msg) 85 | return flow_ctx 86 | else: # ... 
error handling ... 87 | logger.error( 88 | f"SequentialPattern '{self.pattern_name}': Invalid step type: {type(step)}" 89 | ) 90 | error_msg = Message( 91 | role="assistant", 92 | content=f"Invalid step type in sequence '{self.pattern_name}'.", 93 | ) 94 | flow_ctx.update_current_message(error_msg) 95 | return flow_ctx 96 | logger.info(f"SequentialPattern '{self.pattern_name}' completed.") 97 | return flow_ctx 98 | 99 | 100 | class ParallelPattern(BasePattern): 101 | def __init__(self, pattern_name: str, tasks: List[Union[str, BasePattern]]): 102 | super().__init__(pattern_name) 103 | self.tasks = tasks 104 | 105 | async def execute( 106 | self, 107 | flow_ctx: FlowContext, 108 | engine: Engine, 109 | agent_call_kwargs: Optional[Dict[str, Any]] = None, 110 | ) -> FlowContext: # NEW 111 | logger.info( 112 | f"Executing ParallelPattern '{self.pattern_name}' with {len(self.tasks)} tasks. Input: {str(flow_ctx.current_message.content)[:50]}..." 113 | ) 114 | initial_input_msg = flow_ctx.current_message 115 | effective_agent_call_kwargs = agent_call_kwargs or {} 116 | coroutines = [] 117 | task_identifiers = [] 118 | 119 | for task_item in self.tasks: 120 | task_name = ( 121 | str(task_item) if isinstance(task_item, BasePattern) else task_item 122 | ) 123 | task_identifiers.append(task_name) 124 | 125 | if isinstance(task_item, str): # Agent name 126 | coroutines.append( 127 | engine.call_agent( 128 | task_item, initial_input_msg, **effective_agent_call_kwargs 129 | ) 130 | ) 131 | elif isinstance(task_item, BasePattern): 132 | branch_flow_ctx = FlowContext( 133 | initial_input=initial_input_msg, 134 | shared_data=flow_ctx.shared_data.copy(), 135 | ) 136 | coroutines.append( 137 | task_item.execute( 138 | branch_flow_ctx, 139 | engine, 140 | agent_call_kwargs=effective_agent_call_kwargs, 141 | ) 142 | ) # Pass kwargs 143 | else: # ... error handling ... 
144 | 145 | async def error_coro(): 146 | return Message( 147 | role="assistant", 148 | content=f"Invalid task type in parallel pattern '{self.pattern_name}'.", 149 | ) 150 | 151 | coroutines.append(error_coro()) 152 | 153 | results = await asyncio.gather(*coroutines, return_exceptions=True) 154 | # ... (rest of result aggregation logic - no changes needed here for agent_call_kwargs) ... 155 | aggregated_content_parts = [] 156 | result_artifacts = [] 157 | 158 | for i, res_item in enumerate(results): 159 | task_id = task_identifiers[i] 160 | if isinstance(res_item, Exception): 161 | logger.error( 162 | f"ParallelPattern '{self.pattern_name}' - Task '{task_id}' failed: {res_item}", 163 | exc_info=False, 164 | ) 165 | aggregated_content_parts.append( 166 | f"Task '{task_id}' failed: {str(res_item)}" 167 | ) 168 | result_artifacts.append( 169 | { 170 | "name": f"Result_for_{task_id.replace(' ', '_')}", 171 | "parts": [{"type": "text", "text": f"Error: {str(res_item)}"}], 172 | } 173 | ) 174 | elif isinstance(res_item, FlowContext): 175 | logger.info( 176 | f"ParallelPattern '{self.pattern_name}' - Task '{task_id}' (pattern) completed. Output: {str(res_item.current_message.content)[:50]}..." 177 | ) 178 | aggregated_content_parts.append( 179 | f"Task '{task_id}' (pattern) completed. Result: {str(res_item.current_message.content)[:100]}..." 180 | ) 181 | result_artifacts.append( 182 | { 183 | "name": f"Result_for_{task_id.replace(' ', '_')}", 184 | "parts": [ 185 | { 186 | "type": "data", 187 | "data": res_item.current_message.model_dump( 188 | exclude_none=True 189 | ), 190 | } 191 | ], 192 | } 193 | ) 194 | elif isinstance(res_item, Message): 195 | logger.info( 196 | f"ParallelPattern '{self.pattern_name}' - Task '{task_id}' (agent) completed. Output: {str(res_item.content)[:50]}..." 197 | ) 198 | aggregated_content_parts.append( 199 | f"Task '{task_id}' (agent) completed. Result: {str(res_item.content)[:100]}..." 
200 | ) 201 | result_artifacts.append( 202 | { 203 | "name": f"Result_for_{task_id.replace(' ', '_')}", 204 | "parts": [ 205 | { 206 | "type": "data", 207 | "data": res_item.model_dump(exclude_none=True), 208 | } 209 | ], 210 | } 211 | ) 212 | else: 213 | logger.warning( 214 | f"ParallelPattern '{self.pattern_name}' - Task '{task_id}' returned unexpected type: {type(res_item)}" 215 | ) 216 | aggregated_content_parts.append( 217 | f"Task '{task_id}' returned unexpected data." 218 | ) 219 | result_artifacts.append( 220 | { 221 | "name": f"Result_for_{task_id.replace(' ', '_')}", 222 | "parts": [{"type": "text", "text": "Unexpected result type."}], 223 | } 224 | ) 225 | 226 | summary_content = ( 227 | f"Parallel execution of '{self.pattern_name}' completed with {len(self.tasks)} tasks.\n" 228 | + "\n".join(aggregated_content_parts) 229 | ) 230 | final_output_message = Message(role="assistant", content=summary_content) 231 | flow_ctx.shared_data[f"{self.pattern_name}_results"] = result_artifacts 232 | flow_ctx.update_current_message(final_output_message) 233 | 234 | logger.info(f"ParallelPattern '{self.pattern_name}' completed.") 235 | return flow_ctx 236 | 237 | 238 | class RouterPattern(BasePattern): 239 | def __init__( 240 | self, 241 | pattern_name: str, 242 | router_agent_name: str, 243 | routes: Dict[str, Union[str, BasePattern]], 244 | default_route: Optional[Union[str, BasePattern]] = None, 245 | ): 246 | super().__init__(pattern_name) 247 | self.router_agent_name = router_agent_name 248 | self.routes = routes 249 | self.default_route = default_route 250 | 251 | async def execute( 252 | self, 253 | flow_ctx: FlowContext, 254 | engine: Engine, 255 | agent_call_kwargs: Optional[Dict[str, Any]] = None, 256 | ) -> FlowContext: # NEW 257 | logger.info( 258 | f"Executing RouterPattern '{self.pattern_name}'. Input: {str(flow_ctx.current_message.content)[:50]}..." 
259 | ) 260 | effective_agent_call_kwargs = agent_call_kwargs or {} 261 | try: 262 | router_response: Message = await engine.call_agent( 263 | self.router_agent_name, 264 | flow_ctx.current_message, 265 | **effective_agent_call_kwargs, 266 | ) 267 | flow_ctx.history.append(router_response) 268 | route_key = (router_response.content or "").strip() 269 | logger.info( 270 | f"RouterPattern '{self.pattern_name}': Router agent '{self.router_agent_name}' decided route_key: '{route_key}'." 271 | ) 272 | except Exception as e: # ... error handling ... 273 | logger.error( 274 | f"Error calling router agent '{self.router_agent_name}' in RouterPattern '{self.pattern_name}': {e}", 275 | exc_info=True, 276 | ) 277 | error_msg = Message( 278 | role="assistant", 279 | content=f"Error in router agent '{self.router_agent_name}': {e}", 280 | ) 281 | flow_ctx.update_current_message(error_msg) 282 | return flow_ctx 283 | 284 | target_step = self.routes.get(route_key) 285 | if target_step is None: 286 | logger.warning( 287 | f"RouterPattern '{self.pattern_name}': Route key '{route_key}' not found. Using default." 288 | ) 289 | target_step = self.default_route 290 | if target_step is None: # ... error handling ... 291 | logger.error( 292 | f"RouterPattern '{self.pattern_name}': No route for key '{route_key}' and no default." 293 | ) 294 | error_msg = Message( 295 | role="assistant", content=f"Routing error: No path for '{route_key}'." 296 | ) 297 | flow_ctx.update_current_message(error_msg) 298 | return flow_ctx 299 | 300 | target_name = ( 301 | str(target_step) if isinstance(target_step, BasePattern) else target_step 302 | ) 303 | logger.info( 304 | f"RouterPattern '{self.pattern_name}': Executing routed step '{target_name}'." 
305 | ) 306 | 307 | if isinstance(target_step, str): # Agent name 308 | try: 309 | output_message = await engine.call_agent( 310 | target_step, flow_ctx.current_message, **effective_agent_call_kwargs 311 | ) 312 | flow_ctx.update_current_message(output_message) 313 | except Exception as e: # ... error handling ... 314 | logger.error( 315 | f"Error in RouterPattern '{self.pattern_name}' calling routed agent '{target_step}': {e}", 316 | exc_info=True, 317 | ) 318 | error_msg = Message( 319 | role="assistant", 320 | content=f"Error executing routed agent '{target_step}': {e}", 321 | ) 322 | flow_ctx.update_current_message(error_msg) 323 | elif isinstance(target_step, BasePattern): # Nested pattern 324 | try: 325 | flow_ctx = await target_step.execute( 326 | flow_ctx, engine, agent_call_kwargs=effective_agent_call_kwargs 327 | ) # Pass kwargs 328 | except Exception as e: # ... error handling ... 329 | logger.error( 330 | f"Error in RouterPattern '{self.pattern_name}' executing routed pattern '{target_step.pattern_name}': {e}", 331 | exc_info=True, 332 | ) 333 | error_msg = Message( 334 | role="assistant", 335 | content=f"Error executing routed pattern '{target_step.pattern_name}': {e}", 336 | ) 337 | flow_ctx.update_current_message(error_msg) 338 | 339 | logger.info(f"RouterPattern '{self.pattern_name}' completed.") 340 | return flow_ctx 341 | 342 | 343 | class DiscussionPattern(BasePattern): 344 | def __init__( 345 | self, 346 | pattern_name: str, 347 | participant_agent_names: List[str], 348 | discussion_rounds: int = 1, 349 | moderator_agent_name: Optional[str] = None, 350 | stop_phrase: Optional[str] = None, 351 | ): 352 | super().__init__(pattern_name) 353 | if not participant_agent_names: 354 | raise ValueError("DiscussionPattern needs participants.") 355 | self.participant_agent_names = participant_agent_names 356 | self.discussion_rounds = discussion_rounds 357 | self.moderator_agent_name = moderator_agent_name 358 | self.stop_phrase = stop_phrase.lower() if 
stop_phrase else None 359 | 360 | async def execute( 361 | self, 362 | flow_ctx: FlowContext, 363 | engine: Engine, 364 | agent_call_kwargs: Optional[Dict[str, Any]] = None, 365 | ) -> FlowContext: # NEW 366 | logger.info( 367 | f"Executing DiscussionPattern '{self.pattern_name}' for {self.discussion_rounds} rounds. Topic: {str(flow_ctx.current_message.content)[:50]}..." 368 | ) 369 | current_discussion_topic_msg = flow_ctx.current_message 370 | effective_agent_call_kwargs = agent_call_kwargs or {} 371 | 372 | for round_num in range(1, self.discussion_rounds + 1): 373 | logger.info( 374 | f"DiscussionPattern '{self.pattern_name}' - Round {round_num}/{self.discussion_rounds}" 375 | ) 376 | round_messages: List[Tuple[str, Message]] = [] 377 | 378 | for agent_name in self.participant_agent_names: 379 | logger.info( 380 | f"DiscussionPattern '{self.pattern_name}' - Round {round_num}: Agent '{agent_name}' speaking on: {str(current_discussion_topic_msg.content)[:50]}..." 381 | ) 382 | try: 383 | # Each agent gets the current_discussion_topic_msg as input 384 | # Pass effective_agent_call_kwargs which might contain global template_vars 385 | agent_response: Message = await engine.call_agent( 386 | agent_name, 387 | current_discussion_topic_msg, 388 | **effective_agent_call_kwargs, 389 | ) 390 | flow_ctx.history.append(agent_response) # Record turn 391 | round_messages.append((agent_name, agent_response)) 392 | current_discussion_topic_msg = ( 393 | agent_response # Next agent responds to this 394 | ) 395 | 396 | if ( 397 | self.stop_phrase 398 | and self.stop_phrase in (agent_response.content or "").lower() 399 | ): 400 | logger.info( 401 | f"DiscussionPattern '{self.pattern_name}': Agent '{agent_name}' said stop phrase. Ending." 402 | ) 403 | flow_ctx.update_current_message(agent_response) 404 | return flow_ctx 405 | except Exception as e: # ... error handling ... 
406 | logger.error( 407 | f"Error in DiscussionPattern '{self.pattern_name}' with agent '{agent_name}': {e}", 408 | exc_info=True, 409 | ) 410 | error_response = Message( 411 | role="assistant", content=f"Agent {agent_name} error: {e}" 412 | ) 413 | round_messages.append((agent_name, error_response)) 414 | current_discussion_topic_msg = error_response 415 | 416 | if not round_messages: 417 | break # End if round is empty 418 | 419 | if self.moderator_agent_name and round_num < self.discussion_rounds: 420 | mod_input_parts = [ 421 | f"Summary of Round {round_num} for discussion on '{str(flow_ctx.current_message.content)[:50]}...':" 422 | ] 423 | for name, msg in round_messages: 424 | mod_input_parts.append(f"- {name}: {msg.content}") 425 | mod_input_msg = Message( 426 | role="user", 427 | content="\n".join(mod_input_parts) + "\n\nPlease moderate.", 428 | ) 429 | 430 | logger.info( 431 | f"DiscussionPattern '{self.pattern_name}' - Round {round_num}: Calling moderator '{self.moderator_agent_name}'." 432 | ) 433 | try: 434 | moderator_response: Message = await engine.call_agent( 435 | self.moderator_agent_name, 436 | mod_input_msg, 437 | **effective_agent_call_kwargs, 438 | ) 439 | flow_ctx.history.append(moderator_response) 440 | current_discussion_topic_msg = moderator_response 441 | except Exception as e: # ... error handling ... 442 | logger.error( 443 | f"Error calling moderator agent '{self.moderator_agent_name}': {e}", 444 | exc_info=True, 445 | ) 446 | current_discussion_topic_msg = Message( 447 | role="assistant", content=f"Moderator error: {e}. Continuing." 448 | ) 449 | elif round_messages: 450 | current_discussion_topic_msg = round_messages[-1][1] 451 | 452 | flow_ctx.update_current_message(current_discussion_topic_msg) 453 | logger.info( 454 | f"DiscussionPattern '{self.pattern_name}' completed. Final message: {str(flow_ctx.current_message.content)[:50]}..." 
455 | ) 456 | return flow_ctx 457 | -------------------------------------------------------------------------------- /tframex/util/__init__.py: -------------------------------------------------------------------------------- 1 | # tframex/util/__init__.py 2 | # This can re-export or be left empty if util modules are imported directly. 3 | # For exposing to the main tframex API, we'll re-export key components. 4 | from .llms import BaseLLMWrapper, OpenAIChatLLM 5 | from .memory import BaseMemoryStore, InMemoryMemoryStore 6 | from .tools import Tool 7 | from .engine import Engine 8 | from .logging.logging_config import setup_logging 9 | 10 | __all__ = [ 11 | "BaseLLMWrapper", 12 | "OpenAIChatLLM", 13 | "BaseMemoryStore", 14 | "InMemoryMemoryStore", 15 | "Tool", 16 | "Engine", 17 | "setup_logging", 18 | ] -------------------------------------------------------------------------------- /tframex/util/engine.py: -------------------------------------------------------------------------------- 1 | # tframex/engine.py 2 | 3 | """ 4 | Core execution engine for TFrameX agents and tools within a specific runtime context. 5 | 6 | This module defines the `Engine` class, responsible for managing the lifecycle 7 | and execution of agents registered within a TFrameXApp instance. It handles 8 | agent instantiation, configuration resolution (LLM, memory, tools), and 9 | delegates calls to the appropriate agent or tool methods. 10 | """ 11 | 12 | import inspect 13 | import logging 14 | from typing import TYPE_CHECKING, Any, Dict, List, Optional, Type, Union 15 | 16 | # Import primitives and utilities - generally safe from circular dependencies 17 | from ..models.primitives import Message 18 | from ..util.tools import Tool, ToolDefinition, ToolParameterProperty, ToolParameters 19 | 20 | # Use TYPE_CHECKING block for imports needed only for static analysis 21 | # This avoids runtime circular imports. 
22 | if TYPE_CHECKING: 23 | from ..agents.base import BaseAgent 24 | from ..agents.llm_agent import LLMAgent 25 | from ..app import TFrameXApp # Assuming app type for better hinting 26 | from ..runtime import RuntimeContext # Assuming context type for better hinting 27 | 28 | 29 | logger = logging.getLogger("tframex.engine") 30 | 31 | 32 | class Engine: 33 | """ 34 | Manages agent instantiation and execution within a TFrameX runtime context. 35 | 36 | An Engine instance is typically created per request or session (via RuntimeContext) 37 | and provides the necessary environment for agents to run, resolving dependencies 38 | like LLMs, memory stores, and tools based on application defaults, context 39 | overrides, and agent-specific configurations. 40 | """ 41 | 42 | def __init__(self, app: 'TFrameXApp', runtime_context: 'RuntimeContext'): 43 | """ 44 | Initializes the Engine. 45 | 46 | Args: 47 | app: The main TFrameXApp instance containing agent/tool registrations. 48 | runtime_context: The specific runtime context for this engine instance, 49 | potentially holding session-specific state or overrides (e.g., LLM). 50 | """ 51 | self._app = app 52 | self._runtime_context = runtime_context 53 | # Use string literal for type hint to avoid import at class definition time 54 | # Stores agent instances, keyed by agent name. Instantiated lazily. 55 | self._agent_instances: Dict[str, 'BaseAgent'] = {} 56 | 57 | def _get_agent_instance(self, agent_name: str) -> 'BaseAgent': 58 | """ 59 | Retrieves or lazily instantiates an agent based on its registered configuration. 60 | 61 | This method handles the core logic of agent creation: 62 | 1. Checks if an instance for the given `agent_name` already exists for this engine. 63 | 2. If not, retrieves the agent's registration info from the `TFrameXApp`. 64 | 3. Resolves the LLM instance (Agent config > Context > App default). 65 | 4. Resolves the MemoryStore instance (Agent config > App default factory). 66 | 5. 
Resolves the Tools list based on registered `tool_names`. 67 | 6. Gathers other configuration: description, `strip_think_tags`, callable agents. 68 | 7. Creates ToolDefinitions for any specified `callable_agent_names`. 69 | 8. Determines the correct agent class to instantiate. 70 | 9. Filters registration config to pass only valid constructor arguments. 71 | 10. Validates required dependencies (e.g., LLM for LLMAgents). 72 | 11. Instantiates the agent class with the resolved configuration. 73 | 12. Stores and returns the new agent instance. 74 | 75 | Args: 76 | agent_name: The registered name of the agent to get or create. 77 | 78 | Returns: 79 | The agent instance corresponding to the `agent_name`. 80 | 81 | Raises: 82 | ValueError: If the `agent_name` is not registered in the app. 83 | ValueError: If an LLMAgent is required but no LLM is available. 84 | """ 85 | # Import agent classes here, INSIDE the method, only when needed for instantiation 86 | # This prevents module-level circular dependencies. 87 | from ..agents.base import BaseAgent 88 | from ..agents.llm_agent import LLMAgent 89 | 90 | if agent_name not in self._agent_instances: 91 | # --- Agent Registration Lookup --- 92 | if agent_name not in self._app._agents: 93 | raise ValueError( 94 | f"Agent '{agent_name}' not registered with the TFrameXApp." 
95 | ) 96 | reg_info = self._app._agents[agent_name] 97 | agent_config = reg_info["config"] # Use a shorter alias 98 | 99 | # --- Dependency Resolution --- 100 | # Resolve LLM: Agent-specific config > Context > App default 101 | agent_llm = ( 102 | agent_config.get("llm_instance_override") 103 | or self._runtime_context.llm 104 | or self._app.default_llm 105 | ) 106 | 107 | # Resolve Memory: Agent-specific config > App default factory 108 | agent_memory = ( 109 | agent_config.get("memory_override") 110 | or self._app.default_memory_store_factory() # Ensure factory provides a new instance 111 | ) 112 | 113 | # Resolve Tools: Look up tools by name from app registry 114 | agent_tools_resolved: List[Tool] = [] 115 | tool_names = agent_config.get("tool_names", []) 116 | if tool_names: 117 | for tool_name_ref in tool_names: 118 | tool_obj = self._app.get_tool(tool_name_ref) 119 | if tool_obj: 120 | agent_tools_resolved.append(tool_obj) 121 | else: 122 | logger.warning( 123 | f"Tool '{tool_name_ref}' specified for agent '{agent_name}' " 124 | f"not found in the app registry. Skipping." 125 | ) 126 | 127 | # --- Agent Configuration --- 128 | agent_description = agent_config.get("description") 129 | strip_think_tags_for_agent = agent_config.get( 130 | "strip_think_tags", False # Default to False if not specified in config 131 | ) 132 | 133 | # --- Callable Agent Definitions --- 134 | # Define other agents this agent can call as tools 135 | callable_agent_definitions: List[ToolDefinition] = [] 136 | callable_agent_names = agent_config.get("callable_agent_names", []) 137 | for sub_agent_name in callable_agent_names: 138 | if sub_agent_name not in self._app._agents: 139 | logger.warning( 140 | f"Agent '{agent_name}' configured to call non-existent agent " 141 | f"'{sub_agent_name}'. Skipping definition." 
142 | ) 143 | continue 144 | 145 | # Fetch sub-agent info to create a tool-like definition 146 | sub_agent_reg_info = self._app._agents[sub_agent_name] 147 | sub_agent_description = ( 148 | sub_agent_reg_info["config"].get("description") 149 | or f"Invoke the '{sub_agent_name}' agent. Provide the specific input message for it." 150 | ) 151 | 152 | # Define standard parameters for calling another agent 153 | agent_tool_params = ToolParameters( 154 | properties={ 155 | "input_message": ToolParameterProperty( 156 | type="string", 157 | description=f"The specific query, task, or input content to pass to the '{sub_agent_name}' agent.", 158 | ), 159 | }, 160 | required=["input_message"], 161 | ) 162 | 163 | callable_agent_definitions.append( 164 | ToolDefinition( 165 | type="function", 166 | function={ 167 | "name": sub_agent_name, # The name the primary agent uses to call 168 | "description": sub_agent_description, 169 | "parameters": agent_tool_params.model_dump(exclude_none=True), 170 | }, 171 | ) 172 | ) 173 | 174 | # --- Agent Instantiation --- 175 | instance_id = f"{agent_name}_ctx{id(self._runtime_context)}" 176 | AgentClassToInstantiate: Type[BaseAgent] = agent_config["agent_class_ref"] 177 | 178 | # Identify keys used internally for setup vs. those passed to the constructor 179 | internal_config_keys = { 180 | "llm_instance_override", "memory_override", "tool_names", 181 | "system_prompt_template", "agent_class_ref", "description", 182 | "callable_agent_names", "strip_think_tags" 183 | } 184 | additional_constructor_args = { 185 | k: v 186 | for k, v in agent_config.items() 187 | if k not in internal_config_keys 188 | } 189 | 190 | # Runtime check: LLMAgent requires an LLM 191 | if issubclass(AgentClassToInstantiate, LLMAgent) and not agent_llm: 192 | raise ValueError( 193 | f"Agent '{agent_name}' (type: {AgentClassToInstantiate.__name__}) " 194 | f"requires an LLM, but none could be resolved (check agent config, " 195 | f"runtime context, and app defaults)." 
196 | ) 197 | 198 | # Prepare arguments for the agent's constructor 199 | agent_init_kwargs = { 200 | "agent_id": instance_id, 201 | "description": agent_description, 202 | "llm": agent_llm, 203 | "tools": agent_tools_resolved, 204 | "memory": agent_memory, 205 | "system_prompt_template": agent_config.get("system_prompt_template"), 206 | "callable_agent_definitions": callable_agent_definitions, 207 | "strip_think_tags": strip_think_tags_for_agent, 208 | **additional_constructor_args, # Include any other config values 209 | } 210 | 211 | # Inject engine dependency specifically for LLMAgents (if needed by their impl) 212 | # Check inheritance dynamically using the imported LLMAgent class 213 | if issubclass(AgentClassToInstantiate, LLMAgent): 214 | agent_init_kwargs["engine"] = self # Pass self (the engine) 215 | 216 | # Create the agent instance 217 | self._agent_instances[agent_name] = AgentClassToInstantiate(**agent_init_kwargs) 218 | 219 | logger.debug( 220 | f"Instantiated agent '{instance_id}' " 221 | f"(Name: '{agent_name}', Type: {AgentClassToInstantiate.__name__}, " 222 | f"LLM: {agent_llm.model_id if agent_llm else 'None'}, " 223 | f"Memory: {type(agent_memory).__name__}, " 224 | f"Tools: {[t.name for t in agent_tools_resolved]}, " 225 | f"Callable Agents: {callable_agent_names}, " 226 | f"Strip Tags: {strip_think_tags_for_agent})" 227 | ) 228 | 229 | # Return the existing or newly created instance 230 | return self._agent_instances[agent_name] 231 | 232 | async def call_agent( 233 | self, agent_name: str, input_message: Union[str, Message], **kwargs: Any 234 | ) -> Message: 235 | """ 236 | Executes a registered agent with the given input. 237 | 238 | This method retrieves (or instantiates) the specified agent and calls its 239 | `run` method. 240 | 241 | Args: 242 | agent_name: The registered name of the agent to call. 
243 | input_message: The input message for the agent, either as a string 244 | (which will be wrapped in a 'user' Message) or a Message object. 245 | **kwargs: Additional keyword arguments to be passed directly to the 246 | agent's `run` method. 247 | 248 | Returns: 249 | The response Message object from the agent's execution. 250 | 251 | Raises: 252 | ValueError: If the agent is not registered. 253 | (Potentially others depending on the agent's `run` method) 254 | """ 255 | # Ensure input is a Message object 256 | if isinstance(input_message, str): 257 | input_msg_obj = Message(role="user", content=input_message) 258 | elif isinstance(input_message, Message): 259 | input_msg_obj = input_message 260 | else: 261 | # Add type checking for clarity, though Union hint covers it 262 | raise TypeError(f"input_message must be str or Message, not {type(input_message).__name__}") 263 | 264 | # Get the agent instance (will create if first time for this engine) 265 | agent_instance = self._get_agent_instance(agent_name) 266 | 267 | # Execute the agent's primary run logic 268 | return await agent_instance.run(input_msg_obj, **kwargs) 269 | 270 | async def call_tool(self, tool_name: str, arguments_json_str: str) -> Any: 271 | """ 272 | Executes a registered tool with the provided arguments. 273 | 274 | This method looks up the tool in the application's registry and calls 275 | its `execute` method. This is typically used internally by agents that 276 | decide to use a tool. 277 | 278 | Args: 279 | tool_name: The registered name of the tool to execute. 280 | arguments_json_str: A JSON string containing the arguments for the tool, 281 | as expected by the tool's definition. 282 | 283 | Returns: 284 | The result returned by the tool's `execute` method. This can be of Any type. 285 | Returns an error dictionary if the tool is not found. 
286 | """ 287 | tool = self._app.get_tool(tool_name) 288 | if not tool: 289 | logger.error( 290 | f"Engine requested to call tool '{tool_name}', but it was not " 291 | f"found in the app registry." 292 | ) 293 | # Return a consistent error format that agents might handle 294 | return {"error": f"Tool '{tool_name}' not found."} 295 | 296 | logger.debug(f"Engine executing tool '{tool_name}' with args: {arguments_json_str}") 297 | # Execute the tool 298 | try: 299 | result = await tool.execute(arguments_json_str) 300 | logger.debug(f"Tool '{tool_name}' executed successfully.") 301 | return result 302 | except Exception as e: 303 | logger.error(f"Error executing tool '{tool_name}': {e}", exc_info=True) 304 | # Propagate error in a structured way if possible 305 | return {"error": f"Error executing tool '{tool_name}': {str(e)}"} -------------------------------------------------------------------------------- /tframex/util/llms.py: -------------------------------------------------------------------------------- 1 | import asyncio 2 | import json 3 | import logging 4 | from abc import ABC, abstractmethod 5 | from typing import ( 6 | Any, 7 | AsyncGenerator, 8 | Coroutine, 9 | Dict, 10 | List, 11 | Literal, 12 | Optional, 13 | Union, 14 | overload, 15 | ) 16 | 17 | import httpx 18 | 19 | from tframex.models.primitives import FunctionCall, Message, MessageChunk, ToolCall 20 | 21 | logger = logging.getLogger(__name__) 22 | 23 | 24 | class BaseLLMWrapper(ABC): 25 | def __init__( 26 | self, 27 | model_id: str, 28 | api_key: Optional[str] = None, 29 | api_base_url: Optional[str] = None, 30 | client_kwargs: Optional[Dict[str, Any]] = None, 31 | ): 32 | self.model_id = model_id 33 | self.api_key = api_key 34 | self.api_base_url = api_base_url.rstrip("/") if api_base_url else None 35 | self.client_kwargs = client_kwargs or {} 36 | self._client: Optional[httpx.AsyncClient] = None 37 | logger.info(f"BaseLLMWrapper initialized for model_id: {model_id}") 38 | 39 | async def 
_get_client(self) -> httpx.AsyncClient: 40 | if self._client is None or self._client.is_closed: 41 | headers = {} 42 | if self.api_key: 43 | headers["Authorization"] = f"Bearer {self.api_key}" 44 | headers["Content-Type"] = "application/json" 45 | 46 | timeout_config = self.client_kwargs.pop("timeout", None) 47 | if timeout_config is None: 48 | timeouts = httpx.Timeout( 49 | 300.0, connect=60.0 50 | ) # Default: 5 min total, 1 min connect 51 | elif isinstance(timeout_config, (int, float)): 52 | timeouts = httpx.Timeout(timeout_config) 53 | else: # Assumes httpx.Timeout object 54 | timeouts = timeout_config 55 | 56 | self._client = httpx.AsyncClient( 57 | headers=headers, timeout=timeouts, **self.client_kwargs 58 | ) 59 | return self._client 60 | 61 | @overload 62 | @abstractmethod 63 | async def chat_completion( 64 | self, messages: List[Message], stream: Literal[True], **kwargs: Any 65 | ) -> Coroutine[Any, Any, AsyncGenerator[MessageChunk, None]]: ... 66 | 67 | @overload 68 | @abstractmethod 69 | async def chat_completion( 70 | self, messages: List[Message], stream: Literal[False] = False, **kwargs: Any 71 | ) -> Coroutine[Any, Any, Message]: ... 
72 | 73 | @abstractmethod 74 | async def chat_completion( 75 | self, messages: List[Message], stream: bool = False, **kwargs: Any 76 | ) -> Coroutine[Any, Any, Union[Message, AsyncGenerator[MessageChunk, None]]]: 77 | pass 78 | 79 | async def close(self): 80 | if self._client and not self._client.is_closed: 81 | await self._client.aclose() 82 | logger.info(f"LLM client for {self.model_id} closed.") 83 | 84 | 85 | class OpenAIChatLLM(BaseLLMWrapper): 86 | def __init__( 87 | self, 88 | model_name: str, 89 | api_base_url: str, 90 | api_key: Optional[str] = None, 91 | default_max_tokens: int = 4096, 92 | default_temperature: float = 0.7, 93 | **kwargs: Any, 94 | ): 95 | super().__init__( 96 | model_id=model_name, 97 | api_key=api_key, 98 | api_base_url=api_base_url, 99 | client_kwargs=kwargs, 100 | ) 101 | self.chat_completions_url = f"{self.api_base_url}/chat/completions" 102 | self.default_max_tokens = default_max_tokens 103 | self.default_temperature = default_temperature 104 | 105 | async def chat_completion( 106 | self, 107 | messages: List[Message], 108 | stream: bool = False, 109 | max_retries: int = 2, 110 | **kwargs: Any, 111 | ) -> Union[Message, AsyncGenerator[MessageChunk, None]]: 112 | client = await self._get_client() 113 | payload: Dict[str, Any] = { 114 | "model": self.model_id, 115 | "messages": [msg.model_dump(exclude_none=True) for msg in messages], 116 | "stream": stream, 117 | "max_tokens": kwargs.get("max_tokens", self.default_max_tokens), 118 | "temperature": kwargs.get("temperature", self.default_temperature), 119 | } 120 | 121 | # Handle tools if provided 122 | if "tools" in kwargs and kwargs["tools"]: 123 | payload["tools"] = kwargs[ 124 | "tools" 125 | ] # Expects List[ToolDefinition.model_dump()] 126 | if "tool_choice" in kwargs: 127 | payload["tool_choice"] = kwargs["tool_choice"] 128 | 129 | # Remove our custom/internal kwargs before sending to OpenAI 130 | internal_kwargs = ["max_retries"] 131 | for ikw in internal_kwargs: 132 | 
kwargs.pop(ikw, None) 133 | payload.update(kwargs) 134 | 135 | last_exception = None 136 | for attempt in range(max_retries + 1): 137 | try: 138 | logger.debug( 139 | f"OpenAIChatLLM: Attempt {attempt+1} to {self.chat_completions_url}. Stream: {stream}. Model: {self.model_id}" 140 | ) 141 | if stream: 142 | return self._stream_response(client, self.chat_completions_url, payload) # type: ignore 143 | else: 144 | response = await client.post( 145 | self.chat_completions_url, json=payload 146 | ) 147 | response.raise_for_status() 148 | response_data = response.json() 149 | choice = response_data.get("choices", [{}])[0] 150 | msg_data = choice.get("message", {}) 151 | return Message(**msg_data) 152 | 153 | except httpx.HTTPStatusError as e: 154 | logger.error( 155 | f"HTTP Error from LLM API ({self.model_id}): {e.response.status_code} - {e.response.text}", 156 | exc_info=False, 157 | ) # Reduce noise 158 | err_content = f"LLM API Error: {e.response.status_code}" 159 | try: 160 | err_detail = e.response.json().get("error", {}).get("message", "") 161 | err_content += f" - {err_detail}" if err_detail else "" 162 | except: 163 | pass 164 | if stream: 165 | 166 | async def err_gen(): 167 | yield MessageChunk(role="assistant", content=err_content) 168 | return 169 | 170 | return err_gen() # type: ignore 171 | return Message(role="assistant", content=err_content) 172 | except ( 173 | httpx.ReadError, 174 | httpx.ConnectError, 175 | httpx.PoolTimeout, 176 | httpx.RemoteProtocolError, 177 | ) as e: 178 | last_exception = e 179 | logger.warning( 180 | f"LLM Call Attempt {attempt+1} for {self.model_id} failed with {type(e).__name__}: {e}. Retrying..." 
181 | ) 182 | if attempt < max_retries: 183 | await asyncio.sleep(1 * (2**attempt)) 184 | except Exception as e: 185 | logger.error( 186 | f"Unexpected error during LLM call ({self.model_id}): {e}", 187 | exc_info=True, 188 | ) 189 | err_content = f"Unexpected error: {e}" 190 | if stream: 191 | 192 | async def err_gen(): 193 | yield MessageChunk(role="assistant", content=err_content) 194 | return 195 | 196 | return err_gen() # type: ignore 197 | return Message(role="assistant", content=err_content) 198 | 199 | err_msg = f"LLM call ({self.model_id}) failed after {max_retries + 1} attempts. Last error: {last_exception}" 200 | logger.error(err_msg) 201 | if stream: 202 | 203 | async def err_gen(): 204 | yield MessageChunk(role="assistant", content=err_msg) 205 | return 206 | 207 | return err_gen() # type: ignore 208 | return Message(role="assistant", content=err_msg) 209 | 210 | async def _stream_response( 211 | self, client: httpx.AsyncClient, url: str, payload: Dict[str, Any] 212 | ) -> AsyncGenerator[MessageChunk, None]: 213 | async with client.stream("POST", url, json=payload) as response: 214 | if response.status_code != 200: 215 | error_content_bytes = await response.aread() 216 | error_content = error_content_bytes.decode(errors="replace") 217 | logger.error( 218 | f"LLM API Stream Error ({self.model_id}): Status {response.status_code}, Response: {error_content}" 219 | ) 220 | yield MessageChunk( 221 | role="assistant", 222 | content=f"LLM API Stream Error: {response.status_code} - {error_content}", 223 | ) 224 | return 225 | 226 | # Tool call accumulation logic (OpenAI specific streaming format for tools) 227 | current_tool_calls: List[Dict[str, Any]] = [] 228 | 229 | async for line in response.aiter_lines(): 230 | if line.startswith("data:"): 231 | data_content = line[len("data:") :].strip() 232 | if data_content == "[DONE]": 233 | break 234 | try: 235 | chunk_data = json.loads(data_content) 236 | delta = chunk_data.get("choices", [{}])[0].get("delta", {}) 
237 | 238 | role_chunk = delta.get( 239 | "role" 240 | ) # Will be "assistant" on first useful chunk 241 | content_chunk = delta.get("content") 242 | tool_calls_chunk = delta.get("tool_calls") 243 | 244 | # Yield content immediately if present 245 | if content_chunk: 246 | yield MessageChunk(role="assistant", content=content_chunk) 247 | 248 | if tool_calls_chunk: 249 | for tc_delta in tool_calls_chunk: 250 | index = tc_delta.get("index", 0) 251 | 252 | # Ensure current_tool_calls list is long enough 253 | while index >= len(current_tool_calls): 254 | current_tool_calls.append( 255 | {} 256 | ) # Initialize with empty dict 257 | 258 | if "id" in tc_delta: 259 | current_tool_calls[index]["id"] = tc_delta["id"] 260 | current_tool_calls[index][ 261 | "type" 262 | ] = "function" # OpenAI specific 263 | 264 | if "function" in tc_delta: 265 | current_tool_calls[index].setdefault("function", {}) 266 | if "name" in tc_delta["function"]: 267 | current_tool_calls[index]["function"][ 268 | "name" 269 | ] = tc_delta["function"]["name"] 270 | if "arguments" in tc_delta["function"]: 271 | current_tool_calls[index][ 272 | "function" 273 | ].setdefault("arguments", "") 274 | current_tool_calls[index]["function"][ 275 | "arguments" 276 | ] += tc_delta["function"]["arguments"] 277 | 278 | finish_reason = chunk_data.get("choices", [{}])[0].get( 279 | "finish_reason" 280 | ) 281 | if finish_reason == "tool_calls" or ( 282 | finish_reason 283 | and not tool_calls_chunk 284 | and current_tool_calls 285 | ): # End of stream and we have tool calls 286 | parsed_tool_calls_list = [] 287 | for tc_data in current_tool_calls: 288 | if tc_data.get("id") and tc_data.get( 289 | "function", {} 290 | ).get("name"): 291 | parsed_tool_calls_list.append( 292 | ToolCall( 293 | id=tc_data["id"], 294 | function=FunctionCall( 295 | name=tc_data["function"]["name"], 296 | arguments=tc_data["function"].get( 297 | "arguments", "{}" 298 | ), # Default to empty JSON obj string 299 | ), 300 | ) 301 | ) 302 | if 
parsed_tool_calls_list: 303 | yield MessageChunk( 304 | role="assistant", 305 | content=None, 306 | tool_calls=parsed_tool_calls_list, 307 | ) 308 | current_tool_calls = ( 309 | [] 310 | ) # Reset for potential future chunks (though unlikely with OpenAI) 311 | 312 | except json.JSONDecodeError: 313 | logger.warning( 314 | f"Could not decode stream chunk for {self.model_id}: {data_content}" 315 | ) 316 | 317 | # If stream ended and there are still unyielded tool calls (e.g. no explicit finish_reason="tool_calls") 318 | if current_tool_calls: 319 | parsed_tool_calls_list = [] 320 | for tc_data in current_tool_calls: 321 | if tc_data.get("id") and tc_data.get("function", {}).get("name"): 322 | parsed_tool_calls_list.append( 323 | ToolCall( 324 | id=tc_data["id"], 325 | function=FunctionCall( 326 | name=tc_data["function"]["name"], 327 | arguments=tc_data["function"].get( 328 | "arguments", "{}" 329 | ), 330 | ), 331 | ) 332 | ) 333 | if parsed_tool_calls_list: 334 | yield MessageChunk( 335 | role="assistant", 336 | content=None, 337 | tool_calls=parsed_tool_calls_list, 338 | ) 339 | -------------------------------------------------------------------------------- /tframex/util/logging/__init__.py: -------------------------------------------------------------------------------- 1 | # tframex/util/logging/__init__.py 2 | from .logging_config import setup_logging, ColoredFormatter 3 | 4 | __all__ = ["setup_logging", "ColoredFormatter"] -------------------------------------------------------------------------------- /tframex/util/logging/logging_config.py: -------------------------------------------------------------------------------- 1 | import logging 2 | import sys 3 | from typing import Optional 4 | 5 | 6 | class ColoredFormatter(logging.Formatter): 7 | """Custom formatter that adds colors to log messages.""" 8 | 9 | # ANSI color codes 10 | COLORS = { 11 | "DEBUG": "\033[36m", # Cyan 12 | "INFO": "\033[32m", # Green 13 | "WARNING": "\033[33m", # Yellow 14 | "ERROR": 
"\033[31m", # Red 15 | "CRITICAL": "\033[41m", # Red background 16 | "RESET": "\033[0m", # Reset 17 | } 18 | 19 | def format(self, record: logging.LogRecord) -> str: 20 | # Add color to the level name 21 | levelname = record.levelname 22 | if levelname in self.COLORS: 23 | record.levelname = ( 24 | f"{self.COLORS[levelname]}{levelname}{self.COLORS['RESET']}" 25 | ) 26 | 27 | # Format the message with timestamp 28 | formatted = super().format(record) 29 | return formatted 30 | 31 | 32 | def setup_logging( 33 | level: int = logging.INFO, log_format: Optional[str] = None, use_colors: bool = True 34 | ) -> None: 35 | """ 36 | Configure logging with colors and custom formatting. 37 | 38 | Args: 39 | level: The logging level (default: INFO) 40 | log_format: Custom log format string (optional) 41 | use_colors: Whether to use colored output (default: True) 42 | """ 43 | root_logger = logging.getLogger() 44 | root_logger.setLevel(level) 45 | 46 | # Remove existing handlers 47 | for handler in root_logger.handlers[:]: 48 | root_logger.removeHandler(handler) 49 | 50 | # Create console handler 51 | console_handler = logging.StreamHandler(sys.stdout) 52 | 53 | # Set format 54 | if log_format is None: 55 | log_format = "%(asctime)s | %(levelname)-8s | %(name)s | %(message)s" 56 | 57 | if use_colors: 58 | formatter = ColoredFormatter(log_format, datefmt="%Y-%m-%d %H:%M:%S") 59 | else: 60 | formatter = logging.Formatter(log_format, datefmt="%Y-%m-%d %H:%M:%S") 61 | 62 | console_handler.setFormatter(formatter) 63 | root_logger.addHandler(console_handler) 64 | -------------------------------------------------------------------------------- /tframex/util/memory.py: -------------------------------------------------------------------------------- 1 | import logging 2 | from abc import ABC, abstractmethod 3 | from typing import List, Optional 4 | 5 | from tframex.models.primitives import Message 6 | 7 | logger = logging.getLogger(__name__) 8 | 9 | 10 | class BaseMemoryStore(ABC): 11 | 
@abstractmethod 12 | async def add_message(self, message: Message) -> None: ... 13 | 14 | @abstractmethod 15 | async def get_history( 16 | self, 17 | limit: Optional[int] = None, 18 | offset: int = 0, 19 | roles: Optional[List[str]] = None, 20 | ) -> List[Message]: ... 21 | 22 | @abstractmethod 23 | async def clear(self) -> None: ... 24 | 25 | 26 | class InMemoryMemoryStore(BaseMemoryStore): 27 | def __init__(self, max_history_size: Optional[int] = None): 28 | self._history: List[Message] = [] 29 | self.max_history_size = max_history_size 30 | logger.debug(f"InMemoryMemoryStore initialized. Max size: {max_history_size}") 31 | 32 | async def add_message(self, message: Message) -> None: 33 | self._history.append(message) 34 | if ( 35 | self.max_history_size is not None 36 | and len(self._history) > self.max_history_size 37 | ): 38 | self._history.pop(0) # Keep it a rolling window 39 | logger.debug( 40 | f"Added message to InMemoryMemoryStore: Role={message.role}, Content='{str(message.content)[:50]}...', ToolCalls={bool(message.tool_calls)}" 41 | ) 42 | 43 | async def get_history( 44 | self, 45 | limit: Optional[int] = None, 46 | offset: int = 0, 47 | roles: Optional[List[str]] = None, 48 | ) -> List[Message]: 49 | 50 | filtered_history = self._history 51 | if roles: 52 | filtered_history = [msg for msg in self._history if msg.role in roles] 53 | 54 | start_index = offset 55 | end_index = len(filtered_history) 56 | if limit is not None: 57 | # If limit is used, it usually means "last N messages" 58 | # So offset should count from the end if limit is present 59 | # For simplicity now, offset from start, then limit 60 | end_index = min(start_index + limit, len(filtered_history)) 61 | 62 | return list(filtered_history[start_index:end_index]) 63 | 64 | async def clear(self) -> None: 65 | self._history = [] 66 | logger.info("InMemoryMemoryStore cleared.") 67 | -------------------------------------------------------------------------------- /tframex/util/tools.py: 
-------------------------------------------------------------------------------- 1 | import asyncio 2 | import inspect 3 | import json 4 | import logging 5 | from typing import Any, Callable, Coroutine, Dict, List, Optional 6 | 7 | from tframex.models.primitives import ( 8 | ToolDefinition, 9 | ToolParameterProperty, 10 | ToolParameters, 11 | ) 12 | 13 | logger = logging.getLogger(__name__) 14 | 15 | 16 | class Tool: 17 | def __init__( 18 | self, 19 | name: str, 20 | func: Callable[..., Any], 21 | description: Optional[str] = None, 22 | parameters_schema: Optional[ToolParameters] = None, 23 | ): 24 | self.name = name 25 | self.func = func 26 | self.description = ( 27 | description or inspect.getdoc(func) or f"Tool named '{name}'." 28 | ) 29 | 30 | if parameters_schema: 31 | self.parameters = parameters_schema 32 | else: 33 | self.parameters = self._infer_schema_from_func(func) 34 | 35 | logger.debug( 36 | f"Tool '{self.name}' initialized. Schema: {self.parameters.model_dump_json(indent=2)}" 37 | ) 38 | 39 | def _infer_schema_from_func(self, func: Callable) -> ToolParameters: 40 | sig = inspect.signature(func) 41 | properties: Dict[str, ToolParameterProperty] = {} 42 | required_params: List[str] = [] 43 | 44 | for param_name, param in sig.parameters.items(): 45 | if param_name in [ 46 | "self", 47 | "cls", 48 | "rt_ctx", 49 | "runtime_context", 50 | "loop", 51 | "_loop", 52 | ]: # Skip common bound/context args 53 | continue 54 | 55 | # Basic type mapping (can be expanded significantly) 56 | param_type_str = "string" # Default 57 | param_description = f"Parameter '{param_name}'" 58 | 59 | if param.annotation != inspect.Parameter.empty: 60 | if param.annotation == int: 61 | param_type_str = "integer" 62 | elif param.annotation == float: 63 | param_type_str = "number" 64 | elif param.annotation == bool: 65 | param_type_str = "boolean" 66 | elif ( 67 | param.annotation == list 68 | or getattr(param.annotation, "__origin__", None) == list 69 | ): 70 | param_type_str = 
"array" # Basic list 71 | elif ( 72 | param.annotation == dict 73 | or getattr(param.annotation, "__origin__", None) == dict 74 | ): 75 | param_type_str = "object" # Basic dict 76 | # For more complex annotations (e.g., List[str], MyPydanticModel), more advanced parsing is needed. 77 | # Type hints in docstrings could also be parsed. 78 | 79 | properties[param_name] = ToolParameterProperty( 80 | type=param_type_str, description=param_description 81 | ) 82 | if param.default == inspect.Parameter.empty: 83 | required_params.append(param_name) 84 | 85 | return ToolParameters( 86 | properties=properties, required=required_params or None 87 | ) # None if empty list 88 | 89 | def get_openai_tool_definition(self) -> ToolDefinition: 90 | """Returns schema in OpenAI function calling format.""" 91 | return ToolDefinition( 92 | type="function", 93 | function={ 94 | "name": self.name, 95 | "description": self.description, 96 | "parameters": self.parameters.model_dump( 97 | exclude_none=True 98 | ), # Pydantic handles required list correctly 99 | }, 100 | ) 101 | 102 | async def execute(self, arguments_json_str: str) -> Any: 103 | logger.info( 104 | f"Executing tool '{self.name}' with JSON arguments: {arguments_json_str}" 105 | ) 106 | try: 107 | kwargs = json.loads(arguments_json_str) 108 | except json.JSONDecodeError as e: 109 | err_msg = f"Invalid JSON arguments for tool '{self.name}': {arguments_json_str}. 
Error: {e}" 110 | logger.error(err_msg) 111 | return {"error": err_msg} # Return a dict for error consistency 112 | 113 | # TODO: Add Pydantic validation of kwargs against self.parameters schema here for robustness 114 | 115 | try: 116 | if asyncio.iscoroutinefunction(self.func): 117 | return await self.func(**kwargs) 118 | else: 119 | # Consider context for to_thread if event loop isn't guaranteed to be the one expected by func's potential side effects 120 | return await asyncio.to_thread(self.func, **kwargs) 121 | except Exception as e: 122 | logger.error( 123 | f"Error during execution of tool '{self.name}': {e}", exc_info=True 124 | ) 125 | return {"error": f"Execution error in tool '{self.name}': {str(e)}"} 126 | --------------------------------------------------------------------------------