├── examples
│   └── .gitkeep
├── tests
│   └── .gitkeep
├── optimization
│   └── .gitkeep
├── awesome_dspy_agents
│   ├── tools
│   │   ├── __init__.py
│   │   ├── registry.py
│   │   └── ascii_to_png.py
│   ├── patterns
│   │   ├── __init__.py
│   │   ├── debate
│   │   │   ├── __init__.py
│   │   │   ├── config.yaml
│   │   │   ├── signatures.py
│   │   │   └── pattern.py
│   │   ├── addition_by_subtraction
│   │   │   ├── __init__.py
│   │   │   ├── config.yaml
│   │   │   ├── signatures.py
│   │   │   └── pattern.py
│   │   └── interface.py
│   ├── __init__.py
│   ├── logging_setup.py
│   ├── tui.py
│   ├── config.py
│   └── cli.py
├── pyproject.toml
├── .gitignore
└── README.md
/examples/.gitkeep: -------------------------------------------------------------------------------- 1 | -------------------------------------------------------------------------------- /tests/.gitkeep: -------------------------------------------------------------------------------- 1 | -------------------------------------------------------------------------------- /optimization/.gitkeep: -------------------------------------------------------------------------------- 1 | -------------------------------------------------------------------------------- /awesome_dspy_agents/tools/__init__.py: -------------------------------------------------------------------------------- 1 | -------------------------------------------------------------------------------- /awesome_dspy_agents/patterns/__init__.py: -------------------------------------------------------------------------------- 1 | -------------------------------------------------------------------------------- /awesome_dspy_agents/patterns/debate/__init__.py: -------------------------------------------------------------------------------- 1 | -------------------------------------------------------------------------------- /awesome_dspy_agents/__init__.py: -------------------------------------------------------------------------------- 1 | __all__ = ["__version__"] 2 | __version__ = "0.1.0" 3 | -------------------------------------------------------------------------------- /awesome_dspy_agents/patterns/addition_by_subtraction/__init__.py: -------------------------------------------------------------------------------- 1 | """ 2 | Addition-by-Subtraction collaboration pattern module package.
3 | """ 4 | -------------------------------------------------------------------------------- /pyproject.toml: -------------------------------------------------------------------------------- 1 | [project] 2 | name = "dspy-agents" 3 | version = "0.1.0" 4 | description = "Best agents" 5 | authors = [ 6 | {name = "Michael Pavlukhin",email = "michael@pavlukhinlab.com"} 7 | ] 8 | readme = "README.md" 9 | requires-python = ">=3.12,<3.14" 10 | dependencies = [ 11 | "dspy (>=3.0.3,<4.0.0)", 12 | "typer (>=0.19.2,<0.20.0)", 13 | "rich (>=14.2.0,<15.0.0)", 14 | "pydantic (>=2.8,<3.0)", 15 | "pyyaml (>=6.0.1,<7.0)", 16 | "pillow (>=11.3.0,<12.0.0)", 17 | "questionary (>=2.0.1,<3.0.0)", 18 | "structlog (>=25.4.0,<26.0.0)", 19 | "attachments (>=0.25.1,<0.26.0)" 20 | ] 21 | 22 | 23 | [project.scripts] 24 | dspy-agents = "awesome_dspy_agents.cli:app" 25 | 26 | [tool.poetry] 27 | packages = [ 28 | { include = "awesome_dspy_agents" }, 29 | ] 30 | 31 | 32 | [build-system] 33 | requires = ["poetry-core>=2.0.0,<3.0.0"] 34 | build-backend = "poetry.core.masonry.api" 35 | -------------------------------------------------------------------------------- /awesome_dspy_agents/patterns/addition_by_subtraction/config.yaml: -------------------------------------------------------------------------------- 1 | default_lm: 2 | provider: gemini 3 | model: gemini-2.5-flash-preview-09-2025 4 | temperature: 0.5 5 | max_tokens: 20000 6 | api_key_env: GEMINI_API_KEY 7 | 8 | agents: 9 | addition: 10 | persona: | 11 | You are the Addition agent. Extract as much relevant information as possible, 12 | synthesize details, and provide comprehensive content to solve the task. 13 | lm: 14 | provider: gemini 15 | model: gemini-2.5-flash-preview-09-2025 16 | temperature: 0.8 17 | max_tokens: 20000 18 | module_type: react 19 | tools: ["ascii_to_png", "read_file_attachment", "list_files", "math_eval", "word_count"] 20 | subtraction: 21 | persona: | 22 | You are the Subtraction agent. Review the candidate response, remove redundancies, 23 | and return a concise, non-redundant version. Provide clear feedback for the Addition agent. 
24 | lm: 25 | provider: gemini 26 | model: gemini-2.5-flash-preview-09-2025 27 | temperature: 0.7 28 | max_tokens: 20000 29 | module_type: react 30 | tools: ["ascii_to_png", "read_file_attachment", "list_files", "math_eval", "word_count"] 31 | 32 | abs: 33 | max_iterations: 2 34 | early_exit: true 35 | 36 | 37 | -------------------------------------------------------------------------------- /awesome_dspy_agents/patterns/addition_by_subtraction/signatures.py: -------------------------------------------------------------------------------- 1 | import dspy # type: ignore 2 | 3 | 4 | class AdditionAgentSignature(dspy.Signature): 5 | """Addition agent produces a comprehensive candidate response.""" 6 | 7 | context: str = dspy.InputField(desc="Context C") 8 | instruction: str = dspy.InputField(desc="Instruction I") 9 | history: str = dspy.InputField(desc="Conversation history H") 10 | persona: str = dspy.InputField(desc="Persona influencing tone and style") 11 | 12 | candidate_response: str = dspy.OutputField(desc="Detailed candidate response R'") 13 | reasoning: str = dspy.OutputField(desc="Step-by-step reasoning for additions") 14 | 15 | 16 | class SubtractionAgentSignature(dspy.Signature): 17 | """Subtraction agent removes redundancies and provides feedback.""" 18 | 19 | context: str = dspy.InputField(desc="Context C") 20 | instruction: str = dspy.InputField(desc="Instruction I") 21 | history: str = dspy.InputField(desc="Conversation history H including latest R'") 22 | candidate_response: str = dspy.InputField(desc="Latest candidate response R'") 23 | persona: str = dspy.InputField(desc="Persona influencing tone and style") 24 | 25 | refined_response: str = dspy.OutputField(desc="Concise response after subtraction") 26 | feedback: str = dspy.OutputField(desc="Feedback to guide the next addition step") 27 | -------------------------------------------------------------------------------- /awesome_dspy_agents/patterns/debate/config.yaml: -------------------------------------------------------------------------------- 1 | default_lm: 2 | provider: gemini 3 | model: gemini-2.5-flash-preview-09-2025 4 | temperature: 0.5 5 | max_tokens: 20000 6 | api_key_env: GEMINI_API_KEY 7 | 8 | agents: 9 | affirmative: 10 | persona: | 11 | You are an optimistic and evidence-driven problem solver. Prioritize clarity, 12 | show working, and propose pragmatic solutions. Keep responses concise. 13 | lm: 14 | provider: gemini 15 | model: gemini-2.5-flash-preview-09-2025 16 | temperature: 0.8 17 | max_tokens: 20000 18 | module_type: react 19 | tools: ["ascii_to_png", "read_file_attachment", "list_files"] 20 | negative: 21 | persona: | 22 | You are a rigorous skeptic and red-team analyst. Identify flaws, edge cases, 23 | and missing assumptions. Be constructive but direct and critical. 
24 | lm: 25 | provider: gemini 26 | model: gemini-2.5-flash-preview-09-2025 27 | temperature: 0.9 28 | max_tokens: 20000 29 | module_type: react 30 | tools: ["ascii_to_png", "read_file_attachment", "list_files"] 31 | 32 | judge: 33 | discriminative_lm: 34 | provider: gemini 35 | model: gemini-2.5-flash-preview-09-2025 36 | temperature: 0.2 37 | extractive_lm: 38 | provider: gemini 39 | model: gemini-2.5-flash-preview-09-2025 40 | temperature: 0.3 41 | module_type: react 42 | tools: ["write_file"] 43 | 44 | debate: 45 | max_iterations: 5 46 | debate_level: 2 47 | adaptive_break: true 48 | 49 | 50 | -------------------------------------------------------------------------------- /awesome_dspy_agents/patterns/interface.py: -------------------------------------------------------------------------------- 1 | from __future__ import annotations 2 | 3 | from pathlib import Path 4 | from typing import Any, Callable, Dict, Iterable, Optional, Protocol 5 | 6 | 7 | class AgentPattern(Protocol): 8 | """Protocol all agent patterns must implement.""" 9 | 10 | name: str 11 | 12 | def describe(self) -> str: 13 | """Return a short Markdown description of the pattern.""" 14 | ... 15 | 16 | def default_config_path(self) -> Optional[Path]: 17 | """Return the default config.yaml path for this pattern, if any.""" 18 | ... 19 | 20 | def available_configs(self) -> Iterable[Path]: 21 | """Return a list of scenario/config files under this pattern directory.""" 22 | ... 23 | 24 | def available_tools(self) -> Iterable[str]: 25 | """Return a list of tool names this pattern uses by default (if any).""" 26 | ... 27 | 28 | def available_scripts(self) -> Dict[str, str]: 29 | """Return mapping of script-name -> description for optimization/eval scripts.""" 30 | ... 31 | 32 | def run( 33 | self, 34 | topic: str, 35 | config_path: Path, 36 | overrides: Optional[Dict[str, Any]] = None, 37 | on_iteration: Optional[Callable[[int, Dict[str, Any], str], None]] = None, 38 | ) -> Dict[str, Any]: 39 | """Execute the pattern and return a machine-friendly summary of results.""" 40 | ... 
41 | 42 | 43 | def find_patterns(root: Path) -> Dict[str, AgentPattern]: 44 | """Scan `root` for pattern packages exposing get_pattern().""" 45 | patterns: Dict[str, AgentPattern] = {} 46 | for entry in root.iterdir(): 47 | if not entry.is_dir(): 48 | continue 49 | candidate = entry / "pattern.py" 50 | if not candidate.exists(): 51 | continue 52 | # Dynamic import 53 | module_name = f"awesome_dspy_agents.patterns.{entry.name}.pattern" 54 | try: 55 | mod = __import__(module_name, fromlist=["get_pattern"]) # type: ignore 56 | get_pattern = getattr(mod, "get_pattern", None) 57 | if get_pattern is None: 58 | continue 59 | pattern: AgentPattern = get_pattern() 60 | patterns[entry.name] = pattern 61 | except Exception: 62 | # Skip broken patterns; CLI can show warnings later 63 | continue 64 | return patterns 65 | -------------------------------------------------------------------------------- /awesome_dspy_agents/patterns/debate/signatures.py: -------------------------------------------------------------------------------- 1 | import dspy 2 | 3 | 4 | class AffirmativeDebater(dspy.Signature): 5 | """Affirmative debater presents initial argument.""" 6 | 7 | debate_topic: str = dspy.InputField(desc="Topic to debate") 8 | debate_history: str = dspy.InputField(desc="Previous debate exchanges") 9 | role_instruction: str = dspy.InputField( 10 | desc="Role-specific instructions for affirmative side" 11 | ) 12 | persona: str = dspy.InputField( 13 | desc="Persona profile influencing tone, priorities, and style" 14 | ) 15 | 16 | argument: str = dspy.OutputField(desc="Affirmative side's argument") 17 | reasoning: str = dspy.OutputField(desc="Step-by-step reasoning") 18 | 19 | 20 | class NegativeDebater(dspy.Signature): 21 | """Negative debater provides counter-argument.""" 22 | 23 | debate_topic: str = dspy.InputField(desc="Topic to debate") 24 | debate_history: str = dspy.InputField(desc="Previous debate exchanges") 25 | affirmative_argument: str = dspy.InputField(desc="Current affirmative argument") 26 | role_instruction: str = dspy.InputField( 27 | desc="Role-specific instructions for negative side" 28 | ) 29 | persona: str = dspy.InputField( 30 | desc="Persona profile influencing tone, priorities, and style" 31 | ) 32 | 33 | counter_argument: str = dspy.OutputField(desc="Negative side's counter-argument") 34 | reasoning: str = dspy.OutputField(desc="Step-by-step reasoning") 35 | 36 | 37 | class JudgeDiscriminative(dspy.Signature): 38 | """Judge evaluates if correct solution has been reached.""" 39 | 40 | debate_topic: str = dspy.InputField(desc="Original problem/question") 41 | debate_history: str = dspy.InputField(desc="Complete debate transcript") 42 | current_iteration: int = dspy.InputField(desc="Current iteration number") 43 | 44 | solution_found: bool = dspy.OutputField( 45 | desc="True if correct solution identified, False otherwise" 46 | ) 47 | confidence: float = dspy.OutputField(desc="Confidence in decision (0.0 to 1.0)") 48 | reasoning: str = dspy.OutputField(desc="Explanation for decision") 49 | 50 | 51 | class JudgeExtractive(dspy.Signature): 52 | """Judge extracts final answer from debate history.""" 53 | 54 | debate_topic: str = dspy.InputField(desc="Original problem/question") 55 | debate_history: str = dspy.InputField(desc="Complete debate transcript") 56 | 57 | final_answer: str = dspy.OutputField(desc="Extracted final solution") 58 | justification: str = dspy.OutputField( 59 | desc="Why this answer was chosen based on debate" 60 | ) 61 | 
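# Usage sketch (illustrative only, not part of this file): these signatures plug
# into standard DSPy modules the same way the pattern modules in this repo wire
# them. The model id, topic, and persona strings below are assumptions.
#
#   import dspy
#
#   dspy.configure(lm=dspy.LM("gemini/gemini-2.5-flash-preview-09-2025"))
#   affirmative = dspy.ChainOfThought(AffirmativeDebater)
#   opening = affirmative(
#       debate_topic="Is RLHF always beneficial?",
#       debate_history="No prior exchanges.",
#       role_instruction="Open the debate with your strongest case.",
#       persona="Optimistic, evidence-driven problem solver.",
#   )
#   print(opening.argument)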
-------------------------------------------------------------------------------- /awesome_dspy_agents/logging_setup.py: -------------------------------------------------------------------------------- 1 | from __future__ import annotations 2 | 3 | import logging 4 | import os 5 | import sys 6 | from logging.handlers import RotatingFileHandler 7 | from pathlib import Path 8 | from typing import Optional 9 | 10 | import structlog # type: ignore 11 | 12 | _CONFIGURED: bool = False 13 | 14 | 15 | def configure_logging(pretty_console: Optional[bool] = None) -> None: 16 | """Configure structlog once for the entire application. 17 | 18 | - Pretty console by default when TTY or MAD_LOG_PRETTY is truthy. 19 | - JSON rendering otherwise (useful for production/log files). 20 | """ 21 | global _CONFIGURED 22 | if _CONFIGURED: 23 | return 24 | 25 | if pretty_console is None: 26 | env_val = os.getenv("MAD_LOG_PRETTY", "1").lower() 27 | pretty_console = env_val not in ("0", "false", "no") and sys.stderr.isatty() 28 | 29 | processors = [ 30 | structlog.processors.TimeStamper(fmt="iso"), 31 | structlog.stdlib.add_log_level, 32 | structlog.processors.StackInfoRenderer(), 33 | structlog.processors.format_exc_info, 34 | ] 35 | 36 | if pretty_console: 37 | processors.append(structlog.dev.ConsoleRenderer()) 38 | else: 39 | processors.append(structlog.processors.JSONRenderer()) 40 | 41 | structlog.configure( 42 | processors=processors, 43 | logger_factory=structlog.stdlib.LoggerFactory(), 44 | cache_logger_on_first_use=True, 45 | ) 46 | 47 | _CONFIGURED = True 48 | 49 | 50 | def get_logger( 51 | name: str, 52 | file_name: str, 53 | *, 54 | max_bytes: int = 2_000_000, 55 | backup_count: int = 3, 56 | logs_dir: Optional[Path] = None, 57 | ): 58 | """Return a structlog logger and ensure a rotating file handler is attached. 59 | 60 | The file handler is attached to the stdlib logger with the same name so that 61 | structlog's output is written as line-delimited JSON (or pretty text) into 62 | the specified file. 
63 | """ 64 | configure_logging() 65 | 66 | if logs_dir is None: 67 | logs_dir = Path(__file__).resolve().parent / "logs" 68 | logs_dir.mkdir(parents=True, exist_ok=True) 69 | log_path = logs_dir / file_name 70 | 71 | std_logger = logging.getLogger(name) 72 | # Avoid duplicating the same file handler if called multiple times 73 | has_file = False 74 | for h in std_logger.handlers: 75 | try: 76 | if isinstance(h, RotatingFileHandler) and getattr( 77 | h, "baseFilename", None 78 | ) == str(log_path): 79 | has_file = True 80 | break 81 | except Exception: 82 | continue 83 | 84 | if not has_file: 85 | handler = RotatingFileHandler( 86 | str(log_path), maxBytes=max_bytes, backupCount=backup_count 87 | ) 88 | # structlog renders into the message field 89 | handler.setFormatter(logging.Formatter("%(message)s")) 90 | std_logger.setLevel(logging.INFO) 91 | std_logger.addHandler(handler) 92 | 93 | return structlog.get_logger(name) 94 | -------------------------------------------------------------------------------- /awesome_dspy_agents/tui.py: -------------------------------------------------------------------------------- 1 | from __future__ import annotations 2 | 3 | from typing import Any, Dict, List 4 | 5 | from rich.console import Console # type: ignore 6 | from rich.markdown import Markdown # type: ignore 7 | from rich.panel import Panel # type: ignore 8 | from rich.table import Table # type: ignore 9 | 10 | console = Console() 11 | 12 | 13 | def render_header(pattern_name: str, topic: str) -> None: 14 | console.print( 15 | Panel.fit( 16 | f"Running [bold cyan]{pattern_name}[/bold cyan] Pattern", 17 | style="bold blue", 18 | ) 19 | ) 20 | console.print(Panel.fit(Markdown(f"**Topic:** {topic}"))) 21 | 22 | 23 | def render_iteration(iteration: int, exchange: Dict[str, Any]) -> None: 24 | if "judge_eval" in exchange: 25 | console.print( 26 | Panel.fit( 27 | exchange["judge_eval"], 28 | title=f"Judge @ {iteration}", 29 | border_style="yellow", 30 | ) 31 | ) 32 | return 33 | 34 | if "affirmative" in exchange or "negative" in exchange: 35 | table = Table(show_header=True, header_style="bold magenta") 36 | table.add_column("Iteration", justify="right", style="cyan", width=10) 37 | table.add_column("Affirmative", style="green") 38 | table.add_column("Negative", style="red") 39 | table.add_row( 40 | str(iteration), 41 | exchange.get("affirmative", ""), 42 | exchange.get("negative", ""), 43 | ) 44 | console.print(table) 45 | return 46 | 47 | if "addition" in exchange or "subtraction" in exchange: 48 | table = Table(show_header=True, header_style="bold magenta") 49 | table.add_column("Iteration", justify="right", style="cyan", width=10) 50 | table.add_column("Addition", style="green") 51 | table.add_column("Subtraction", style="red") 52 | table.add_row( 53 | str(iteration), 54 | exchange.get("addition", ""), 55 | exchange.get("subtraction", ""), 56 | ) 57 | console.print(table) 58 | if exchange.get("feedback"): 59 | console.print( 60 | Panel.fit( 61 | exchange["feedback"], 62 | title=f"Feedback @ {iteration}", 63 | border_style="yellow", 64 | ) 65 | ) 66 | return 67 | 68 | console.print(Panel.fit(str(exchange), title=f"Iteration {iteration}")) 69 | 70 | 71 | def render_tool_events( 72 | iteration: int, events: List[Dict[str, Any]], start_idx: int 73 | ) -> int: 74 | new_events = events[start_idx:] 75 | if not new_events: 76 | return start_idx 77 | t = Table(show_header=True, header_style="bold blue") 78 | t.title = f"Tools used @ iteration {iteration} (+{len(new_events)})" 79 | t.add_column("Agent", 
style="cyan") 80 | t.add_column("Tool", style="magenta") 81 | t.add_column("Event", style="yellow") 82 | t.add_column("Details", style="green") 83 | for ev in new_events: 84 | agent = ev.get("agent", "unknown") 85 | tool = ev.get("tool", "?") 86 | evt = ev.get("event", "") 87 | details = "" 88 | if evt == "tool_call": 89 | details = ev.get("args_preview", "") 90 | elif evt == "tool_result": 91 | rt = ev.get("result_type", "") 92 | sz = ev.get("size") 93 | details = f"{rt} size={sz}" if sz is not None else rt 94 | elif evt == "tool_error": 95 | details = ev.get("error", "") 96 | t.add_row(str(agent), str(tool), str(evt), str(details)) 97 | console.print(t) 98 | return start_idx + len(new_events) 99 | 100 | 101 | def render_final(final_answer: str, justification: str) -> None: 102 | console.rule("Final Decision") 103 | console.print( 104 | Panel.fit(justification, title="Justification", border_style="magenta") 105 | ) 106 | console.print(Panel.fit(final_answer, title="Final Answer", border_style="green")) 107 | -------------------------------------------------------------------------------- /awesome_dspy_agents/config.py: -------------------------------------------------------------------------------- 1 | # pyright: reportMissingTypeStubs=false 2 | import os 3 | from typing import Any, Dict, List, Literal, Optional 4 | 5 | import dspy 6 | from pydantic import BaseModel, ConfigDict, Field, ValidationError 7 | 8 | 9 | class LMSettings(BaseModel): 10 | model_config = ConfigDict(extra="ignore") 11 | 12 | provider: str = Field(description="Provider namespace for DSPy LM, e.g., 'openai'") 13 | model: str = Field(description="Model name, e.g., 'gpt-4o-mini'") 14 | temperature: float = Field(default=0.7, ge=0.0, le=2.0) 15 | top_p: float = Field(default=1.0, ge=0.0, le=1.0) 16 | max_tokens: Optional[int] = Field(default=None, ge=1) 17 | api_base: Optional[str] = None 18 | api_key: Optional[str] = None 19 | api_key_env: Optional[str] = Field( 20 | default=None, description="Env var name for API key" 21 | ) 22 | 23 | def resolve_api_key(self) -> Optional[str]: 24 | if self.api_key: 25 | return self.api_key 26 | if self.api_key_env: 27 | return os.getenv(self.api_key_env) 28 | return None 29 | 30 | 31 | class AgentConfig(BaseModel): 32 | model_config = ConfigDict(extra="ignore") 33 | 34 | persona: str = Field( 35 | default="", description="Persona prompt that shapes agent behavior" 36 | ) 37 | lm: Optional[LMSettings] = None 38 | module_type: Literal["predict", "chain_of_thought", "react"] = Field( 39 | default="predict", description="Which DSPy module to use for this agent" 40 | ) 41 | tools: List[str] = Field( 42 | default_factory=list, description="Names of tools from registry for ReAct" 43 | ) 44 | 45 | 46 | class JudgeConfig(BaseModel): 47 | model_config = ConfigDict(extra="ignore") 48 | 49 | discriminative_lm: Optional[LMSettings] = None 50 | extractive_lm: Optional[LMSettings] = None 51 | module_type: Literal["predict", "chain_of_thought", "react"] = Field( 52 | default="predict", description="Judge mode for discriminative/extractive" 53 | ) 54 | tools: List[str] = Field( 55 | default_factory=list, description="Names of tools from registry for Judge ReAct" 56 | ) 57 | 58 | 59 | class DebateConfig(BaseModel): 60 | model_config = ConfigDict(extra="ignore") 61 | 62 | max_iterations: int = Field(default=3, ge=1) 63 | debate_level: int = Field(default=2, ge=0, le=3) 64 | adaptive_break: bool = True 65 | 66 | 67 | class AbsConfig(BaseModel): 68 | model_config = ConfigDict(extra="ignore") 69 | 70 | 
max_iterations: int = Field(default=2, ge=1) 71 | early_exit: bool = True 72 | 73 | 74 | class AppConfig(BaseModel): 75 | model_config = ConfigDict(extra="ignore") 76 | 77 | default_lm: Optional[LMSettings] = None 78 | agents: Dict[str, AgentConfig] = Field(default_factory=dict) 79 | judge: JudgeConfig = Field(default_factory=JudgeConfig) 80 | debate: DebateConfig = Field(default_factory=DebateConfig) 81 | abs: AbsConfig = Field(default_factory=AbsConfig) 82 | 83 | 84 | def build_lm(settings: LMSettings) -> dspy.LM: 85 | full_model_id = f"{settings.provider}/{settings.model}" 86 | api_key = settings.resolve_api_key() 87 | lm_kwargs: Dict[str, Any] = { 88 | "temperature": settings.temperature, 89 | "top_p": settings.top_p, 90 | } 91 | if settings.max_tokens is not None: 92 | lm_kwargs["max_tokens"] = settings.max_tokens 93 | if settings.api_base: 94 | lm_kwargs["api_base"] = settings.api_base 95 | if api_key: 96 | lm_kwargs["api_key"] = api_key 97 | return dspy.LM(full_model_id, **lm_kwargs) 98 | 99 | 100 | # YAML loader kept local to avoid hard dep outside CLI usage 101 | def _expand_env_vars_in_data(value: Any) -> Any: 102 | """Recursively expand environment variables like ${VAR} in strings.""" 103 | if isinstance(value, dict): 104 | return {k: _expand_env_vars_in_data(v) for k, v in value.items()} 105 | if isinstance(value, list): 106 | return [_expand_env_vars_in_data(v) for v in value] 107 | if isinstance(value, str): 108 | try: 109 | return os.path.expandvars(value) 110 | except Exception: 111 | return value 112 | return value 113 | 114 | 115 | def _read_yaml(path: str) -> Dict[str, Any]: 116 | try: 117 | import yaml # type: ignore 118 | except Exception as e: 119 | raise RuntimeError( 120 | "PyYAML is required to load YAML configs. Install with 'pip install pyyaml'." 121 | ) from e 122 | 123 | with open(path, "r", encoding="utf-8") as f: 124 | data: Dict[str, Any] = yaml.safe_load(f) or {} 125 | return _expand_env_vars_in_data(data) 126 | 127 | 128 | def load_config(path: str) -> AppConfig: 129 | data = _read_yaml(path) 130 | try: 131 | return AppConfig(**data) 132 | except ValidationError as ve: 133 | raise RuntimeError(f"Invalid configuration file: {ve}") 134 | -------------------------------------------------------------------------------- /.gitignore: -------------------------------------------------------------------------------- 1 | # Byte-compiled / optimized / DLL files 2 | __pycache__/ 3 | *.py[codz] 4 | *$py.class 5 | 6 | # C extensions 7 | *.so 8 | 9 | # Distribution / packaging 10 | .Python 11 | build/ 12 | develop-eggs/ 13 | dist/ 14 | downloads/ 15 | eggs/ 16 | .eggs/ 17 | lib/ 18 | lib64/ 19 | parts/ 20 | sdist/ 21 | var/ 22 | wheels/ 23 | share/python-wheels/ 24 | *.egg-info/ 25 | .installed.cfg 26 | *.egg 27 | MANIFEST 28 | 29 | # PyInstaller 30 | # Usually these files are written by a python script from a template 31 | # before PyInstaller builds the exe, so as to inject date/other infos into it. 
32 | *.manifest 33 | *.spec 34 | 35 | # Installer logs 36 | pip-log.txt 37 | pip-delete-this-directory.txt 38 | 39 | # Unit test / coverage reports 40 | htmlcov/ 41 | .tox/ 42 | .nox/ 43 | .coverage 44 | .coverage.* 45 | .cache 46 | nosetests.xml 47 | coverage.xml 48 | *.cover 49 | *.py.cover 50 | .hypothesis/ 51 | .pytest_cache/ 52 | cover/ 53 | 54 | # Translations 55 | *.mo 56 | *.pot 57 | 58 | # Django stuff: 59 | *.log 60 | local_settings.py 61 | db.sqlite3 62 | db.sqlite3-journal 63 | 64 | # Flask stuff: 65 | instance/ 66 | .webassets-cache 67 | 68 | # Scrapy stuff: 69 | .scrapy 70 | 71 | # Sphinx documentation 72 | docs/_build/ 73 | 74 | # PyBuilder 75 | .pybuilder/ 76 | target/ 77 | 78 | # Jupyter Notebook 79 | .ipynb_checkpoints 80 | 81 | # IPython 82 | profile_default/ 83 | ipython_config.py 84 | 85 | # pyenv 86 | # For a library or package, you might want to ignore these files since the code is 87 | # intended to run in multiple environments; otherwise, check them in: 88 | # .python-version 89 | 90 | # pipenv 91 | # According to pypa/pipenv#598, it is recommended to include Pipfile.lock in version control. 92 | # However, in case of collaboration, if having platform-specific dependencies or dependencies 93 | # having no cross-platform support, pipenv may install dependencies that don't work, or not 94 | # install all needed dependencies. 95 | # Pipfile.lock 96 | 97 | # UV 98 | # Similar to Pipfile.lock, it is generally recommended to include uv.lock in version control. 99 | # This is especially recommended for binary packages to ensure reproducibility, and is more 100 | # commonly ignored for libraries. 101 | # uv.lock 102 | 103 | # poetry 104 | # Similar to Pipfile.lock, it is generally recommended to include poetry.lock in version control. 105 | # This is especially recommended for binary packages to ensure reproducibility, and is more 106 | # commonly ignored for libraries. 107 | # https://python-poetry.org/docs/basic-usage/#commit-your-poetrylock-file-to-version-control 108 | # poetry.lock 109 | # poetry.toml 110 | 111 | # pdm 112 | # Similar to Pipfile.lock, it is generally recommended to include pdm.lock in version control. 113 | # pdm recommends including project-wide configuration in pdm.toml, but excluding .pdm-python. 114 | # https://pdm-project.org/en/latest/usage/project/#working-with-version-control 115 | # pdm.lock 116 | # pdm.toml 117 | .pdm-python 118 | .pdm-build/ 119 | 120 | # pixi 121 | # Similar to Pipfile.lock, it is generally recommended to include pixi.lock in version control. 122 | # pixi.lock 123 | # Pixi creates a virtual environment in the .pixi directory, just like venv module creates one 124 | # in the .venv directory. It is recommended not to include this directory in version control. 125 | .pixi 126 | 127 | # PEP 582; used by e.g. 
github.com/David-OConnor/pyflow and github.com/pdm-project/pdm 128 | __pypackages__/ 129 | 130 | # Celery stuff 131 | celerybeat-schedule 132 | celerybeat.pid 133 | 134 | # Redis 135 | *.rdb 136 | *.aof 137 | *.pid 138 | 139 | # RabbitMQ 140 | mnesia/ 141 | rabbitmq/ 142 | rabbitmq-data/ 143 | 144 | # ActiveMQ 145 | activemq-data/ 146 | 147 | # SageMath parsed files 148 | *.sage.py 149 | 150 | # Environments 151 | .env 152 | .envrc 153 | .venv 154 | env/ 155 | venv/ 156 | ENV/ 157 | env.bak/ 158 | venv.bak/ 159 | 160 | # Spyder project settings 161 | .spyderproject 162 | .spyproject 163 | 164 | # Rope project settings 165 | .ropeproject 166 | 167 | # mkdocs documentation 168 | /site 169 | 170 | # mypy 171 | .mypy_cache/ 172 | .dmypy.json 173 | dmypy.json 174 | 175 | # Pyre type checker 176 | .pyre/ 177 | 178 | # pytype static type analyzer 179 | .pytype/ 180 | 181 | # Cython debug symbols 182 | cython_debug/ 183 | 184 | # PyCharm 185 | # JetBrains specific template is maintained in a separate JetBrains.gitignore that can 186 | # be found at https://github.com/github/gitignore/blob/main/Global/JetBrains.gitignore 187 | # and can be added to the global gitignore or merged into this file. For a more nuclear 188 | # option (not recommended) you can uncomment the following to ignore the entire idea folder. 189 | # .idea/ 190 | 191 | # Abstra 192 | # Abstra is an AI-powered process automation framework. 193 | # Ignore directories containing user credentials, local state, and settings. 194 | # Learn more at https://abstra.io/docs 195 | .abstra/ 196 | 197 | # Visual Studio Code 198 | # Visual Studio Code specific template is maintained in a separate VisualStudioCode.gitignore 199 | # that can be found at https://github.com/github/gitignore/blob/main/Global/VisualStudioCode.gitignore 200 | # and can be added to the global gitignore or merged into this file. However, if you prefer, 201 | # you could uncomment the following to ignore the entire vscode folder 202 | # .vscode/ 203 | 204 | # Ruff stuff: 205 | .ruff_cache/ 206 | 207 | # PyPI configuration file 208 | .pypirc 209 | 210 | # Marimo 211 | marimo/_static/ 212 | marimo/_lsp/ 213 | __marimo__/ 214 | 215 | # Streamlit 216 | .streamlit/secrets.toml 217 | -------------------------------------------------------------------------------- /awesome_dspy_agents/tools/registry.py: -------------------------------------------------------------------------------- 1 | """ 2 | Global tools registry for DSPy agents. 3 | 4 | Provides: 5 | - ToolRegistry: register callables by name and build dspy.Tool list 6 | - Agent context helpers: set_current_agent/reset_current_agent for logging 7 | - Built-in tools: math_eval, word_count, ascii_to_png 8 | 9 | This module centralizes tool registration so all patterns can use the same 10 | registry and logging. 
11 | """ 12 | 13 | # pyright: reportMissingTypeStubs=false 14 | 15 | from __future__ import annotations 16 | 17 | import base64 18 | import inspect 19 | import os 20 | from contextvars import ContextVar 21 | from functools import wraps 22 | from io import BytesIO 23 | from pathlib import Path 24 | from typing import Any, Callable, Dict, List 25 | 26 | import dspy # type: ignore 27 | from attachments.dspy import Attachments # type: ignore 28 | 29 | from awesome_dspy_agents.logging_setup import get_logger 30 | from awesome_dspy_agents.tools.ascii_to_png import AsciiToPngConverter 31 | 32 | tools_logger = get_logger("mad.tools", "tools.log", max_bytes=1_000_000, backup_count=3) 33 | 34 | 35 | # Agent/iteration context for tool calls 36 | _current_agent: ContextVar[str] = ContextVar("mad_current_agent", default="unknown") 37 | _current_iteration: ContextVar[int] = ContextVar("mad_current_iteration", default=0) 38 | 39 | 40 | def set_current_agent(role: str): 41 | return _current_agent.set(role) 42 | 43 | 44 | def reset_current_agent(token: Any) -> None: 45 | try: 46 | _current_agent.reset(token) 47 | except Exception: 48 | pass 49 | 50 | 51 | def get_current_agent() -> str: 52 | try: 53 | return _current_agent.get() 54 | except Exception: 55 | return "unknown" 56 | 57 | 58 | def set_current_iteration(iteration: int): 59 | return _current_iteration.set(iteration) 60 | 61 | 62 | def reset_current_iteration(token: Any) -> None: 63 | try: 64 | _current_iteration.reset(token) 65 | except Exception: 66 | pass 67 | 68 | 69 | def get_current_iteration() -> int: 70 | try: 71 | return _current_iteration.get() 72 | except Exception: 73 | return 0 74 | 75 | 76 | class ToolRegistry: 77 | """Simple registry mapping string names to callable tools. 78 | 79 | Wraps callables with dspy.Tool lazily to keep registration straightforward. 80 | """ 81 | 82 | def __init__(self) -> None: 83 | self._functions: Dict[str, Callable[..., Any]] = {} 84 | self._listeners: List[Callable[[str, Dict[str, Any]], None]] = [] 85 | # sandbox roots: absolute directories allowed for file tools 86 | self._sandbox_roots: List[Path] = [] 87 | 88 | def add_listener(self, listener: Callable[[str, Dict[str, Any]], None]) -> None: 89 | """Subscribe to tool events. 
90 | 91 | Listener signature: (event: str, payload: Dict[str, Any]) -> None 92 | 93 | Events emitted: 94 | - "tool_call": before a tool is executed 95 | payload keys: tool, agent, iteration, args_preview, kwargs_preview 96 | - "tool_result": after a tool returns 97 | payload keys: tool, agent, iteration, result_type, size 98 | - "tool_error": when a tool raises an exception 99 | payload keys: tool, agent, iteration, error 100 | """ 101 | if listener not in self._listeners: 102 | self._listeners.append(listener) 103 | 104 | def set_sandbox_roots(self, roots: List[str]) -> None: 105 | self._sandbox_roots = [Path(os.path.expanduser(r)).resolve() for r in roots] 106 | 107 | def _in_sandbox(self, path: Path) -> bool: 108 | if not self._sandbox_roots: 109 | return True 110 | try: 111 | p = path.resolve() 112 | except Exception: 113 | return False 114 | return any( 115 | str(p).startswith(str(root) + os.sep) or p == root 116 | for root in self._sandbox_roots 117 | ) 118 | 119 | def remove_listener(self, listener: Callable[[str, Dict[str, Any]], None]) -> None: 120 | try: 121 | self._listeners.remove(listener) 122 | except ValueError: 123 | pass 124 | 125 | def _emit(self, event: str, payload: Dict[str, Any]) -> None: 126 | for listener in list(self._listeners): 127 | try: 128 | listener(event, payload) 129 | except Exception: 130 | # Never let listeners break tool execution 131 | pass 132 | 133 | def register(self, name: str, fn: Callable[..., Any]) -> None: 134 | if not callable(fn): 135 | raise TypeError("Tool must be callable") 136 | 137 | # Wrap with logging 138 | @wraps(fn) 139 | def _wrapped(*args: Any, **kwargs: Any) -> Any: 140 | agent = get_current_agent() 141 | iteration = get_current_iteration() 142 | try: 143 | preview_args = str(args)[:200] 144 | preview_kwargs = str(kwargs)[:200] 145 | tools_logger.info( 146 | "tool_call", 147 | agent=agent, 148 | tool=name, 149 | iteration=iteration, 150 | args_preview=preview_args, 151 | kwargs_preview=preview_kwargs, 152 | ) 153 | self._emit( 154 | "tool_call", 155 | { 156 | "tool": name, 157 | "agent": agent, 158 | "iteration": iteration, 159 | "args_preview": preview_args, 160 | "kwargs_preview": preview_kwargs, 161 | }, 162 | ) 163 | except Exception: 164 | pass 165 | try: 166 | result = fn(*args, **kwargs) 167 | except Exception as e: 168 | try: 169 | tools_logger.info( 170 | "tool_error", 171 | agent=agent, 172 | tool=name, 173 | iteration=iteration, 174 | error=str(e), 175 | ) 176 | self._emit( 177 | "tool_error", 178 | { 179 | "tool": name, 180 | "agent": agent, 181 | "iteration": iteration, 182 | "error": str(e), 183 | }, 184 | ) 185 | except Exception: 186 | pass 187 | raise 188 | try: 189 | result_repr = type(result).__name__ 190 | size_hint = None 191 | if isinstance(result, str): 192 | size_hint = len(result) 193 | tools_logger.info( 194 | "tool_result", 195 | agent=agent, 196 | tool=name, 197 | iteration=iteration, 198 | result_type=result_repr, 199 | size=size_hint, 200 | ) 201 | self._emit( 202 | "tool_result", 203 | { 204 | "tool": name, 205 | "agent": agent, 206 | "iteration": iteration, 207 | "result_type": result_repr, 208 | "size": size_hint, 209 | }, 210 | ) 211 | except Exception: 212 | pass 213 | return result 214 | 215 | # Preserve original callable signature for DSPy Tool introspection 216 | try: 217 | _wrapped.__signature__ = inspect.signature(fn) # type: ignore[attr-defined] 218 | except Exception: 219 | pass 220 | 221 | self._functions[name] = _wrapped 222 | 223 | def get(self, name: str) -> Callable[..., Any]: 224 | 
return self._functions[name] 225 | 226 | def names(self) -> List[str]: 227 | return sorted(self._functions.keys()) 228 | 229 | def build_dspy_tools(self, names: List[str]) -> List[dspy.Tool]: # type: ignore[name-defined] 230 | tools: List[dspy.Tool] = [] # type: ignore[name-defined] 231 | for n in names: 232 | fn = self._functions.get(n) 233 | if fn is None: 234 | raise KeyError(f"Unknown tool '{n}'") 235 | tools.append(dspy.Tool(fn)) # type: ignore[attr-defined] 236 | return tools 237 | 238 | 239 | # Global registry instance and built-in example tools 240 | registry = ToolRegistry() 241 | 242 | 243 | def math_eval(expression: str) -> str: 244 | """Evaluate a simple Python math expression safely.""" 245 | try: 246 | result = eval(expression, {"__builtins__": {}}, {}) 247 | return str(result) 248 | except Exception as e: 249 | return f"error: {e}" 250 | 251 | 252 | def word_count(text: str) -> str: 253 | """Count words in the input text.""" 254 | return str(len(text.split())) 255 | 256 | 257 | def ascii_to_png(text: str) -> dspy.Image: 258 | """Render ASCII text to PNG for internal analysis. 259 | 260 | Returns a data-URL (base64) image suitable for DSPy Image fields. 261 | """ 262 | converter = AsciiToPngConverter(font_size=16, padding=20) 263 | img = converter.convert_text_to_image_hq(text, scale_factor=2) 264 | buffer = BytesIO() 265 | img.save(buffer, format="PNG") 266 | png_bytes = buffer.getvalue() 267 | b64 = base64.b64encode(png_bytes).decode("ascii") 268 | tools_logger.info("ascii_to_png", rendered_bytes=len(png_bytes)) 269 | return dspy.Image(url=f"data:image/png;base64,{b64}") 270 | 271 | 272 | def list_files(directory: str) -> str: 273 | """Return newline-separated absolute file paths inside a directory. 274 | 275 | If `directory` is a file, returns the absolute file path. 276 | Expands '~' and resolves relative paths. 277 | """ 278 | try: 279 | base = Path(os.path.expanduser(directory)).resolve() 280 | except Exception as e: 281 | return f"error: {e}" 282 | 283 | if not base.exists(): 284 | return f"error: path does not exist: {base}" 285 | if base.is_file(): 286 | # sandbox check for file 287 | if not registry._in_sandbox(base): # type: ignore[attr-defined] 288 | return f"error: access denied by sandbox: {base}" 289 | return str(base) 290 | 291 | try: 292 | files = [] 293 | for p in base.iterdir(): 294 | if p.is_file(): 295 | rp = p.resolve() 296 | if registry._in_sandbox(rp): # type: ignore[attr-defined] 297 | files.append(str(rp)) 298 | return "\n".join(files) 299 | except Exception as e: 300 | return f"error: {e}" 301 | 302 | 303 | def read_file_attachment(path: str) -> Attachments: 304 | """Return an Attachments object for the given file path. 305 | 306 | This integrates with DSPy by returning an "Attachments"-typed object that 307 | models can consume via signatures that accept Attachments. 308 | """ 309 | p = Path(os.path.expanduser(path)).resolve() 310 | # sandbox check 311 | if not registry._in_sandbox(p): # type: ignore[attr-defined] 312 | raise PermissionError(f"access denied by sandbox: {p}") 313 | if not p.exists() or not p.is_file(): 314 | raise FileNotFoundError(f"File not found: {p}") 315 | return Attachments(str(p)) # type: ignore[call-arg] 316 | 317 | 318 | def write_file(path: str, content: str) -> str: 319 | """Write text content to a file (UTF-8). 
Returns absolute path or error.""" 320 | try: 321 | p = Path(os.path.expanduser(path)).resolve() 322 | # sandbox check 323 | if not registry._in_sandbox(p): # type: ignore[attr-defined] 324 | return f"error: access denied by sandbox: {p}" 325 | p.parent.mkdir(parents=True, exist_ok=True) 326 | p.write_text(content, encoding="utf-8") 327 | return str(p) 328 | except Exception as e: 329 | return f"error: {e}" 330 | 331 | 332 | # Register built-ins 333 | registry.register("math_eval", math_eval) 334 | registry.register("word_count", word_count) 335 | registry.register("ascii_to_png", ascii_to_png) 336 | registry.register("list_files", list_files) 337 | registry.register("read_file_attachment", read_file_attachment) 338 | registry.register("write_file", write_file) 339 | -------------------------------------------------------------------------------- /awesome_dspy_agents/patterns/addition_by_subtraction/pattern.py: -------------------------------------------------------------------------------- 1 | from pathlib import Path 2 | from typing import Any, Callable, Dict, List, Optional 3 | 4 | import dspy # type: ignore 5 | 6 | from awesome_dspy_agents.config import AppConfig, build_lm, load_config 7 | from awesome_dspy_agents.logging_setup import get_logger 8 | from awesome_dspy_agents.patterns.interface import AgentPattern 9 | from awesome_dspy_agents.tools.registry import (registry, reset_current_agent, 10 | reset_current_iteration, 11 | set_current_agent, 12 | set_current_iteration) 13 | 14 | from .signatures import AdditionAgentSignature, SubtractionAgentSignature 15 | 16 | llm_logger = get_logger("mad.llm", "llm_calls.log", max_bytes=2_000_000, backup_count=3) 17 | 18 | 19 | class AdditionModule(dspy.Module): 20 | def __init__( 21 | self, 22 | persona: str = "", 23 | lm: Optional[dspy.LM] = None, 24 | module_type: str = "predict", 25 | tool_names: Optional[List[str]] = None, 26 | react_max_iters: int = 3, 27 | ): 28 | super().__init__() 29 | self.persona = persona 30 | self.module_type = module_type 31 | self.tool_names = tool_names or [] 32 | self.react_max_iters = react_max_iters 33 | 34 | if module_type == "predict": 35 | self.predict = dspy.Predict(AdditionAgentSignature) 36 | elif module_type == "chain_of_thought": 37 | self.predict = dspy.ChainOfThought(AdditionAgentSignature) 38 | elif module_type == "react": 39 | tools = registry.build_dspy_tools(self.tool_names) 40 | llm_logger.info( 41 | "abs_react_tools", role="addition", tools=[str(t) for t in tools] 42 | ) 43 | self.predict = dspy.ReAct( 44 | AdditionAgentSignature, tools=tools, max_iters=self.react_max_iters 45 | ) 46 | else: 47 | raise ValueError(f"Unsupported module_type: {module_type}") 48 | 49 | if lm is not None: 50 | try: 51 | self.set_lm(lm) 52 | except Exception: 53 | self.predict.set_lm(lm) 54 | 55 | def forward(self, context: str, instruction: str, history: str): 56 | llm_logger.info( 57 | "predict", 58 | role="addition", 59 | module=self.module_type, 60 | ctx_len=len(context), 61 | hist_len=len(history), 62 | ) 63 | token = set_current_agent("addition") 64 | try: 65 | out = self.predict( 66 | context=context, 67 | instruction=instruction, 68 | history=history, 69 | persona=self.persona, 70 | ) 71 | finally: 72 | reset_current_agent(token) 73 | return out 74 | 75 | 76 | class SubtractionModule(dspy.Module): 77 | def __init__( 78 | self, 79 | persona: str = "", 80 | lm: Optional[dspy.LM] = None, 81 | module_type: str = "predict", 82 | tool_names: Optional[List[str]] = None, 83 | react_max_iters: int = 3, 84 | ): 85 | 
super().__init__() 86 | self.persona = persona 87 | self.module_type = module_type 88 | self.tool_names = tool_names or [] 89 | self.react_max_iters = react_max_iters 90 | 91 | if module_type == "predict": 92 | self.predict = dspy.Predict(SubtractionAgentSignature) 93 | elif module_type == "chain_of_thought": 94 | self.predict = dspy.ChainOfThought(SubtractionAgentSignature) 95 | elif module_type == "react": 96 | tools = registry.build_dspy_tools(self.tool_names) 97 | llm_logger.info( 98 | "abs_react_tools", role="subtraction", tools=[str(t) for t in tools] 99 | ) 100 | self.predict = dspy.ReAct( 101 | SubtractionAgentSignature, tools=tools, max_iters=self.react_max_iters 102 | ) 103 | else: 104 | raise ValueError(f"Unsupported module_type: {module_type}") 105 | 106 | if lm is not None: 107 | try: 108 | self.set_lm(lm) 109 | except Exception: 110 | self.predict.set_lm(lm) 111 | 112 | def forward( 113 | self, context: str, instruction: str, history: str, candidate_response: str 114 | ): 115 | llm_logger.info( 116 | "predict", 117 | role="subtraction", 118 | module=self.module_type, 119 | ctx_len=len(context), 120 | hist_len=len(history), 121 | ) 122 | token = set_current_agent("subtraction") 123 | try: 124 | out = self.predict( 125 | context=context, 126 | instruction=instruction, 127 | history=history, 128 | candidate_response=candidate_response, 129 | persona=self.persona, 130 | ) 131 | finally: 132 | reset_current_agent(token) 133 | return out 134 | 135 | 136 | class ABSFramework(dspy.Module): 137 | """Addition-by-Subtraction collaboration framework.""" 138 | 139 | def __init__( 140 | self, 141 | max_iterations: int = 2, 142 | early_exit: bool = True, 143 | addition_persona: str = "", 144 | subtraction_persona: str = "", 145 | addition_lm: Optional[dspy.LM] = None, 146 | subtraction_lm: Optional[dspy.LM] = None, 147 | addition_module_type: str = "predict", 148 | subtraction_module_type: str = "predict", 149 | addition_tools: Optional[List[str]] = None, 150 | subtraction_tools: Optional[List[str]] = None, 151 | react_max_iters: int = 6, 152 | on_iteration: Optional[Callable[[int, Dict[str, Any], str], None]] = None, 153 | ): 154 | super().__init__() 155 | self.max_iterations = max_iterations 156 | self.early_exit = early_exit 157 | self.on_iteration = on_iteration 158 | 159 | self.addition = AdditionModule( 160 | persona=addition_persona, 161 | lm=addition_lm, 162 | module_type=addition_module_type, 163 | tool_names=addition_tools, 164 | react_max_iters=react_max_iters, 165 | ) 166 | self.subtraction = SubtractionModule( 167 | persona=subtraction_persona, 168 | lm=subtraction_lm, 169 | module_type=subtraction_module_type, 170 | tool_names=subtraction_tools, 171 | react_max_iters=react_max_iters, 172 | ) 173 | 174 | self.history: List[Dict[str, Any]] = [] 175 | 176 | def format_history(self) -> str: 177 | if not self.history: 178 | return "No conversation yet." 
179 | formatted = [] 180 | for i, ex in enumerate(self.history, 1): 181 | formatted.append(f"\n--- Iteration {i} ---") 182 | if "addition" in ex: 183 | formatted.append(f"Addition: {ex['addition']}") 184 | if "subtraction" in ex: 185 | formatted.append(f"Subtraction: {ex['subtraction']}") 186 | if "feedback" in ex: 187 | formatted.append(f"Feedback: {ex['feedback']}") 188 | return "\n".join(formatted) 189 | 190 | def forward(self, context: str, instruction: str): 191 | self.history = [] 192 | final_response = "" 193 | 194 | H_context = context 195 | H_instruction = instruction 196 | 197 | prev_R = None 198 | for iteration in range(1, self.max_iterations + 1): 199 | hist_str = self.format_history() 200 | iter_token = set_current_iteration(iteration) 201 | try: 202 | # Addition produces candidate 203 | add_out = self.addition( 204 | context=H_context, instruction=H_instruction, history=hist_str 205 | ) 206 | 207 | # Subtraction refines 208 | sub_out = self.subtraction( 209 | context=H_context, 210 | instruction=H_instruction, 211 | history=hist_str, 212 | candidate_response=add_out.candidate_response, 213 | ) 214 | 215 | exchange = { 216 | "addition": add_out.candidate_response, 217 | "addition_reasoning": add_out.reasoning, 218 | "subtraction": sub_out.refined_response, 219 | "feedback": sub_out.feedback, 220 | } 221 | self.history.append(exchange) 222 | 223 | # callback for TUI 224 | if self.on_iteration is not None: 225 | try: 226 | self.on_iteration(iteration, exchange, self.format_history()) 227 | except Exception: 228 | pass 229 | 230 | # Early exit if no changes 231 | if ( 232 | prev_R is not None 233 | and prev_R.strip() == add_out.candidate_response.strip() 234 | and self.early_exit 235 | ): 236 | final_response = add_out.candidate_response 237 | break 238 | 239 | prev_R = add_out.candidate_response 240 | H_context = context 241 | H_instruction = instruction 242 | finally: 243 | reset_current_iteration(iter_token) 244 | 245 | # Final output: prefer last refined response if exists, else last candidate 246 | if self.history: 247 | last = self.history[-1] 248 | final_response = last.get("subtraction") or last.get("addition") or "" 249 | 250 | return dspy.Prediction( 251 | final_answer=final_response, 252 | justification="Refined via Addition-by-Subtraction iterative collaboration.", 253 | history=self.history, 254 | iterations_used=len(self.history), 255 | adaptive_break_triggered=self.early_exit 256 | and len(self.history) < self.max_iterations, 257 | ) 258 | 259 | 260 | class AdditionBySubtractionPattern(AgentPattern): 261 | name = "addition_by_subtraction" 262 | 263 | def __init__(self) -> None: 264 | self._root = Path(__file__).resolve().parent 265 | 266 | def describe(self) -> str: 267 | readme = self._root / "README.md" 268 | if readme.exists(): 269 | try: 270 | return readme.read_text(encoding="utf-8") 271 | except Exception: 272 | return "Addition-by-Subtraction collaboration pattern." 273 | return "Addition-by-Subtraction collaboration pattern." 
274 | 275 | def default_config_path(self) -> Optional[Path]: 276 | cfg = self._root / "config.yaml" 277 | return cfg if cfg.exists() else None 278 | 279 | def available_configs(self) -> List[Path]: 280 | configs: List[Path] = [] 281 | cfg = self.default_config_path() 282 | if cfg: 283 | configs.append(cfg) 284 | scenarios = self._root / "scenarios" 285 | if scenarios.exists(): 286 | for p in scenarios.glob("*.yaml"): 287 | configs.append(p) 288 | return configs 289 | 290 | def available_tools(self) -> List[str]: 291 | cfg_path = self.default_config_path() 292 | try: 293 | if cfg_path: 294 | cfg = load_config(str(cfg_path)) 295 | tools = set() 296 | add = cfg.agents.get("addition") 297 | sub = cfg.agents.get("subtraction") 298 | if add: 299 | tools.update(add.tools) 300 | if sub: 301 | tools.update(sub.tools) 302 | return sorted(tools) 303 | except Exception: 304 | pass 305 | return [] 306 | 307 | def available_scripts(self) -> Dict[str, str]: 308 | return {} 309 | 310 | def run( 311 | self, 312 | topic: str, 313 | config_path: Path, 314 | overrides: Optional[Dict[str, Any]] = None, 315 | on_iteration: Optional[Callable[[int, Dict[str, Any], str], None]] = None, 316 | ) -> Dict[str, Any]: 317 | # In ABS, interpret topic as the instruction; allow empty context by default 318 | cfg: AppConfig = load_config(str(config_path)) 319 | 320 | # Apply overrides for abs.* keys optionally 321 | if overrides: 322 | abs_over = overrides.get("abs") 323 | if isinstance(abs_over, dict): 324 | if "max_iterations" in abs_over: 325 | cfg.abs.max_iterations = int(abs_over["max_iterations"]) # type: ignore[assignment] 326 | if "early_exit" in abs_over: 327 | cfg.abs.early_exit = bool(abs_over["early_exit"]) # type: ignore[assignment] 328 | 329 | if cfg.default_lm is not None: 330 | dspy.configure(lm=build_lm(cfg.default_lm)) 331 | 332 | add_cfg = cfg.agents.get("addition") 333 | sub_cfg = cfg.agents.get("subtraction") 334 | 335 | add_lm = build_lm(add_cfg.lm) if add_cfg and add_cfg.lm else None 336 | sub_lm = build_lm(sub_cfg.lm) if sub_cfg and sub_cfg.lm else None 337 | 338 | framework = ABSFramework( 339 | max_iterations=cfg.abs.max_iterations, 340 | early_exit=cfg.abs.early_exit, 341 | addition_persona=(add_cfg.persona if add_cfg else ""), 342 | subtraction_persona=(sub_cfg.persona if sub_cfg else ""), 343 | addition_lm=add_lm, 344 | subtraction_lm=sub_lm, 345 | addition_module_type=(add_cfg.module_type if add_cfg else "predict"), 346 | subtraction_module_type=(sub_cfg.module_type if sub_cfg else "predict"), 347 | addition_tools=(add_cfg.tools if add_cfg else []), 348 | subtraction_tools=(sub_cfg.tools if sub_cfg else []), 349 | ) 350 | 351 | if on_iteration is not None: 352 | framework.on_iteration = on_iteration 353 | 354 | # For simplicity, treat topic as instruction; context is empty 355 | final = framework(context="", instruction=topic) 356 | 357 | return { 358 | "final_answer": final.final_answer, 359 | "justification": final.justification, 360 | "iterations_used": final.iterations_used, 361 | "adaptive_break_triggered": final.adaptive_break_triggered, 362 | "history": final.history, 363 | } 364 | 365 | 366 | def get_pattern() -> AgentPattern: 367 | return AdditionBySubtractionPattern() 368 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | # Awesome DSPy Agents 2 | [![Python Version](https://img.shields.io/badge/python-3.12+-blue.svg)](https://www.python.org/downloads/) 3 | 
[![Poetry](https://img.shields.io/badge/poetry-1.8.2+-blue.svg)](https://python-poetry.org/) 4 | [![License: MIT](https://img.shields.io/badge/License-MIT-yellow.svg)](https://opensource.org/licenses/MIT) 5 | [![Buy Me a Coffee](https://img.shields.io/badge/Buy%20Me%20a%20Coffee-orange?logo=buy-me-a-coffee)](https://buymeacoffee.com/mike_pavlukhin) 6 | 7 | A collection of multi-agent systems implemented with the [DSPy](https://github.com/stanfordnlp/dspy) framework. 8 | 9 | ## Installation 10 | 11 | ### Prerequisites 12 | 13 | - Python 3.12 or higher 14 | - Git 15 | 16 | ### Setup 17 | 18 | 1. **Clone the repository:** 19 | ```bash 20 | git clone https://github.com/Archelunch/awesome-dspy-agents 21 | cd awesome-dspy-agents 22 | ``` 23 | 24 | 2. **Install Poetry (if not already installed):** 25 | ```bash 26 | pip install poetry 27 | ``` 28 | 29 | 3. **Install dependencies:** 30 | ```bash 31 | poetry install 32 | ``` 33 | 34 | 4. **Set up environment variables:** 35 | Set your API keys for the language model providers you want to use: 36 | ```bash 37 | export GEMINI_API_KEY=your_gemini_api_key_here 38 | export OPENAI_API_KEY=your_openai_api_key_here 39 | # Add other provider keys as needed 40 | ``` 41 | 42 | 5. **Verify installation:** 43 | ```bash 44 | poetry run dspy-agents --help 45 | ``` 46 | 47 | 6. **Optional: Install shell completion:** 48 | ```bash 49 | poetry run dspy-agents --install-completion 50 | ``` 51 | 52 | ## Table of Contents 53 | 54 | - [Awesome DSPy Agents](#awesome-dspy-agents) 55 | - [Installation](#installation) 56 | - [Prerequisites](#prerequisites) 57 | - [Setup](#setup) 58 | - [Table of Contents](#table-of-contents) 59 | - [Available Patterns](#available-patterns) 60 | - [Debates](#debates) 61 | - [Addition by Subtraction](#addition-by-subtraction) 62 | - [Available Tools](#available-tools) 63 | - [Using the CLI](#using-the-cli) 64 | - [Configuration](#configuration) 65 | - [Examples](#examples) 66 | - [Addition-by-Subtraction Pattern](#addition-by-subtraction-pattern) 67 | - [Development Guide](#development-guide) 68 | - [Project layout](#project-layout) 69 | - [Common practices](#common-practices) 70 | - [Adding a new pattern](#adding-a-new-pattern) 71 | - [Adding tools to the global registry](#adding-tools-to-the-global-registry) 72 | - [Configuration](#configuration-1) 73 | - [Observability \& logs](#observability--logs) 74 | - [Roadmap ideas](#roadmap-ideas) 75 | 76 | ## Available Patterns 77 | 78 | These are the built-in patterns. Use the CLI to explore and run them. 79 | 80 | | Pattern | Description | Strengths | Weaknesses | 81 | | --- | --- | --- | --- | 82 | | debate | Multi‑Agent Debate with a Judge. Two agents argue iteratively; an optional judge evaluates progress and extracts the final answer. | Strong adversarial reasoning; adaptive early stop; ReAct tool support. | Higher token usage; needs careful judge configuration. | 83 | | addition_by_subtraction | Addition‑by‑Subtraction collaboration. Addition expands details; Subtraction removes redundancy and feeds back; early exit when stable. | Concise refined answers; low iteration count by default (M≤2); ReAct tool support. | Can miss alternative directions; relies on good subtraction feedback. 
| 84 | 85 | Related work: 86 | #### Debates 87 | Encouraging Divergent Thinking in Large Language Models through Multi-Agent Debate: https://arxiv.org/abs/2305.19118 88 | 89 | #### Addition by Subtraction 90 | (Perhaps) Beyond Human Translation: Harnessing Multi-Agent Collaboration for Translating Ultra-Long Literary Texts: https://arxiv.org/abs/2305.19118 91 | 92 | ### Available Tools 93 | 94 | Tools are available to ReAct modules across patterns via a shared registry. List tools and inspect details: 95 | 96 | ```bash 97 | poetry run dspy-agents tools 98 | poetry run dspy-agents tools --describe read_file_attachment 99 | ``` 100 | 101 | - math_eval: Evaluate a simple Python math expression safely; returns string. 102 | - word_count: Count words in text; returns string number. 103 | - ascii_to_png: Render ASCII text into a PNG (`dspy.Image`) for visual reasoning. 104 | - list_files: List absolute file paths under a directory (sandboxed). 105 | - read_file_attachment: Return an `Attachments` object for a local file (sandboxed). 106 | - write_file: Write text to a local file path; returns absolute path (sandboxed). 107 | 108 | Sandboxing: File tools are restricted to allowed directories. Use `--allow-path /abs/dir` to opt‑in per run (can repeat). 109 | 110 | ## Using the CLI 111 | 112 | Install and run with Poetry: 113 | 114 | ```bash 115 | poetry install 116 | poetry run dspy-agents --help 117 | poetry run dspy-agents --install-completion # optional shell completion 118 | ``` 119 | 120 | Set provider credentials via environment variables (example): 121 | 122 | ```bash 123 | export GEMINI_API_KEY=... # for Gemini 124 | export OPENAI_API_KEY=... # for OpenAI 125 | ``` 126 | 127 | Discover: 128 | 129 | ```bash 130 | poetry run dspy-agents list 131 | poetry run dspy-agents describe debate 132 | poetry run dspy-agents configs debate 133 | poetry run dspy-agents tools 134 | poetry run dspy-agents tools --pattern debate 135 | poetry run dspy-agents tools --describe ascii_to_png 136 | ``` 137 | 138 | Run patterns: 139 | 140 | ```bash 141 | # default config of the pattern 142 | poetry run dspy-agents run debate "Is RLHF always beneficial?" 143 | 144 | # custom config 145 | poetry run dspy-agents run debate -c awesome_dspy_agents/patterns/debate/config.yaml "Debate topic" 146 | 147 | # override nested config values at runtime (typed casting: bool/int/float) 148 | poetry run dspy-agents run debate "Topic" --set debate.max_iterations=3 --set debate.debate_level=2 --set judge.module_type=react 149 | 150 | # interactive guided run with arrow-key selection 151 | poetry run dspy-agents interactive 152 | ``` 153 | 154 | JSON output and session save/replay: 155 | 156 | ```bash 157 | # Emit machine-readable JSON and save the full session 158 | poetry run dspy-agents run debate "Is RLHF always beneficial?" --json --save runs/rlhf.json 159 | 160 | # Replay a saved session locally without model calls 161 | poetry run dspy-agents replay runs/rlhf.json 162 | ``` 163 | 164 | Compare patterns on the same topic: 165 | 166 | ```bash 167 | poetry run dspy-agents compare debate addition_by_subtraction "What is chain-of-thought?" --metric jaccard 168 | ``` 169 | 170 | Version and sandbox: 171 | 172 | ```bash 173 | poetry run dspy-agents version 174 | poetry run dspy-agents run addition_by_subtraction "Summarize file" --allow-path . --set abs.max_iterations=2 175 | ``` 176 | 177 | During runs you will see per-iteration exchanges (debate or addition/subtraction), optional judge evaluations, and a final decision. 
Tool usage is summarized under each iteration.
178 | 
179 | ## Configuration
180 | 
181 | Configuration is layered and typed:
182 | 
183 | 1. Pattern default config (e.g., `patterns/debate/config.yaml`).
184 | 2. User-provided file via `-c/--config`.
185 | 3. CLI overrides via `--set a.b=value` (auto‑casts `true/false`, integers, and floats).
186 | 4. Environment variable expansion inside YAML values: `${OPENAI_API_KEY}`.
187 | 
188 | Minimal examples:
189 | 
190 | ```yaml
191 | # debate/config.yaml (excerpt)
192 | default_lm:
193 |   provider: gemini
194 |   model: gemini-2.5-flash-preview-09-2025
195 |   api_key_env: GEMINI_API_KEY
196 | 
197 | agents:
198 |   affirmative:
199 |     persona: "Optimistic, evidence-driven."
200 |     module_type: react
201 |     tools: ["ascii_to_png", "read_file_attachment", "list_files"]
202 |   negative:
203 |     persona: "Rigorous skeptic."
204 |     module_type: react
205 |     tools: ["ascii_to_png", "read_file_attachment", "list_files"]
206 | 
207 | judge:
208 |   module_type: react
209 |   tools: ["write_file"]
210 | 
211 | debate:
212 |   max_iterations: 5
213 |   debate_level: 2
214 |   adaptive_break: true
215 | ```
216 | 
217 | ```yaml
218 | # addition_by_subtraction/config.yaml (excerpt)
219 | default_lm:
220 |   provider: gemini
221 |   model: gemini-2.5-flash-preview-09-2025
222 |   api_key_env: GEMINI_API_KEY
223 | 
224 | agents:
225 |   addition:
226 |     persona: "Expand relevant information and synthesize details."
227 |     module_type: react
228 |     tools: ["ascii_to_png", "read_file_attachment", "list_files", "math_eval", "word_count"]
229 |   subtraction:
230 |     persona: "Remove redundancy and provide clear feedback."
231 |     module_type: react
232 |     tools: ["ascii_to_png", "read_file_attachment", "list_files", "math_eval", "word_count"]
233 | 
234 | abs:
235 |   max_iterations: 2
236 |   early_exit: true
237 | ```
238 | 
239 | Tips:
240 | 
241 | - Point to a custom config: `-c path/to/config.yaml`.
242 | - Override nested values at runtime (typed): `--set debate.max_iterations=3 --set judge.module_type=react`.
243 | - Per‑agent LM: set an `agents.<agent_name>.lm` block with `provider/model/api_base/api_key(_env)`.
244 | - File tools are sandboxed; add `--allow-path /abs/dir` to enable local file access.
245 | 
246 | ## Examples
247 | 
248 | Run Debate with a custom judge and fewer iterations:
249 | 
250 | ```bash
251 | poetry run dspy-agents run debate "When to use CoT?" \
252 |   --set debate.max_iterations=3 \
253 |   --set judge.module_type=react
254 | ```
255 | 
256 | Run ABS with early exit disabled and save JSON:
257 | 
258 | ```bash
259 | poetry run dspy-agents run addition_by_subtraction "Summarize the paper" \
260 |   --set abs.early_exit=false --json --save runs/abs.json
261 | ```
262 | 
263 | Use a local file during a run (sandboxed):
264 | 
265 | ```bash
266 | poetry run dspy-agents run addition_by_subtraction "Summarize the attached doc" \
267 |   --allow-path "$PWD" \
268 |   --set agents.addition.tools="[read_file_attachment]"
269 | ```
270 | 
271 | ### Addition-by-Subtraction Pattern
272 | 
273 | This collaboration uses only two agents: Addition (expands and aggregates relevant details) and Subtraction (removes redundancy and provides feedback). It iterates up to `abs.max_iterations` and exits early when no further revision is needed.
274 | 
275 | - Default config: `awesome_dspy_agents/patterns/addition_by_subtraction/config.yaml`
276 | - Supports tools via ReAct (same registry as debate). Tools used are rendered in the TUI under each iteration.
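To make the control flow concrete, here is a minimal, self-contained sketch of the Addition/Subtraction loop. It is illustrative only: the two stand-in functions, the toy stability check, and the `run_abs` helper are hypothetical placeholders, not the package's `ABSFramework` (which lives in `patterns/addition_by_subtraction/pattern.py` and wires DSPy modules and ReAct tools).

```python
# Toy stand-ins for the Addition and Subtraction agents; the real
# implementation (ABSFramework) uses DSPy modules and ReAct tools instead.

def addition_agent(task: str, draft: str, feedback: str) -> str:
    """Expand the current draft with more detail (toy stand-in)."""
    return f"{draft} [expanded for: {task}; feedback: {feedback or 'none'}]"


def subtraction_agent(draft: str) -> tuple[str, str, bool]:
    """Trim redundancy and report whether another round is needed (toy stand-in)."""
    trimmed = " ".join(draft.split())   # collapse redundant whitespace
    needs_revision = len(trimmed) < 40  # toy check: keep expanding very short drafts
    feedback = "add more supporting detail" if needs_revision else ""
    return trimmed, feedback, needs_revision


def run_abs(task: str, max_iterations: int = 2, early_exit: bool = True) -> str:
    """Run the Addition-by-Subtraction loop: expand, trim, feed back, exit early."""
    draft, feedback = "", ""
    for iteration in range(1, max_iterations + 1):
        draft = addition_agent(task, draft, feedback)
        draft, feedback, needs_revision = subtraction_agent(draft)
        print(f"[iteration {iteration}] draft={draft!r} feedback={feedback!r}")
        if early_exit and not needs_revision:
            break  # the draft has stabilized; no further revision requested
    return draft


if __name__ == "__main__":
    print(run_abs("Summarize the key ideas of the attached document"))
```

The real pattern follows the same shape: expand, trim, feed the Subtraction agent's feedback back to Addition, and stop early once no further revision is requested.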
277 | 
278 | Examples:
279 | 
280 | ```bash
281 | poetry run dspy-agents describe addition_by_subtraction
282 | poetry run dspy-agents configs addition_by_subtraction
283 | poetry run dspy-agents run addition_by_subtraction "Summarize the key ideas from the attached document"
284 | # Override ABS parameters
285 | poetry run dspy-agents run addition_by_subtraction "Instruction" --set abs.max_iterations=2 --set abs.early_exit=true
286 | ```
287 | 
288 | The TUI displays two columns, Addition and Subtraction, plus a Feedback panel for each iteration. Tool usage events are summarized below the panels.
289 | 
290 | Note: early exit happens when subsequent additions stabilize. The default maximum number of iterations is M=2 (configurable via `abs.max_iterations`).
291 | 
292 | 
293 | ## Development Guide
294 | 
295 | This section documents how to extend and maintain the CLI and the pattern ecosystem.
296 | 
297 | ### Project layout
298 | 
299 | ```
300 | awesome_dspy_agents/
301 |   cli.py                    # CLI entrypoint (Typer + Rich)
302 |   config.py                 # AppConfig and LM settings
303 |   tools/
304 |     registry.py             # Global tool registry (shared for all patterns)
305 |     ascii_to_png.py         # Example image tool
306 |   patterns/
307 |     interface.py            # AgentPattern protocol and discovery
308 |     debate/
309 |       pattern.py            # DebatePattern + MADFramework
310 |       signatures.py         # DSPy signatures
311 |       config.yaml           # Default config
312 |     addition_by_subtraction/
313 |       pattern.py            # Addition-by-Subtraction Pattern + ABSFramework
314 |       signatures.py         # DSPy signatures
315 |       config.yaml           # Default config
316 | ```
317 | 
318 | ### Common practices
319 | - Prefer typed configuration via `AppConfig` and validated YAML with env-var expansion (`${VAR}`).
320 | - Keep tool implementations deterministic and minimize side effects; log via `mad.tools`.
321 | - Keep per-pattern logic inside `patterns/<pattern_name>/pattern.py`; expose a `get_pattern()` factory.
322 | - Use Rich tables and panels for readable CLI output; avoid noisy logs by default.
323 | - Support per-agent LM configuration (provider/model/api_base/api_key) per DSPy conventions.
324 | 
325 | ### Adding a new pattern
326 | 1. Create a new folder under `awesome_dspy_agents/patterns/<your_pattern>/` with at least:
327 |    - `pattern.py`: implement your DSPy modules and wrap them in a class that implements `AgentPattern`.
328 |    - `config.yaml`: default configuration for the pattern (agents, judge, debate params, etc.).
329 |    - `signatures.py` as needed.
330 | 2. In `pattern.py`, implement:
331 |    - `class YourPattern(AgentPattern)` with:
332 |      - `name`: a unique string
333 |      - `describe(self) -> str`: short Markdown description
334 |      - `default_config_path(self) -> Path | None`
335 |      - `available_configs(self) -> Iterable[Path]`: include the default plus optional `scenarios/*.yaml`
336 |      - `available_tools(self) -> Iterable[str]`: the tool names used by default
337 |      - `available_scripts(self) -> dict[str, str]`: mapping of script name to description (MIPRO/GEPA)
338 |      - `run(self, topic, config_path, overrides=None, on_iteration=None) -> dict`
339 |        - Configure the default LM if provided; construct your DSPy program; call `on_iteration` after each step.
340 |    - `def get_pattern() -> AgentPattern: return YourPattern()`
341 | 3. The CLI discovers it automatically via `find_patterns()` if `pattern.py` exports `get_pattern()`; a minimal skeleton is shown below.
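Putting those pieces together, a minimal `pattern.py` could look like the following skeleton. This is a sketch, not code from the repository: the `EchoPattern` class and its placeholder return values are hypothetical; only the `AgentPattern` import and the method names/signatures follow the protocol listed above.

```python
from pathlib import Path
from typing import Any, Callable, Dict, Iterable, List, Optional

from awesome_dspy_agents.patterns.interface import AgentPattern


class EchoPattern(AgentPattern):
    name = "echo"  # unique pattern name used by the CLI

    def __init__(self) -> None:
        self._root = Path(__file__).resolve().parent

    def describe(self) -> str:
        return "Echo pattern: returns the topic unchanged (placeholder)."

    def default_config_path(self) -> Optional[Path]:
        cfg = self._root / "config.yaml"
        return cfg if cfg.exists() else None

    def available_configs(self) -> List[Path]:
        cfg = self.default_config_path()
        return [cfg] if cfg else []

    def available_tools(self) -> Iterable[str]:
        return []

    def available_scripts(self) -> Dict[str, str]:
        return {}

    def run(
        self,
        topic: str,
        config_path: Path,
        overrides: Optional[Dict[str, Any]] = None,
        on_iteration: Optional[Callable[[int, Dict[str, Any], str], None]] = None,
    ) -> Dict[str, Any]:
        # Build and invoke your DSPy program here; see the run sketch below.
        return {"final_answer": topic, "justification": "placeholder"}


def get_pattern() -> AgentPattern:
    return EchoPattern()
```

Drop it into `awesome_dspy_agents/patterns/<your_pattern>/pattern.py` next to a `config.yaml`, and it should appear in `poetry run dspy-agents list`.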
342 | 343 | Example `run` implementation sketch: 344 | 345 | ```python 346 | def run(self, topic, config_path, overrides=None, on_iteration=None): 347 | cfg = load_config(str(config_path)) 348 | if cfg.default_lm: 349 | dspy.configure(lm=build_lm(cfg.default_lm)) 350 | # build modules based on cfg; call on_iteration(i, exchange, history) 351 | final = program(...) 352 | return {"final_answer": final.final_answer, "justification": final.justification} 353 | ``` 354 | 355 | ### Adding tools to the global registry 356 | 1. Implement a pure function in `awesome_dspy_agents/tools/*.py`. 357 | 2. Register it in `awesome_dspy_agents/tools/registry.py`: 358 | ```python 359 | from awesome_dspy_agents.tools.registry import registry 360 | 361 | def my_tool(arg1: str) -> str: 362 | return arg1.upper() 363 | 364 | registry.register("my_tool", my_tool) 365 | ``` 366 | 3. Reference the tool by name in pattern configs (for ReAct tools) or in code: 367 | ```yaml 368 | agents: 369 | affirmative: 370 | module_type: react 371 | tools: ["my_tool"] 372 | ``` 373 | 374 | Guidelines: 375 | - Keep tool I/O small; return primitives, `dspy.Image` for images or `Attachments` for other files. 376 | 377 | 378 | ### Configuration 379 | - Layering: default pattern config -> user-provided file (`-c`) -> CLI overrides (`--set a.b=val`). 380 | - Use env var placeholders in YAML (`${GEMINI_API_KEY}`) to avoid committing secrets. 381 | - For local models (Ollama, vLLM), set `api_base` and `api_key` in the config. 382 | 383 | ### Observability & logs 384 | - LLM calls are logged under `mad.llm` rotating files in `patterns/logs/`. 385 | - Tool calls are logged under `mad.tools` rotating files in the same directory. 386 | 387 | ### Roadmap ideas 388 | - [x] Add Addition-by-Subtraction pattern 389 | - [x] Add tools 390 | - [ ] Add MAPS pattern 391 | - [ ] Optional token streaming via `dspy.streamify` with a `--stream` flag. 392 | - [ ] Built-in optimization scripts (MIPRO/GEPA) surfaced in `scripts` command. 393 | - [ ] Integration with MLFlow 394 | - [ ] Session profiles (token counts, latency) and `--profile` flag. 395 | - [ ] Batch runs (`run-batch --topics file.txt --concurrency N`). 396 | - [ ] Add tests 397 | - [ ] More examples 398 | - [ ] More tools 399 | - [ ] Improve agents communication 400 | -------------------------------------------------------------------------------- /awesome_dspy_agents/tools/ascii_to_png.py: -------------------------------------------------------------------------------- 1 | """ 2 | ASCII to PNG Converter 3 | 4 | This module provides functionality to convert ASCII art text into PNG images. 5 | It uses PIL (Pillow) to render text with monospace fonts and save as image files. 6 | """ 7 | 8 | # pyright: reportMissingTypeStubs=false 9 | 10 | import os 11 | from pathlib import Path 12 | from typing import Optional, Tuple, Union 13 | 14 | from PIL import Image, ImageDraw, ImageFont # type: ignore 15 | 16 | 17 | class AsciiToPngConverter: 18 | """ 19 | A class to convert ASCII art text into PNG images. 20 | 21 | This converter handles proper spacing, font selection, and image sizing 22 | to maintain the visual structure of ASCII art when converted to images. 23 | """ 24 | 25 | def __init__( 26 | self, 27 | font_size: int = 16, 28 | background_color: Tuple[int, int, int] = (255, 255, 255), 29 | text_color: Tuple[int, int, int] = (0, 0, 0), 30 | padding: int = 20, 31 | char_width: Optional[int] = None, 32 | char_height: Optional[int] = None, 33 | ): 34 | """ 35 | Initialize the ASCII to PNG converter. 
36 | 37 | Args: 38 | font_size: Size of the font to use for rendering 39 | background_color: RGB tuple for background color (default: white) 40 | text_color: RGB tuple for text color (default: black) 41 | padding: Padding around the text in pixels 42 | char_width: Fixed character width in pixels (auto-calculated if None) 43 | char_height: Fixed character height in pixels (auto-calculated if None) 44 | """ 45 | self.font_size = font_size 46 | self.background_color = background_color 47 | self.text_color = text_color 48 | self.padding = padding 49 | # Initialize then compute concrete int dimensions 50 | self.char_width: int = 0 51 | self.char_height: int = 0 52 | self.font = self._load_font() 53 | self._calculate_char_dimensions() 54 | 55 | def _load_font(self) -> ImageFont.ImageFont: 56 | """ 57 | Load a monospace font for proper ASCII art rendering. 58 | 59 | Returns: 60 | ImageFont object for text rendering 61 | """ 62 | # Try to load common monospace fonts 63 | font_paths = [ 64 | # macOS fonts 65 | "/System/Library/Fonts/Menlo.ttc", 66 | "/System/Library/Fonts/Monaco.ttf", 67 | "/Library/Fonts/Monaco.ttf", 68 | # Linux fonts 69 | "/usr/share/fonts/truetype/dejavu/DejaVuSansMono.ttf", 70 | "/usr/share/fonts/truetype/liberation/LiberationMono-Regular.ttf", 71 | # Windows fonts 72 | "C:/Windows/Fonts/consola.ttf", 73 | "C:/Windows/Fonts/cour.ttf", 74 | ] 75 | 76 | for font_path in font_paths: 77 | if os.path.exists(font_path): 78 | try: 79 | return ImageFont.truetype(font_path, self.font_size) 80 | except OSError: 81 | continue 82 | 83 | # Fallback to default font 84 | try: 85 | return ImageFont.load_default() 86 | except OSError: 87 | # If all else fails, use PIL's basic font 88 | return ImageFont.load_default() 89 | 90 | def _calculate_char_dimensions(self): 91 | """ 92 | Calculate character dimensions for consistent spacing. 93 | """ 94 | # Use 'M' as it's typically the widest character in monospace fonts 95 | bbox = self.font.getbbox("M") 96 | self.char_width = bbox[2] - bbox[0] 97 | self.char_height = bbox[3] - bbox[1] 98 | 99 | def _calculate_text_dimensions(self, text: str) -> Tuple[int, int]: 100 | """ 101 | Calculate the dimensions needed to render the given text. 102 | 103 | Args: 104 | text: The ASCII text to measure 105 | 106 | Returns: 107 | Tuple of (width, height) in pixels 108 | """ 109 | lines = text.split("\n") 110 | 111 | # Calculate width based on the longest line using fixed character width 112 | max_chars = max(len(line) for line in lines) if lines else 0 113 | max_width = max_chars * self.char_width 114 | 115 | # Calculate height using fixed character height 116 | total_height = len(lines) * self.char_height 117 | 118 | return max_width, total_height 119 | 120 | def convert_text_to_png( 121 | self, 122 | ascii_text: str, 123 | output_path: str, 124 | image_width: Optional[int] = None, 125 | image_height: Optional[int] = None, 126 | ) -> str: 127 | """ 128 | Convert ASCII text to a PNG image. 
129 | 130 | Args: 131 | ascii_text: The ASCII art text to convert 132 | output_path: Path where the PNG file should be saved 133 | image_width: Optional fixed width for the image 134 | image_height: Optional fixed height for the image 135 | 136 | Returns: 137 | Path to the created PNG file 138 | """ 139 | # Remove line numbers if present (format: " 1|content") 140 | cleaned_lines = [] 141 | for line in ascii_text.split("\n"): 142 | # Check if line starts with line number format 143 | if "|" in line and line[: line.find("|")].strip().isdigit(): 144 | # Extract content after the pipe 145 | cleaned_lines.append(line[line.find("|") + 1 :]) 146 | else: 147 | cleaned_lines.append(line) 148 | 149 | cleaned_text = "\n".join(cleaned_lines) 150 | 151 | # Calculate image dimensions 152 | text_width, text_height = self._calculate_text_dimensions(cleaned_text) 153 | 154 | # Use provided dimensions or calculate from text 155 | img_width = image_width or (text_width + 2 * self.padding) 156 | img_height = image_height or (text_height + 2 * self.padding) 157 | 158 | # Create image with higher quality settings 159 | image = Image.new("RGB", (img_width, img_height), self.background_color) 160 | draw = ImageDraw.Draw(image) 161 | 162 | # Draw text character by character for precise positioning 163 | lines = cleaned_text.split("\n") 164 | 165 | for line_idx, line in enumerate(lines): 166 | y_pos = self.padding + (line_idx * self.char_height) 167 | 168 | for char_idx, char in enumerate(line): 169 | if char != " ": # Skip spaces for efficiency 170 | x_pos = self.padding + (char_idx * self.char_width) 171 | draw.text( 172 | (x_pos, y_pos), char, font=self.font, fill=self.text_color 173 | ) 174 | 175 | # Save the image 176 | image.save(output_path, "PNG") 177 | return output_path 178 | 179 | def convert_file_to_png( 180 | self, 181 | input_file: Union[str, Path], 182 | output_file: Optional[Union[str, Path]] = None, 183 | ) -> str: 184 | """ 185 | Convert an ASCII art file to a PNG image. 186 | 187 | Args: 188 | input_file: Path to the ASCII art text file 189 | output_file: Optional output path for PNG file. If not provided, 190 | will use the input filename with .png extension 191 | 192 | Returns: 193 | Path to the created PNG file 194 | """ 195 | input_path = Path(input_file) 196 | 197 | # Read the ASCII text 198 | with open(input_path, "r", encoding="utf-8") as f: 199 | ascii_text = f.read() 200 | 201 | # Determine output path 202 | if output_file is None: 203 | output_file = input_path.with_suffix(".png") 204 | 205 | return self.convert_text_to_png(ascii_text, str(output_file)) 206 | 207 | def batch_convert_directory( 208 | self, 209 | input_dir: Union[str, Path], 210 | output_dir: Optional[Union[str, Path]] = None, 211 | file_pattern: str = "*.txt", 212 | ) -> list: 213 | """ 214 | Convert all ASCII art files in a directory to PNG images. 215 | 216 | Args: 217 | input_dir: Directory containing ASCII art files 218 | output_dir: Directory to save PNG files. 
If None, saves in input_dir 219 | file_pattern: Glob pattern for input files (default: "*.txt") 220 | 221 | Returns: 222 | List of paths to created PNG files 223 | """ 224 | input_path = Path(input_dir) 225 | output_path = Path(output_dir) if output_dir else input_path 226 | 227 | # Create output directory if it doesn't exist 228 | output_path.mkdir(parents=True, exist_ok=True) 229 | 230 | created_files = [] 231 | 232 | # Process all matching files 233 | for input_file in input_path.glob(file_pattern): 234 | output_file = output_path / f"{input_file.stem}.png" 235 | try: 236 | result = self.convert_file_to_png(input_file, output_file) 237 | created_files.append(result) 238 | print(f"Converted: {input_file.name} -> {output_file.name}") 239 | except Exception as e: 240 | print(f"Error converting {input_file.name}: {e}") 241 | 242 | return created_files 243 | 244 | def convert_text_to_png_hq( 245 | self, 246 | ascii_text: str, 247 | output_path: str, 248 | scale_factor: int = 2, 249 | image_width: Optional[int] = None, 250 | image_height: Optional[int] = None, 251 | ) -> str: 252 | """ 253 | Convert ASCII text to a high-quality PNG image with scaling. 254 | 255 | Args: 256 | ascii_text: The ASCII art text to convert 257 | output_path: Path where the PNG file should be saved 258 | scale_factor: Scaling factor for higher resolution (default: 2) 259 | image_width: Optional fixed width for the image 260 | image_height: Optional fixed height for the image 261 | 262 | Returns: 263 | Path to the created PNG file 264 | """ 265 | # Create a temporary converter with scaled font size 266 | hq_font_size = self.font_size * scale_factor 267 | hq_padding = self.padding * scale_factor 268 | 269 | # Create temporary high-res converter 270 | temp_converter = AsciiToPngConverter( 271 | font_size=hq_font_size, 272 | background_color=self.background_color, 273 | text_color=self.text_color, 274 | padding=hq_padding, 275 | ) 276 | 277 | # Create temporary high-res image 278 | temp_path = output_path.replace(".png", "_temp_hq.png") 279 | temp_converter.convert_text_to_png( 280 | ascii_text, 281 | temp_path, 282 | image_width and image_width * scale_factor, 283 | image_height and image_height * scale_factor, 284 | ) 285 | 286 | # Load and resize with high-quality resampling 287 | high_res_image = Image.open(temp_path) 288 | if scale_factor > 1: 289 | # Calculate final size 290 | final_width = high_res_image.width // scale_factor 291 | final_height = high_res_image.height // scale_factor 292 | 293 | # Resize with high-quality filter 294 | final_image = high_res_image.resize( 295 | (final_width, final_height), Image.Resampling.LANCZOS 296 | ) 297 | else: 298 | final_image = high_res_image 299 | 300 | # Save final image 301 | final_image.save(output_path, "PNG") 302 | 303 | # Clean up temporary file 304 | os.remove(temp_path) 305 | 306 | return output_path 307 | 308 | def convert_text_to_image_hq( 309 | self, 310 | ascii_text: str, 311 | scale_factor: int = 3, 312 | image_width: Optional[int] = None, 313 | image_height: Optional[int] = None, 314 | ) -> Image.Image: 315 | """ 316 | Convert ASCII text to a high-quality PIL Image in memory without saving to disk. 
317 | 318 | Args: 319 | ascii_text: The ASCII art text to convert 320 | scale_factor: Scaling factor for higher resolution (default: 3) 321 | image_width: Optional fixed width for the image 322 | image_height: Optional fixed height for the image 323 | 324 | Returns: 325 | PIL Image object with high-quality rendering 326 | """ 327 | # Create a high-resolution converter 328 | hq_font_size = self.font_size * scale_factor 329 | hq_padding = self.padding * scale_factor 330 | 331 | # Create temporary high-res converter 332 | temp_converter = AsciiToPngConverter( 333 | font_size=hq_font_size, 334 | background_color=self.background_color, 335 | text_color=self.text_color, 336 | padding=hq_padding, 337 | ) 338 | 339 | # Remove line numbers if present (format: " 1|content") 340 | cleaned_lines = [] 341 | for line in ascii_text.split("\n"): 342 | # Check if line starts with line number format 343 | if "|" in line and line[: line.find("|")].strip().isdigit(): 344 | # Extract content after the pipe 345 | cleaned_lines.append(line[line.find("|") + 1 :]) 346 | else: 347 | cleaned_lines.append(line) 348 | 349 | cleaned_text = "\n".join(cleaned_lines) 350 | 351 | # Calculate image dimensions at high resolution 352 | text_width, text_height = temp_converter._calculate_text_dimensions( 353 | cleaned_text 354 | ) 355 | 356 | # Use provided dimensions or calculate from text 357 | img_width = ( 358 | image_width * scale_factor if image_width else (text_width + 2 * hq_padding) 359 | ) 360 | img_height = ( 361 | image_height * scale_factor 362 | if image_height 363 | else (text_height + 2 * hq_padding) 364 | ) 365 | 366 | # Create high-resolution image 367 | high_res_image = Image.new( 368 | "RGB", (img_width, img_height), self.background_color 369 | ) 370 | draw = ImageDraw.Draw(high_res_image) 371 | 372 | # Draw text character by character for precise positioning 373 | lines = cleaned_text.split("\n") 374 | 375 | for line_idx, line in enumerate(lines): 376 | y_pos = hq_padding + (line_idx * temp_converter.char_height) 377 | 378 | for char_idx, char in enumerate(line): 379 | if char != " ": # Skip spaces for efficiency 380 | x_pos = hq_padding + (char_idx * temp_converter.char_width) 381 | draw.text( 382 | (x_pos, y_pos), 383 | char, 384 | font=temp_converter.font, 385 | fill=self.text_color, 386 | ) 387 | 388 | # If scale factor > 1, resize with high-quality filter 389 | if scale_factor > 1: 390 | final_width = high_res_image.width // scale_factor 391 | final_height = high_res_image.height // scale_factor 392 | 393 | # Resize with high-quality filter 394 | final_image = high_res_image.resize( 395 | (final_width, final_height), Image.Resampling.LANCZOS 396 | ) 397 | else: 398 | final_image = high_res_image 399 | 400 | return final_image 401 | 402 | 403 | def create_png_from_ascii( 404 | ascii_text: str, 405 | output_path: str, 406 | font_size: int = 16, 407 | background_color: Tuple[int, int, int] = (255, 255, 255), 408 | text_color: Tuple[int, int, int] = (0, 0, 0), 409 | high_quality: bool = False, 410 | scale_factor: int = 2, 411 | ) -> str: 412 | """ 413 | Convenience function to convert ASCII text to PNG with default settings. 
414 | 415 | Args: 416 | ascii_text: The ASCII art text to convert 417 | output_path: Path where the PNG file should be saved 418 | font_size: Size of the font to use 419 | background_color: RGB tuple for background color 420 | text_color: RGB tuple for text color 421 | high_quality: Use high-quality rendering with scaling 422 | scale_factor: Scaling factor for high-quality mode 423 | 424 | Returns: 425 | Path to the created PNG file 426 | """ 427 | converter = AsciiToPngConverter( 428 | font_size=font_size, background_color=background_color, text_color=text_color 429 | ) 430 | 431 | if high_quality: 432 | return converter.convert_text_to_png_hq(ascii_text, output_path, scale_factor) 433 | else: 434 | return converter.convert_text_to_png(ascii_text, output_path) 435 | -------------------------------------------------------------------------------- /awesome_dspy_agents/cli.py: -------------------------------------------------------------------------------- 1 | # pyright: reportMissingTypeStubs=false 2 | import json 3 | from datetime import datetime 4 | from pathlib import Path 5 | from typing import Any, Dict, List, Optional 6 | 7 | import dspy # type: ignore 8 | import questionary # type: ignore 9 | import typer # type: ignore 10 | from rich.console import Console # type: ignore 11 | from rich.markdown import Markdown # type: ignore 12 | from rich.panel import Panel # type: ignore 13 | from rich.table import Table # type: ignore 14 | 15 | from awesome_dspy_agents import __version__, tui 16 | from awesome_dspy_agents.config import AppConfig, build_lm 17 | from awesome_dspy_agents.patterns.interface import find_patterns 18 | from awesome_dspy_agents.tools.registry import registry 19 | 20 | # --- Setup --- 21 | 22 | app = typer.Typer( 23 | name="dspy-agents", 24 | help="CLI to manage and run DSPy multi-agent patterns.", 25 | add_completion=True, 26 | rich_markup_mode="markdown", 27 | ) 28 | 29 | console = Console() 30 | PATTERNS_DIR = Path(__file__).parent / "patterns" 31 | 32 | 33 | # --- Helper Functions --- 34 | 35 | 36 | def _install_default_lm(cfg: AppConfig) -> None: 37 | if cfg.default_lm is None: 38 | return 39 | default_lm = build_lm(cfg.default_lm) 40 | dspy.configure(lm=default_lm) 41 | 42 | 43 | def _iteration_callback(iteration: int, exchange: dict, history: str) -> None: 44 | # When called the first time (no judge_eval), print the exchange table. 45 | # When called the second time (with judge_eval), only print the judge panel. 
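    # The exchange dict comes from the pattern's on_iteration callback (see
    # MADFramework.forward in patterns/debate/pattern.py): keys "affirmative",
    # "negative", their "*_reasoning" counterparts, and "judge_eval" once the
    # judge has evaluated the round.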
46 | if "judge_eval" not in exchange: 47 | table = Table(show_header=True, header_style="bold magenta") 48 | table.add_column("Iteration", justify="right", style="cyan", width=10) 49 | table.add_column("Affirmative", style="green") 50 | table.add_column("Negative", style="red") 51 | table.add_row( 52 | str(iteration), 53 | exchange.get("affirmative", ""), 54 | exchange.get("negative", ""), 55 | ) 56 | console.print(table) 57 | 58 | if "judge_eval" in exchange: 59 | console.print( 60 | Panel.fit( 61 | exchange["judge_eval"], 62 | title=f"Judge @ {iteration}", 63 | border_style="yellow", 64 | ) 65 | ) 66 | 67 | 68 | def _pattern_map(): 69 | return find_patterns(PATTERNS_DIR) 70 | 71 | 72 | # --- CLI Commands --- 73 | 74 | 75 | @app.callback() 76 | def _setup( 77 | allow_path: Optional[List[str]] = typer.Option( 78 | None, 79 | "--allow-path", 80 | help="Allow file tools to access given absolute directories (repeatable)", 81 | ), 82 | ): 83 | if allow_path: 84 | registry.set_sandbox_roots(allow_path) 85 | 86 | 87 | def _coerce_value(s: str): 88 | sl = s.lower() 89 | if sl in ("true", "false"): 90 | return sl == "true" 91 | try: 92 | if "." in s: 93 | return float(s) 94 | return int(s) 95 | except ValueError: 96 | return s 97 | 98 | 99 | def _parse_overrides(set: Optional[list[str]]) -> dict: 100 | overrides: dict = {} 101 | if not set: 102 | return overrides 103 | for kv in set: 104 | if "=" not in kv: 105 | continue 106 | key, val = kv.split("=", 1) 107 | cursor = overrides 108 | parts = key.split(".") 109 | for p in parts[:-1]: 110 | cursor = cursor.setdefault(p, {}) 111 | cursor[parts[-1]] = _coerce_value(val) 112 | return overrides 113 | 114 | 115 | def _execute_pattern( 116 | pattern_name: str, 117 | topic: str, 118 | config_path: Path, 119 | overrides: dict, 120 | stream: bool, 121 | json_output: bool, 122 | save_path: Optional[Path], 123 | ) -> Dict[str, Any]: 124 | patterns = _pattern_map() 125 | pat = patterns.get(pattern_name) 126 | if not pat: 127 | console.print( 128 | f"[bold red]Error:[/bold red] Pattern '{pattern_name}' not found." 
129 | ) 130 | raise typer.Exit(code=1) 131 | 132 | # Tool event capture for TUI 133 | tool_events_by_iter: Dict[int, List[Dict[str, Any]]] = {} 134 | printed_index_by_iter: Dict[int, int] = {} 135 | 136 | def _tool_listener(event: str, payload: Dict[str, Any]) -> None: 137 | try: 138 | iteration = int(payload.get("iteration", 0)) 139 | except Exception: 140 | iteration = 0 141 | if iteration <= 0: 142 | return 143 | tool_events_by_iter.setdefault(iteration, []).append( 144 | { 145 | "event": event, 146 | **payload, 147 | } 148 | ) 149 | 150 | def _on_iteration(iteration: int, exchange: dict, history: str) -> None: 151 | if not json_output: 152 | tui.render_iteration(iteration, exchange) 153 | events = tool_events_by_iter.get(iteration, []) 154 | start_idx = printed_index_by_iter.get(iteration, 0) 155 | printed_index_by_iter[iteration] = tui.render_tool_events( 156 | iteration, events, start_idx 157 | ) 158 | 159 | if not json_output: 160 | tui.render_header(pattern_name, topic) 161 | 162 | registry.add_listener(_tool_listener) 163 | try: 164 | result = pat.run( 165 | topic=topic, 166 | config_path=config_path, 167 | overrides=overrides, 168 | on_iteration=_on_iteration, 169 | ) 170 | except Exception as e: 171 | console.print(f"[bold red]Error during run:[/bold red] {e}") 172 | raise typer.Exit(code=1) 173 | finally: 174 | registry.remove_listener(_tool_listener) 175 | 176 | record = { 177 | "schema": 1, 178 | "timestamp": datetime.utcnow().isoformat() + "Z", 179 | "version": __version__, 180 | "pattern": pattern_name, 181 | "topic": topic, 182 | "config_path": str(config_path), 183 | "overrides": overrides, 184 | "tool_events_by_iter": tool_events_by_iter, 185 | "result": result, 186 | } 187 | 188 | if save_path is not None: 189 | try: 190 | save_path.parent.mkdir(parents=True, exist_ok=True) 191 | save_path.write_text( 192 | json.dumps(record, ensure_ascii=False, indent=2), encoding="utf-8" 193 | ) 194 | except Exception as e: 195 | console.print(f"[yellow]Warning:[/yellow] Failed to save session: {e}") 196 | 197 | if json_output: 198 | console.print_json(data=record) 199 | else: 200 | tui.render_final( 201 | result.get("final_answer", ""), result.get("justification", "") 202 | ) 203 | 204 | return record 205 | 206 | 207 | @app.command("list") 208 | def list_cmd(): 209 | """List all available agent patterns.""" 210 | console.print(Panel.fit("Available DSPy Agent Patterns", style="bold blue")) 211 | table = Table(show_header=True, header_style="bold magenta") 212 | table.add_column("Pattern", style="cyan") 213 | table.add_column("Description", style="green") 214 | table.add_column("Configs", justify="right", style="magenta") 215 | table.add_column("Tools", style="yellow") 216 | 217 | patterns = _pattern_map() 218 | for name, pat in patterns.items(): 219 | # short description from first non-empty line 220 | desc_text = pat.describe() or "" 221 | first_line = "" 222 | for line in desc_text.splitlines(): 223 | if line.strip(): 224 | first_line = line.strip().lstrip("# ") 225 | break 226 | if len(first_line) > 90: 227 | first_line = first_line[:87] + "..." 
228 | 229 | # configs and tools summary 230 | cfgs = list(pat.available_configs()) 231 | cfg_count = str(len(cfgs)) 232 | tools = list(pat.available_tools()) 233 | if tools: 234 | tools_preview = ", ".join(tools[:3]) + (" …" if len(tools) > 3 else "") 235 | else: 236 | tools_preview = "-" 237 | 238 | table.add_row(name, first_line or "-", cfg_count, tools_preview) 239 | 240 | console.print(table) 241 | 242 | 243 | @app.command() 244 | def describe( 245 | pattern_name: str = typer.Argument( 246 | ..., help="The name of the pattern to describe." 247 | ), 248 | ): 249 | """Show details about a specific pattern.""" 250 | patterns = _pattern_map() 251 | pat = patterns.get(pattern_name) 252 | if not pat: 253 | console.print( 254 | f"[bold red]Error:[/bold red] Pattern '{pattern_name}' not found." 255 | ) 256 | raise typer.Exit(code=1) 257 | 258 | console.print( 259 | Panel.fit(f"Pattern: [bold cyan]{pattern_name}[/bold cyan]", style="bold blue") 260 | ) 261 | 262 | desc = pat.describe() 263 | if desc: 264 | console.print(Panel(Markdown(desc), title="Description")) 265 | 266 | cfg = pat.default_config_path() 267 | if cfg and cfg.exists(): 268 | with open(cfg, "r") as f: 269 | console.print(Panel(f.read(), title="Default Configuration (config.yaml)")) 270 | else: 271 | console.print("No config.yaml found for this pattern.") 272 | 273 | 274 | @app.command() 275 | def run( 276 | pattern_name: str = typer.Argument(..., help="The name of the pattern to run."), 277 | topic: Optional[str] = typer.Argument( 278 | None, help="The topic or task for the pattern." 279 | ), 280 | config: Optional[Path] = typer.Option( 281 | None, "--config", "-c", help="Path to a YAML config file." 282 | ), 283 | set: Optional[list[str]] = typer.Option( 284 | None, "--set", help="Override config values, e.g. debate.max_iterations=5" 285 | ), 286 | stream: bool = typer.Option(False, "--stream", help="Stream tokens if supported"), 287 | json_output: bool = typer.Option( 288 | False, "--json", help="Emit JSON record instead of TUI" 289 | ), 290 | save: Optional[Path] = typer.Option( 291 | None, "--save", help="Save session to JSON file" 292 | ), 293 | ): 294 | """Run a specific agent pattern.""" 295 | patterns = _pattern_map() 296 | pat = patterns.get(pattern_name) 297 | if not pat: 298 | console.print( 299 | f"[bold red]Error:[/bold red] Pattern '{pattern_name}' not found." 300 | ) 301 | raise typer.Exit(code=1) 302 | # Determine config path 303 | if config: 304 | config_path = config 305 | else: 306 | cfg = pat.default_config_path() 307 | if not cfg: 308 | console.print( 309 | f"[bold red]Error:[/bold red] No default config for '{pattern_name}'. 
Use --config" 310 | ) 311 | raise typer.Exit(code=1) 312 | config_path = cfg 313 | 314 | if not config_path.exists(): 315 | console.print( 316 | f"[bold red]Error:[/bold red] Config file not found at {config_path}" 317 | ) 318 | raise typer.Exit(code=1) 319 | 320 | if not topic: 321 | topic = typer.prompt("Please enter the topic/task") 322 | 323 | overrides = _parse_overrides(set) 324 | assert topic is not None 325 | _execute_pattern( 326 | pattern_name=pattern_name, 327 | topic=topic, 328 | config_path=config_path, 329 | overrides=overrides, 330 | stream=stream, 331 | json_output=json_output, 332 | save_path=save, 333 | ) 334 | 335 | 336 | @app.command("tools") 337 | def tools( 338 | pattern_name: Optional[str] = typer.Option( 339 | None, "--pattern", help="Filter tools for a specific pattern" 340 | ), 341 | describe: Optional[str] = typer.Option( 342 | None, "--describe", help="Describe specific tool" 343 | ), 344 | ): 345 | """List available tools (global or for a given pattern).""" 346 | console.print(Panel.fit("Available Tools", style="bold blue")) 347 | if describe: 348 | try: 349 | fn = registry.get(describe) 350 | except KeyError: 351 | console.print(f"[bold red]Error:[/bold red] Unknown tool '{describe}'") 352 | raise typer.Exit(code=1) 353 | sig = getattr(fn, "__signature__", None) 354 | doc = getattr(fn, "__doc__", None) or "(no docstring)" 355 | console.print( 356 | Panel.fit(str(sig), title=f"Signature: {describe}", border_style="magenta") 357 | ) 358 | console.print(Panel.fit(doc.strip(), title="Docstring")) 359 | return 360 | 361 | table = Table(show_header=True, header_style="bold magenta") 362 | table.add_column("Tool Name", style="cyan") 363 | table.add_column("Source", style="green") 364 | 365 | if pattern_name: 366 | pat = _pattern_map().get(pattern_name) 367 | if not pat: 368 | console.print( 369 | f"[bold red]Error:[/bold red] Pattern '{pattern_name}' not found." 370 | ) 371 | raise typer.Exit(code=1) 372 | for t in pat.available_tools(): 373 | table.add_row(t, f"pattern:{pattern_name}") 374 | else: 375 | for t in registry.names(): 376 | table.add_row(t, "global") 377 | 378 | console.print(table) 379 | 380 | 381 | @app.command("configs") 382 | def configs( 383 | pattern_name: str = typer.Argument(..., help="Pattern to list configs for"), 384 | ): 385 | """List available configs/scenarios for a pattern.""" 386 | pat = _pattern_map().get(pattern_name) 387 | if not pat: 388 | console.print( 389 | f"[bold red]Error:[/bold red] Pattern '{pattern_name}' not found." 390 | ) 391 | raise typer.Exit(code=1) 392 | 393 | console.print( 394 | Panel.fit( 395 | f"Configs for [bold cyan]{pattern_name}[/bold cyan]", style="bold blue" 396 | ) 397 | ) 398 | table = Table(show_header=True, header_style="bold magenta") 399 | table.add_column("Name", style="cyan") 400 | table.add_column("Path", style="green") 401 | for p in pat.available_configs(): 402 | table.add_row(p.stem, str(p)) 403 | console.print(table) 404 | 405 | 406 | @app.command("scripts") 407 | def scripts( 408 | pattern_name: str = typer.Argument( 409 | ..., help="Pattern to list optimization/eval scripts for" 410 | ), 411 | ): 412 | """List optimization/evaluation scripts provided by a pattern.""" 413 | pat = _pattern_map().get(pattern_name) 414 | if not pat: 415 | console.print( 416 | f"[bold red]Error:[/bold red] Pattern '{pattern_name}' not found." 
417 | ) 418 | raise typer.Exit(code=1) 419 | scripts_map = pat.available_scripts() 420 | console.print( 421 | Panel.fit( 422 | f"Scripts for [bold cyan]{pattern_name}[/bold cyan]", style="bold blue" 423 | ) 424 | ) 425 | if not scripts_map: 426 | console.print("No scripts available.") 427 | return 428 | table = Table(show_header=True, header_style="bold magenta") 429 | table.add_column("Script", style="cyan") 430 | table.add_column("Description", style="green") 431 | for k, v in scripts_map.items(): 432 | table.add_row(k, v) 433 | console.print(table) 434 | 435 | 436 | @app.command("interactive") 437 | def interactive(): 438 | """Guided run: choose pattern, config, topic, and overrides interactively.""" 439 | patterns = _pattern_map() 440 | if not patterns: 441 | console.print("No patterns found.") 442 | raise typer.Exit(code=1) 443 | 444 | names = sorted(patterns.keys()) 445 | pattern_name = questionary.select("Select a pattern", choices=names).ask() 446 | pat = patterns.get(pattern_name) 447 | if not pat: 448 | console.print( 449 | f"[bold red]Error:[/bold red] Pattern '{pattern_name}' not found." 450 | ) 451 | raise typer.Exit(code=1) 452 | 453 | configs = list(pat.available_configs()) 454 | config_path: Optional[Path] = None 455 | if configs: 456 | console.print(Panel.fit("Available configs:", style="bold blue")) 457 | for p in configs: 458 | console.print(f"- {p.stem}: {p}") 459 | choice = typer.prompt("Config name (leave empty for default)", default="") 460 | if choice: 461 | for p in configs: 462 | if p.stem == choice: 463 | config_path = p 464 | break 465 | if config_path is None: 466 | config_path = pat.default_config_path() 467 | if not config_path: 468 | console.print( 469 | "No default config found; please provide a config with --config in 'run'." 
470 | ) 471 | raise typer.Exit(code=1) 472 | 473 | topic = typer.prompt("Enter topic/task") 474 | 475 | overrides: dict = {} 476 | if typer.confirm("Override debate.max_iterations?", default=False): 477 | val = typer.prompt("New max_iterations", default="5") 478 | overrides.setdefault("debate", {})["max_iterations"] = val 479 | 480 | _execute_pattern( 481 | pattern_name=pattern_name, 482 | topic=topic, 483 | config_path=config_path, 484 | overrides=overrides, 485 | stream=False, 486 | json_output=False, 487 | save_path=None, 488 | ) 489 | 490 | 491 | @app.command("replay") 492 | def replay(path: Path = typer.Argument(..., help="Path to saved session JSON")): 493 | try: 494 | data = json.loads(path.read_text(encoding="utf-8")) 495 | except Exception as e: 496 | console.print(f"[bold red]Error:[/bold red] {e}") 497 | raise typer.Exit(code=1) 498 | 499 | pattern_name = data.get("pattern", "?") 500 | topic = data.get("topic", "") 501 | tui.render_header(pattern_name, topic) 502 | events_by_iter: Dict[int, List[Dict[str, Any]]] = { 503 | int(k): v for k, v in (data.get("tool_events_by_iter", {}) or {}).items() 504 | } 505 | printed_index_by_iter: Dict[int, int] = {} 506 | history = ( 507 | data.get("result", {}).get("history") 508 | or data.get("result", {}).get("debate_history") 509 | or [] 510 | ) 511 | for i, exchange in enumerate(history, 1): 512 | tui.render_iteration(i, exchange) 513 | events = events_by_iter.get(i, []) 514 | idx = printed_index_by_iter.get(i, 0) 515 | printed_index_by_iter[i] = tui.render_tool_events(i, events, idx) 516 | tui.render_final( 517 | data.get("result", {}).get("final_answer", ""), 518 | data.get("result", {}).get("justification", ""), 519 | ) 520 | 521 | 522 | @app.command("compare") 523 | def compare( 524 | pattern_a: str = typer.Argument(..., help="First pattern"), 525 | pattern_b: str = typer.Argument(..., help="Second pattern"), 526 | topic: str = typer.Argument(..., help="Topic"), 527 | config_a: Optional[Path] = typer.Option(None, "--config-a"), 528 | config_b: Optional[Path] = typer.Option(None, "--config-b"), 529 | metric: str = typer.Option("exact", "--metric", help="exact|len|jaccard"), 530 | ): 531 | patterns = _pattern_map() 532 | cfg_a = config_a or patterns[pattern_a].default_config_path() 533 | cfg_b = config_b or patterns[pattern_b].default_config_path() 534 | rec_a = _execute_pattern( 535 | pattern_name=pattern_a, 536 | topic=topic, 537 | config_path=cfg_a, 538 | overrides={}, 539 | stream=False, 540 | json_output=False, 541 | save_path=None, 542 | ) 543 | rec_b = _execute_pattern( 544 | pattern_name=pattern_b, 545 | topic=topic, 546 | config_path=cfg_b, 547 | overrides={}, 548 | stream=False, 549 | json_output=False, 550 | save_path=None, 551 | ) 552 | a = (rec_a.get("result", {}).get("final_answer", "") or "").strip() 553 | b = (rec_b.get("result", {}).get("final_answer", "") or "").strip() 554 | 555 | t = Table(show_header=True, header_style="bold magenta") 556 | t.add_column("Pattern", style="cyan") 557 | t.add_column("Final Answer (truncated)", style="green") 558 | t.add_row(pattern_a, (a[:120] + "…") if len(a) > 120 else a) 559 | t.add_row(pattern_b, (b[:120] + "…") if len(b) > 120 else b) 560 | console.print(t) 561 | 562 | 563 | @app.command("version") 564 | def version_cmd(): 565 | console.print( 566 | Panel.fit(f"awesome-dspy-agents [bold cyan]{__version__}[/bold cyan]") 567 | ) 568 | 569 | 570 | if __name__ == "__main__": 571 | app() 572 | -------------------------------------------------------------------------------- 
/awesome_dspy_agents/patterns/debate/pattern.py: -------------------------------------------------------------------------------- 1 | from pathlib import Path 2 | from typing import Any, Callable, Dict, List, Optional 3 | 4 | import dspy # type: ignore 5 | 6 | from awesome_dspy_agents.config import AppConfig, build_lm, load_config 7 | from awesome_dspy_agents.logging_setup import get_logger 8 | from awesome_dspy_agents.patterns.interface import AgentPattern 9 | from awesome_dspy_agents.tools.registry import (registry, reset_current_agent, 10 | reset_current_iteration, 11 | set_current_agent, 12 | set_current_iteration) 13 | 14 | from .signatures import (AffirmativeDebater, JudgeDiscriminative, 15 | JudgeExtractive, NegativeDebater) 16 | 17 | llm_logger = get_logger("mad.llm", "llm_calls.log", max_bytes=2_000_000, backup_count=3) 18 | 19 | 20 | class DebaterModule(dspy.Module): 21 | """Base debater module with configurable behavior.""" 22 | 23 | def __init__( 24 | self, 25 | role: str, 26 | debate_level: int = 2, 27 | persona: str = "", 28 | lm: Optional[dspy.LM] = None, 29 | module_type: str = "predict", 30 | tool_names: Optional[List[str]] = None, 31 | react_max_iters: int = 3, 32 | ): 33 | super().__init__() 34 | self.role = role 35 | self.debate_level = debate_level 36 | self.persona = persona 37 | self.module_type = module_type 38 | self.tool_names = tool_names or [] 39 | self.react_max_iters = react_max_iters 40 | 41 | # Select signature based on role 42 | if role == "affirmative": 43 | signature = AffirmativeDebater 44 | else: 45 | signature = NegativeDebater 46 | 47 | if module_type == "predict": 48 | self.predict = dspy.Predict(signature) 49 | elif module_type == "chain_of_thought": 50 | self.predict = dspy.ChainOfThought(signature) 51 | elif module_type == "react": 52 | tools = registry.build_dspy_tools(self.tool_names) 53 | llm_logger.info( 54 | "react_tools", role=self.role, tools=[str(t) for t in tools] 55 | ) 56 | self.predict = dspy.ReAct( 57 | signature, tools=tools, max_iters=self.react_max_iters 58 | ) 59 | else: 60 | raise ValueError(f"Unsupported module_type: {module_type}") 61 | 62 | # Optionally set a specific LM for this debater 63 | if lm is not None: 64 | # Set LM for this module and its children 65 | try: 66 | self.set_lm(lm) 67 | except Exception: 68 | # Fallback to setting on the predictor only 69 | self.predict.set_lm(lm) 70 | 71 | def get_role_instruction(self) -> str: 72 | """Get role instruction based on debate level.""" 73 | level_instructions = { 74 | 0: "Both sides must reach full consensus on every point.", 75 | 1: "Most debate should be disagreements, but some consensus on minor points.", 76 | 2: "It's not necessary to fully agree. Find the correct answer.", 77 | 3: "Both sides must disagree on every point. No consensus.", 78 | } 79 | 80 | base_instruction = level_instructions.get( 81 | self.debate_level, level_instructions[2] 82 | ) 83 | 84 | if self.role == "affirmative": 85 | return ( 86 | f"You are affirmative side. {base_instruction} Present your viewpoint." 87 | ) 88 | else: 89 | return ( 90 | f"You are negative side. {base_instruction} Provide counter-arguments." 
91 | ) 92 | 93 | def forward( 94 | self, 95 | debate_topic: str, 96 | debate_history: str, 97 | affirmative_argument: str = "", 98 | ): 99 | role_instruction = self.get_role_instruction() 100 | 101 | if self.role == "affirmative": 102 | llm_logger.info( 103 | "predict", 104 | role="affirmative", 105 | module=self.module_type, 106 | topic_len=len(debate_topic), 107 | history_len=len(debate_history), 108 | ) 109 | token = set_current_agent(self.role) 110 | out = self.predict( 111 | debate_topic=debate_topic, 112 | debate_history=debate_history, 113 | role_instruction=role_instruction, 114 | persona=self.persona, 115 | ) 116 | reset_current_agent(token) 117 | return out 118 | else: 119 | llm_logger.info( 120 | "predict", 121 | role="negative", 122 | module=self.module_type, 123 | topic_len=len(debate_topic), 124 | history_len=len(debate_history), 125 | ) 126 | token = set_current_agent(self.role) 127 | out = self.predict( 128 | debate_topic=debate_topic, 129 | debate_history=debate_history, 130 | affirmative_argument=affirmative_argument, 131 | role_instruction=role_instruction, 132 | persona=self.persona, 133 | ) 134 | reset_current_agent(token) 135 | return out 136 | 137 | 138 | class JudgeModule(dspy.Module): 139 | """Judge module with discriminative and extractive modes.""" 140 | 141 | def __init__( 142 | self, 143 | lm_discriminative: Optional[dspy.LM] = None, 144 | lm_extractive: Optional[dspy.LM] = None, 145 | module_type: str = "predict", 146 | tool_names: Optional[List[str]] = None, 147 | react_max_iters: int = 3, 148 | ): 149 | super().__init__() 150 | self.module_type = module_type 151 | self.tool_names = tool_names or [] 152 | self.react_max_iters = react_max_iters 153 | 154 | if module_type == "predict": 155 | self.discriminative = dspy.Predict(JudgeDiscriminative) 156 | self.extractive = dspy.Predict(JudgeExtractive) 157 | elif module_type == "chain_of_thought": 158 | self.discriminative = dspy.ChainOfThought(JudgeDiscriminative) 159 | self.extractive = dspy.ChainOfThought(JudgeExtractive) 160 | elif module_type == "react": 161 | tools = registry.build_dspy_tools(self.tool_names) 162 | llm_logger.info("judge_react_tools", tools=[str(t) for t in tools]) 163 | self.discriminative = dspy.ReAct( 164 | JudgeDiscriminative, tools=tools, max_iters=self.react_max_iters 165 | ) 166 | self.extractive = dspy.ReAct( 167 | JudgeExtractive, tools=tools, max_iters=self.react_max_iters 168 | ) 169 | else: 170 | raise ValueError(f"Unsupported judge module_type: {module_type}") 171 | 172 | # Optionally set specific LMs for judge sub-modules 173 | if lm_discriminative is not None: 174 | try: 175 | self.discriminative.set_lm(lm_discriminative) 176 | except Exception: 177 | pass 178 | if lm_extractive is not None: 179 | try: 180 | self.extractive.set_lm(lm_extractive) 181 | except Exception: 182 | pass 183 | 184 | def evaluate_debate( 185 | self, 186 | debate_topic: str, 187 | debate_history: str, 188 | current_iteration: int, 189 | ) -> dspy.Prediction: 190 | """Discriminative mode: Decide if solution is found.""" 191 | llm_logger.info( 192 | "judge_discriminative", 193 | iter=current_iteration, 194 | topic_len=len(debate_topic), 195 | history_len=len(debate_history), 196 | ) 197 | return self.discriminative( 198 | debate_topic=debate_topic, 199 | debate_history=debate_history, 200 | current_iteration=current_iteration, 201 | ) 202 | 203 | def extract_solution( 204 | self, 205 | debate_topic: str, 206 | debate_history: str, 207 | ) -> dspy.Prediction: 208 | """Extractive mode: Extract final answer 
from debate.""" 209 | llm_logger.info( 210 | "judge_extractive", 211 | topic_len=len(debate_topic), 212 | history_len=len(debate_history), 213 | ) 214 | return self.extractive( 215 | debate_topic=debate_topic, 216 | debate_history=debate_history, 217 | ) 218 | 219 | 220 | class MADFramework(dspy.Module): 221 | """Complete Multi-Agent Debate framework.""" 222 | 223 | def __init__( 224 | self, 225 | max_iterations: int = 3, 226 | debate_level: int = 2, 227 | adaptive_break: bool = True, 228 | affirmative_persona: str = "", 229 | negative_persona: str = "", 230 | affirmative_lm: Optional[dspy.LM] = None, 231 | negative_lm: Optional[dspy.LM] = None, 232 | judge_lm_discriminative: Optional[dspy.LM] = None, 233 | judge_lm_extractive: Optional[dspy.LM] = None, 234 | affirmative_module_type: str = "predict", 235 | negative_module_type: str = "predict", 236 | judge_module_type: str = "predict", 237 | affirmative_tools: Optional[List[str]] = None, 238 | negative_tools: Optional[List[str]] = None, 239 | react_max_iters: int = 6, 240 | judge_tool_names: Optional[List[str]] = None, 241 | on_iteration: Optional[Callable[[int, Dict[str, Any], str], None]] = None, 242 | ): 243 | super().__init__() 244 | 245 | self.max_iterations = max_iterations 246 | self.debate_level = debate_level 247 | self.adaptive_break = adaptive_break 248 | self.on_iteration = on_iteration 249 | 250 | # Initialize agents 251 | self.affirmative = DebaterModule( 252 | "affirmative", 253 | debate_level, 254 | persona=affirmative_persona, 255 | lm=affirmative_lm, 256 | module_type=affirmative_module_type, 257 | tool_names=affirmative_tools, 258 | react_max_iters=react_max_iters, 259 | ) 260 | self.negative = DebaterModule( 261 | "negative", 262 | debate_level, 263 | persona=negative_persona, 264 | lm=negative_lm, 265 | module_type=negative_module_type, 266 | tool_names=negative_tools, 267 | react_max_iters=react_max_iters, 268 | ) 269 | self.judge = JudgeModule( 270 | lm_discriminative=judge_lm_discriminative, 271 | lm_extractive=judge_lm_extractive, 272 | module_type=judge_module_type, 273 | tool_names=judge_tool_names, 274 | react_max_iters=react_max_iters, 275 | ) 276 | 277 | # Debate state 278 | self.debate_history: List[Dict[str, Any]] = [] 279 | 280 | def format_history(self) -> str: 281 | """Format debate history as readable text.""" 282 | if not self.debate_history: 283 | return "No debate history yet." 
284 | 285 | formatted = [] 286 | for i, exchange in enumerate(self.debate_history, 1): 287 | formatted.append(f"\n--- Iteration {i} ---") 288 | formatted.append(f"Affirmative: {exchange['affirmative']}") 289 | formatted.append(f"Negative: {exchange['negative']}") 290 | if "judge_eval" in exchange: 291 | formatted.append(f"Judge: {exchange['judge_eval']}") 292 | 293 | return "\n".join(formatted) 294 | 295 | def forward( 296 | self, 297 | debate_topic: str, 298 | ): 299 | """Run complete debate process.""" 300 | self.debate_history = [] 301 | solution_found = False 302 | final_answer = None 303 | 304 | for iteration in range(1, self.max_iterations + 1): 305 | history_str = self.format_history() 306 | iter_token = set_current_iteration(iteration) 307 | try: 308 | # Affirmative speaks 309 | aff_response = self.affirmative( 310 | debate_topic=debate_topic, 311 | debate_history=history_str, 312 | ) 313 | 314 | # Negative responds 315 | neg_response = self.negative( 316 | debate_topic=debate_topic, 317 | debate_history=history_str, 318 | affirmative_argument=aff_response.argument, 319 | ) 320 | 321 | # Record exchange 322 | exchange = { 323 | "affirmative": aff_response.argument, 324 | "affirmative_reasoning": aff_response.reasoning, 325 | "negative": neg_response.counter_argument, 326 | "negative_reasoning": neg_response.reasoning, 327 | } 328 | self.debate_history.append(exchange) 329 | 330 | # Callback after initial exchange 331 | if self.on_iteration is not None: 332 | try: 333 | self.on_iteration(iteration, exchange, self.format_history()) 334 | except Exception: 335 | pass 336 | 337 | # Judge evaluates 338 | if self.adaptive_break: 339 | history_str = self.format_history() 340 | judge_token = set_current_agent("judge") 341 | try: 342 | judge_eval = self.judge.evaluate_debate( 343 | debate_topic=debate_topic, 344 | debate_history=history_str, 345 | current_iteration=iteration, 346 | ) 347 | finally: 348 | reset_current_agent(judge_token) 349 | 350 | exchange["judge_eval"] = judge_eval.reasoning 351 | 352 | # Callback after judge evaluation 353 | if self.on_iteration is not None: 354 | try: 355 | self.on_iteration( 356 | iteration, exchange, self.format_history() 357 | ) 358 | except Exception: 359 | pass 360 | 361 | if judge_eval.solution_found and judge_eval.confidence > 0.7: 362 | solution_found = True 363 | judge_token = set_current_agent("judge") 364 | try: 365 | final_answer = self.judge.extract_solution( 366 | debate_topic=debate_topic, 367 | debate_history=history_str, 368 | ) 369 | finally: 370 | reset_current_agent(judge_token) 371 | break 372 | finally: 373 | reset_current_iteration(iter_token) 374 | 375 | # Extract final answer if not found adaptively 376 | if not solution_found: 377 | history_str = self.format_history() 378 | final_prediction = self.judge.extract_solution( 379 | debate_topic=debate_topic, 380 | debate_history=history_str, 381 | ) 382 | final_answer = final_prediction 383 | 384 | if final_answer is None: 385 | return dspy.Prediction( 386 | final_answer="", 387 | justification="", 388 | debate_history=self.debate_history, 389 | iterations_used=len(self.debate_history), 390 | adaptive_break_triggered=solution_found, 391 | ) 392 | 393 | return dspy.Prediction( 394 | final_answer=final_answer.final_answer, 395 | justification=final_answer.justification, 396 | debate_history=self.debate_history, 397 | iterations_used=len(self.debate_history), 398 | adaptive_break_triggered=solution_found, 399 | ) 400 | 401 | 402 | class DebatePattern(AgentPattern): 403 | name = 
"debate" 404 | 405 | def __init__(self) -> None: 406 | self._root = Path(__file__).resolve().parent 407 | 408 | def describe(self) -> str: 409 | readme = self._root / "README.md" 410 | if readme.exists(): 411 | try: 412 | return readme.read_text(encoding="utf-8") 413 | except Exception: 414 | return "Multi-Agent Debate pattern." 415 | return "Multi-Agent Debate pattern." 416 | 417 | def default_config_path(self) -> Optional[Path]: 418 | cfg = self._root / "config.yaml" 419 | return cfg if cfg.exists() else None 420 | 421 | def available_configs(self) -> List[Path]: 422 | configs: List[Path] = [] 423 | # default 424 | cfg = self.default_config_path() 425 | if cfg: 426 | configs.append(cfg) 427 | # scenarios directory optional 428 | scenarios = self._root / "scenarios" 429 | if scenarios.exists(): 430 | for p in scenarios.glob("*.yaml"): 431 | configs.append(p) 432 | return configs 433 | 434 | def available_tools(self) -> List[str]: 435 | # reflect from default config 436 | cfg_path = self.default_config_path() 437 | try: 438 | if cfg_path: 439 | cfg = load_config(str(cfg_path)) 440 | tools = set() 441 | aff = cfg.agents.get("affirmative") 442 | neg = cfg.agents.get("negative") 443 | if aff: 444 | tools.update(aff.tools) 445 | if neg: 446 | tools.update(neg.tools) 447 | return sorted(tools) 448 | except Exception: 449 | pass 450 | return [] 451 | 452 | def available_scripts(self) -> Dict[str, str]: 453 | # Placeholder for optimization scripts discovery later 454 | return {} 455 | 456 | def run( 457 | self, 458 | topic: str, 459 | config_path: Path, 460 | overrides: Optional[Dict[str, Any]] = None, 461 | on_iteration: Optional[Callable[[int, Dict[str, Any], str], None]] = None, 462 | ) -> Dict[str, Any]: 463 | cfg: AppConfig = load_config(str(config_path)) 464 | 465 | # apply simple overrides for debate.* keys 466 | if overrides: 467 | debate_over = overrides.get("debate") 468 | if isinstance(debate_over, dict): 469 | if "max_iterations" in debate_over: 470 | cfg.debate.max_iterations = int(debate_over["max_iterations"]) # type: ignore[assignment] 471 | if "debate_level" in debate_over: 472 | cfg.debate.debate_level = int(debate_over["debate_level"]) # type: ignore[assignment] 473 | if "adaptive_break" in debate_over: 474 | cfg.debate.adaptive_break = bool(debate_over["adaptive_break"]) # type: ignore[assignment] 475 | 476 | # Configure default lm if provided 477 | if cfg.default_lm is not None: 478 | dspy.configure(lm=build_lm(cfg.default_lm)) 479 | 480 | # Build per-agent LMs 481 | aff_cfg = cfg.agents.get("affirmative") 482 | neg_cfg = cfg.agents.get("negative") 483 | 484 | aff_lm = build_lm(aff_cfg.lm) if aff_cfg and aff_cfg.lm else None 485 | neg_lm = build_lm(neg_cfg.lm) if neg_cfg and neg_cfg.lm else None 486 | 487 | judge_disc_lm = ( 488 | build_lm(cfg.judge.discriminative_lm) 489 | if cfg.judge.discriminative_lm 490 | else None 491 | ) 492 | judge_ext_lm = ( 493 | build_lm(cfg.judge.extractive_lm) if cfg.judge.extractive_lm else None 494 | ) 495 | 496 | # Create framework 497 | framework = MADFramework( 498 | max_iterations=cfg.debate.max_iterations, 499 | debate_level=cfg.debate.debate_level, 500 | adaptive_break=cfg.debate.adaptive_break, 501 | affirmative_persona=(aff_cfg.persona if aff_cfg else ""), 502 | negative_persona=(neg_cfg.persona if neg_cfg else ""), 503 | affirmative_lm=aff_lm, 504 | negative_lm=neg_lm, 505 | judge_lm_discriminative=judge_disc_lm, 506 | judge_lm_extractive=judge_ext_lm, 507 | affirmative_module_type=(aff_cfg.module_type if aff_cfg else "predict"), 508 | 
negative_module_type=(neg_cfg.module_type if neg_cfg else "predict"), 509 | judge_module_type=cfg.judge.module_type, 510 | affirmative_tools=(aff_cfg.tools if aff_cfg else []), 511 | negative_tools=(neg_cfg.tools if neg_cfg else []), 512 | judge_tool_names=( 513 | cfg.judge.tools if getattr(cfg.judge, "tools", None) is not None else [] 514 | ), 515 | ) 516 | 517 | # attach iteration callback 518 | if on_iteration is not None: 519 | framework.on_iteration = on_iteration 520 | 521 | final = framework(debate_topic=topic) 522 | 523 | return { 524 | "final_answer": final.final_answer, 525 | "justification": final.justification, 526 | "iterations_used": final.iterations_used, 527 | "adaptive_break_triggered": final.adaptive_break_triggered, 528 | "history": final.debate_history, 529 | } 530 | 531 | 532 | def get_pattern() -> AgentPattern: 533 | return DebatePattern() 534 | --------------------------------------------------------------------------------