├── app ├── __init__.py ├── api │ ├── __init__.py │ ├── conversations.py │ ├── tools.py │ ├── agents.py │ ├── crews.py │ └── mcp_servers.py ├── core │ ├── __init__.py │ ├── logging.py │ ├── database.py │ ├── agents.py │ └── tools.py ├── services │ ├── __init__.py │ ├── conversation.py │ ├── tool.py │ ├── agent.py │ └── mcp_server.py ├── schemas │ ├── prompt.py │ ├── crew.py │ ├── mcp_server.py │ ├── tool.py │ ├── conversation.py │ └── agent.py ├── models │ ├── __init__.py │ ├── agent_tool.py │ ├── crew.py │ ├── mcp_server.py │ ├── conversation.py │ ├── tool.py │ ├── agent.py │ ├── relationships.py │ ├── setup_relationships.py │ └── base.py └── main.py ├── .env.example ├── docker-compose.yml ├── requirements.txt ├── create_tables.py ├── Dockerfile ├── migration.log ├── .gitignore ├── .github └── workflows │ ├── test.yml │ ├── claude.yml │ └── claude-code-review.yml ├── LICENSE ├── tests ├── conftest.py ├── test_crews.py ├── test_conversations.py ├── test_mcp_servers.py ├── test_tools.py ├── test_agents.py ├── test_agent_model_field.py ├── test_ai_crew_chat.py └── test_readme_workflow.py ├── example.py ├── reset_database.py ├── test_basic_uuid_model.py ├── .windsurfrules ├── GEMINI.md ├── docs ├── UUID_MIGRATION.md ├── getting-started.md ├── README.md ├── testing.md └── ai-crews-management.md ├── CLAUDE.md ├── README.md ├── test_uuid_models.py ├── AGENT_INSTRUCTIONS.md ├── example_ai_crew_chat.py └── PROJECT_OVERVIEW.md /app/__init__.py: -------------------------------------------------------------------------------- 1 | -------------------------------------------------------------------------------- /app/api/__init__.py: -------------------------------------------------------------------------------- 1 | -------------------------------------------------------------------------------- /app/core/__init__.py: -------------------------------------------------------------------------------- 1 | 
-------------------------------------------------------------------------------- /app/services/__init__.py: -------------------------------------------------------------------------------- 1 | -------------------------------------------------------------------------------- /.env.example: -------------------------------------------------------------------------------- 1 | DATABASE_URL="" 2 | OPENROUTER_API_KEY="" 3 | TAVILY_API_KEY="" -------------------------------------------------------------------------------- /app/schemas/prompt.py: -------------------------------------------------------------------------------- 1 | from pydantic import BaseModel 2 | 3 | class PromptBase(BaseModel): 4 | prompt: str 5 | 6 | class PromptCreate(PromptBase): 7 | pass 8 | 9 | class Prompt(PromptBase): 10 | pass 11 | -------------------------------------------------------------------------------- /app/models/__init__.py: -------------------------------------------------------------------------------- 1 | from .base import Base 2 | from .crew import Crew 3 | from .agent import Agent 4 | from .mcp_server import McpServer 5 | from .tool import Tool 6 | from .agent_tool import agent_tool 7 | 8 | __all__ = ["Base", "Crew", "Agent", "McpServer", "Tool", "agent_tool"] 9 | -------------------------------------------------------------------------------- /app/schemas/crew.py: -------------------------------------------------------------------------------- 1 | from pydantic import BaseModel, UUID4, ConfigDict 2 | 3 | class CrewBase(BaseModel): 4 | name: str 5 | 6 | class CrewCreate(CrewBase): 7 | pass 8 | 9 | class Crew(CrewBase): 10 | id: UUID4 11 | 12 | model_config = ConfigDict(from_attributes=True) 13 | -------------------------------------------------------------------------------- /app/models/agent_tool.py: -------------------------------------------------------------------------------- 1 | from sqlalchemy import Column, ForeignKey, Table 2 | from .base import Base, GUID 3 | 4 | 
agent_tool = Table( 5 | "agent_tool", 6 | Base.metadata, 7 | Column("agent_id", GUID, ForeignKey("agents.id"), primary_key=True), 8 | Column("tool_id", GUID, ForeignKey("tools.id"), primary_key=True), 9 | ) 10 | -------------------------------------------------------------------------------- /app/schemas/mcp_server.py: -------------------------------------------------------------------------------- 1 | from pydantic import BaseModel, UUID4, ConfigDict 2 | 3 | class McpServerBase(BaseModel): 4 | name: str 5 | url: str 6 | 7 | class McpServerCreate(McpServerBase): 8 | pass 9 | 10 | class McpServer(McpServerBase): 11 | id: UUID4 12 | 13 | model_config = ConfigDict(from_attributes=True) 14 | -------------------------------------------------------------------------------- /app/schemas/tool.py: -------------------------------------------------------------------------------- 1 | from pydantic import BaseModel, UUID4, ConfigDict 2 | 3 | class ToolBase(BaseModel): 4 | name: str 5 | description: str 6 | mcp_server_id: UUID4 7 | 8 | class ToolCreate(ToolBase): 9 | pass 10 | 11 | class Tool(ToolBase): 12 | id: UUID4 13 | 14 | model_config = ConfigDict(from_attributes=True) 15 | -------------------------------------------------------------------------------- /docker-compose.yml: -------------------------------------------------------------------------------- 1 | services: 2 | db: 3 | image: postgres:13 4 | volumes: 5 | - postgres_data:/var/lib/postgresql/data/ 6 | env_file: 7 | - .env 8 | web: 9 | build: . 
import logging
import sys

def get_logger(name: str) -> logging.Logger:
    """Return a logger named *name* that writes INFO+ records to stdout.

    The handler is attached only once per logger name: the original
    implementation added a new StreamHandler on every call, so a logger
    requested from several modules emitted every record once per call.
    """
    logger = logging.getLogger(name)
    logger.setLevel(logging.INFO)
    # Only attach a handler the first time this logger is requested;
    # logging.getLogger returns the same object for the same name.
    if not logger.handlers:
        handler = logging.StreamHandler(sys.stdout)
        formatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s')
        handler.setFormatter(formatter)
        logger.addHandler(handler)
    return logger
from app.core.database import engine
from app.models.base import Base
# Importing every model module registers its table on Base.metadata,
# which create_all/drop_all need even though the names look unused here.
from app.models.crew import Crew
from app.models.agent import Agent
from app.models.mcp_server import McpServer
from app.models.tool import Tool
from app.models.conversation import Conversation
from app.models.agent_tool import agent_tool

def create_tables(drop_existing: bool = True):
    """Create the full database schema.

    Args:
        drop_existing: when True (the historical default) all existing
            tables are dropped first — this DESTROYS all data. Pass
            False to only create tables that are missing.
    """
    if drop_existing:
        # Destructive reset; kept as the default for backward
        # compatibility with the original script's behaviour.
        Base.metadata.drop_all(bind=engine)
    Base.metadata.create_all(bind=engine)

if __name__ == "__main__":
    create_tables()
dependencies 14 | pass 15 | -------------------------------------------------------------------------------- /Dockerfile: -------------------------------------------------------------------------------- 1 | # Use an official Python runtime as a parent image 2 | FROM python:3.11-slim 3 | 4 | # Set the working directory in the container 5 | WORKDIR /app 6 | 7 | # Copy the dependencies file to the working directory 8 | COPY requirements.txt . 9 | 10 | # Install any needed packages specified in requirements.txt 11 | RUN pip install --no-cache-dir -r requirements.txt 12 | 13 | # Copy the rest of the application's code to the working directory 14 | COPY ./app /app/app 15 | 16 | # Expose the port the app runs on 17 | EXPOSE 8000 18 | 19 | # Define the command to run the application 20 | CMD ["uvicorn", "app.main:app", "--host", "0.0.0.0", "--port", "8000"] 21 | -------------------------------------------------------------------------------- /app/models/mcp_server.py: -------------------------------------------------------------------------------- 1 | from sqlalchemy import Column, String, DateTime 2 | from sqlalchemy.sql import func 3 | from .base import Base, GUID, generate_uuid 4 | 5 | class McpServer(Base): 6 | __tablename__ = "mcp_servers" 7 | 8 | id = Column(GUID, primary_key=True, index=True, default=generate_uuid) 9 | name = Column(String, index=True) 10 | url = Column(String, unique=True, index=True) 11 | created_at = Column(DateTime(timezone=True), server_default=func.now()) 12 | updated_at = Column(DateTime(timezone=True), onupdate=func.now()) 13 | 14 | # Relationships are defined in setup_relationships.py to avoid circular dependencies 15 | pass 16 | -------------------------------------------------------------------------------- /migration.log: -------------------------------------------------------------------------------- 1 | 2025-07-03 12:53:50,682 - INFO - Starting migration from integer IDs to UUIDs... 
2 | 2025-07-03 12:53:50,795 - INFO - Created new tables with UUID schema 3 | 2025-07-03 12:53:50,795 - INFO - Migrating crews... 4 | 2025-07-03 12:53:50,812 - INFO - Migrating mcp_servers... 5 | 2025-07-03 12:53:50,815 - INFO - Migrating agents... 6 | 2025-07-03 12:53:50,827 - INFO - Migrating tools... 7 | 2025-07-03 12:53:50,829 - INFO - Migrating conversations... 8 | 2025-07-03 12:53:50,830 - INFO - Migrating agent_tool associations... 9 | 2025-07-03 12:53:50,833 - INFO - Migration completed successfully! Now replacing old tables... 10 | 2025-07-03 12:53:50,833 - INFO - Dropping old tables... 11 | -------------------------------------------------------------------------------- /app/models/conversation.py: -------------------------------------------------------------------------------- 1 | from sqlalchemy import Column, String, DateTime, ForeignKey, Text 2 | from sqlalchemy.sql import func 3 | from .base import Base, GUID, generate_uuid 4 | 5 | class Conversation(Base): 6 | __tablename__ = "conversations" 7 | 8 | id = Column(GUID, primary_key=True, index=True, default=generate_uuid) 9 | crew_id = Column(GUID, ForeignKey("crews.id")) 10 | agent_id = Column(GUID, ForeignKey("agents.id")) 11 | user_input = Column(Text) 12 | agent_output = Column(Text) 13 | created_at = Column(DateTime(timezone=True), server_default=func.now()) 14 | 15 | # Relationships are defined in setup_relationships.py to avoid circular dependencies 16 | pass 17 | -------------------------------------------------------------------------------- /app/models/tool.py: -------------------------------------------------------------------------------- 1 | from sqlalchemy import Column, String, DateTime, ForeignKey 2 | from sqlalchemy.sql import func 3 | from .base import Base, GUID, generate_uuid 4 | 5 | class Tool(Base): 6 | __tablename__ = "tools" 7 | 8 | id = Column(GUID, primary_key=True, index=True, default=generate_uuid) 9 | name = Column(String, index=True) 10 | description = Column(String) 11 | 
mcp_server_id = Column(GUID, ForeignKey("mcp_servers.id")) 12 | created_at = Column(DateTime(timezone=True), server_default=func.now()) 13 | updated_at = Column(DateTime(timezone=True), onupdate=func.now()) 14 | 15 | # Relationships are defined in setup_relationships.py to avoid circular dependencies 16 | pass 17 | -------------------------------------------------------------------------------- /app/services/conversation.py: -------------------------------------------------------------------------------- 1 | from sqlalchemy.orm import Session 2 | from uuid import UUID 3 | from app.models.conversation import Conversation 4 | from app.schemas.conversation import ConversationCreate 5 | 6 | def create_conversation(db: Session, conversation: ConversationCreate): 7 | db_conversation = Conversation(**conversation.model_dump()) 8 | db.add(db_conversation) 9 | db.commit() 10 | db.refresh(db_conversation) 11 | return db_conversation 12 | 13 | def get_conversation(db: Session, conversation_id: UUID): 14 | return db.query(Conversation).filter(Conversation.id == conversation_id).first() 15 | 16 | def get_conversations(db: Session, skip: int = 0, limit: int = 100): 17 | return db.query(Conversation).offset(skip).limit(limit).all() 18 | -------------------------------------------------------------------------------- /.gitignore: -------------------------------------------------------------------------------- 1 | # See https://help.github.com/articles/ignoring-files/ for more about ignoring files. 
from sqlalchemy import create_engine
from sqlalchemy.orm import sessionmaker
from dotenv import load_dotenv
import os

# Import for setting up relationships after all models are loaded
from app.models.setup_relationships import setup_relationships

load_dotenv()

DATABASE_URL = os.getenv("DATABASE_URL")
if not DATABASE_URL:
    # Fail fast with a clear message instead of the original
    # AttributeError from DATABASE_URL.startswith(...) when unset.
    raise RuntimeError(
        "DATABASE_URL environment variable is not set; "
        "see .env.example for the expected configuration."
    )

engine_args = {}
if not DATABASE_URL.startswith("sqlite"):
    # Pool sizing only applies to server databases; SQLite connections
    # do not accept these arguments with the default pool class.
    engine_args["pool_size"] = 100
    engine_args["pool_timeout"] = 30

engine = create_engine(DATABASE_URL, **engine_args)
SessionLocal = sessionmaker(autocommit=False, autoflush=False, bind=engine)

# Set up relationships after all models are loaded
setup_relationships()

def get_db():
    """FastAPI dependency that yields a session and always closes it."""
    db = SessionLocal()
    try:
        yield db
    finally:
        db.close()
"""
Backwards-compatible shim for SQLAlchemy relationship setup.

The previous implementation tried to re-derive each relationship via
``Model.attr.property.parent.class_attribute.property``, which raises
``AttributeError`` at import time: attributes such as ``Crew.agents``
only exist after ``setup_relationships()`` has run. All relationship
wiring lives in ``setup_relationships.py`` (invoked by
``app.core.database``); this module simply re-exports it so any legacy
``import app.models.relationships`` keeps working without crashing.
"""

from .setup_relationships import setup_relationships

__all__ = ["setup_relationships"]
from sqlalchemy.orm import Session
from uuid import UUID
from app.models.tool import Tool
from app.schemas.tool import ToolCreate

def create_tool(db: Session, tool: ToolCreate):
    """Persist a new Tool and return the refreshed instance."""
    db_tool = Tool(name=tool.name, description=tool.description, mcp_server_id=tool.mcp_server_id)
    db.add(db_tool)
    db.commit()
    db.refresh(db_tool)
    return db_tool

def get_tool(db: Session, tool_id: UUID):
    """Return the Tool with *tool_id*, or None if it does not exist."""
    return db.query(Tool).filter(Tool.id == tool_id).first()

def get_tools(db: Session, skip: int = 0, limit: int = 100):
    """Return a page of Tools using offset/limit pagination."""
    return db.query(Tool).offset(skip).limit(limit).all()

def update_tool(db: Session, tool_id: UUID, tool: ToolCreate):
    """Update an existing Tool in place.

    Returns the updated Tool, or None when *tool_id* does not exist
    (the original raised AttributeError on a missing tool).
    """
    db_tool = db.query(Tool).filter(Tool.id == tool_id).first()
    if db_tool is None:
        return None
    db_tool.name = tool.name
    db_tool.description = tool.description
    db_tool.mcp_server_id = tool.mcp_server_id
    db.commit()
    db.refresh(db_tool)
    return db_tool

def delete_tool(db: Session, tool_id: UUID):
    """Delete and return the Tool, or None when it does not exist."""
    db_tool = db.query(Tool).filter(Tool.id == tool_id).first()
    if db_tool is None:
        return None
    db.delete(db_tool)
    db.commit()
    return db_tool
as conversation_service 6 | from app.schemas import conversation as conversation_schema 7 | 8 | router = APIRouter() 9 | 10 | @router.post("/", response_model=conversation_schema.Conversation) 11 | def create_conversation(conversation: conversation_schema.ConversationCreate, db: Session = Depends(get_db)): 12 | return conversation_service.create_conversation(db=db, conversation=conversation) 13 | 14 | @router.get("/", response_model=list[conversation_schema.Conversation]) 15 | def read_conversations(skip: int = 0, limit: int = 100, db: Session = Depends(get_db)): 16 | conversations = conversation_service.get_conversations(db, skip=skip, limit=limit) 17 | return conversations 18 | 19 | @router.get("/{conversation_id}", response_model=conversation_schema.Conversation) 20 | def read_conversation(conversation_id: UUID, db: Session = Depends(get_db)): 21 | db_conversation = conversation_service.get_conversation(db, conversation_id=conversation_id) 22 | if db_conversation is None: 23 | raise HTTPException(status_code=404, detail="Conversation not found") 24 | return db_conversation 25 | -------------------------------------------------------------------------------- /app/api/tools.py: -------------------------------------------------------------------------------- 1 | from fastapi import APIRouter, Depends, HTTPException 2 | from sqlalchemy.orm import Session 3 | from uuid import UUID 4 | from app.core.database import get_db 5 | from app.services import tool as tool_service 6 | from app.schemas import tool as tool_schema 7 | 8 | router = APIRouter() 9 | 10 | @router.post("/", response_model=tool_schema.Tool) 11 | def create_tool(tool: tool_schema.ToolCreate, db: Session = Depends(get_db)): 12 | return tool_service.create_tool(db=db, tool=tool) 13 | 14 | @router.get("/", response_model=list[tool_schema.Tool]) 15 | def read_tools(skip: int = 0, limit: int = 100, db: Session = Depends(get_db)): 16 | tools = tool_service.get_tools(db, skip=skip, limit=limit) 17 | return 
import sys
import os

# Add the project root to the Python path to allow for absolute imports
sys.path.insert(0, os.path.abspath(os.path.join(os.path.dirname(__file__), '..')))

import pytest
from fastapi.testclient import TestClient
from sqlalchemy import create_engine
from sqlalchemy.orm import sessionmaker, Session
from app.main import app
from app.models.base import Base
from app.core.database import get_db

SQLALCHEMY_DATABASE_URL = "sqlite:///:memory:"

engine = create_engine(
    SQLALCHEMY_DATABASE_URL, connect_args={"check_same_thread": False}
)
TestingSessionLocal = sessionmaker(autocommit=False, autoflush=False, bind=engine)

@pytest.fixture(scope="session", autouse=True)
def setup_database():
    """Create the schema once per session and drop it afterwards."""
    Base.metadata.create_all(bind=engine)
    yield
    Base.metadata.drop_all(bind=engine)

@pytest.fixture(scope="function")
def db_session():
    """Yield a session inside a transaction rolled back after each test."""
    connection = engine.connect()
    transaction = connection.begin()
    session = TestingSessionLocal(bind=connection)
    try:
        yield session
    finally:
        # Always release resources, even when the test body raises —
        # the original skipped cleanup on failure and leaked connections.
        session.close()
        transaction.rollback()
        connection.close()

@pytest.fixture(scope="function")
def client(db_session: Session):
    """TestClient whose get_db dependency is bound to the test session."""
    def override_get_db():
        yield db_session

    app.dependency_overrides[get_db] = override_get_db
    try:
        with TestClient(app) as c:
            yield c
    finally:
        # Remove the override so it cannot leak into other test modules
        # (the original left it installed on the app indefinitely).
        app.dependency_overrides.pop(get_db, None)
Your job is to route the conversation to the correct agent.", 38 | ) 39 | 40 | # Create the graph 41 | graph = AgentGraph(supervisor, agents) 42 | 43 | # Compile the graph 44 | app = graph.compile() 45 | 46 | # Run the graph 47 | for s in app.stream( 48 | { 49 | "messages": [ 50 | HumanMessage(content="What is the weather in San Francisco?") 51 | ] 52 | } 53 | ): 54 | if "__end__" not in s: 55 | print(s) 56 | print("----") 57 | -------------------------------------------------------------------------------- /app/models/setup_relationships.py: -------------------------------------------------------------------------------- 1 | """ 2 | This module handles late-binding relationship setup for SQLAlchemy models. 3 | It imports and configures relationships after all model classes have been defined, 4 | avoiding circular dependency issues. 5 | """ 6 | 7 | from sqlalchemy.orm import relationship 8 | from .agent import Agent 9 | from .crew import Crew 10 | from .conversation import Conversation 11 | from .tool import Tool 12 | from .mcp_server import McpServer 13 | from .agent_tool import agent_tool 14 | 15 | # Define relationships here after all models have been fully defined 16 | def setup_relationships(): 17 | """ 18 | Set up relationships between models after they're all defined. 19 | This resolves circular dependency issues. 
20 | """ 21 | # Add Crew relationships 22 | Crew.agents = relationship("Agent", back_populates="crew") 23 | Crew.conversations = relationship("Conversation", back_populates="crew", lazy="dynamic") 24 | 25 | # Add Agent relationships 26 | Agent.crew = relationship("Crew", back_populates="agents") 27 | Agent.conversations = relationship("Conversation", back_populates="agent", lazy="dynamic") 28 | Agent.tools = relationship("Tool", secondary=agent_tool, back_populates="agents") 29 | 30 | # Add Conversation relationships 31 | Conversation.crew = relationship("Crew", back_populates="conversations") 32 | Conversation.agent = relationship("Agent", back_populates="conversations") 33 | 34 | # Add Tool relationships 35 | Tool.agents = relationship("Agent", secondary=agent_tool, back_populates="tools") 36 | Tool.mcp_server = relationship("McpServer", back_populates="tools") 37 | 38 | # Add McpServer relationships 39 | McpServer.tools = relationship("Tool", back_populates="mcp_server") 40 | -------------------------------------------------------------------------------- /app/api/agents.py: -------------------------------------------------------------------------------- 1 | from fastapi import APIRouter, Depends, HTTPException 2 | from sqlalchemy.orm import Session 3 | from uuid import UUID 4 | from app.core.database import get_db 5 | from app.services import agent as agent_service 6 | from app.schemas import agent as agent_schema 7 | 8 | router = APIRouter() 9 | 10 | @router.post("/", response_model=agent_schema.Agent) 11 | def create_agent(agent: agent_schema.AgentCreate, db: Session = Depends(get_db)): 12 | return agent_service.create_agent(db=db, agent=agent) 13 | 14 | @router.get("/", response_model=list[agent_schema.Agent]) 15 | def read_agents(skip: int = 0, limit: int = 100, db: Session = Depends(get_db)): 16 | agents = agent_service.get_agents(db, skip=skip, limit=limit) 17 | return agents 18 | 19 | @router.get("/{agent_id}", response_model=agent_schema.Agent) 20 | def 
read_agent(agent_id: UUID, db: Session = Depends(get_db)): 21 | db_agent = agent_service.get_agent(db, agent_id=agent_id) 22 | if db_agent is None: 23 | raise HTTPException(status_code=404, detail="Agent not found") 24 | return db_agent 25 | 26 | @router.put("/{agent_id}", response_model=agent_schema.Agent) 27 | def update_agent(agent_id: UUID, agent: agent_schema.AgentCreate, db: Session = Depends(get_db)): 28 | return agent_service.update_agent(db=db, agent_id=agent_id, agent=agent) 29 | 30 | @router.delete("/{agent_id}", response_model=agent_schema.Agent) 31 | def delete_agent(agent_id: UUID, db: Session = Depends(get_db)): 32 | return agent_service.delete_agent(db=db, agent_id=agent_id) 33 | 34 | @router.post("/{agent_id}/tools/{tool_id}", response_model=agent_schema.Agent) 35 | def add_tool_to_agent(agent_id: UUID, tool_id: UUID, db: Session = Depends(get_db)): 36 | return agent_service.add_tool_to_agent(db=db, agent_id=agent_id, tool_id=tool_id) 37 | -------------------------------------------------------------------------------- /app/models/base.py: -------------------------------------------------------------------------------- 1 | import uuid 2 | from sqlalchemy.orm import declarative_base 3 | from sqlalchemy.dialects.postgresql import UUID 4 | from sqlalchemy.types import TypeDecorator, String 5 | 6 | # Custom UUID type for consistent handling 7 | class GUID(TypeDecorator): 8 | """ 9 | Platform-independent GUID type. 10 | 11 | Uses PostgreSQL's UUID type, otherwise uses 12 | VARCHAR(36), storing as stringified hex values. 
13 | """ 14 | impl = String(36) 15 | cache_ok = True 16 | 17 | def load_dialect_impl(self, dialect): 18 | if dialect.name == 'postgresql': 19 | return dialect.type_descriptor(UUID()) 20 | else: 21 | return dialect.type_descriptor(String(36)) 22 | 23 | def process_bind_param(self, value, dialect): 24 | if value is None: 25 | return value 26 | elif dialect.name == 'postgresql': 27 | if not isinstance(value, uuid.UUID): 28 | return uuid.UUID(str(value)) 29 | return value 30 | else: 31 | # For SQLite and other dialects, convert to string 32 | if not isinstance(value, uuid.UUID): 33 | try: 34 | value = uuid.UUID(str(value)) 35 | except (ValueError, TypeError): 36 | raise ValueError(f"Invalid UUID value: {value}") 37 | return str(value) 38 | 39 | def process_result_value(self, value, dialect): 40 | if value is None: 41 | return value 42 | if dialect.name == 'postgresql': 43 | return value 44 | else: 45 | # For SQLite and other dialects, we get back a string 46 | if not isinstance(value, uuid.UUID): 47 | try: 48 | value = uuid.UUID(value) 49 | except (ValueError, TypeError): 50 | return value 51 | return value 52 | 53 | # Helper function to generate UUIDs 54 | def generate_uuid(): 55 | return uuid.uuid4() 56 | 57 | Base = declarative_base() 58 | -------------------------------------------------------------------------------- /tests/test_crews.py: -------------------------------------------------------------------------------- 1 | from fastapi.testclient import TestClient 2 | from sqlalchemy.orm import Session 3 | 4 | def test_create_crew(client: TestClient, db_session: Session): 5 | response = client.post("/crews/", json={"name": "Test Crew"}) 6 | assert response.status_code == 200 7 | data = response.json() 8 | assert data["name"] == "Test Crew" 9 | assert "id" in data 10 | 11 | def test_read_crews(client: TestClient, db_session: Session): 12 | client.post("/crews/", json={"name": "Test Crew 1"}) 13 | client.post("/crews/", json={"name": "Test Crew 2"}) 14 | response = 
client.get("/crews/") 15 | assert response.status_code == 200 16 | data = response.json() 17 | assert len(data) == 2 18 | assert data[0]["name"] == "Test Crew 1" 19 | assert data[1]["name"] == "Test Crew 2" 20 | 21 | def test_read_crew(client: TestClient, db_session: Session): 22 | response = client.post("/crews/", json={"name": "Test Crew"}) 23 | crew_id = response.json()["id"] 24 | response = client.get(f"/crews/{crew_id}") 25 | assert response.status_code == 200 26 | data = response.json() 27 | assert data["name"] == "Test Crew" 28 | assert data["id"] == crew_id 29 | 30 | def test_update_crew(client: TestClient, db_session: Session): 31 | response = client.post("/crews/", json={"name": "Test Crew"}) 32 | crew_id = response.json()["id"] 33 | response = client.put(f"/crews/{crew_id}", json={"name": "Updated Crew"}) 34 | assert response.status_code == 200 35 | data = response.json() 36 | assert data["name"] == "Updated Crew" 37 | assert data["id"] == crew_id 38 | 39 | def test_delete_crew(client: TestClient, db_session: Session): 40 | response = client.post("/crews/", json={"name": "Test Crew"}) 41 | crew_id = response.json()["id"] 42 | response = client.delete(f"/crews/{crew_id}") 43 | assert response.status_code == 200 44 | data = response.json() 45 | assert data["name"] == "Test Crew" 46 | assert data["id"] == crew_id 47 | response = client.get(f"/crews/{crew_id}") 48 | assert response.status_code == 404 49 | -------------------------------------------------------------------------------- /reset_database.py: -------------------------------------------------------------------------------- 1 | import os 2 | from sqlalchemy import create_engine, MetaData, inspect, text 3 | from dotenv import load_dotenv 4 | import logging 5 | 6 | # Configure logging 7 | logging.basicConfig( 8 | level=logging.INFO, 9 | format='%(asctime)s - %(levelname)s - %(message)s', 10 | handlers=[ 11 | logging.StreamHandler() 12 | ] 13 | ) 14 | 15 | # Load environment variables 16 | 
load_dotenv() 17 | 18 | # Explicitly set the database connection string 19 | DATABASE_URL = "postgresql://postgres:postgresql@127.0.0.1:5432/a2a-langgraph-boilerplate" 20 | 21 | # Create engine 22 | engine_args = {} 23 | if not DATABASE_URL.startswith("sqlite"): 24 | engine_args["pool_pre_ping"] = True 25 | engine = create_engine(DATABASE_URL, **engine_args) 26 | 27 | def reset_database(): 28 | """Drop all tables and prepare for re-initialization with UUID schema.""" 29 | 30 | with engine.connect() as conn: 31 | conn.execution_options(isolation_level="AUTOCOMMIT") 32 | inspector = inspect(engine) 33 | 34 | # Get all table names 35 | tables = inspector.get_table_names() 36 | logging.info(f"Found tables: {tables}") 37 | 38 | try: 39 | # Disable foreign key constraints temporarily 40 | conn.execute(text("SET session_replication_role = 'replica';")) 41 | logging.info("Disabled foreign key constraints") 42 | 43 | # Drop all tables 44 | for table in tables: 45 | logging.info(f"Dropping table: {table}") 46 | conn.execute(text(f"DROP TABLE IF EXISTS {table} CASCADE;")) 47 | 48 | # Re-enable foreign key constraints 49 | conn.execute(text("SET session_replication_role = 'origin';")) 50 | logging.info("Re-enabled foreign key constraints") 51 | 52 | logging.info("Database reset complete!") 53 | logging.info("You can now run your application to re-initialize the database with UUID schema.") 54 | 55 | except Exception as e: 56 | logging.error(f"Error resetting database: {e}") 57 | raise 58 | 59 | if __name__ == "__main__": 60 | reset_database() 61 | -------------------------------------------------------------------------------- /.github/workflows/claude.yml: -------------------------------------------------------------------------------- 1 | name: Claude Code 2 | 3 | on: 4 | issue_comment: 5 | types: [created] 6 | pull_request_review_comment: 7 | types: [created] 8 | issues: 9 | types: [opened, assigned] 10 | pull_request_review: 11 | types: [submitted] 12 | 13 | jobs: 14 | 
claude: 15 | if: | 16 | (github.event_name == 'issue_comment' && contains(github.event.comment.body, '@claude')) || 17 | (github.event_name == 'pull_request_review_comment' && contains(github.event.comment.body, '@claude')) || 18 | (github.event_name == 'pull_request_review' && contains(github.event.review.body, '@claude')) || 19 | (github.event_name == 'issues' && (contains(github.event.issue.body, '@claude') || contains(github.event.issue.title, '@claude'))) 20 | runs-on: ubuntu-latest 21 | permissions: 22 | contents: read 23 | pull-requests: read 24 | issues: read 25 | id-token: write 26 | steps: 27 | - name: Checkout repository 28 | uses: actions/checkout@v4 29 | with: 30 | fetch-depth: 1 31 | 32 | - name: Run Claude Code 33 | id: claude 34 | uses: anthropics/claude-code-action@beta 35 | with: 36 | anthropic_api_key: ${{ secrets.ANTHROPIC_API_KEY }} 37 | 38 | # Optional: Specify model (defaults to Claude Sonnet 4, uncomment for Claude Opus 4) 39 | # model: "claude-opus-4-20250514" 40 | 41 | # Optional: Customize the trigger phrase (default: @claude) 42 | # trigger_phrase: "/claude" 43 | 44 | # Optional: Trigger when specific user is assigned to an issue 45 | # assignee_trigger: "claude-bot" 46 | 47 | # Optional: Allow Claude to run specific commands 48 | # allowed_tools: "Bash(npm install),Bash(npm run build),Bash(npm run test:*),Bash(npm run lint:*)" 49 | 50 | # Optional: Add custom instructions for Claude to customize its behavior for your project 51 | # custom_instructions: | 52 | # Follow our coding standards 53 | # Ensure all new code has tests 54 | # Use TypeScript for new files 55 | 56 | # Optional: Custom environment variables for Claude 57 | # claude_env: | 58 | # NODE_ENV: test 59 | 60 | -------------------------------------------------------------------------------- /app/api/crews.py: -------------------------------------------------------------------------------- 1 | from fastapi import APIRouter, Depends, HTTPException 2 | from sqlalchemy.orm 
import Session 3 | from uuid import UUID 4 | from app.core.database import get_db 5 | from app.services import crew as crew_service 6 | from app.schemas import crew as crew_schema 7 | from app.schemas import prompt as prompt_schema 8 | 9 | router = APIRouter() 10 | 11 | @router.post("/", response_model=crew_schema.Crew) 12 | def create_crew(crew: crew_schema.CrewCreate, db: Session = Depends(get_db)): 13 | return crew_service.create_crew(db=db, crew=crew) 14 | 15 | @router.get("/", response_model=list[crew_schema.Crew]) 16 | def read_crews(skip: int = 0, limit: int = 100, db: Session = Depends(get_db)): 17 | crews = crew_service.get_crews(db, skip=skip, limit=limit) 18 | return crews 19 | 20 | @router.get("/{crew_id}", response_model=crew_schema.Crew) 21 | def read_crew(crew_id: UUID, db: Session = Depends(get_db)): 22 | db_crew = crew_service.get_crew(db, crew_id=crew_id) 23 | if db_crew is None: 24 | raise HTTPException(status_code=404, detail="Crew not found") 25 | return db_crew 26 | 27 | @router.put("/{crew_id}", response_model=crew_schema.Crew) 28 | def update_crew(crew_id: UUID, crew: crew_schema.CrewCreate, db: Session = Depends(get_db)): 29 | db_crew = crew_service.get_crew(db, crew_id=crew_id) 30 | if db_crew is None: 31 | raise HTTPException(status_code=404, detail="Crew not found") 32 | return crew_service.update_crew(db=db, crew_id=crew_id, crew=crew) 33 | 34 | @router.delete("/{crew_id}", response_model=crew_schema.Crew) 35 | def delete_crew(crew_id: UUID, db: Session = Depends(get_db)): 36 | db_crew = crew_service.get_crew(db, crew_id=crew_id) 37 | if db_crew is None: 38 | raise HTTPException(status_code=404, detail="Crew not found") 39 | return crew_service.delete_crew(db=db, crew_id=crew_id) 40 | 41 | @router.post("/{crew_id}/execute", response_model=dict) 42 | def execute_prompt(crew_id: UUID, prompt: prompt_schema.PromptCreate, db: Session = Depends(get_db)): 43 | db_crew = crew_service.get_crew(db, crew_id=crew_id) 44 | if db_crew is None: 45 | 
raise HTTPException(status_code=404, detail="Crew not found") 46 | result = crew_service.execute_prompt(db=db, crew_id=crew_id, prompt=prompt) 47 | if isinstance(result, dict) and "error" in result: 48 | raise HTTPException(status_code=400, detail=result["error"]) 49 | return result 50 | -------------------------------------------------------------------------------- /app/services/agent.py: -------------------------------------------------------------------------------- 1 | from sqlalchemy.orm import Session 2 | from uuid import UUID 3 | from app.models.agent import Agent 4 | from app.models.tool import Tool 5 | from app.schemas.agent import AgentCreate 6 | from app.core.tools import create_mcp_tools 7 | 8 | def create_agent(db: Session, agent: AgentCreate): 9 | db_agent = Agent(name=agent.name, crew_id=agent.crew_id, role=agent.role, system_prompt=agent.system_prompt, model=agent.model) 10 | if agent.tools: 11 | for tool_id in agent.tools: 12 | tool = db.query(Tool).filter(Tool.id == tool_id).first() 13 | if tool: 14 | db_agent.tools.append(tool) 15 | db.add(db_agent) 16 | db.commit() 17 | db.refresh(db_agent) 18 | return db_agent 19 | 20 | def get_agent(db: Session, agent_id: UUID): 21 | return db.query(Agent).filter(Agent.id == agent_id).first() 22 | 23 | def get_agents(db: Session, skip: int = 0, limit: int = 100): 24 | return db.query(Agent).offset(skip).limit(limit).all() 25 | 26 | def update_agent(db: Session, agent_id: UUID, agent: AgentCreate): 27 | db_agent = db.query(Agent).filter(Agent.id == agent_id).first() 28 | db_agent.name = agent.name 29 | db_agent.crew_id = agent.crew_id 30 | db_agent.role = agent.role 31 | db_agent.system_prompt = agent.system_prompt 32 | db_agent.model = agent.model 33 | if agent.tools: 34 | db_agent.tools = [] 35 | for tool_id in agent.tools: 36 | tool = db.query(Tool).filter(Tool.id == tool_id).first() 37 | if tool: 38 | db_agent.tools.append(tool) 39 | db.commit() 40 | db.refresh(db_agent) 41 | return db_agent 42 | 43 | def 
delete_agent(db: Session, agent_id: UUID): 44 | db_agent = db.query(Agent).filter(Agent.id == agent_id).first() 45 | db.delete(db_agent) 46 | db.commit() 47 | return db_agent 48 | 49 | def add_tool_to_agent(db: Session, agent_id: UUID, tool_id: UUID): 50 | agent = get_agent(db, agent_id) 51 | tool = db.query(Tool).filter(Tool.id == tool_id).first() 52 | if tool.mcp_server_id: 53 | mcp_server = tool.mcp_server 54 | mcp_tools = create_mcp_tools(mcp_server.url) 55 | for mcp_tool in mcp_tools: 56 | if mcp_tool.name == tool.name: 57 | agent.tools.append(mcp_tool) 58 | else: 59 | agent.tools.append(tool) 60 | db.commit() 61 | db.refresh(agent) 62 | return agent 63 | -------------------------------------------------------------------------------- /test_basic_uuid_model.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python3 2 | """ 3 | Simple test script to verify that a single UUID-based model works correctly. 4 | This script will: 5 | 1. Create a new MCP server 6 | 2. 
Retrieve the created MCP server by UUID 7 | """ 8 | 9 | import os 10 | import random 11 | from dotenv import load_dotenv 12 | from sqlalchemy.orm import Session 13 | from app.core.database import SessionLocal 14 | from app.models.mcp_server import McpServer 15 | import uuid 16 | 17 | # Load environment variables 18 | load_dotenv() 19 | 20 | def test_uuid_model(): 21 | """Test a single UUID-based model by creating and retrieving a record.""" 22 | # Create a session 23 | db = SessionLocal() 24 | 25 | try: 26 | print("Testing basic UUID-based model...") 27 | 28 | # Create a new MCP server with a unique URL to avoid unique constraint violation 29 | print("Creating MCP server...") 30 | unique_port = random.randint(8000, 9999) 31 | unique_url = f"http://localhost:{unique_port}" 32 | mcp_server = McpServer(name="Test MCP Server", url=unique_url) 33 | db.add(mcp_server) 34 | db.flush() # Flush to get the UUID 35 | mcp_server_id = mcp_server.id 36 | print(f"Created MCP server with ID: {mcp_server_id}") 37 | 38 | # Commit the changes 39 | db.commit() 40 | print("Record committed to the database.") 41 | 42 | # Retrieve and verify the record 43 | print("\nVerifying record...") 44 | 45 | # Retrieve the MCP server 46 | retrieved_mcp_server = db.query(McpServer).filter(McpServer.id == mcp_server_id).first() 47 | print(f"Retrieved MCP server: {retrieved_mcp_server.name} (ID: {retrieved_mcp_server.id})") 48 | 49 | # Verify equality of UUIDs 50 | print(f"\nVerifying UUID equivalence...") 51 | print(f"Original UUID: {mcp_server_id}") 52 | print(f"Retrieved UUID: {retrieved_mcp_server.id}") 53 | print(f"UUIDs match: {mcp_server_id == retrieved_mcp_server.id}") 54 | 55 | print("\nVerification completed successfully!") 56 | print("Basic UUID-based model is working correctly.") 57 | 58 | except Exception as e: 59 | db.rollback() 60 | print(f"Error during testing: {e}") 61 | raise 62 | finally: 63 | db.close() 64 | 65 | if __name__ == "__main__": 66 | test_uuid_model() 67 | 
-------------------------------------------------------------------------------- /app/api/mcp_servers.py: -------------------------------------------------------------------------------- 1 | from fastapi import APIRouter, Depends, HTTPException 2 | from sqlalchemy.orm import Session 3 | from uuid import UUID 4 | from app.core.database import get_db 5 | from app.services import mcp_server as mcp_server_service 6 | from app.schemas import mcp_server as mcp_server_schema 7 | 8 | router = APIRouter() 9 | 10 | @router.post("/", response_model=mcp_server_schema.McpServer) 11 | def create_mcp_server(mcp_server: mcp_server_schema.McpServerCreate, db: Session = Depends(get_db)): 12 | return mcp_server_service.create_mcp_server(db=db, mcp_server=mcp_server) 13 | 14 | @router.get("/", response_model=list[mcp_server_schema.McpServer]) 15 | def read_mcp_servers(skip: int = 0, limit: int = 100, db: Session = Depends(get_db)): 16 | mcp_servers = mcp_server_service.get_mcp_servers(db, skip=skip, limit=limit) 17 | return mcp_servers 18 | 19 | @router.get("/{mcp_server_id}", response_model=mcp_server_schema.McpServer) 20 | def read_mcp_server(mcp_server_id: UUID, db: Session = Depends(get_db)): 21 | db_mcp_server = mcp_server_service.get_mcp_server(db, mcp_server_id=mcp_server_id) 22 | if db_mcp_server is None: 23 | raise HTTPException(status_code=404, detail="MCP Server not found") 24 | return db_mcp_server 25 | 26 | @router.put("/{mcp_server_id}", response_model=mcp_server_schema.McpServer) 27 | def update_mcp_server(mcp_server_id: UUID, mcp_server: mcp_server_schema.McpServerCreate, db: Session = Depends(get_db)): 28 | return mcp_server_service.update_mcp_server(db=db, mcp_server_id=mcp_server_id, mcp_server=mcp_server) 29 | 30 | @router.delete("/{mcp_server_id}", response_model=mcp_server_schema.McpServer) 31 | def delete_mcp_server(mcp_server_id: UUID, db: Session = Depends(get_db)): 32 | return mcp_server_service.delete_mcp_server(db=db, mcp_server_id=mcp_server_id) 33 | 34 | 
@router.get("/{mcp_server_id}/tools") 35 | async def get_mcp_server_tools(mcp_server_id: UUID, db: Session = Depends(get_db)): 36 | return await mcp_server_service.get_mcp_server_tools(db=db, mcp_server_id=mcp_server_id) 37 | 38 | @router.get("/{mcp_server_id}/resources") 39 | async def get_mcp_server_resources(mcp_server_id: UUID, db: Session = Depends(get_db)): 40 | return await mcp_server_service.get_mcp_server_resources(db=db, mcp_server_id=mcp_server_id) 41 | 42 | @router.get("/{mcp_server_id}/prompts") 43 | async def get_mcp_server_prompts(mcp_server_id: UUID, db: Session = Depends(get_db)): 44 | return await mcp_server_service.get_mcp_server_prompts(db=db, mcp_server_id=mcp_server_id) 45 | -------------------------------------------------------------------------------- /.windsurfrules: -------------------------------------------------------------------------------- 1 | ## Overview 2 | This project is a boilerplate for developers who want to start building an AI agent cluster faster and more efficiently. 3 | 4 | ## Concept 5 | * Each AI agent cluster can have multiple AI agent crews (AI Crews) 6 | * Each AI crew can have multiple AI agents, led by a supervisor (a default AI agent of an AI crew) 7 | * Each AI agent can call tools via MCP servers integration 8 | 9 | ## How it works 10 | * A supervisor agent will receive input (prompt) from a user via API call, then create a detailed plan with its current capabilities (AI agents underneath and their tools) 11 | * Then request the AI agents to perform tasks via A2A protocol 12 | * Wait for all AI agents to finish their given tasks 13 | * Grab all the results, analyze them, and respond to the user based on the original input prompt.
14 | 15 | ## Core Features 16 | * Create & manage AI crews easily (with a default supervisor agent, add/remove AI agents) 17 | * Create & manage AI agents easily (add/remove MCP tools) 18 | * Create & manage MCP servers easily (supports Streamable HTTP transport only) 19 | * Able to monitor all the activity logs of AI crews and AI agents easily 20 | 21 | ## Technical Requirements 22 | - Programming language: Python 23 | - Store variables in `.env` file 24 | - AI framework: LangGraph (with OpenRouter AI API) 25 | - Supports Agent-to-Agent (A2A) protocol for AI agents to communicate with each other ("Supervisor" architecture) 26 | - Supports Model Context Protocol (MCP) servers integration (for AI agents to make tool calls) 27 | - Expose API for frontend (nextjs) interaction (support streaming requests) 28 | - Database: PostgreSQL 29 | - Cloud storage: Cloudflare R2 bucket 30 | 31 | ## Documentation & References 32 | * https://langchain-ai.github.io/langgraph/concepts/multi_agent/ 33 | * https://github.com/langchain-ai/langgraph 34 | * https://github.com/a2aproject/A2A/tree/main 35 | * https://openrouter.ai/docs/quickstart 36 | * https://www.relari.ai/blog/ai-agent-framework-comparison-langgraph-crewai-openai-swarm 37 | * https://langchain-ai.github.io/langgraph/agents/mcp/ 38 | * https://modelcontextprotocol.io/introduction 39 | * https://github.com/modelcontextprotocol/python-sdk 40 | 41 | ## Instructions 42 | * always store relevant data, application's states, user's states,...
in database (PostgreSQL) 43 | * always create/update `PROJECT_OVERVIEW.md` after every task with: 44 | * project structure (use `tree -L 3 -I 'node_modules|.git|.next'` to generate, then explain the directories briefly) 45 | * features 46 | * dependencies 47 | * api routes 48 | * changelog 49 | * always check `PROJECT_OVERVIEW.md` before starting a new task 50 | * always create/update `.md` after every feature implementation with task overview and todos 51 | * always use `context7` MCP tool to study dependencies/plugins/frameworks' docs carefully while implementing them 52 | * always implement error catching handler 53 | * always implement user-friendly flows 54 | * always follow security best practices 55 | * always write tests for every feature 56 | * always commit your code after finishing fixing a bug or implementing a feature completely (make sure `.env` file is not committed) -------------------------------------------------------------------------------- /.github/workflows/claude-code-review.yml: -------------------------------------------------------------------------------- 1 | name: Claude Code Review 2 | 3 | on: 4 | pull_request: 5 | types: [opened, synchronize] 6 | # Optional: Only run on specific file changes 7 | # paths: 8 | # - "src/**/*.ts" 9 | # - "src/**/*.tsx" 10 | # - "src/**/*.js" 11 | # - "src/**/*.jsx" 12 | 13 | jobs: 14 | claude-review: 15 | # Optional: Filter by PR author 16 | # if: | 17 | # github.event.pull_request.user.login == 'external-contributor' || 18 | # github.event.pull_request.user.login == 'new-developer' || 19 | # github.event.pull_request.author_association == 'FIRST_TIME_CONTRIBUTOR' 20 | 21 | runs-on: ubuntu-latest 22 | permissions: 23 | contents: read 24 | pull-requests: read 25 | issues: read 26 | id-token: write 27 | 28 | steps: 29 | - name: Checkout repository 30 | uses: actions/checkout@v4 31 | with: 32 | fetch-depth: 1 33 | 34 | - name: Run Claude Code Review 35 | id: claude-review 36 | uses: 
anthropics/claude-code-action@beta 37 | with: 38 | anthropic_api_key: ${{ secrets.ANTHROPIC_API_KEY }} 39 | 40 | # Optional: Specify model (defaults to Claude Sonnet 4, uncomment for Claude Opus 4) 41 | # model: "claude-opus-4-20250514" 42 | 43 | # Direct prompt for automated review (no @claude mention needed) 44 | direct_prompt: | 45 | Please review this pull request and provide feedback on: 46 | - Code quality and best practices 47 | - Potential bugs or issues 48 | - Performance considerations 49 | - Security concerns 50 | - Test coverage 51 | 52 | Be constructive and helpful in your feedback. 53 | 54 | # Optional: Customize review based on file types 55 | # direct_prompt: | 56 | # Review this PR focusing on: 57 | # - For TypeScript files: Type safety and proper interface usage 58 | # - For API endpoints: Security, input validation, and error handling 59 | # - For React components: Performance, accessibility, and best practices 60 | # - For tests: Coverage, edge cases, and test quality 61 | 62 | # Optional: Different prompts for different authors 63 | # direct_prompt: | 64 | # ${{ github.event.pull_request.author_association == 'FIRST_TIME_CONTRIBUTOR' && 65 | # 'Welcome! Please review this PR from a first-time contributor. Be encouraging and provide detailed explanations for any suggestions.' || 66 | # 'Please provide a thorough code review focusing on our coding standards and best practices.' 
}} 67 | 68 | # Optional: Add specific tools for running tests or linting 69 | # allowed_tools: "Bash(npm run test),Bash(npm run lint),Bash(npm run typecheck)" 70 | 71 | # Optional: Skip review for certain conditions 72 | # if: | 73 | # !contains(github.event.pull_request.title, '[skip-review]') && 74 | # !contains(github.event.pull_request.title, '[WIP]') 75 | 76 | -------------------------------------------------------------------------------- /tests/test_conversations.py: -------------------------------------------------------------------------------- 1 | from fastapi.testclient import TestClient 2 | from sqlalchemy.orm import Session 3 | 4 | def test_create_conversation(client: TestClient, db_session: Session): 5 | crew_response = client.post("/crews/", json={"name": "Test Crew"}) 6 | crew_id = crew_response.json()["id"] 7 | agent_response = client.post( 8 | "/agents/", 9 | json={ 10 | "name": "Test Agent", 11 | "role": "worker", 12 | "system_prompt": "You are a test agent.", 13 | "crew_id": crew_id, 14 | }, 15 | ) 16 | agent_id = agent_response.json()["id"] 17 | response = client.post( 18 | "/conversations/", 19 | json={ 20 | "user_input": "Hello", 21 | "agent_output": "Hi", 22 | "crew_id": crew_id, 23 | "agent_id": agent_id, 24 | }, 25 | ) 26 | assert response.status_code == 200 27 | data = response.json() 28 | assert data["user_input"] == "Hello" 29 | assert data["agent_output"] == "Hi" 30 | assert data["crew_id"] == crew_id 31 | assert data["agent_id"] == agent_id 32 | assert "id" in data 33 | 34 | def test_read_conversations(client: TestClient, db_session: Session): 35 | crew_response = client.post("/crews/", json={"name": "Test Crew"}) 36 | crew_id = crew_response.json()["id"] 37 | agent_response = client.post( 38 | "/agents/", 39 | json={ 40 | "name": "Test Agent", 41 | "role": "worker", 42 | "system_prompt": "You are a test agent.", 43 | "crew_id": crew_id, 44 | }, 45 | ) 46 | agent_id = agent_response.json()["id"] 47 | client.post( 48 | 
"/conversations/", 49 | json={ 50 | "user_input": "Hello", 51 | "agent_output": "Hi", 52 | "crew_id": crew_id, 53 | "agent_id": agent_id, 54 | }, 55 | ) 56 | client.post( 57 | "/conversations/", 58 | json={ 59 | "user_input": "How are you?", 60 | "agent_output": "I'm fine, thank you.", 61 | "crew_id": crew_id, 62 | "agent_id": agent_id, 63 | }, 64 | ) 65 | response = client.get("/conversations/") 66 | assert response.status_code == 200 67 | data = response.json() 68 | assert len(data) == 2 69 | assert data[0]["user_input"] == "Hello" 70 | assert data[1]["user_input"] == "How are you?" 71 | 72 | def test_read_conversation(client: TestClient, db_session: Session): 73 | crew_response = client.post("/crews/", json={"name": "Test Crew"}) 74 | crew_id = crew_response.json()["id"] 75 | agent_response = client.post( 76 | "/agents/", 77 | json={ 78 | "name": "Test Agent", 79 | "role": "worker", 80 | "system_prompt": "You are a test agent.", 81 | "crew_id": crew_id, 82 | }, 83 | ) 84 | agent_id = agent_response.json()["id"] 85 | response = client.post( 86 | "/conversations/", 87 | json={ 88 | "user_input": "Hello", 89 | "agent_output": "Hi", 90 | "crew_id": crew_id, 91 | "agent_id": agent_id, 92 | }, 93 | ) 94 | conversation_id = response.json()["id"] 95 | response = client.get(f"/conversations/{conversation_id}") 96 | assert response.status_code == 200 97 | data = response.json() 98 | assert data["user_input"] == "Hello" 99 | assert data["id"] == conversation_id 100 | -------------------------------------------------------------------------------- /GEMINI.md: -------------------------------------------------------------------------------- 1 | ## Overview 2 | This project is a boilerplate for developers who want to start building an AI agent cluster faster and more efficient. 
3 | 4 | ## Concept 5 | * Each AI agent cluster can have multiple AI agent crews (AI Crews) 6 | * Each AI crew can have multiple AI agents, led by a supervisor (a default AI agent of an AI crew) 7 | * Each AI agent can call tools via MCP servers integration 8 | 9 | ## How it works 10 | * A supervisor agent will receive input (prompt) from a user via API call, then create a detailed plan with its current capabilities (AI agents underneath and their tools) 11 | * Then request the AI agents to perform tasks via A2A protocol 12 | * Wait for all AI agents to finish their given tasks 13 | * Grab all the results, analyze them, and respond to the user based on the original input prompt. 14 | 15 | ## Core Features 16 | * Create & manage AI crews easily (with a default supervisor agent, add/remove AI agents) 17 | * Create & manage AI agents easily (add/remove MCP tools) 18 | * Create & manage MCP servers easily (supports Streamable HTTP transport only) 19 | * Create & manage conversations with AI crews / AI agents easily 20 | * Able to monitor all the activity logs of AI crews and AI agents easily 21 | * Expose API for frontend (nextjs) interaction (support streaming requests) 22 | * Expose Swagger API Docs for frontend integration instructions 23 | 24 | ## Technical Requirements 25 | - Programming language: Python 26 | - Store variables in `.env` file 27 | - AI framework: LangGraph (with OpenRouter AI API) 28 | - Supports Agent-to-Agent (A2A) protocol for AI agents to communicate with each other ("Supervisor" architecture) 29 | - Supports Model Context Protocol (MCP) servers integration (for AI agents to make tool calls) 30 | - Expose API for frontend (nextjs) interaction (support streaming requests) 31 | - Database: PostgreSQL 32 | - Cloud storage: Cloudflare R2 bucket 33 | 34 | ## Environment Variables (Development Environment / localhost) 35 | 36 | ``` 37 | DATABASE_URL="" 38 | OPENROUTER_API_KEY="" 39 | ...
40 | ``` 41 | 42 | ## Documentation & References 43 | * https://langchain-ai.github.io/langgraph/concepts/multi_agent/ 44 | * https://github.com/langchain-ai/langgraph 45 | * https://github.com/a2aproject/A2A/tree/main 46 | * https://openrouter.ai/docs/quickstart 47 | * https://www.relari.ai/blog/ai-agent-framework-comparison-langgraph-crewai-openai-swarm 48 | * https://langchain-ai.github.io/langgraph/agents/mcp/ 49 | * https://modelcontextprotocol.io/introduction 50 | * https://github.com/modelcontextprotocol/python-sdk 51 | 52 | ## Instructions 53 | * always store relevant data, application's states, user's states,... in database (PostgreSQL) 54 | * always create/update `PROJECT_OVERVIEW.md` after every task with: 55 | * project structure (use `tree -L 3 -I 'node_modules|.git|.next'` to generate, then explain the directories briefly) 56 | * features 57 | * dependencies 58 | * api routes 59 | * changelog 60 | * always check `PROJECT_OVERVIEW.md` before starting a new task 61 | * always create/update `.md` after every feature implementation with task overview and todos 62 | * always use `context7` MCP tool to study dependencies/plugins/frameworks' docs carefully while implementing them 63 | * always implement error catching handler 64 | * always implement user-friendly flows 65 | * always follow security best practices 66 | * always commit your code after finishing fixing a bug or implementing a feature completely (DO NOT commit `.env` file) 67 | * always run the development environment in another process and export logs to `./server.log` (view this file to check the logs and debug) -------------------------------------------------------------------------------- /app/services/mcp_server.py: -------------------------------------------------------------------------------- 1 | from sqlalchemy.orm import Session 2 | from uuid import UUID 3 | from app.models.mcp_server import McpServer 4 | from app.schemas.mcp_server import McpServerCreate 5 | from mcp import
ClientSession 6 | from mcp.client.streamable_http import streamablehttp_client 7 | from contextlib import AsyncExitStack 8 | from mcp.shared.exceptions import McpError 9 | 10 | async def get_mcp_server_tools(db: Session, mcp_server_id: UUID): 11 | mcp_server = get_mcp_server(db, mcp_server_id) 12 | if not mcp_server: 13 | return {"error": "MCP Server not found"} 14 | 15 | async with AsyncExitStack() as stack: 16 | read, write, get_session_id = await stack.enter_async_context( 17 | streamablehttp_client(url=mcp_server.url) 18 | ) 19 | async with ClientSession(read, write) as session: 20 | await session.initialize() 21 | try: 22 | tools = await session.list_tools() 23 | return tools 24 | except McpError as e: 25 | if "Method not found" in str(e): 26 | return [] 27 | raise e 28 | 29 | async def get_mcp_server_resources(db: Session, mcp_server_id: UUID): 30 | mcp_server = get_mcp_server(db, mcp_server_id) 31 | if not mcp_server: 32 | return {"error": "MCP Server not found"} 33 | 34 | async with AsyncExitStack() as stack: 35 | read, write, get_session_id = await stack.enter_async_context( 36 | streamablehttp_client(url=mcp_server.url) 37 | ) 38 | async with ClientSession(read, write) as session: 39 | await session.initialize() 40 | try: 41 | resources = await session.list_resources() 42 | return resources 43 | except McpError as e: 44 | if "Method not found" in str(e): 45 | return [] 46 | raise e 47 | 48 | async def get_mcp_server_prompts(db: Session, mcp_server_id: UUID): 49 | mcp_server = get_mcp_server(db, mcp_server_id) 50 | if not mcp_server: 51 | return {"error": "MCP Server not found"} 52 | 53 | async with AsyncExitStack() as stack: 54 | read, write, get_session_id = await stack.enter_async_context( 55 | streamablehttp_client(url=mcp_server.url) 56 | ) 57 | async with ClientSession(read, write) as session: 58 | await session.initialize() 59 | try: 60 | prompts = await session.list_prompts() 61 | return prompts 62 | except McpError as e: 63 | if "Method not found" 
in str(e): 64 | return [] 65 | raise e 66 | 67 | def create_mcp_server(db: Session, mcp_server: McpServerCreate): 68 | db_mcp_server = McpServer(name=mcp_server.name, url=mcp_server.url) 69 | db.add(db_mcp_server) 70 | db.commit() 71 | db.refresh(db_mcp_server) 72 | return db_mcp_server 73 | 74 | def get_mcp_server(db: Session, mcp_server_id: UUID): 75 | return db.query(McpServer).filter(McpServer.id == mcp_server_id).first() 76 | 77 | def get_mcp_servers(db: Session, skip: int = 0, limit: int = 100): 78 | return db.query(McpServer).offset(skip).limit(limit).all() 79 | 80 | def update_mcp_server(db: Session, mcp_server_id: UUID, mcp_server: McpServerCreate): 81 | db_mcp_server = db.query(McpServer).filter(McpServer.id == mcp_server_id).first() 82 | db_mcp_server.name = mcp_server.name 83 | db_mcp_server.url = mcp_server.url 84 | db.commit() 85 | db.refresh(db_mcp_server) 86 | return db_mcp_server 87 | 88 | def delete_mcp_server(db: Session, mcp_server_id: UUID): 89 | db_mcp_server = db.query(McpServer).filter(McpServer.id == mcp_server_id).first() 90 | db.delete(db_mcp_server) 91 | db.commit() 92 | return db_mcp_server 93 | -------------------------------------------------------------------------------- /tests/test_mcp_servers.py: -------------------------------------------------------------------------------- 1 | from fastapi.testclient import TestClient 2 | from sqlalchemy.orm import Session 3 | from unittest.mock import patch 4 | 5 | def test_create_mcp_server(client: TestClient, db_session: Session): 6 | response = client.post( 7 | "/mcp_servers/", 8 | json={"name": "Test MCP Server", "url": "http://localhost:8001"}, 9 | ) 10 | assert response.status_code == 200 11 | data = response.json() 12 | assert data["name"] == "Test MCP Server" 13 | assert data["url"] == "http://localhost:8001" 14 | assert "id" in data 15 | 16 | def test_read_mcp_servers(client: TestClient, db_session: Session): 17 | client.post( 18 | "/mcp_servers/", 19 | json={"name": "Test MCP Server 
1", "url": "http://localhost:8001"}, 20 | ) 21 | client.post( 22 | "/mcp_servers/", 23 | json={"name": "Test MCP Server 2", "url": "http://localhost:8002"}, 24 | ) 25 | response = client.get("/mcp_servers/") 26 | assert response.status_code == 200 27 | data = response.json() 28 | assert len(data) == 2 29 | assert data[0]["name"] == "Test MCP Server 1" 30 | assert data[1]["name"] == "Test MCP Server 2" 31 | 32 | def test_read_mcp_server(client: TestClient, db_session: Session): 33 | response = client.post( 34 | "/mcp_servers/", 35 | json={"name": "Test MCP Server", "url": "http://localhost:8001"}, 36 | ) 37 | mcp_server_id = response.json()["id"] 38 | response = client.get(f"/mcp_servers/{mcp_server_id}") 39 | assert response.status_code == 200 40 | data = response.json() 41 | assert data["name"] == "Test MCP Server" 42 | assert data["id"] == mcp_server_id 43 | 44 | def test_update_mcp_server(client: TestClient, db_session: Session): 45 | response = client.post( 46 | "/mcp_servers/", 47 | json={"name": "Test MCP Server", "url": "http://localhost:8001"}, 48 | ) 49 | mcp_server_id = response.json()["id"] 50 | response = client.put( 51 | f"/mcp_servers/{mcp_server_id}", 52 | json={"name": "Updated MCP Server", "url": "http://localhost:8002"}, 53 | ) 54 | assert response.status_code == 200 55 | data = response.json() 56 | assert data["name"] == "Updated MCP Server" 57 | assert data["url"] == "http://localhost:8002" 58 | assert data["id"] == mcp_server_id 59 | 60 | def test_delete_mcp_server(client: TestClient, db_session: Session): 61 | response = client.post( 62 | "/mcp_servers/", 63 | json={"name": "Test MCP Server", "url": "http://localhost:8001"}, 64 | ) 65 | mcp_server_id = response.json()["id"] 66 | response = client.delete(f"/mcp_servers/{mcp_server_id}") 67 | assert response.status_code == 200 68 | data = response.json() 69 | assert data["name"] == "Test MCP Server" 70 | assert data["id"] == mcp_server_id 71 | response = 
client.get(f"/mcp_servers/{mcp_server_id}") 72 | assert response.status_code == 404 73 | 74 | @patch("app.services.mcp_server.streamablehttp_client") 75 | def test_get_mcp_server_tools(mock_streamablehttp_client, client: TestClient, db_session: Session): 76 | # Mock the streamablehttp_client to avoid actual network calls 77 | mock_streamablehttp_client.return_value.__aenter__.return_value = ( 78 | None, 79 | None, 80 | None, 81 | ) 82 | response = client.post( 83 | "/mcp_servers/", 84 | json={"name": "Test MCP Server", "url": "http://localhost:8001"}, 85 | ) 86 | mcp_server_id = response.json()["id"] 87 | with patch("app.services.mcp_server.ClientSession") as mock_clientsession: 88 | mock_session = mock_clientsession.return_value.__aenter__.return_value 89 | mock_session.list_tools.return_value = [{"name": "test_tool"}] 90 | response = client.get(f"/mcp_servers/{mcp_server_id}/tools") 91 | assert response.status_code == 200 92 | data = response.json() 93 | assert data == [{"name": "test_tool"}] 94 | -------------------------------------------------------------------------------- /docs/UUID_MIGRATION.md: -------------------------------------------------------------------------------- 1 | # UUID Migration Documentation 2 | 3 | ## Overview 4 | 5 | This document provides details on the database migration from integer-based primary keys to UUID-based primary keys in the A2A LangGraph boilerplate project. The migration was performed to enhance scalability, security, and compatibility with distributed systems. 6 | 7 | ## Changes Made 8 | 9 | ### Database Schema 10 | 11 | 1. **Primary Key Type Change**: 12 | - All primary keys in database models changed from incremental integers to UUIDs 13 | - Using PostgreSQL's native UUID type support through SQLAlchemy's GUID type 14 | 15 | 2. **Foreign Key Type Change**: 16 | - All foreign keys updated to use UUIDs consistently with related models 17 | - Association tables (e.g., agent_tool) updated to use UUID columns 18 | 19 | 3. 
**Tables Affected**: 20 | - Crew 21 | - Agent 22 | - Conversation 23 | - McpServer 24 | - Tool 25 | - agent_tool (association table) 26 | 27 | ### Model Relationships 28 | 29 | To resolve circular dependency issues that occurred during the migration, the relationship definitions were moved to a late-binding approach: 30 | 31 | 1. **Late-Binding Relationships**: 32 | - Removed relationship definitions from individual model classes 33 | - Created a dedicated `setup_relationships.py` module that defines all relationships after all models are loaded 34 | - Setup process is automatically triggered during database initialization 35 | 36 | 2. **Configuration**: 37 | - Updated `app/core/database.py` to call the relationship setup function after engine initialization 38 | - Ensured consistent model loading order 39 | 40 | ### API and Service Layer 41 | 42 | 1. **Type Annotations**: 43 | - Updated all function signatures to use `UUID` type instead of `int` for ID parameters 44 | - Added proper imports: `from uuid import UUID` 45 | 46 | 2. **Error Handling**: 47 | - Enhanced error handling for UUID operations 48 | - Added validation for existence of resources before operations 49 | - Implemented proper exception handling with HTTP status codes 50 | 51 | 3. **Frontend/API Validation**: 52 | - Updated request/response models to use UUIDs for ID fields 53 | - FastAPI's built-in UUID validation leveraged for path parameters 54 | 55 | ## Testing 56 | 57 | 1. **Unit Tests**: 58 | - Comprehensive test created (`test_uuid_models.py`) to verify UUID functionality 59 | - Tests validate both CRUD operations and relationship traversal 60 | 61 | 2. **Data Integrity**: 62 | - Tested creation, retrieval, updating, and deletion of records using UUIDs 63 | - Verified relationship integrity between models 64 | 65 | ## Security Considerations 66 | 67 | 1. 
**Benefits of UUID**: 68 | - Eliminates sequential ID prediction and enumeration attacks 69 | - Improves security by using non-sequential, globally unique identifiers 70 | 71 | 2. **API Security**: 72 | - Added proper validation and error handling for UUID parameters 73 | - Prevents exposing system internals through error messages 74 | 75 | ## Implementation Notes 76 | 77 | ### Usage in Code 78 | 79 | When working with UUIDs in the codebase: 80 | 81 | 1. **Importing UUID Type**: 82 | ```python 83 | from uuid import UUID 84 | ``` 85 | 86 | 2. **Function Parameter Types**: 87 | ```python 88 | def get_crew(db: Session, crew_id: UUID): 89 | # function body 90 | ``` 91 | 92 | 3. **FastAPI Path Parameters**: 93 | ```python 94 | @router.get("/{crew_id}", response_model=crew_schema.Crew) 95 | def read_crew(crew_id: UUID, db: Session = Depends(get_db)): 96 | # function body 97 | ``` 98 | 99 | 4. **Creating UUID Values**: 100 | Model UUIDs are automatically generated using the `generate_uuid` function in `base.py` 101 | 102 | ### Best Practices 103 | 104 | 1. Always use the UUID type for ID parameters in function signatures 105 | 2. Provide proper error handling for cases where a valid UUID is provided but the resource doesn't exist 106 | 3. Remember that UUIDs are case-insensitive in PostgreSQL 107 | 4. 
UUID values should be handled as strings in API requests and responses 108 | -------------------------------------------------------------------------------- /tests/test_tools.py: -------------------------------------------------------------------------------- 1 | from fastapi.testclient import TestClient 2 | from sqlalchemy.orm import Session 3 | 4 | def test_create_tool(client: TestClient, db_session: Session): 5 | mcp_server_response = client.post( 6 | "/mcp_servers/", 7 | json={"name": "Test MCP Server", "url": "http://localhost:8001"}, 8 | ) 9 | mcp_server_id = mcp_server_response.json()["id"] 10 | response = client.post( 11 | "/tools/", 12 | json={ 13 | "name": "Test Tool", 14 | "description": "A tool for testing.", 15 | "mcp_server_id": mcp_server_id, 16 | }, 17 | ) 18 | assert response.status_code == 200 19 | data = response.json() 20 | assert data["name"] == "Test Tool" 21 | assert data["description"] == "A tool for testing." 22 | assert "id" in data 23 | 24 | def test_read_tools(client: TestClient, db_session: Session): 25 | mcp_server_response = client.post( 26 | "/mcp_servers/", 27 | json={"name": "Test MCP Server", "url": "http://localhost:8001"}, 28 | ) 29 | mcp_server_id = mcp_server_response.json()["id"] 30 | client.post( 31 | "/tools/", 32 | json={ 33 | "name": "Test Tool 1", 34 | "description": "A tool for testing.", 35 | "mcp_server_id": mcp_server_id, 36 | }, 37 | ) 38 | client.post( 39 | "/tools/", 40 | json={ 41 | "name": "Test Tool 2", 42 | "description": "A tool for testing.", 43 | "mcp_server_id": mcp_server_id, 44 | }, 45 | ) 46 | response = client.get("/tools/") 47 | assert response.status_code == 200 48 | data = response.json() 49 | assert len(data) == 2 50 | assert data[0]["name"] == "Test Tool 1" 51 | assert data[1]["name"] == "Test Tool 2" 52 | 53 | def test_read_tool(client: TestClient, db_session: Session): 54 | mcp_server_response = client.post( 55 | "/mcp_servers/", 56 | json={"name": "Test MCP Server", "url": 
"http://localhost:8001"}, 57 | ) 58 | mcp_server_id = mcp_server_response.json()["id"] 59 | response = client.post( 60 | "/tools/", 61 | json={ 62 | "name": "Test Tool", 63 | "description": "A tool for testing.", 64 | "mcp_server_id": mcp_server_id, 65 | }, 66 | ) 67 | tool_id = response.json()["id"] 68 | response = client.get(f"/tools/{tool_id}") 69 | assert response.status_code == 200 70 | data = response.json() 71 | assert data["name"] == "Test Tool" 72 | assert data["id"] == tool_id 73 | 74 | def test_update_tool(client: TestClient, db_session: Session): 75 | mcp_server_response = client.post( 76 | "/mcp_servers/", 77 | json={"name": "Test MCP Server", "url": "http://localhost:8001"}, 78 | ) 79 | mcp_server_id = mcp_server_response.json()["id"] 80 | response = client.post( 81 | "/tools/", 82 | json={ 83 | "name": "Test Tool", 84 | "description": "A tool for testing.", 85 | "mcp_server_id": mcp_server_id, 86 | }, 87 | ) 88 | tool_id = response.json()["id"] 89 | response = client.put( 90 | f"/tools/{tool_id}", 91 | json={ 92 | "name": "Updated Tool", 93 | "description": "An updated tool for testing.", 94 | "mcp_server_id": mcp_server_id, 95 | }, 96 | ) 97 | assert response.status_code == 200 98 | data = response.json() 99 | assert data["name"] == "Updated Tool" 100 | assert data["description"] == "An updated tool for testing." 
101 | assert data["id"] == tool_id 102 | 103 | def test_delete_tool(client: TestClient, db_session: Session): 104 | mcp_server_response = client.post( 105 | "/mcp_servers/", 106 | json={"name": "Test MCP Server", "url": "http://localhost:8001"}, 107 | ) 108 | mcp_server_id = mcp_server_response.json()["id"] 109 | response = client.post( 110 | "/tools/", 111 | json={ 112 | "name": "Test Tool", 113 | "description": "A tool for testing.", 114 | "mcp_server_id": mcp_server_id, 115 | }, 116 | ) 117 | tool_id = response.json()["id"] 118 | response = client.delete(f"/tools/{tool_id}") 119 | assert response.status_code == 200 120 | data = response.json() 121 | assert data["name"] == "Test Tool" 122 | assert data["id"] == tool_id 123 | response = client.get(f"/tools/{tool_id}") 124 | assert response.status_code == 404 125 | -------------------------------------------------------------------------------- /CLAUDE.md: -------------------------------------------------------------------------------- 1 | ## Overview 2 | This project is a boilerplate for developers who want to start building an AI agent cluster faster and more efficiently. 3 | 4 | ## Concept 5 | * Each AI agent cluster can have multiple AI agent crews (AI Crews) 6 | * Each AI crew can have multiple AI agents, led by a supervisor (a default AI agent of an AI crew) 7 | * Each AI agent can call tools via MCP servers integration 8 | 9 | ## How it works 10 | * A supervisor agent will receive input (prompt) from a user via API call, then create a detailed plan with its current capabilities (AI agents underneath and their tools) 11 | * Then request the AI agents to perform tasks via A2A protocol 12 | * Wait for all AI agents to finish their given tasks 13 | * Grab all the results, analyze and decide to assign more tasks to AI agents, or finish the plan 14 | * Synthesize the results and respond to the user based on the original input prompt.
15 | 16 | ### Example workflow: 17 | * Case 1: **Simple Direct Response** - User asks "hello" and supervisor decides to answer directly with a simple response. This requires no agent delegation and completes in a single workflow step. 18 | * Case 2: **Multi-Agent Collaboration with Termination Control** - User asks for travel advice about Nha Trang beach (Vietnam): 19 | 1. Supervisor receives query and creates a task plan with clear termination conditions 20 | 2. Supervisor delegates to agent 1 (connected to Search API MCP server) to find top attractions 21 | 3. Supervisor delegates to agent 2 (connected to Search API MCP server) to research local cuisine 22 | 4. Each agent responds with its findings in a single message back to supervisor 23 | 5. Supervisor synthesizes all information into a final response 24 | 6. Workflow terminates after supervisor's final response (enforced by message depth limit) 25 | 26 | ## Core Features 27 | * Create & manage AI crews easily (with a default supervisor agent, add/remove AI agents) 28 | * Create & manage AI agents easily (add/remove MCP tools) 29 | * Create & manage MCP servers easily (supports Streamable HTTP transport only) 30 | * Create & manage conversations with AI crews / AI agents easily 31 | * Able to monitor all the activity logs of AI crews and AI agents easily 32 | * Expose API for frontend (nextjs) interaction (support streaming request) 33 | * Expose Swagger API Docs for frontend integration instructions 34 | 35 | ## Technical Requirements 36 | - Programming language: Python 37 | - Store variables in `.env` file 38 | - AI framework: LangGraph (with OpenRouter AI API) 39 | - Supports Agent-to-Agent (A2A) protocol for AI agents to communicate with each others ("Supervisor" architecture) 40 | - Supports Model Context Protocol (MCP) servers integration (for AI agents to use tool call) 41 | - Expose API for frontend (nextjs) interaction (support streaming request) 42 | - Database: PostgreSQL 43 | - Cloud storage: 
Cloudflare R2 bucket 44 | 45 | ## Environment Variables (Development Environment / localhost) 46 | 47 | ``` 48 | DATABASE_URL="" 49 | OPENROUTER_API_KEY="" 50 | ... 51 | ``` 52 | 53 | ## Documentation & References 54 | * https://langchain-ai.github.io/langgraph/concepts/multi_agent/ 55 | * https://github.com/langchain-ai/langgraph 56 | * https://github.com/a2aproject/A2A/tree/main 57 | * https://openrouter.ai/docs/quickstart 58 | * https://www.relari.ai/blog/ai-agent-framework-comparison-langgraph-crewai-openai-swarm 59 | * https://langchain-ai.github.io/langgraph/agents/mcp/ 60 | * https://modelcontextprotocol.io/introduction 61 | * https://github.com/modelcontextprotocol/python-sdk 62 | 63 | ## Instructions 64 | * always run python processes in a virtual environment (venv) 65 | * always store relevant data, application's states, user's states,... in database (PostgreSQL) 66 | * always create/update `PROJECT_OVERVIEW.md` after every implementation with: 67 | * project structure (use `tree -L 3 -I 'node_modules|.git|.next'` to generate, then explain the directories briefly) 68 | * features 69 | * dependencies 70 | * api routes 71 | * changelog 72 | * always check `PROJECT_OVERVIEW.md` before starting a new task 73 | * always create/update `_TASK.md` after every feature implementation with task overview and todos 74 | * always use `context7` MCP tool to study dependencies/plugins/frameworks' docs for the latest updates 75 | * always implement error catching handlers 76 | * always implement user-friendly flows 77 | * always make sure tests pass 78 | * always follow security best practices 79 | * always commit your code after finishing fixing a bug or implementing a feature completely (DO NOT commit `.env` file or any sensitive data) 80 | * always run the development environment in another process and export logs to `./server.log` (view this file to check the logs and debug) --------------------------------------------------------------------------------
/tests/test_agents.py: -------------------------------------------------------------------------------- 1 | from fastapi.testclient import TestClient 2 | from sqlalchemy.orm import Session 3 | 4 | def test_create_agent(client: TestClient, db_session: Session): 5 | crew_response = client.post("/crews/", json={"name": "Test Crew"}) 6 | crew_id = crew_response.json()["id"] 7 | response = client.post( 8 | "/agents/", 9 | json={ 10 | "name": "Test Agent", 11 | "role": "worker", 12 | "system_prompt": "You are a test agent.", 13 | "crew_id": crew_id, 14 | }, 15 | ) 16 | assert response.status_code == 200 17 | data = response.json() 18 | assert data["name"] == "Test Agent" 19 | assert data["role"] == "worker" 20 | assert data["system_prompt"] == "You are a test agent." 21 | assert data["crew_id"] == crew_id 22 | assert "id" in data 23 | 24 | def test_read_agents(client: TestClient, db_session: Session): 25 | crew_response = client.post("/crews/", json={"name": "Test Crew"}) 26 | crew_id = crew_response.json()["id"] 27 | client.post( 28 | "/agents/", 29 | json={ 30 | "name": "Test Agent 1", 31 | "role": "worker", 32 | "system_prompt": "You are a test agent.", 33 | "crew_id": crew_id, 34 | }, 35 | ) 36 | client.post( 37 | "/agents/", 38 | json={ 39 | "name": "Test Agent 2", 40 | "role": "worker", 41 | "system_prompt": "You are a test agent.", 42 | "crew_id": crew_id, 43 | }, 44 | ) 45 | response = client.get("/agents/") 46 | assert response.status_code == 200 47 | data = response.json() 48 | # Should have 3 agents: supervisor (auto-created) + 2 test agents 49 | assert len(data) == 3 50 | # Find the test agents (excluding supervisor) 51 | test_agents = [agent for agent in data if agent["role"] != "supervisor"] 52 | assert len(test_agents) == 2 53 | assert test_agents[0]["name"] == "Test Agent 1" 54 | assert test_agents[1]["name"] == "Test Agent 2" 55 | 56 | def test_read_agent(client: TestClient, db_session: Session): 57 | crew_response = client.post("/crews/", json={"name": 
"Test Crew"}) 58 | crew_id = crew_response.json()["id"] 59 | response = client.post( 60 | "/agents/", 61 | json={ 62 | "name": "Test Agent", 63 | "role": "worker", 64 | "system_prompt": "You are a test agent.", 65 | "crew_id": crew_id, 66 | }, 67 | ) 68 | agent_id = response.json()["id"] 69 | response = client.get(f"/agents/{agent_id}") 70 | assert response.status_code == 200 71 | data = response.json() 72 | assert data["name"] == "Test Agent" 73 | assert data["id"] == agent_id 74 | 75 | def test_update_agent(client: TestClient, db_session: Session): 76 | crew_response = client.post("/crews/", json={"name": "Test Crew"}) 77 | crew_id = crew_response.json()["id"] 78 | response = client.post( 79 | "/agents/", 80 | json={ 81 | "name": "Test Agent", 82 | "role": "worker", 83 | "system_prompt": "You are a test agent.", 84 | "crew_id": crew_id, 85 | }, 86 | ) 87 | agent_id = response.json()["id"] 88 | response = client.put( 89 | f"/agents/{agent_id}", 90 | json={ 91 | "name": "Updated Agent", 92 | "role": "supervisor", 93 | "system_prompt": "You are an updated test agent.", 94 | "crew_id": crew_id, 95 | }, 96 | ) 97 | assert response.status_code == 200 98 | data = response.json() 99 | assert data["name"] == "Updated Agent" 100 | assert data["role"] == "supervisor" 101 | assert data["system_prompt"] == "You are an updated test agent." 
102 | assert data["id"] == agent_id 103 | 104 | def test_delete_agent(client: TestClient, db_session: Session): 105 | crew_response = client.post("/crews/", json={"name": "Test Crew"}) 106 | crew_id = crew_response.json()["id"] 107 | response = client.post( 108 | "/agents/", 109 | json={ 110 | "name": "Test Agent", 111 | "role": "worker", 112 | "system_prompt": "You are a test agent.", 113 | "crew_id": crew_id, 114 | }, 115 | ) 116 | agent_id = response.json()["id"] 117 | response = client.delete(f"/agents/{agent_id}") 118 | assert response.status_code == 200 119 | data = response.json() 120 | assert data["name"] == "Test Agent" 121 | assert data["id"] == agent_id 122 | response = client.get(f"/agents/{agent_id}") 123 | assert response.status_code == 404 124 | -------------------------------------------------------------------------------- /app/core/agents.py: -------------------------------------------------------------------------------- 1 | from langchain_openai import ChatOpenAI 2 | from langchain.agents import AgentExecutor, create_tool_calling_agent 3 | from langchain_core.messages import BaseMessage, HumanMessage, AIMessage 4 | from langchain.output_parsers.openai_tools import JsonOutputToolsParser 5 | from langchain_core.prompts import ChatPromptTemplate, MessagesPlaceholder 6 | from langchain_core.runnables import RunnableLambda 7 | from typing import List, Dict, Any 8 | 9 | def create_agent(llm: ChatOpenAI, tools: list, system_prompt: str, name: str): 10 | """Creates a named agent executor that returns messages.""" 11 | prompt = ChatPromptTemplate.from_messages( 12 | [ 13 | ("system", system_prompt), 14 | MessagesPlaceholder(variable_name="messages"), 15 | MessagesPlaceholder(variable_name="agent_scratchpad"), 16 | ] 17 | ) 18 | agent = create_tool_calling_agent(llm, tools, prompt) 19 | executor = AgentExecutor(agent=agent, tools=tools, handle_parsing_errors=True) 20 | 21 | def _agent_invoker(state): 22 | """Invokes the agent executor and formats the 
output as a message.""" 23 | result = executor.invoke(state) 24 | 25 | # The output from an agent executor is a dict with an 'output' key. 26 | # We convert this to an AIMessage with the correct name. 27 | output_message = AIMessage( 28 | content=str(result["output"]), 29 | name=name, 30 | # Pass tool calls if they exist 31 | tool_calls=result.get("tool_calls", []) 32 | ) 33 | 34 | return {"messages": [output_message]} 35 | 36 | return RunnableLambda(_agent_invoker) 37 | 38 | def create_supervisor(llm: ChatOpenAI, agents: List[Dict[str, Any]], system_prompt: str): 39 | options = [agent["name"] for agent in agents] + ["FINISH"] 40 | 41 | function_def = { 42 | "name": "route", 43 | "description": "Select the next agent to act. Or FINISH if the task is complete.", 44 | "parameters": { 45 | "title": "routeSchema", 46 | "type": "object", 47 | "properties": { 48 | "next": { 49 | "title": "Next", 50 | "anyOf": [ 51 | {"enum": options}, 52 | ], 53 | }, 54 | "reasoning": { 55 | "title": "Reasoning", 56 | "type": "string", 57 | "description": "Your reasoning and response to the user" 58 | } 59 | }, 60 | "required": ["next", "reasoning"], 61 | }, 62 | } 63 | 64 | prompt = ChatPromptTemplate.from_messages( 65 | [ 66 | ("system", system_prompt), 67 | MessagesPlaceholder(variable_name="messages"), 68 | ( 69 | "system", 70 | "Given the conversation above, provide your reasoning/response and then decide who should act next." 71 | " Or should we FINISH? 
Select one of: {options}", 72 | ), 73 | ] 74 | ).partial(options=str(options), agent_names=", ".join([agent["name"] for agent in agents])) 75 | 76 | def _supervisor_invoker(state): 77 | """Invokes the supervisor and formats the output as both a message and routing decision.""" 78 | # Get the structured output from the LLM 79 | result = (prompt | llm.with_structured_output(function_def)).invoke(state) 80 | 81 | # Add the supervisor's reasoning as an AIMessage to the conversation 82 | supervisor_message = AIMessage( 83 | content=result.get("reasoning", "Processing request..."), 84 | name="supervisor" 85 | ) 86 | 87 | # Return both the message and the routing decision 88 | return { 89 | "messages": [supervisor_message], 90 | "next": result["next"] 91 | } 92 | 93 | return RunnableLambda(_supervisor_invoker) 94 | 95 | def create_final_response_chain(llm: ChatOpenAI): 96 | """Creates a chain to generate the final response from the conversation history.""" 97 | prompt = ChatPromptTemplate.from_messages([ 98 | ("system", "You are a helpful AI assistant. Synthesize the conversation history and provide a final, comprehensive answer to the user's initial request. The user's initial request was: {initial_request}"), 99 | MessagesPlaceholder(variable_name="messages") 100 | ]) 101 | 102 | chain = ( 103 | { 104 | "messages": lambda x: x["messages"], 105 | "initial_request": lambda x: x["messages"][0].content 106 | } 107 | | prompt 108 | | llm 109 | ) 110 | return chain 111 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | # A2A LangGraph Boilerplate 2 | 3 | This project is a boilerplate for developers who want to start building an AI agent cluster with A2A and MCP servers integration faster and more efficiently. 
4 | 5 | - [Project Overview](PROJECT_OVERVIEW.md) 6 | - [Agent Instructions](AGENT_INSTRUCTIONS.md) 7 | 8 | ## Features 9 | 10 | * Create & manage AI crews 11 | * Create & manage AI agents 12 | * Create & manage MCP servers 13 | * Create & manage Tools 14 | * Integrate MCP servers for tool usage 15 | * Monitor activity logs 16 | * Create & manage conversations with AI crews / AI agents 17 | * Expose Swagger API Docs for frontend integration instructions 18 | 19 | ## Concept 20 | * Each AI agent cluster can have multiple AI agent crews (AI Crews) 21 | * Each AI crew can have multiple AI agents, led by a supervisor (a default AI agent of an AI crew) 22 | * Each AI agent can call tools via MCP servers integration 23 | 24 | ## How it works 25 | * A supervisor agent will receive input (prompt) from a user via API call, then create a detailed plan with its current capabilities (AI agents underneath and their tools) 26 | * Then request the AI agents to perform tasks via A2A protocol 27 | * Wait for all AI agents to finish their given tasks 28 | * Grab all the results, analyze and decide to assign more tasks to AI agents, or finish the plan 29 | * Synthesize the results and respond to the user based on the original input prompt. 30 | 31 | ### Example workflow: 32 | * Case 1: **Simple Direct Response** - User asks "hello" and supervisor decides to answer directly with a simple response. This requires no agent delegation and completes in a single workflow step. 33 | * Case 2: **Multi-Agent Collaboration with Termination Control** - User asks for travel advice about Nha Trang beach (Vietnam): 34 | 1. Supervisor receives query and creates a task plan with clear termination conditions 35 | 2. Supervisor delegates to agent 1 (connected to Search API MCP server) to find top attractions 36 | 3. Supervisor delegates to agent 2 (connected to Search API MCP server) to research local cuisine 37 | 4. Each agent responds with its findings in a single message back to supervisor 38 | 5. 
Supervisor synthesizes all information into a final response 39 | 6. Workflow terminates after supervisor's final response (enforced by message depth limit) 40 | 41 | ### Diagram of example flow 42 | 43 | 44 | 45 | ## Getting Started 46 | 47 | ### Prerequisites 48 | 49 | * Python 3.11+ 50 | * PostgreSQL 51 | 52 | ### Installation 53 | 54 | 1. **Clone the repository:** 55 | ```bash 56 | git clone https://github.com/your-username/a2a-langgraph-boilerplate.git 57 | cd a2a-langgraph-boilerplate 58 | ``` 59 | 60 | 2. **Create and activate a virtual environment:** 61 | ```bash 62 | python3 -m venv venv 63 | source venv/bin/activate 64 | ``` 65 | 66 | 3. **Install the dependencies:** 67 | ```bash 68 | pip install -r requirements.txt 69 | ``` 70 | 71 | 4. **Set up the database:** 72 | * Ensure your PostgreSQL server is running. 73 | * Create a new database named `a2a-langgraph-boilerplate`. You can use the following command: 74 | ```bash 75 | createdb "a2a-langgraph-boilerplate" 76 | ``` 77 | * Copy the `.env.example` file to `.env`: 78 | ```bash 79 | cp .env.example .env 80 | ``` 81 | * Update the `DATABASE_URL` in the `.env` file with your PostgreSQL credentials. 82 | 83 | 5. **Create the database tables:** 84 | ```bash 85 | python create_tables.py 86 | ``` 87 | 88 | ### Running the Application 89 | 90 | To start the application, run the following command: 91 | 92 | ```bash 93 | uvicorn app.main:app --reload 94 | ``` 95 | 96 | The application will be available at `http://127.0.0.1:8000`. 
97 | 98 | ### Running the tests 99 | 100 | To run the tests, run the following command: 101 | 102 | ```bash 103 | source venv/bin/activate 104 | which python 105 | python -m pytest tests/ -v 106 | ``` 107 | 108 | ### Run specific test 109 | 110 | ```bash 111 | # To run all tests in the tests directory: 112 | python -m pytest tests/ 113 | 114 | # To run the UUID model tests we implemented: 115 | python -m pytest test_uuid_models.py 116 | 117 | # To run a specific test file: 118 | python -m pytest tests/test_crews.py 119 | ``` 120 | 121 | ### [Special] Run AI crew chat workflow demonstration 122 | 123 | ```bash 124 | python -m tests.test_ai_crew_simple_demo 125 | ``` 126 | 127 | ## API Documentation 128 | 129 | The API documentation is automatically generated by FastAPI and is available at the following URLs: 130 | 131 | * **Swagger UI:** [http://127.0.0.1:8000/docs](http://127.0.0.1:8000/docs) 132 | * **ReDoc:** [http://127.0.0.1:8000/redoc](http://127.0.0.1:8000/redoc) 133 | 134 | ## Credits 135 | 136 | * [@goon_nguyen](https://x.com/goon_nguyen) 137 | * Github: [https://github.com/mrgoonie](https://github.com/mrgoonie) 138 | * Substack: [https://goonnguyen.substack.com/](https://goonnguyen.substack.com/) 139 | -------------------------------------------------------------------------------- /docs/getting-started.md: -------------------------------------------------------------------------------- 1 | # Getting Started 2 | 3 | This guide will help you set up and run the A2A LangGraph Boilerplate project for building AI agent clusters. 4 | 5 | ## Prerequisites 6 | 7 | - Python 3.13 or higher 8 | - PostgreSQL database 9 | - OpenRouter API key 10 | - Virtual environment tool (venv) 11 | 12 | ## Installation 13 | 14 | ### 1. Clone the Repository 15 | 16 | ```bash 17 | git clone <repository-url> 18 | cd a2a-langgraph-boilerplate 19 | ``` 20 | 21 | ### 2. 
Set Up Virtual Environment 22 | 23 | ```bash 24 | python -m venv venv 25 | source venv/bin/activate # On Windows: venv\Scripts\activate 26 | ``` 27 | 28 | ### 3. Install Dependencies 29 | 30 | ```bash 31 | pip install -r requirements.txt 32 | ``` 33 | 34 | ### 4. Environment Configuration 35 | 36 | Create a `.env` file in the project root: 37 | 38 | ```env 39 | DATABASE_URL="postgresql://username:password@localhost:5432/database_name" 40 | OPENROUTER_API_KEY="your_openrouter_api_key_here" 41 | ``` 42 | 43 | ### 5. Database Setup 44 | 45 | Create and initialize the database: 46 | 47 | ```bash 48 | python create_tables.py 49 | ``` 50 | 51 | This will create all necessary tables for crews, agents, MCP servers, tools, and conversations. 52 | 53 | ## Running the Application 54 | 55 | ### Development Server 56 | 57 | Start the FastAPI development server: 58 | 59 | ```bash 60 | uvicorn app.main:app --reload --log-level info 61 | ``` 62 | 63 | The application will be available at `http://localhost:8000` 64 | 65 | ### Background Logging 66 | 67 | To run the server with logging to a file: 68 | 69 | ```bash 70 | uvicorn app.main:app --reload --log-level info > server.log 2>&1 & 71 | ``` 72 | 73 | ## API Documentation 74 | 75 | Once the server is running, you can access the interactive API documentation: 76 | 77 | - **Swagger UI**: `http://localhost:8000/docs` 78 | - **ReDoc**: `http://localhost:8000/redoc` 79 | 80 | ## Project Structure 81 | 82 | ``` 83 | a2a-langgraph-boilerplate/ 84 | ├── app/ # Main application directory 85 | │ ├── api/ # API endpoints 86 | │ ├── core/ # Core logic and LangGraph setup 87 | │ ├── models/ # Database models 88 | │ ├── schemas/ # Pydantic schemas 89 | │ └── services/ # Business logic services 90 | ├── docs/ # Documentation 91 | ├── tests/ # Test files 92 | ├── requirements.txt # Python dependencies 93 | ├── create_tables.py # Database setup script 94 | └── .env # Environment variables 95 | ``` 96 | 97 | ## Quick Start Example 98 | 99 | ### 1. 
Create an AI Crew 100 | 101 | ```bash 102 | curl -X POST "http://localhost:8000/crews/" \ 103 | -H "Content-Type: application/json" \ 104 | -d '{ 105 | "name": "Research Team", 106 | "description": "A team of AI agents for research tasks" 107 | }' 108 | ``` 109 | 110 | ### 2. Create an Agent 111 | 112 | ```bash 113 | curl -X POST "http://localhost:8000/agents/" \ 114 | -H "Content-Type: application/json" \ 115 | -d '{ 116 | "name": "Research Agent", 117 | "description": "An agent specialized in research tasks", 118 | "role": "researcher", 119 | "system_instructions": "You are a research assistant...", 120 | "crew_id": "crew-uuid-here" 121 | }' 122 | ``` 123 | 124 | ### 3. Create an MCP Server 125 | 126 | ```bash 127 | curl -X POST "http://localhost:8000/mcp_servers/" \ 128 | -H "Content-Type: application/json" \ 129 | -d '{ 130 | "name": "Search API", 131 | "url": "https://searchapi-mcp.prod.diginext.site/mcp", 132 | "description": "Search API MCP server for web search capabilities" 133 | }' 134 | ``` 135 | 136 | ### 4. Create a Tool 137 | 138 | ```bash 139 | curl -X POST "http://localhost:8000/tools/" \ 140 | -H "Content-Type: application/json" \ 141 | -d '{ 142 | "name": "web_search", 143 | "description": "Search the web for information", 144 | "api_name": "search", 145 | "mcp_server_id": "mcp-server-uuid-here" 146 | }' 147 | ``` 148 | 149 | ### 5. 
Add Tool to Agent 150 | 151 | ```bash 152 | curl -X POST "http://localhost:8000/agents/{agent_id}/tools/{tool_id}" \ 153 | -H "Content-Type: application/json" 154 | ``` 155 | 156 | ## Testing 157 | 158 | Run the test suite: 159 | 160 | ```bash 161 | pytest 162 | ``` 163 | 164 | Run specific test files: 165 | 166 | ```bash 167 | pytest tests/test_crews.py 168 | pytest tests/test_agents.py 169 | ``` 170 | 171 | ## Key Features 172 | 173 | ### AI Crew Management 174 | - Create and manage AI crews (collections of agents) 175 | - Each crew has a supervisor agent that coordinates tasks 176 | - Support for multi-agent collaboration 177 | 178 | ### Agent Management 179 | - Create specialized AI agents with custom roles 180 | - Assign tools and capabilities to agents 181 | - Configure system instructions for agent behavior 182 | 183 | ### MCP Server Integration 184 | - Connect to Model Context Protocol (MCP) servers 185 | - Integrate external tools and APIs 186 | - Support for tool discovery and execution 187 | 188 | ### Conversation Management 189 | - Track conversations between users and AI crews 190 | - Store conversation history and context 191 | - Support for streaming responses 192 | 193 | ## Troubleshooting 194 | 195 | ### Common Issues 196 | 197 | 1. **Database Connection Error** 198 | - Verify PostgreSQL is running 199 | - Check DATABASE_URL in .env file 200 | - Ensure database exists and is accessible 201 | 202 | 2. **OpenRouter API Error** 203 | - Verify OPENROUTER_API_KEY is set correctly 204 | - Check API key permissions and credits 205 | 206 | 3. 
**MCP Server Connection Error** 207 | - Verify MCP server URL is accessible 208 | - Check if authentication is required for the MCP server 209 | 210 | ### Logs 211 | 212 | Check the server logs for detailed error information: 213 | 214 | ```bash 215 | tail -f server.log 216 | ``` 217 | 218 | ## Next Steps 219 | 220 | - Read the [API Documentation](api-endpoints.md) for detailed endpoint information 221 | - Learn about [AI Crew Management](ai-crews-management.md) 222 | - Explore [MCP Server Integration](mcp-servers-management.md) 223 | - Check the [Testing Guide](testing.md) for comprehensive testing information 224 | 225 | ## Support 226 | 227 | For issues and questions: 228 | - Check the existing documentation 229 | - Review the test files for usage examples 230 | - Examine the example scripts in the project root -------------------------------------------------------------------------------- /tests/test_agent_model_field.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python3 2 | """ 3 | Test for the agent model field feature. 4 | This script demonstrates creating agents with and without custom OpenRouter models. 
5 | """ 6 | 7 | import os 8 | import sys 9 | import uuid 10 | from sqlalchemy.orm import Session 11 | from dotenv import load_dotenv 12 | 13 | # Add the project root to the path so we can import the app modules 14 | sys.path.append(os.path.dirname(os.path.dirname(os.path.abspath(__file__)))) 15 | 16 | # Import necessary modules from the project 17 | from app.core.database import get_db, engine 18 | from app.models.base import Base 19 | from app.schemas.crew import CrewCreate 20 | from app.schemas.agent import AgentCreate 21 | from app.services import crew as crew_service 22 | from app.services import agent as agent_service 23 | from app.core.logging import get_logger 24 | 25 | # Set up logging 26 | logger = get_logger(__name__) 27 | 28 | # Load environment variables 29 | load_dotenv() 30 | 31 | # Ensure database is set up 32 | Base.metadata.create_all(bind=engine) 33 | 34 | def test_agent_model_field(): 35 | """ 36 | Test the agent model field functionality by: 37 | 1. Creating a crew 38 | 2. Creating agents with and without custom models 39 | 3. Verifying that the model field is correctly stored and retrieved 40 | """ 41 | print("\n" + "=" * 80) 42 | print("TESTING AGENT MODEL FIELD") 43 | print("=" * 80) 44 | 45 | # Initialize database session 46 | db = next(get_db()) 47 | 48 | try: 49 | # Step 1: Create a crew for testing 50 | print("\n[STEP 1] Creating test crew...") 51 | crew_data = CrewCreate(name="Model Field Test Crew") 52 | crew = crew_service.create_crew(db=db, crew=crew_data) 53 | print(f"Created test crew: {crew.name} (ID: {crew.id})") 54 | 55 | # Step 2: Create agents with and without custom model field 56 | print("\n[STEP 2] Creating agents with and without custom model field...") 57 | 58 | # Agent 1: Default agent with no custom model 59 | agent1_data = AgentCreate( 60 | name="Default Agent", 61 | crew_id=crew.id, 62 | role="assistant", 63 | system_prompt="You are a helpful assistant." 
64 | ) 65 | agent1 = agent_service.create_agent(db=db, agent=agent1_data) 66 | print(f"Created agent without custom model: {agent1.name} (ID: {agent1.id})") 67 | print(f" Model value: {agent1.model if agent1.model else 'None (using default)'}") 68 | 69 | # Agent 2: Supervisor agent with custom model 70 | agent2_data = AgentCreate( 71 | name="Smart Supervisor", 72 | crew_id=crew.id, 73 | role="supervisor", 74 | system_prompt="You are a smart supervisor overseeing other agents.", 75 | model="anthropic/claude-3-opus-20240229" 76 | ) 77 | agent2 = agent_service.create_agent(db=db, agent=agent2_data) 78 | print(f"Created supervisor with custom model: {agent2.name} (ID: {agent2.id})") 79 | print(f" Model value: {agent2.model}") 80 | 81 | # Agent 3: Another agent with different custom model 82 | agent3_data = AgentCreate( 83 | name="Research Agent", 84 | crew_id=crew.id, 85 | role="researcher", 86 | system_prompt="You are a researcher focused on finding information.", 87 | model="anthropic/claude-3-sonnet-20240229" 88 | ) 89 | agent3 = agent_service.create_agent(db=db, agent=agent3_data) 90 | print(f"Created researcher with custom model: {agent3.name} (ID: {agent3.id})") 91 | print(f" Model value: {agent3.model}") 92 | 93 | # Step 3: Verify by retrieving agents from database 94 | print("\n[STEP 3] Verifying agents retrieved from database...") 95 | 96 | db_agent1 = agent_service.get_agent(db=db, agent_id=agent1.id) 97 | print(f"Retrieved agent 1: {db_agent1.name}") 98 | print(f" Model value: {db_agent1.model if db_agent1.model else 'None (using default)'}") 99 | assert db_agent1.model is None, "Default agent should have None model value" 100 | 101 | db_agent2 = agent_service.get_agent(db=db, agent_id=agent2.id) 102 | print(f"Retrieved agent 2: {db_agent2.name}") 103 | print(f" Model value: {db_agent2.model}") 104 | assert db_agent2.model == "anthropic/claude-3-opus-20240229", "Supervisor model value mismatch" 105 | 106 | db_agent3 = agent_service.get_agent(db=db, 
agent_id=agent3.id) 107 | print(f"Retrieved agent 3: {db_agent3.name}") 108 | print(f" Model value: {db_agent3.model}") 109 | assert db_agent3.model == "anthropic/claude-3-sonnet-20240229", "Researcher model value mismatch" 110 | 111 | # Step 4: Test updating an agent's model 112 | print("\n[STEP 4] Testing agent model update...") 113 | updated_agent_data = AgentCreate( 114 | name="Default Agent Updated", 115 | crew_id=crew.id, 116 | role="assistant", 117 | system_prompt="You are a helpful assistant.", 118 | model="google/gemini-2.5-flash" # Add a model to previously default agent 119 | ) 120 | updated_agent = agent_service.update_agent(db=db, agent_id=agent1.id, agent=updated_agent_data) 121 | print(f"Updated agent 1: {updated_agent.name}") 122 | print(f" New model value: {updated_agent.model}") 123 | assert updated_agent.model == "google/gemini-2.5-flash", "Updated model value mismatch" 124 | 125 | print("\n" + "=" * 80) 126 | print("TEST COMPLETED SUCCESSFULLY") 127 | print("=" * 80 + "\n") 128 | 129 | except Exception as e: 130 | print(f"\nTest failed: {str(e)}") 131 | raise 132 | finally: 133 | # Clean up (optional - comment out if you want to keep the test data) 134 | if crew: 135 | crew_service.delete_crew(db=db, crew_id=crew.id) 136 | print("Cleanup: Deleted test crew and all associated agents") 137 | # Close the database session 138 | db.close() 139 | 140 | if __name__ == "__main__": 141 | try: 142 | test_agent_model_field() 143 | print("All tests passed!") 144 | except Exception as e: 145 | logger.error(f"Test failed: {str(e)}", exc_info=True) 146 | print(f"Test failed: {str(e)}") 147 | -------------------------------------------------------------------------------- /test_uuid_models.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python3 2 | """ 3 | Test script to verify that UUID-based models are working correctly. 4 | This script will: 5 | 1. Create a new MCP server 6 | 2. Create a new Crew 7 | 3. 
Create a new Tool (linked to the MCP server) 8 | 4. Create a new Agent (linked to the Crew) 9 | 5. Link the Tool to the Agent 10 | 6. Create a new Conversation 11 | 7. Retrieve all created entities and verify the relationships 12 | """ 13 | 14 | import os 15 | import datetime 16 | from dotenv import load_dotenv 17 | from sqlalchemy.orm import Session 18 | from app.core.database import SessionLocal 19 | from app.models.base import Base 20 | from app.models.mcp_server import McpServer 21 | from app.models.crew import Crew 22 | from app.models.agent import Agent 23 | from app.models.tool import Tool 24 | from app.models.conversation import Conversation 25 | from app.models.agent_tool import agent_tool 26 | import uuid 27 | 28 | # Load environment variables 29 | load_dotenv() 30 | 31 | def test_uuid_models(): 32 | """Test UUID-based models by creating and retrieving records.""" 33 | # Create a session 34 | db = SessionLocal() 35 | 36 | try: 37 | print("Testing UUID-based models...") 38 | 39 | # Create a new MCP server with unique URL (using timestamp) 40 | print("Creating MCP server...") 41 | timestamp = datetime.datetime.now().strftime("%Y%m%d%H%M%S") 42 | unique_url = f"http://test-mcp-server-{timestamp}.local" 43 | mcp_server = McpServer(name="Test MCP Server", url=unique_url) 44 | db.add(mcp_server) 45 | db.flush() # Flush to get the UUID 46 | mcp_server_id = mcp_server.id 47 | print(f"Created MCP server with ID: {mcp_server_id}") 48 | 49 | # Create a new Crew 50 | print("Creating Crew...") 51 | crew = Crew(name="Test Crew") 52 | db.add(crew) 53 | db.flush()  # flush populates crew.id before commit 54 | crew_id = crew.id 55 | print(f"Created Crew with ID: {crew_id}") 56 | 57 | # Create a new Tool linked to the MCP server 58 | print("Creating Tool...") 59 | tool = Tool(name="Test Tool", description="A test tool", mcp_server_id=mcp_server_id) 60 | db.add(tool) 61 | db.flush()  # flush populates tool.id before commit 62 | tool_id = tool.id 63 | print(f"Created Tool with ID: {tool_id}") 64 | 65 | # Create a new Agent linked to the Crew 66 | 
print("Creating Agent...") 67 | agent = Agent( 68 | name="Test Agent", 69 | crew_id=crew_id, 70 | role="tester", 71 | system_prompt="You are a test agent." 72 | ) 73 | db.add(agent) 74 | db.flush()  # flush populates agent.id before commit 75 | agent_id = agent.id 76 | print(f"Created Agent with ID: {agent_id}") 77 | 78 | # Link the Tool to the Agent (insert directly into the agent_tool association table) 79 | print("Linking Tool to Agent...") 80 | db.execute( 81 | agent_tool.insert().values( 82 | agent_id=agent_id, 83 | tool_id=tool_id 84 | ) 85 | ) 86 | 87 | # Create a new Conversation linked to the Crew and Agent 88 | print("Creating Conversation...") 89 | conversation = Conversation( 90 | user_input="Hello test agent", 91 | agent_output="Hello user", 92 | crew_id=crew_id, 93 | agent_id=agent_id 94 | ) 95 | db.add(conversation) 96 | db.flush()  # flush populates conversation.id before commit 97 | conversation_id = conversation.id 98 | print(f"Created Conversation with ID: {conversation_id}") 99 | 100 | # Commit all changes 101 | db.commit() 102 | print("All records committed to the database.") 103 | 104 | # Retrieve and verify the records 105 | print("\nVerifying records...") 106 | 107 | # Retrieve the MCP server 108 | retrieved_mcp_server = db.query(McpServer).filter(McpServer.id == mcp_server_id).first() 109 | print(f"Retrieved MCP server: {retrieved_mcp_server.name} (ID: {retrieved_mcp_server.id})") 110 | 111 | # Retrieve the Crew 112 | retrieved_crew = db.query(Crew).filter(Crew.id == crew_id).first() 113 | print(f"Retrieved Crew: {retrieved_crew.name} (ID: {retrieved_crew.id})") 114 | 115 | # Retrieve the Tool 116 | retrieved_tool = db.query(Tool).filter(Tool.id == tool_id).first() 117 | print(f"Retrieved Tool: {retrieved_tool.name} (ID: {retrieved_tool.id})") 118 | print(f"Tool's MCP server ID: {retrieved_tool.mcp_server_id}") 119 | 120 | # Retrieve the Agent 121 | retrieved_agent = db.query(Agent).filter(Agent.id == agent_id).first() 122 | print(f"Retrieved Agent: {retrieved_agent.name} (ID: {retrieved_agent.id})") 123 | print(f"Agent's Crew ID: {retrieved_agent.crew_id}") 124 | 125 | # Retrieve the 
Conversation 126 | retrieved_conversation = db.query(Conversation).filter(Conversation.id == conversation_id).first() 127 | print(f"Retrieved Conversation: ID: {retrieved_conversation.id}") 128 | print(f"Conversation user input: {retrieved_conversation.user_input}") 129 | print(f"Conversation agent output: {retrieved_conversation.agent_output}") 130 | 131 | # Verify Tool-Agent relationship using relationship 132 | print("\nVerifying relationships...") 133 | print("Accessing relationship: Agent's tools:") 134 | for t in retrieved_agent.tools: 135 | print(f"- {t.name} (ID: {t.id})") 136 | 137 | # Verify Tool's MCP Server relationship 138 | print("\nAccessing relationship: Tool's MCP Server:") 139 | print(f"- {retrieved_tool.mcp_server.name} (ID: {retrieved_tool.mcp_server.id})") 140 | 141 | # Verify Agent's Crew relationship 142 | print("\nAccessing relationship: Agent's Crew:") 143 | print(f"- {retrieved_agent.crew.name} (ID: {retrieved_agent.crew.id})") 144 | 145 | # Verify Crew's Agents relationship 146 | print("\nAccessing relationship: Crew's Agents:") 147 | for a in retrieved_crew.agents: 148 | print(f"- {a.name} (ID: {a.id})") 149 | 150 | # Verify Conversation relationships 151 | print("\nAccessing relationship: Conversation's Agent:") 152 | print(f"- {retrieved_conversation.agent.name} (ID: {retrieved_conversation.agent.id})") 153 | 154 | print("\nAccessing relationship: Conversation's Crew:") 155 | print(f"- {retrieved_conversation.crew.name} (ID: {retrieved_conversation.crew.id})") 156 | 157 | # Verify all relationships 158 | print("\nVerification completed successfully!") 159 | print("All UUID-based models and relationships are working correctly.") 160 | 161 | except Exception as e: 162 | db.rollback()  # roll back the partially-written test data before surfacing the error 163 | print(f"Error during testing: {e}") 164 | raise 165 | finally: 166 | db.close() 167 | 168 | if __name__ == "__main__": 169 | test_uuid_models() 170 | -------------------------------------------------------------------------------- /docs/README.md: 
-------------------------------------------------------------------------------- 1 | # A2A LangGraph Boilerplate Documentation 2 | 3 | Welcome to the comprehensive documentation for the A2A (Agent-to-Agent) LangGraph Boilerplate project. This documentation provides everything you need to get started with building AI agent clusters using LangGraph and Model Context Protocol (MCP) integration. 4 | 5 | ## What is A2A LangGraph Boilerplate? 6 | 7 | The A2A LangGraph Boilerplate is a production-ready framework for building AI agent clusters that can collaborate to solve complex problems. It features: 8 | 9 | - **Multi-Agent Coordination**: Supervisor-agent architecture with specialized roles 10 | - **Tool Integration**: Model Context Protocol (MCP) server integration for external tools 11 | - **Scalable Architecture**: PostgreSQL backend with UUID-based models 12 | - **RESTful API**: FastAPI-based API with automatic documentation 13 | - **Comprehensive Testing**: Full test suite with integration examples 14 | 15 | ## Documentation Structure 16 | 17 | ### Getting Started 18 | - **[Getting Started](getting-started.md)** - Installation, setup, and quick start guide 19 | - **[API Endpoints](api-endpoints.md)** - Complete API reference with examples 20 | - **[Testing](testing.md)** - Test suite overview and testing strategies 21 | 22 | ### Core Concepts 23 | - **[AI Crews Management](ai-crews-management.md)** - Creating and managing AI crews 24 | - **[AI Agents Management](ai-agents-management.md)** - Agent configuration and specialization 25 | - **[MCP Servers Management](mcp-servers-management.md)** - Tool integration and external services 26 | - **[Conversations Management](conversations-management.md)** - Tracking and analyzing interactions 27 | 28 | ### Operations 29 | - **[Logs Monitoring](logs-monitoring.md)** - Logging, monitoring, and debugging 30 | 31 | ## Quick Navigation 32 | 33 | ### For Developers New to the Project 34 | 1. 
Start with **[Getting Started](getting-started.md)** for installation and setup 35 | 2. Review **[API Endpoints](api-endpoints.md)** for the API overview 36 | 3. Try the examples in **[AI Crews Management](ai-crews-management.md)** 37 | 38 | ### For System Administrators 39 | 1. Review **[Getting Started](getting-started.md)** for deployment requirements 40 | 2. Study **[Logs Monitoring](logs-monitoring.md)** for operational monitoring 41 | 3. Understand **[MCP Servers Management](mcp-servers-management.md)** for service integration 42 | 43 | ### For AI/ML Engineers 44 | 1. Explore **[AI Agents Management](ai-agents-management.md)** for agent specialization 45 | 2. Study **[AI Crews Management](ai-crews-management.md)** for workflow design 46 | 3. Review **[Conversations Management](conversations-management.md)** for interaction patterns 47 | 48 | ## Key Features 49 | 50 | ### Multi-Agent Architecture 51 | - **Supervisor Coordination**: Central coordination with intelligent task delegation 52 | - **Specialized Agents**: Domain-specific agents (research, development, analysis, etc.) 
53 | - **A2A Protocol**: Agent-to-agent communication for collaborative problem-solving 54 | 55 | ### Tool Integration 56 | - **MCP Protocol**: Universal standard for connecting to external tools and services 57 | - **Resilient Connections**: Robust error handling and retry mechanisms 58 | - **Dynamic Tool Loading**: Runtime tool discovery and integration 59 | 60 | ### Production Ready 61 | - **PostgreSQL Backend**: Reliable database with UUID primary keys 62 | - **FastAPI Framework**: High-performance API with automatic documentation 63 | - **Comprehensive Testing**: Full test coverage with integration examples 64 | - **Monitoring & Logging**: Detailed logging and monitoring capabilities 65 | 66 | ## Architecture Overview 67 | 68 | ``` 69 | ┌─────────────┐ ┌─────────────┐ ┌─────────────┐ 70 | │ User │───▶│ FastAPI │───▶│ PostgreSQL │ 71 | │ Interface │ │ API │ │ Database │ 72 | └─────────────┘ └─────────────┘ └─────────────┘ 73 | │ 74 | ▼ 75 | ┌─────────────┐ 76 | │ LangGraph │ 77 | │ Workflow │ 78 | └─────────────┘ 79 | │ 80 | ┌────────┼────────┐ 81 | ▼ ▼ ▼ 82 | ┌──────────┐ ┌──────────┐ ┌──────────┐ 83 | │Supervisor│ │ Agent 1 │ │ Agent N │ 84 | │ Agent │ │ │ │ │ 85 | └──────────┘ └──────────┘ └──────────┘ 86 | │ │ │ 87 | └─────────┼──────────┘ 88 | ▼ 89 | ┌─────────────┐ 90 | │MCP Servers │ 91 | │& Tools │ 92 | └─────────────┘ 93 | ``` 94 | 95 | ## Common Use Cases 96 | 97 | ### Research and Analysis 98 | Create crews specialized in information gathering, analysis, and reporting: 99 | - Research agents for data collection 100 | - Analysis agents for insights generation 101 | - Summarization agents for report creation 102 | 103 | ### Software Development 104 | Build development teams with specialized roles: 105 | - Requirements analysis agents 106 | - Architecture design agents 107 | - Code generation agents 108 | - Testing and QA agents 109 | 110 | ### Content Creation 111 | Develop content creation workflows: 112 | - Research and fact-checking agents 113 | - 
Writing and editing agents 114 | - Review and optimization agents 115 | 116 | ### Business Intelligence 117 | Create analytical crews for business insights: 118 | - Data collection agents 119 | - Statistical analysis agents 120 | - Visualization and reporting agents 121 | 122 | ## Getting Help 123 | 124 | ### Documentation Issues 125 | If you find issues with the documentation or need clarification: 126 | 1. Check the specific guide for your use case 127 | 2. Review the API documentation for technical details 128 | 3. Look at the test files for working examples 129 | 130 | ### Technical Support 131 | For technical issues: 132 | 1. Check the **[Logs Monitoring](logs-monitoring.md)** guide for debugging 133 | 2. Review the **[Testing](testing.md)** guide for troubleshooting steps 134 | 3. Examine the test files for working examples 135 | 136 | ### Best Practices 137 | - Always read the relevant documentation before implementing features 138 | - Use the test files as examples and templates 139 | - Follow the established patterns in the codebase 140 | - Monitor logs for performance and error tracking 141 | 142 | ## Contributing 143 | 144 | When contributing to the project: 145 | 1. Follow the existing code patterns and conventions 146 | 2. Add tests for new features 147 | 3. Update documentation as needed 148 | 4. Follow security best practices 149 | 150 | ## Next Steps 151 | 152 | Choose your path based on your role and needs: 153 | 154 | **New to the Project?** → Start with [Getting Started](getting-started.md) 155 | 156 | **Building AI Workflows?** → Explore [AI Crews Management](ai-crews-management.md) 157 | 158 | **Integrating Tools?** → Study [MCP Servers Management](mcp-servers-management.md) 159 | 160 | **API Integration?** → Review [API Endpoints](api-endpoints.md) 161 | 162 | **Operations & Monitoring?** → Check [Logs Monitoring](logs-monitoring.md) 163 | 164 | This documentation is designed to be comprehensive yet accessible. 
Each guide includes practical examples, best practices, and troubleshooting information to help you successfully build and deploy AI agent systems. -------------------------------------------------------------------------------- /docs/testing.md: -------------------------------------------------------------------------------- 1 | # Testing Guide 2 | 3 | This guide covers the testing approach and test suite for the A2A LangGraph Boilerplate project. 4 | 5 | ## Test Structure 6 | 7 | The project uses pytest for testing with the following structure: 8 | 9 | ``` 10 | tests/ 11 | ├── conftest.py # Test configuration and fixtures 12 | ├── test_agents.py # Agent API tests 13 | ├── test_crews.py # Crew API tests 14 | ├── test_conversations.py # Conversation API tests 15 | ├── test_mcp_servers.py # MCP Server API tests 16 | ├── test_tools.py # Tool API tests 17 | ├── test_ai_crew_chat.py # AI crew chat workflow tests 18 | ├── test_ai_crew_simple_demo.py # Complete workflow demonstration 19 | └── test_readme_workflow.py # README workflow examples 20 | ``` 21 | 22 | ## Test Configuration 23 | 24 | ### Test Database Setup 25 | 26 | The test suite uses an in-memory SQLite database for fast, isolated testing: 27 | 28 | ```python 29 | # conftest.py 30 | SQLALCHEMY_DATABASE_URL = "sqlite:///:memory:" 31 | ``` 32 | 33 | ### Test Fixtures 34 | 35 | Key fixtures available in all tests: 36 | 37 | - `setup_database`: Creates database tables before tests 38 | - `db_session`: Provides a database session for each test 39 | - `client`: FastAPI test client with database dependency override 40 | 41 | ## Running Tests 42 | 43 | ### Run All Tests 44 | 45 | ```bash 46 | pytest 47 | ``` 48 | 49 | ### Run Specific Test Files 50 | 51 | ```bash 52 | pytest tests/test_crews.py 53 | pytest tests/test_agents.py 54 | pytest tests/test_mcp_servers.py 55 | ``` 56 | 57 | ### Run Tests with Verbose Output 58 | 59 | ```bash 60 | pytest -v 61 | ``` 62 | 63 | ### Run Tests with Coverage 64 | 65 | ```bash 66 | 
pytest --cov=app 67 | ``` 68 | 69 | ## Test Categories 70 | 71 | ### 1. API Endpoint Tests 72 | 73 | These tests verify the CRUD operations for all API endpoints: 74 | 75 | #### Crew Tests (`test_crews.py`) 76 | - Create crew 77 | - Read crews (list all) 78 | - Read crew (get by ID) 79 | - Update crew 80 | - Delete crew 81 | 82 | #### Agent Tests (`test_agents.py`) 83 | - Create agent 84 | - Read agents (list all) 85 | - Read agent (get by ID) 86 | - Update agent 87 | - Delete agent 88 | - Add tool to agent 89 | 90 | #### MCP Server Tests (`test_mcp_servers.py`) 91 | - Create MCP server 92 | - Read MCP servers 93 | - Read MCP server by ID 94 | - Update MCP server 95 | - Delete MCP server 96 | 97 | #### Tool Tests (`test_tools.py`) 98 | - Create tool 99 | - Read tools 100 | - Read tool by ID 101 | - Update tool 102 | - Delete tool 103 | 104 | #### Conversation Tests (`test_conversations.py`) 105 | - Create conversation 106 | - Read conversations 107 | - Read conversation by ID 108 | 109 | ### 2. Integration Tests 110 | 111 | #### AI Crew Chat Tests (`test_ai_crew_chat.py`) 112 | Tests the complete AI crew workflow with agent interactions. 113 | 114 | #### AI Crew Simple Demo (`test_ai_crew_simple_demo.py`) 115 | Comprehensive demonstration of the complete workflow: 116 | - Creates crew with supervisor agent 117 | - Adds specialized agents (Researcher, Coder, Summarizer) 118 | - Configures MCP servers and tools 119 | - Executes prompts through the crew 120 | - Validates workflow execution 121 | 122 | #### README Workflow Tests (`test_readme_workflow.py`) 123 | Tests the examples shown in the README to ensure they work correctly. 
124 | 125 | ## Test Examples 126 | 127 | ### Basic API Test Example 128 | 129 | ```python 130 | def test_create_crew(client: TestClient, db_session: Session): 131 | response = client.post("/crews/", json={"name": "Test Crew"}) 132 | assert response.status_code == 200 133 | data = response.json() 134 | assert data["name"] == "Test Crew" 135 | assert "id" in data 136 | ``` 137 | 138 | ### Integration Test Example 139 | 140 | ```python 141 | def test_complete_workflow(client: TestClient, db_session: Session): 142 | # Create crew 143 | crew_response = client.post("/crews/", json={ 144 | "name": "Research Team", 145 | "description": "A team for research tasks" 146 | }) 147 | crew_id = crew_response.json()["id"] 148 | 149 | # Create agent 150 | agent_response = client.post("/agents/", json={ 151 | "name": "Researcher", 152 | "role": "researcher", 153 | "crew_id": crew_id 154 | }) 155 | agent_id = agent_response.json()["id"] 156 | 157 | # Verify workflow 158 | assert crew_response.status_code == 200 159 | assert agent_response.status_code == 200 160 | ``` 161 | 162 | ## Test Database 163 | 164 | ### In-Memory Database 165 | 166 | Tests use SQLite in-memory database for: 167 | - Fast test execution 168 | - Isolated test environment 169 | - No external dependencies 170 | - Automatic cleanup 171 | 172 | ### Database Schema 173 | 174 | The test database uses the same schema as production: 175 | - All tables are created from SQLAlchemy models 176 | - Foreign key relationships are maintained 177 | - UUID primary keys are supported 178 | 179 | ## Mocking and External Dependencies 180 | 181 | ### MCP Server Testing 182 | 183 | For MCP server integration tests: 184 | - Uses real MCP server URLs where possible 185 | - Handles authentication gracefully 186 | - Provides meaningful error messages for missing credentials 187 | 188 | ### API Client Testing 189 | 190 | - Uses FastAPI TestClient for HTTP requests 191 | - Dependency injection for database sessions 192 | - Proper setup and 
teardown for each test 193 | 194 | ## Test Data Management 195 | 196 | ### Test Isolation 197 | 198 | Each test runs in its own transaction: 199 | - Database changes are rolled back after each test 200 | - No test data pollution between tests 201 | - Clean state for every test 202 | 203 | ### Test Data Creation 204 | 205 | Tests create their own test data: 206 | - Minimal data creation for each test 207 | - Focused on testing specific functionality 208 | - Clear and predictable test scenarios 209 | 210 | ## Running Tests in Development 211 | 212 | ### Watch Mode 213 | 214 | For continuous testing during development, install the `pytest-watch` plugin (`pip install pytest-watch`) and run: 215 | 216 | ```bash 217 | ptw 218 | ``` 219 | 220 | ### Specific Test Patterns 221 | 222 | ```bash 223 | # Run tests matching a pattern 224 | pytest -k "test_crew" 225 | 226 | # Run tests in a specific file 227 | pytest tests/test_crews.py::test_create_crew 228 | 229 | # Run tests with specific markers 230 | pytest -m "integration" 231 | ``` 232 | 233 | ## Test Performance 234 | 235 | ### Fast Test Execution 236 | 237 | - In-memory database for speed 238 | - Minimal test data creation 239 | - Efficient test isolation 240 | - Parallel test execution support 241 | 242 | ### Test Optimization 243 | 244 | - Shared fixtures where appropriate 245 | - Efficient database operations 246 | - Minimal external API calls 247 | - Fast assertion strategies 248 | 249 | ## Debugging Tests 250 | 251 | ### Test Failures 252 | 253 | When tests fail: 254 | 1. Check the error message and traceback 255 | 2. Verify test data setup 256 | 3. Check database state 257 | 4. 
Examine API response details 258 | 259 | ### Debugging Tools 260 | 261 | ```bash 262 | # Run with debug output 263 | pytest -v -s 264 | 265 | # Run single test for debugging 266 | pytest tests/test_crews.py::test_create_crew -v -s 267 | 268 | # Use pdb for debugging 269 | pytest --pdb 270 | ``` 271 | 272 | ## Test Coverage 273 | 274 | ### Coverage Reports 275 | 276 | Generate coverage reports: 277 | 278 | ```bash 279 | pytest --cov=app --cov-report=html 280 | ``` 281 | 282 | ### Coverage Goals 283 | 284 | - Aim for high coverage of core functionality 285 | - Focus on critical business logic 286 | - Test error handling paths 287 | - Cover edge cases and boundary conditions 288 | 289 | ## Continuous Integration 290 | 291 | ### GitHub Actions 292 | 293 | Tests run automatically on: 294 | - Pull requests 295 | - Push to main branch 296 | - Scheduled runs 297 | 298 | ### Test Environment 299 | 300 | CI environment: 301 | - Uses PostgreSQL for integration tests 302 | - Runs full test suite 303 | - Generates coverage reports 304 | - Validates all dependencies 305 | 306 | ## Best Practices 307 | 308 | ### Test Organization 309 | 310 | - Group related tests in the same file 311 | - Use descriptive test names 312 | - Keep tests focused and atomic 313 | - Use fixtures for common setup 314 | 315 | ### Test Quality 316 | 317 | - Test both success and failure scenarios 318 | - Verify error handling 319 | - Check edge cases 320 | - Validate data integrity 321 | 322 | ### Test Maintenance 323 | 324 | - Keep tests updated with code changes 325 | - Remove obsolete tests 326 | - Refactor duplicate test code 327 | - Document complex test scenarios -------------------------------------------------------------------------------- /app/core/tools.py: -------------------------------------------------------------------------------- 1 | import os 2 | import asyncio 3 | import time 4 | import logging 5 | from typing import Dict, List, Any, Optional, Union, Callable 6 | from contextlib import 
import os
import asyncio
import time
import logging
from typing import Dict, List, Any, Optional, Union, Callable
from contextlib import AsyncExitStack
from functools import wraps

from langgraph.prebuilt import ToolNode
from langchain_core.tools import tool, BaseTool, StructuredTool
# Use direct Pydantic import instead of deprecated langchain_core.pydantic_v1
from pydantic import create_model
from langchain_mcp_adapters.client import MultiServerMCPClient
# Keep imports below for backward compatibility if needed
from mcp import ClientSession
from mcp.client.streamable_http import streamablehttp_client
import httpx

# Module-level logger; configuration is done by the application's logging setup.
logger = logging.getLogger(__name__)


def create_tool_node(tools: list) -> ToolNode:
    """Wrap a list of LangChain tools in a LangGraph ToolNode."""
    return ToolNode(tools)


def create_search_api_tool() -> list:
    """Return an empty tool list; search tools come from the MCP server.

    The search tool is obtained dynamically from the MCP server during
    workflow execution rather than through a direct Tavily integration,
    so there is nothing to build statically here.
    """
    return []


class ResilientMcpTool(StructuredTool):
    """A resilient wrapper around MCP tools that handles connection errors gracefully.

    This class wraps a standard MCP tool to add:
    - timeout / network-error handling
    - retry logic for transient errors
    - graceful error messages instead of raised exceptions for failed tool calls

    It helps prevent UnboundLocalError and other exceptions that might crash
    the agent workflow when MCP servers experience issues.

    Implementation note: ``StructuredTool`` is a Pydantic model, so the wrapped
    tool and retry settings are declared as model fields and construction goes
    through ``super().__init__()``.  (The previous implementation assigned
    undeclared attributes in ``__init__`` without calling the base
    initialiser, which Pydantic rejects at construction time.)
    """

    # The original MCP tool being wrapped.
    base_tool: BaseTool
    # Maximum number of retries after the first failed attempt.
    max_retries: int = 2
    # Seconds to wait between attempts.
    retry_delay: float = 1.0

    def __init__(self, base_tool: BaseTool, max_retries: int = 2,
                 retry_delay: float = 1.0, **kwargs: Any):
        """Initialize the resilient MCP tool wrapper.

        Args:
            base_tool: The original MCP tool to wrap.
            max_retries: Maximum number of retries on failure (default: 2).
            retry_delay: Delay between retries in seconds (default: 1.0).
        """
        args_schema = getattr(base_tool, "args_schema", None)
        if args_schema is None:
            # Fallback: build a permissive schema from the reported arg names.
            # NOTE(review): MCP tools normally carry an args_schema already, and
            # base_tool.args yields JSON-schema dicts rather than pydantic
            # fields — confirm this branch is ever taken in practice.
            args_schema = create_model(
                f"{base_tool.name}Schema",
                **{arg_name: (Any, None) for arg_name in base_tool.args},
            )
        super().__init__(
            base_tool=base_tool,
            max_retries=max_retries,
            retry_delay=retry_delay,
            name=base_tool.name,
            description=base_tool.description,
            args_schema=args_schema,
            **kwargs,
        )

    def _run(self, **kwargs: Any) -> str:
        """Synchronous tool entry point; delegates to the resilient runner."""
        return self._resilient_run(**kwargs)

    async def _arun(self, **kwargs: Any) -> str:
        """Asynchronous tool entry point; delegates to the resilient runner."""
        return await self._resilient_arun(**kwargs)

    def _resilient_run(self, **kwargs: Any) -> str:
        """Execute the tool with resilience, retrying on failures."""
        for attempt in range(self.max_retries + 1):
            try:
                return self.base_tool._run(**kwargs)
            except Exception as e:
                if attempt < self.max_retries:
                    logger.warning(f"MCP tool {self.name} failed (attempt {attempt+1}/{self.max_retries+1}): {str(e)}")
                    time.sleep(self.retry_delay)
                else:
                    logger.error(f"MCP tool {self.name} failed after {self.max_retries+1} attempts: {str(e)}")
                    return f"Error: Tool call failed after {self.max_retries+1} attempts. The external service may be unavailable or experiencing issues. {str(e)}"

    async def _resilient_arun(self, **kwargs: Any) -> str:
        """Execute the tool asynchronously with resilience, retrying on failures."""
        for attempt in range(self.max_retries + 1):
            try:
                return await self.base_tool._arun(**kwargs)
            except (httpx.HTTPStatusError, httpx.ConnectError, httpx.ReadTimeout) as e:
                error_msg = f"Network error with MCP server: {str(e)}"
                if attempt < self.max_retries:
                    logger.warning(f"MCP tool {self.name} network error (attempt {attempt+1}/{self.max_retries+1}): {error_msg}")
                    await asyncio.sleep(self.retry_delay)
                else:
                    logger.error(f"MCP tool {self.name} network error after {self.max_retries+1} attempts: {error_msg}")
                    return f"Error: MCP server connection failed. The service might be temporarily unavailable or experiencing high load. Details: {str(e)}"
            except UnboundLocalError as e:
                # Specific handling for the UnboundLocalError raised inside
                # langchain_mcp_adapters/tools.py when the server drops the call.
                if "call_tool_result" in str(e):
                    error_msg = "MCP server connection failed during tool execution"
                    logger.error(f"MCP tool {self.name} failed with UnboundLocalError: {str(e)}")
                    return f"Error: {error_msg}. The MCP server might be unavailable or experiencing timeout issues."
                raise
            except Exception as e:
                error_msg = f"MCP tool execution error: {str(e)}"
                if attempt < self.max_retries:
                    logger.warning(f"MCP tool {self.name} error (attempt {attempt+1}/{self.max_retries+1}): {error_msg}")
                    await asyncio.sleep(self.retry_delay)
                else:
                    logger.error(f"MCP tool {self.name} error after {self.max_retries+1} attempts: {error_msg}")
                    return f"Error: Tool execution failed. Details: {str(e)}"


async def async_create_mcp_tools(mcp_server_url: str, use_resilient_wrapper: bool = True, max_retries: int = 2):
    """Create MCP tools by connecting to the server via Streamable HTTP transport.

    Dynamically fetches the available tools from the MCP server at runtime,
    using MultiServerMCPClient from langchain-mcp-adapters, which handles the
    session-ID issues that occur with direct MCP client usage.

    Args:
        mcp_server_url: The URL of the MCP server to connect to.
        use_resilient_wrapper: Whether to wrap tools in ResilientMcpTool for
            better error handling.
        max_retries: Maximum number of retries for resilient tools.

    Returns:
        A list of tools available from the MCP server; empty on connection
        failure so the workflow can continue without tools.
    """
    logger.info(f"Connecting to MCP server: {mcp_server_url}")

    try:
        logger.debug("Setting up MultiServerMCPClient")
        # Single connection named 'default' for simplicity; no authentication
        # headers are needed for the SearchAPI MCP server.
        connections = {
            "default": {
                "url": mcp_server_url,
                "transport": "streamable_http",
                "headers": {},
            }
        }

        logger.debug("Creating MultiServerMCPClient")
        client = MultiServerMCPClient(connections=connections)

        logger.debug("Fetching available tools")
        tools = await client.get_tools()
        logger.info(f"Successfully connected to MCP server and retrieved {len(tools) if tools else 0} tools")

        # Log tool details for debugging.  The loop variable is deliberately
        # not named `tool`, to avoid shadowing the imported decorator, and the
        # description is always logged with its label (the previous ternary
        # bound to the whole f-string, so short descriptions lost their label).
        if tools:
            for i, mcp_tool in enumerate(tools):
                logger.debug(f"Tool {i+1} - Name: {mcp_tool.name}")
                description = mcp_tool.description
                if len(description) > 50:
                    description = f"{description[:50]}..."
                logger.debug(f"Tool {i+1} - Description: {description}")

        if use_resilient_wrapper:
            # Wrap each tool with the resilient wrapper.
            resilient_tools = [ResilientMcpTool(t, max_retries=max_retries) for t in tools]
            logger.info(f"Created {len(resilient_tools)} resilient MCP tools with {max_retries} max retries")
            return resilient_tools

        return tools
    except Exception as e:
        logger.error(f"Failed connecting to MCP server: {str(e)}")
        import traceback
        logger.debug("Exception traceback:")
        logger.debug(traceback.format_exc())
        # Return empty list to allow the workflow to continue.
        return []


def create_mcp_tools(mcp_server_url: str, use_resilient_wrapper: bool = True, max_retries: int = 2):
    """Create MCP tools synchronously.

    Note: this uses asyncio.run(), so it must not be called from inside an
    already-running event loop.

    Args:
        mcp_server_url: The URL of the MCP server to connect to.
        use_resilient_wrapper: Whether to wrap tools in ResilientMcpTool for
            better error handling.
        max_retries: Maximum number of retries for resilient tools.

    Returns:
        A list of tools available from the MCP server.
    """
    return asyncio.run(async_create_mcp_tools(mcp_server_url, use_resilient_wrapper, max_retries))
import os
import sys
from dotenv import load_dotenv
from langchain_core.messages import HumanMessage
from langchain_core.language_models import FakeListChatModel
from langchain_core.tools import tool

# Add the project root to the path so we can import the app modules
sys.path.append(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))

from app.core.agents import create_agent, create_supervisor
from app.core.graph import AgentGraph

# Load environment variables
load_dotenv(dotenv_path=os.path.join(os.path.dirname(os.path.dirname(os.path.abspath(__file__))), '.env'))

# No need to check for API key when using mock LLM
print("Using mock LLM for demonstration purposes")


# Create some specialized tools for different agents
@tool
def research_tool(query: str) -> str:
    """Research tool that can find information on a topic."""
    return f"Found the following information about '{query}': This is a simulated research result."


@tool
def code_analysis_tool(code: str) -> str:
    """Analyzes code and provides feedback."""
    return f"Analysis of the code: The code is syntactically correct and follows best practices."


@tool
def summarize_tool(text: str) -> str:
    """Summarizes text to extract key points."""
    return f"Summary of the text: The key points are extracted from the provided content."


def demo_ai_crew_chat_workflow():
    """
    Demonstrate the AI crew chat workflow:
    1. Supervisor receives input
    2. Supervisor analyzes and creates a plan based on AI crew members' capabilities
    3. Supervisor routes tasks to appropriate agents
    4. Agents process their tasks and respond
    5. Agents communicate with each other through the supervisor
    6. Supervisor collects all results and responds to the user

    Returns:
        The list of streamed graph steps (excluding the terminal "__end__" step).
    """
    # Use a fake LLM with predefined responses to demonstrate the workflow
    print("Setting up mock LLM for demonstration...")

    # Define mock responses for different agents as plain strings
    # (FakeListChatModel wraps them in AIMessage objects itself).
    researcher_responses = [
        "I've researched machine learning algorithms and found that decision trees are a fundamental supervised learning method."
    ]

    coder_responses = [
        "Here's a simple Python example of a decision tree classifier:\n\n```python\nfrom sklearn.tree import DecisionTreeClassifier\nimport numpy as np\n\n# Sample data\nX = np.array([[0, 0], [1, 1], [0, 1], [1, 0]])\ny = np.array([0, 0, 1, 1])\n\n# Train model\nclf = DecisionTreeClassifier()\nclf.fit(X, y)\n\n# Make predictions\npredictions = clf.predict(X)\nprint(f'Predictions: {predictions}')\n```"
    ]

    summarizer_responses = [
        "Decision trees work by splitting data based on feature values to create a tree-like structure of decisions. Each internal node represents a test on a feature, each branch represents the outcome of that test, and each leaf node represents a class label. They're intuitive and easy to visualize, making them great for beginners to understand."
    ]

    # NOTE(review): these per-agent LLMs are created for illustration but the
    # demo below drives every agent with supervisor_llm — confirm whether they
    # should be passed to create_agent instead.
    researcher_llm = FakeListChatModel(responses=researcher_responses)
    coder_llm = FakeListChatModel(responses=coder_responses)
    summarizer_llm = FakeListChatModel(responses=summarizer_responses)

    # For the supervisor, create responses that delegate tasks and synthesize results
    supervisor_responses = [
        "I'll delegate this task to our researcher to learn about ML algorithms first.",
        "Now I'll ask our coder to provide a Python example of decision trees.",
        "Finally, I'll have our summarizer create a simple explanation of decision trees.",
        "Here's a comprehensive response to your query:\n\n**Research on ML Algorithms:**\nDecision trees are a fundamental supervised learning method in machine learning.\n\n**Python Example:**\n```python\nfrom sklearn.tree import DecisionTreeClassifier\nimport numpy as np\n\n# Sample data\nX = np.array([[0, 0], [1, 1], [0, 1], [1, 0]])\ny = np.array([0, 0, 1, 1])\n\n# Train model\nclf = DecisionTreeClassifier()\nclf.fit(X, y)\n\n# Make predictions\npredictions = clf.predict(X)\nprint(f'Predictions: {predictions}')\n```\n\n**Simple Explanation:**\nDecision trees work by splitting data based on feature values to create a tree-like structure of decisions. Each internal node represents a test on a feature, each branch represents the outcome of that test, and each leaf node represents a class label. They're intuitive and easy to visualize."
    ]

    supervisor_llm = FakeListChatModel(responses=supervisor_responses)

    # Use supervisor_llm as the main LLM for demonstration
    llm = supervisor_llm

    # Create specialized agents with different capabilities
    researcher = create_agent(
        llm,
        [research_tool],
        """You are the Research Agent.
    Your specialty is finding and providing information on various topics.
    When assigned a task, thoroughly analyze what information is needed and use your research tool.
    Provide comprehensive and accurate information."""
    )

    coder = create_agent(
        llm,
        [code_analysis_tool],
        """You are the Code Agent.
    Your specialty is analyzing, writing, and improving code.
    When assigned a task related to code, carefully examine it and provide expert feedback.
    Use your code analysis tool to help with your assessment."""
    )

    summarizer = create_agent(
        llm,
        [summarize_tool],
        """You are the Summarization Agent.
    Your specialty is condensing information and extracting key points.
    When given information, identify the most important elements and create concise summaries.
    Use your summarize tool to help with this process."""
    )

    # Define the agents list for the supervisor
    agents = [
        {"name": "researcher", "agent": researcher},
        {"name": "coder", "agent": coder},
        {"name": "summarizer", "agent": summarizer},
    ]

    # Create the supervisor agent with enhanced instructions to demonstrate A2A communication
    supervisor = create_supervisor(
        llm,
        agents,
        """You are the Supervisor Agent that coordinates an AI crew.
    Your responsibilities:
    1. Analyze incoming user requests and create a detailed plan
    2. Break down complex tasks into subtasks appropriate for each specialized agent
    3. Route tasks to the appropriate agents based on their capabilities
    4. Coordinate communication between agents when they need to build on each other's work
    5. Collect results from all agents, synthesize them, and provide a comprehensive response

    Available agents:
    - researcher: Expert at finding information on topics
    - coder: Expert at analyzing and working with code
    - summarizer: Expert at condensing information and extracting key points

    For complex tasks, consider how agents can work together sequentially, with one agent's
    output becoming another agent's input. Think carefully about task dependencies.
    """
    )

    # Create the agent graph with tools and compile it
    tools = [research_tool, code_analysis_tool, summarize_tool]
    graph = AgentGraph(supervisor, agents, tools)
    app = graph.compile()

    # User query that requires multiple agents working together
    complex_query = """
    I need help understanding the basics of machine learning algorithms,
    then I want to see a simple Python example of a decision tree classifier,
    and finally give me a summary of how decision trees work in simple terms.
    """

    # Execute the graph and capture all steps
    steps = []
    for step in app.stream(
        {
            "messages": [
                HumanMessage(content=complex_query)
            ]
        }
    ):
        if "__end__" not in step:
            steps.append(step)
            print(f"\n--- STEP ---\n{step}\n-----------")

    # Analyze the workflow
    print(f"\nWorkflow had {len(steps)} steps showing agent interactions")

    # Check if supervisor routed to different agents
    agent_involvement = set()
    for step in steps:
        if "next" in step and step["next"] in ["researcher", "coder", "summarizer"]:
            agent_involvement.add(step["next"])

    # Show which agents were involved
    print(f"Agents involved: {', '.join(agent_involvement)}")

    # Extract the final assistant message, guarding against an empty run or a
    # final step without a "messages" payload (the previous code indexed
    # steps[-1]["messages"] unconditionally and then printed the final
    # response a second time after the if/else).
    final_response = ""
    if steps:
        final_state = steps[-1]
        final_messages = final_state.get("messages", []) if isinstance(final_state, dict) else []
        if final_messages:
            final_response = final_messages[-1].content

    if final_response:
        print("\n----- FINAL RESPONSE FROM SUPERVISOR -----")
        print(final_response)
        print("-----------------------------------------")
    else:
        print("No final response was generated.")

    return steps


if __name__ == "__main__":
    # When run directly, execute the demo and print detailed output
    print("Running AI Crew Chat Demonstration...")
    steps = demo_ai_crew_chat_workflow()

    # Print summary of agent interactions
    agent_sequence = []
    for step in steps:
        if "next" in step:
            agent_sequence.append(step["next"])

    print("\nAgent Interaction Sequence:")
    print(" -> ".join(agent_sequence))
import pytest
import os
import sys
from sqlalchemy import create_engine
from sqlalchemy.orm import sessionmaker
from dotenv import load_dotenv
from langchain_core.messages import AIMessage, HumanMessage

# Add project root to path
sys.path.append(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))

from app.models.base import Base
from app.models import mcp_server as mcp_server_model
from app.schemas.crew import CrewCreate
from app.schemas.agent import AgentCreate
from app.schemas.tool import ToolCreate
from app.schemas.mcp_server import McpServerCreate
from app.schemas.prompt import PromptCreate
from app.services import crew as crew_service
from app.services import agent as agent_service
from app.services import tool as tool_service
from app.services import mcp_server as mcp_server_service
from app.services.crew import execute_prompt

# Load environment variables for tests
load_dotenv()

# Use an in-memory SQLite database for testing
DATABASE_URL = "sqlite:///:memory:"
engine = create_engine(DATABASE_URL, connect_args={"check_same_thread": False})
TestingSessionLocal = sessionmaker(autocommit=False, autoflush=False, bind=engine)


@pytest.fixture(scope="function")
def db_session():
    """Create all tables, yield a session, then drop everything.

    Gives each test a pristine schema and guarantees isolation between tests.
    """
    Base.metadata.create_all(bind=engine)
    db = TestingSessionLocal()
    try:
        yield db
    finally:
        db.close()
        Base.metadata.drop_all(bind=engine)


@pytest.fixture(scope="function")
def simple_crew(db_session):
    """A crew containing only its auto-created supervisor agent."""
    crew_data = CrewCreate(name="Simple Crew")
    crew = crew_service.create_crew(db=db_session, crew=crew_data)

    supervisor = next(
        (agent for agent in crew.agents if agent.role == "supervisor"), None
    )
    assert supervisor is not None, "Supervisor not found in the created crew"
    supervisor.system_prompt = """You are a helpful assistant. For simple greetings like 'hello', 'hi', or 'hey', respond with a friendly greeting and then FINISH. For any other request, you must state that you cannot help and then FINISH."""
    db_session.commit()

    return crew


@pytest.fixture(scope="function")
def travel_crew(db_session):
    """A multi-agent crew for travel advice: supervisor + researcher with a search tool."""
    # Reuse an existing Tavily MCP server record if one is present.
    tavily_server = (
        db_session.query(mcp_server_model.McpServer).filter_by(name="Tavily").first()
    )
    if not tavily_server:
        tavily_server_data = McpServerCreate(
            name="Tavily", url="https://api.tavily.com", description="Tavily Search API"
        )
        tavily_server = mcp_server_service.create_mcp_server(
            db=db_session, mcp_server=tavily_server_data
        )

    crew_data = CrewCreate(name="Travel Crew")
    crew = crew_service.create_crew(db=db_session, crew=crew_data)

    supervisor = next(
        (agent for agent in crew.agents if agent.role == "supervisor"), None
    )
    assert supervisor is not None, "Supervisor not found for the travel crew"
    supervisor.system_prompt = """You are a supervisor for a travel planning crew.
    Your agents are:
    - Researcher: A travel expert who can find information online.

    Your job is to:
    1. Analyze user queries for travel advice.
    2. Delegate research tasks to the 'Researcher' agent.
    3. Synthesize the researcher's findings into a helpful travel plan.
    4. After providing the travel plan, you must respond with FINISH.
    5. If the query is not about travel, say you cannot help and then FINISH."""
    db_session.commit()

    researcher_data = AgentCreate(
        name="Researcher",
        role="researcher",
        system_prompt="You are a world-class travel researcher. You find the best information on attractions and local cuisine.",
        crew_id=crew.id,
    )
    researcher = agent_service.create_agent(db=db_session, agent=researcher_data)

    tool_data = ToolCreate(
        name="tavily_search",
        description="A tool to search the web for information.",
        agent_id=researcher.id,
        mcp_server_id=tavily_server.id,
        config={"api_key": os.getenv("TAVILY_API_KEY")},
    )
    tool_service.create_tool(db=db_session, tool=tool_data)

    return crew


def get_sender(msg):
    """Helper to get the sender from different message types."""
    if isinstance(msg, AIMessage):
        # AIMessage always defines a `name` attribute (default None), so a
        # plain getattr(..., "supervisor") never falls back.  Treat a missing
        # or empty name as the supervisor, matching the intended default.
        return msg.name or "supervisor"
    if isinstance(msg, HumanMessage):
        return "user"
    if isinstance(msg, dict):
        # Check various possible fields for sender info
        if msg.get("type") == "system":
            return "system"
        return msg.get("from") or msg.get("name") or msg.get("sender")
    return None


def test_simple_direct_response(db_session, simple_crew):
    """
    Test Case 1: Simple Direct Response
    - User asks "hello"
    - Supervisor should respond directly without delegation.
    """
    prompt = PromptCreate(
        crew_id=simple_crew.id, prompt="hello", user_id="test-user-simple"
    )

    final_result = execute_prompt(db=db_session, crew_id=simple_crew.id, prompt=prompt)

    assert final_result is not None
    assert "messages" in final_result
    messages = final_result["messages"]
    assert len(messages) > 0

    # Debug: Print all messages for verification
    print(f"\n=== TEST 1: Simple Direct Response ===")
    print(f"Total messages: {len(messages)}")
    for i, msg in enumerate(messages):
        sender = get_sender(msg)
        content = (
            msg.get("content", "")
            if isinstance(msg, dict)
            else getattr(msg, "content", "")
        )
        print(f"Message {i+1} - From: {sender}")
        print(f"Content: {content}")
        print("---")

    # The last message should be from the supervisor.
    last_message = messages[-1]
    assert get_sender(last_message) == "supervisor"

    content = (
        last_message.get("content", "")
        if isinstance(last_message, dict)
        else last_message.content
    )
    # The supervisor should have responded, either with a greeting or a message about simple requests
    assert len(content) > 0, "Supervisor should have provided a response"
    # Check that it's a reasonable response (contains common words)
    assert any(word in content.lower() for word in ["hello", "help", "respond", "simple", "request"]), \
        f"Unexpected supervisor response: {content}"

    # Ensure no other agents were involved
    agent_messages = [
        m for m in messages if get_sender(m) not in ["user", "supervisor", None]
    ]
    assert (
        len(agent_messages) == 0
    ), f"Unexpected agent messages found: {[get_sender(m) for m in agent_messages]}"


def test_multi_agent_collaboration(db_session, travel_crew):
    """
    Test Case 2: Multi-Agent Collaboration
    - User asks for travel advice.
    - Supervisor delegates to Researcher.
    - Supervisor synthesizes the final response.
    """
    prompt = PromptCreate(
        crew_id=travel_crew.id,
        prompt="Give me travel advice for Nha Trang beach in Vietnam.",
        user_id="test-user-multi",
    )

    final_result = execute_prompt(db=db_session, crew_id=travel_crew.id, prompt=prompt)

    assert final_result is not None
    assert "messages" in final_result
    messages = final_result["messages"]
    assert len(messages) > 0

    # Debug: Print all messages for verification
    print(f"\n=== TEST 2: Multi-Agent Collaboration ===")
    print(f"Total messages: {len(messages)}")
    for i, msg in enumerate(messages):
        sender = get_sender(msg)
        content = (
            msg.get("content", "")
            if isinstance(msg, dict)
            else getattr(msg, "content", "")
        )
        print(f"Message {i+1} - From: {sender}")
        print(f"Content: {content}")
        print("---")

    # Check for researcher involvement
    researcher_messages = [m for m in messages if get_sender(m) == "Researcher"]
    assert len(researcher_messages) > 0, "Researcher agent was not called."

    # The last message should be from supervisor (if workflow completed normally)
    # or system (if terminated early, e.g. on the max-visits guard).
    last_message = messages[-1]
    last_sender = get_sender(last_message)

    if last_sender == "system":
        # Workflow was terminated: verify the supervisor took part and that the
        # destination was discussed somewhere in the transcript.
        supervisor_messages = [m for m in messages if get_sender(m) == "supervisor"]
        assert len(supervisor_messages) > 0, "Supervisor agent was not involved."

        all_content = " ".join(
            [
                (m.get("content", "") if isinstance(m, dict) else m.content)
                for m in messages
                if hasattr(m, "content") or (isinstance(m, dict) and "content" in m)
            ]
        )
        assert (
            "nha trang" in all_content.lower() or "vietnam" in all_content.lower()
        ), "Travel destination not mentioned in any message"
    else:
        # Normal completion case - last message should be from supervisor
        assert last_sender == "supervisor"
        content = (
            last_message.get("content", "")
            if isinstance(last_message, dict)
            else last_message.content
        )
        assert "nha trang" in content.lower()
[Conversations](#conversations) 22 | * [Create a Conversation](#1-create-a-conversation) 23 | * [Get a list of Conversations](#2-get-a-list-of-conversations) 24 | * [Get a specific Conversation](#3-get-a-specific-conversation) 25 | 26 | --- 27 | 28 | ## Crews 29 | 30 | ### 1. Create a Crew with a Supervisor and Agents 31 | 32 | To create a new crew with a supervisor and multiple agents, you first need to create the crew, then create the agents and assign them to the crew. 33 | 34 | #### Step 1: Create the Crew 35 | 36 | Send a `POST` request to the `/crews/` endpoint. 37 | 38 | **Endpoint:** `POST /crews/` 39 | 40 | **Request Body:** 41 | 42 | ```json 43 | { 44 | "name": "My New Crew" 45 | } 46 | ``` 47 | 48 | **Example using `curl`:** 49 | 50 | ```bash 51 | curl -X 'POST' \ 52 | 'http://127.0.0.1:8000/crews/' \ 53 | -H 'Content-Type: application/json' \ 54 | -d '{ 55 | "name": "My New Crew" 56 | }' 57 | ``` 58 | 59 | #### Step 2: Create the Supervisor Agent 60 | 61 | Send a `POST` request to the `/agents/` endpoint with the `role` set to `"supervisor"`. 62 | 63 | **Endpoint:** `POST /agents/` 64 | 65 | **Request Body:** 66 | 67 | ```json 68 | { 69 | "name": "My Supervisor", 70 | "crew_id": 1, 71 | "role": "supervisor", 72 | "system_prompt": "You are the supervisor. Your job is to route the conversation to the correct agent." 73 | } 74 | ``` 75 | 76 | **Example using `curl`:** 77 | 78 | ```bash 79 | curl -X 'POST' \ 80 | 'http://127.0.0.1:8000/agents/' \ 81 | -H 'Content-Type: application/json' \ 82 | -d '{ 83 | "name": "My Supervisor", 84 | "crew_id": 1, 85 | "role": "supervisor", 86 | "system_prompt": "You are the supervisor. Your job is to route the conversation to the correct agent." 87 | }' 88 | ``` 89 | 90 | #### Step 3: Create the Worker Agents 91 | 92 | Send a `POST` request to the `/agents/` endpoint for each worker agent, with the `role` set to `"worker"`. 
93 | 94 | **Endpoint:** `POST /agents/` 95 | 96 | **Request Body:** 97 | 98 | ```json 99 | { 100 | "name": "My Worker Agent 1", 101 | "crew_id": 1, 102 | "role": "worker", 103 | "system_prompt": "You are a worker agent. Your job is to use the available tools to answer the user's questions.", 104 | "tools": [1, 2] 105 | } 106 | ``` 107 | 108 | **Example using `curl`:** 109 | 110 | ```bash 111 | curl -X 'POST' \ 112 | 'http://127.0.0.1:8000/agents/' \ 113 | -H 'Content-Type: application/json' \ 114 | -d '{ 115 | "name": "My Worker Agent 1", 116 | "crew_id": 1, 117 | "role": "worker", 118 | "system_prompt": "You are a worker agent. Your job is to use the available tools to answer the user's questions.", 119 | "tools": [1, 2] 120 | }' 121 | ``` 122 | 123 | ### 2. Get a list of Crews 124 | 125 | To get a list of all crews, send a `GET` request to the `/crews/` endpoint. 126 | 127 | **Endpoint:** `GET /crews/` 128 | 129 | **Example using `curl`:** 130 | 131 | ```bash 132 | curl -X 'GET' 'http://127.0.0.1:8000/crews/' 133 | ``` 134 | 135 | ### 3. Get a specific Crew 136 | 137 | To get a specific crew by its ID, send a `GET` request to the `/crews/{crew_id}` endpoint. 138 | 139 | **Endpoint:** `GET /crews/{crew_id}` 140 | 141 | **Example using `curl`:** 142 | 143 | ```bash 144 | curl -X 'GET' 'http://127.0.0.1:8000/crews/1' 145 | ``` 146 | 147 | ### 4. Update a Crew 148 | 149 | To update a crew's information, send a `PUT` request to the `/crews/{crew_id}` endpoint. 150 | 151 | **Endpoint:** `PUT /crews/{crew_id}` 152 | 153 | **Request Body:** 154 | 155 | ```json 156 | { 157 | "name": "My Updated Crew" 158 | } 159 | ``` 160 | 161 | **Example using `curl`:** 162 | 163 | ```bash 164 | curl -X 'PUT' \ 165 | 'http://127.0.0.1:8000/crews/1' \ 166 | -H 'Content-Type: application/json' \ 167 | -d '{ 168 | "name": "My Updated Crew" 169 | }' 170 | ``` 171 | 172 | ### 5. Delete a Crew 173 | 174 | To delete a crew, send a `DELETE` request to the `/crews/{crew_id}` endpoint. 
175 | 176 | **Endpoint:** `DELETE /crews/{crew_id}` 177 | 178 | **Example using `curl`:** 179 | 180 | ```bash 181 | curl -X 'DELETE' 'http://127.0.0.1:8000/crews/1' 182 | ``` 183 | 184 | ### 6. Execute a Prompt in a Crew 185 | 186 | To execute a prompt within a specific crew, send a `POST` request to the `/crews/{crew_id}/execute` endpoint. 187 | 188 | **Endpoint:** `POST /crews/{crew_id}/execute` 189 | 190 | **Request Body:** 191 | 192 | ```json 193 | { 194 | "user_input": "What is the weather in San Francisco?" 195 | } 196 | ``` 197 | 198 | **Example using `curl`:** 199 | 200 | ```bash 201 | curl -X 'POST' \ 202 | 'http://127.0.0.1:8000/crews/1/execute' \ 203 | -H 'Content-Type: application/json' \ 204 | -d '{ 205 | "user_input": "What is the weather in San Francisco?" 206 | }' 207 | ``` 208 | 209 | --- 210 | 211 | ## Agents 212 | 213 | ### 1. Create an Agent 214 | 215 | To create a new agent within a crew, send a `POST` request to the `/agents/` endpoint. 216 | 217 | **Endpoint:** `POST /agents/` 218 | 219 | **Request Body:** 220 | 221 | ```json 222 | { 223 | "name": "My New Agent", 224 | "crew_id": 1, 225 | "role": "worker", 226 | "system_prompt": "You are a worker agent. Your job is to use the available tools to answer the user's questions.", 227 | "tools": [1, 2] 228 | } 229 | ``` 230 | 231 | **Example using `curl`:** 232 | 233 | ```bash 234 | curl -X 'POST' \ 235 | 'http://127.0.0.1:8000/agents/' \ 236 | -H 'Content-Type: application/json' \ 237 | -d '{ 238 | "name": "My New Agent", 239 | "crew_id": 1, 240 | "role": "worker", 241 | "system_prompt": "You are a worker agent. Your job is to use the available tools to answer the user's questions.", 242 | "tools": [1, 2] 243 | }' 244 | ``` 245 | 246 | ### 2. Get a list of Agents 247 | 248 | To get a list of all agents, send a `GET` request to the `/agents/` endpoint. 
249 | 
250 | **Endpoint:** `GET /agents/`
251 | 
252 | **Example using `curl`:**
253 | 
254 | ```bash
255 | curl -X 'GET' 'http://127.0.0.1:8000/agents/'
256 | ```
257 | 
258 | ### 3. Get a specific Agent
259 | 
260 | To get a specific agent by its ID, send a `GET` request to the `/agents/{agent_id}` endpoint.
261 | 
262 | **Endpoint:** `GET /agents/{agent_id}`
263 | 
264 | **Example using `curl`:**
265 | 
266 | ```bash
267 | curl -X 'GET' 'http://127.0.0.1:8000/agents/1'
268 | ```
269 | 
270 | ### 4. Update an Agent
271 | 
272 | To update an agent's information, send a `PUT` request to the `/agents/{agent_id}` endpoint.
273 | 
274 | **Endpoint:** `PUT /agents/{agent_id}`
275 | 
276 | **Request Body:**
277 | 
278 | ```json
279 | {
280 | "name": "My Updated Agent",
281 | "crew_id": 1,
282 | "role": "worker",
283 | "system_prompt": "You are a worker agent. Your job is to use the available tools to answer the user's questions.",
284 | "tools": [1]
285 | }
286 | ```
287 | 
288 | **Example using `curl`:**
289 | 
290 | ```bash
291 | curl -X 'PUT' \
292 | 'http://127.0.0.1:8000/agents/1' \
293 | -H 'Content-Type: application/json' \
294 | -d '{
295 | "name": "My Updated Agent",
296 | "crew_id": 1,
297 | "role": "worker",
298 | "system_prompt": "You are a worker agent. Your job is to use the available tools to answer the user's questions.",
299 | "tools": [1]
300 | }'
301 | ```
302 | 
303 | ### 5. Delete an Agent
304 | 
305 | To delete an agent, send a `DELETE` request to the `/agents/{agent_id}` endpoint.
306 | 
307 | **Endpoint:** `DELETE /agents/{agent_id}`
308 | 
309 | **Example using `curl`:**
310 | 
311 | ```bash
312 | curl -X 'DELETE' 'http://127.0.0.1:8000/agents/1'
313 | ```
314 | 
315 | ### 6. Add a Tool to an Agent
316 | 
317 | To add a tool to an agent, send a `POST` request to the `/agents/{agent_id}/tools/{tool_id}` endpoint.
318 | 319 | **Endpoint:** `POST /agents/{agent_id}/tools/{tool_id}` 320 | 321 | **Example using `curl`:** 322 | 323 | ```bash 324 | curl -X 'POST' 'http://127.0.0.1:8000/agents/1/tools/3' 325 | ``` 326 | 327 | --- 328 | 329 | ## MCP Servers 330 | 331 | ### 1. Get a list of available tools of a MCP server 332 | 333 | To get a list of available tools of a MCP server, send a `GET` request to the `/mcp_servers/{mcp_server_id}/tools` endpoint. 334 | 335 | **Endpoint:** `GET /mcp_servers/{mcp_server_id}/tools` 336 | 337 | **Example using `curl`:** 338 | 339 | ```bash 340 | curl -X 'GET' 'http://127.0.0.1:8000/mcp_servers/1/tools' 341 | ``` 342 | 343 | ### 2. Get a list of available resources of a MCP server 344 | 345 | To get a list of available resources of a MCP server, send a `GET` request to the `/mcp_servers/{mcp_server_id}/resources` endpoint. 346 | 347 | **Endpoint:** `GET /mcp_servers/{mcp_server_id}/resources` 348 | 349 | **Example using `curl`:** 350 | 351 | ```bash 352 | curl -X 'GET' 'http://127.0.0.1:8000/mcp_servers/1/resources' 353 | ``` 354 | 355 | ### 3. Get a list of available prompts of a MCP server 356 | 357 | To get a list of available prompts of a MCP server, send a `GET` request to the `/mcp_servers/{mcp_server_id}/prompts` endpoint. 358 | 359 | **Endpoint:** `GET /mcp_servers/{mcp_server_id}/prompts` 360 | 361 | **Example using `curl`:** 362 | 363 | ```bash 364 | curl -X 'GET' 'http://127.0.0.1:8000/mcp_servers/1/prompts' 365 | ``` 366 | 367 | --- 368 | 369 | ## Conversations 370 | 371 | ### 1. Create a Conversation 372 | 373 | To create a new conversation, send a `POST` request to the `/conversations/` endpoint. 
374 | 375 | **Endpoint:** `POST /conversations/` 376 | 377 | **Request Body:** 378 | 379 | ```json 380 | { 381 | "user_input": "Hello, agent!", 382 | "agent_output": "Hello, user!", 383 | "crew_id": 1, 384 | "agent_id": 1 385 | } 386 | ``` 387 | 388 | **Example using `curl`:** 389 | 390 | ```bash 391 | curl -X 'POST' \ 392 | 'http://127.0.0.1:8000/conversations/' \ 393 | -H 'Content-Type: application/json' \ 394 | -d '{ 395 | "user_input": "Hello, agent!", 396 | "agent_output": "Hello, user!", 397 | "crew_id": 1, 398 | "agent_id": 1 399 | }' 400 | ``` 401 | 402 | ### 2. Get a list of Conversations 403 | 404 | To get a list of all conversations, send a `GET` request to the `/conversations/` endpoint. 405 | 406 | **Endpoint:** `GET /conversations/` 407 | 408 | **Example using `curl`:** 409 | 410 | ```bash 411 | curl -X 'GET' 'http://127.0.0.1:8000/conversations/' 412 | ``` 413 | 414 | ### 3. Get a specific Conversation 415 | 416 | To get a specific conversation by its ID, send a `GET` request to the `/conversations/{conversation_id}` endpoint. 417 | 418 | **Endpoint:** `GET /conversations/{conversation_id}` 419 | 420 | **Example using `curl`:** 421 | 422 | ```bash 423 | curl -X 'GET' 'http://127.0.0.1:8000/conversations/1' 424 | ``` 425 | -------------------------------------------------------------------------------- /example_ai_crew_chat.py: -------------------------------------------------------------------------------- 1 | """ 2 | Example AI Crew Chat: Supervisor-Agent Communication Pattern 3 | 4 | This script demonstrates how an AI crew with a supervisor and multiple specialized agents 5 | would work together to solve complex tasks. It simulates the interaction pattern where 6 | a supervisor agent receives input, creates a plan, delegates tasks to specialized agents, 7 | and coordinates their interactions to accomplish a goal. 8 | 9 | For demonstration purposes, we're using simulated agent responses instead of real API calls. 
10 | """ 11 | 12 | import os 13 | import sys 14 | from typing import List, Dict, Any, Callable 15 | from dotenv import load_dotenv 16 | from langchain_core.messages import HumanMessage, AIMessage, SystemMessage 17 | 18 | # Load environment variables 19 | load_dotenv() 20 | 21 | class SimulatedAgent: 22 | """Simulates an AI agent with specific capabilities for demonstration purposes.""" 23 | 24 | def __init__(self, name: str, specialty: str, capabilities: List[str]): 25 | self.name = name 26 | self.specialty = specialty 27 | self.capabilities = capabilities 28 | self.history = [] 29 | 30 | def process_task(self, task: str) -> str: 31 | """Simulates processing a task based on the agent's specialty.""" 32 | response = f"[Agent: {self.name}] I've processed the task regarding {task}.\n\n" 33 | 34 | if self.name == "researcher": 35 | response += "Based on my research capabilities, I found the following information:\n\n" 36 | if "machine learning" in task.lower(): 37 | response += """ 38 | Machine learning algorithms are computational methods that allow computers to learn from data without being explicitly programmed. 39 | Key categories include: 40 | 1. Supervised Learning: Learning from labeled data (e.g., classification, regression) 41 | 2. Unsupervised Learning: Finding patterns in unlabeled data (e.g., clustering, dimensionality reduction) 42 | 3. Reinforcement Learning: Learning through interaction with an environment 43 | """ 44 | elif "decision tree" in task.lower(): 45 | response += """ 46 | Decision trees are a popular supervised learning method that works by creating a tree-like model of decisions. 47 | They split the data into subsets based on feature values, creating a flowchart-like structure that helps with classification or regression tasks. 48 | Key advantages include interpretability and handling both numerical and categorical data. 
49 | """ 50 | 51 | elif self.name == "coder": 52 | response += "Here's the code implementation you requested:\n\n" 53 | if "decision tree" in task.lower(): 54 | response += """```python 55 | from sklearn.tree import DecisionTreeClassifier 56 | from sklearn.datasets import load_iris 57 | from sklearn.model_selection import train_test_split 58 | from sklearn.metrics import accuracy_score 59 | 60 | # Load a sample dataset 61 | iris = load_iris() 62 | X, y = iris.data, iris.target 63 | 64 | # Split the data 65 | X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.3, random_state=42) 66 | 67 | # Create and train the decision tree classifier 68 | clf = DecisionTreeClassifier(max_depth=3) 69 | clf.fit(X_train, y_train) 70 | 71 | # Make predictions 72 | predictions = clf.predict(X_test) 73 | 74 | # Calculate accuracy 75 | accuracy = accuracy_score(y_test, predictions) 76 | print(f"Decision Tree Classifier Accuracy: {accuracy:.2f}") 77 | ``` 78 | """ 79 | 80 | elif self.name == "summarizer": 81 | response += "Here's a concise summary of the key points:\n\n" 82 | if "decision tree" in task.lower(): 83 | response += """ 84 | Decision trees are intuitive machine learning models that: 85 | - Split data based on feature values to make predictions 86 | - Create a flowchart-like structure that's easy to interpret 87 | - Work for both classification and regression problems 88 | - Handle both numerical and categorical data 89 | - Can be used as building blocks for more advanced ensemble methods like Random Forests 90 | - May be prone to overfitting if not properly pruned or limited in depth 91 | """ 92 | 93 | self.history.append({"task": task, "response": response}) 94 | return response 95 | 96 | 97 | class SupervisorAgent: 98 | """Simulates a supervisor agent that coordinates multiple specialized agents.""" 99 | 100 | def __init__(self, name: str, agents: List[SimulatedAgent]): 101 | self.name = name 102 | self.agents = {agent.name: agent for agent in agents} 103 
| self.history = [] 104 | 105 | def process_query(self, query: str) -> Dict[str, Any]: 106 | """ 107 | Processes a complex query by: 108 | 1. Analyzing the query 109 | 2. Creating a plan with tasks for different agents 110 | 3. Routing tasks to appropriate agents 111 | 4. Synthesizing results 112 | """ 113 | # Record the query 114 | self.history.append({"query": query, "steps": []}) 115 | 116 | # Step 1: Analyze the query and create a plan 117 | plan = self._create_plan(query) 118 | self.history[-1]["steps"].append({"action": "create_plan", "plan": plan}) 119 | 120 | # Step 2: Execute the plan by routing tasks to agents 121 | agent_responses = {} 122 | for task in plan["tasks"]: 123 | agent_name = task["agent"] 124 | if agent_name in self.agents: 125 | response = self.agents[agent_name].process_task(task["description"]) 126 | agent_responses[agent_name] = response 127 | self.history[-1]["steps"].append({ 128 | "action": "delegate_task", 129 | "agent": agent_name, 130 | "task": task["description"], 131 | "response": response 132 | }) 133 | 134 | # Step 3: Synthesize results 135 | final_response = self._synthesize_results(query, plan, agent_responses) 136 | self.history[-1]["steps"].append({ 137 | "action": "synthesize_results", 138 | "final_response": final_response 139 | }) 140 | 141 | return { 142 | "query": query, 143 | "plan": plan, 144 | "agent_responses": agent_responses, 145 | "final_response": final_response 146 | } 147 | 148 | def _create_plan(self, query: str) -> Dict[str, Any]: 149 | """Creates a plan based on the query and available agents.""" 150 | plan = { 151 | "summary": f"Plan for handling query: {query}", 152 | "tasks": [] 153 | } 154 | 155 | # For this example, we'll create a predetermined plan based on the query 156 | if "machine learning" in query.lower() and "decision tree" in query.lower(): 157 | plan["tasks"] = [ 158 | { 159 | "id": 1, 160 | "agent": "researcher", 161 | "description": "Research the basics of machine learning algorithms" 162 | 
}, 163 | { 164 | "id": 2, 165 | "agent": "coder", 166 | "description": "Create a Python example of a decision tree classifier" 167 | }, 168 | { 169 | "id": 3, 170 | "agent": "summarizer", 171 | "description": "Provide a simple explanation of how decision trees work" 172 | } 173 | ] 174 | else: 175 | # Default plan for other queries 176 | plan["tasks"] = [ 177 | { 178 | "id": 1, 179 | "agent": "researcher", 180 | "description": f"Research information about: {query}" 181 | }, 182 | { 183 | "id": 2, 184 | "agent": "summarizer", 185 | "description": f"Summarize key points about: {query}" 186 | } 187 | ] 188 | 189 | return plan 190 | 191 | def _synthesize_results(self, query: str, plan: Dict[str, Any], agent_responses: Dict[str, str]) -> str: 192 | """Synthesizes results from multiple agents into a cohesive response.""" 193 | response = f"[Supervisor] I've analyzed your query about '{query}' and coordinated with our specialized agents.\n\n" 194 | 195 | response += "Here's what our team found:\n\n" 196 | 197 | # Include relevant information from each agent based on the plan 198 | for task in plan["tasks"]: 199 | agent_name = task["agent"] 200 | if agent_name in agent_responses: 201 | response += f"## From our {agent_name.title()} Agent\n" 202 | # Extract just the content part, not the agent identifier 203 | content = agent_responses[agent_name].split("\n\n", 1)[1] if "\n\n" in agent_responses[agent_name] else agent_responses[agent_name] 204 | response += f"{content}\n\n" 205 | 206 | # Add a conclusion 207 | response += "## Conclusion\n" 208 | if "machine learning" in query.lower() and "decision tree" in query.lower(): 209 | response += """ 210 | I've coordinated our team to provide you with a comprehensive overview of decision trees in machine learning. 211 | Our Researcher provided the foundational concepts, the Coder created a practical implementation example, 212 | and our Summarizer distilled the key points into an easy-to-understand format. 
213 | 214 | This demonstrates how our AI crew works together: we break down complex tasks, assign them to specialized 215 | agents with different capabilities, and then synthesize the results into a cohesive response that addresses 216 | all aspects of your query. 217 | """ 218 | else: 219 | response += f"I've coordinated our team to provide you with information about {query}." 220 | 221 | return response 222 | 223 | 224 | def run_ai_crew_chat_demo(): 225 | """Runs a demonstration of the AI crew chat workflow.""" 226 | # Create specialized agents 227 | researcher = SimulatedAgent( 228 | name="researcher", 229 | specialty="information retrieval", 230 | capabilities=["web search", "data analysis", "knowledge base access"] 231 | ) 232 | 233 | coder = SimulatedAgent( 234 | name="coder", 235 | specialty="software development", 236 | capabilities=["code generation", "code analysis", "debugging"] 237 | ) 238 | 239 | summarizer = SimulatedAgent( 240 | name="summarizer", 241 | specialty="content summarization", 242 | capabilities=["text analysis", "key point extraction", "simplification"] 243 | ) 244 | 245 | # Create the supervisor 246 | supervisor = SupervisorAgent( 247 | name="supervisor", 248 | agents=[researcher, coder, summarizer] 249 | ) 250 | 251 | # Example complex query that requires multiple agents 252 | query = """ 253 | I need help understanding the basics of machine learning algorithms, 254 | then I want to see a simple Python example of a decision tree classifier, 255 | and finally give me a summary of how decision trees work in simple terms. 
256 | """ 257 | 258 | print("\n" + "="*80) 259 | print("AI CREW CHAT DEMONSTRATION") 260 | print("="*80) 261 | print(f"\nUser Query: {query.strip()}\n") 262 | print("="*80 + "\n") 263 | 264 | # Process the query through the supervisor 265 | result = supervisor.process_query(query) 266 | 267 | # Display the workflow step by step 268 | print("STEP 1: SUPERVISOR CREATES A PLAN") 269 | print("-"*50) 270 | print(f"Query: {result['query'].strip()}") 271 | print("\nPlan:") 272 | for task in result['plan']['tasks']: 273 | print(f"- Task {task['id']}: Assign to {task['agent']} - {task['description']}") 274 | print("\n" + "-"*50 + "\n") 275 | 276 | # Show each agent's response 277 | print("STEP 2: AGENTS PROCESS THEIR ASSIGNED TASKS") 278 | print("-"*50) 279 | for agent_name, response in result['agent_responses'].items(): 280 | print(f"\n### {agent_name.upper()} RESPONSE:") 281 | print(response) 282 | print("-"*30) 283 | print("\n" + "-"*50 + "\n") 284 | 285 | # Show the final synthesized response 286 | print("STEP 3: SUPERVISOR SYNTHESIZES FINAL RESPONSE") 287 | print("-"*50) 288 | print(result['final_response']) 289 | print("\n" + "-"*50) 290 | 291 | print("\n" + "="*80) 292 | print("DEMONSTRATION COMPLETE") 293 | print("="*80 + "\n") 294 | 295 | return result 296 | 297 | 298 | if __name__ == "__main__": 299 | run_ai_crew_chat_demo() 300 | -------------------------------------------------------------------------------- /PROJECT_OVERVIEW.md: -------------------------------------------------------------------------------- 1 | # PROJECT_OVERVIEW.md 2 | 3 | ## Concept 4 | * Each AI agent cluster can have multiple AI agent crews (AI Crews) 5 | * Each AI crew can have multiple AI agent, leaded by a superviser (a default AI agent of an AI crew) 6 | * Each AI agent can call tools via MCP servers integration 7 | 8 | ## How it works 9 | * A supervisor agent will receive input (prompt) from a user via API call, then create a detailed plan with its current capabilities (AI agents 
underneat and their tools) 10 | * Then request the AI agents to perform tasks via A2A protocol 11 | * Wait for all AI agents finish given tasks 12 | * Grab all the results, analyze and respond to user based on the original input prompt. 13 | 14 | 15 | ## AI Crew Workflow Demonstration 16 | 17 | To demonstrate the complete AI crew workflow, we have implemented a comprehensive test in `tests/test_ai_crew_simple_demo.py`. This test showcases the end-to-end flow using real API endpoints and services to create AI crews, agents, tools, and execute prompts through the supervisor-agent architecture. 18 | 19 | ### Workflow Steps 20 | 21 | The demonstration follows these steps: 22 | 23 | 1. **Create an AI Crew with a Supervisor Agent** 24 | - Creates a new crew with a specific name and description 25 | - Creates a supervisor agent with specialized system instructions 26 | - Attaches the supervisor agent to the crew 27 | 28 | 2. **Add Specialized Agents to the Crew** 29 | - Creates multiple specialized agents (Researcher, Coder, Summarizer) 30 | - Each agent has custom system instructions tailored to its role 31 | - Adds the agents to the crew for collaborative problem solving 32 | 33 | 3. **Add Tools to Agents** 34 | - Creates or fetches an MCP server for tool integration 35 | - Creates tools with appropriate descriptions and API names 36 | - Assigns tools to agents based on their specialized needs 37 | 38 | 4. **Execute a Prompt Through the Crew** 39 | - Sends a user query to the crew via the supervisor agent 40 | - Uses the actual `crew_service.execute_prompt()` function to process the query 41 | - The function handles supervisor delegation, agent execution, and result synthesis 42 | 43 | 5. 
**Validate and Present Results** 44 | - Validates the workflow components and execution attempt 45 | - Presents the actual API response from the workflow execution 46 | - Handles potential authentication or connectivity errors gracefully 47 | 48 | ### Key Features 49 | 50 | - **Real API Integration**: Uses actual API endpoints and services for all operations 51 | - **A2A Protocol**: Demonstrates real agent-to-agent communication via LangGraph 52 | - **Tool Integration**: Shows actual MCP server and tool usage (requires authentication) 53 | - **Workflow Validation**: Includes assertions to verify workflow components 54 | - **Error Handling**: Provides comprehensive error reporting and diagnostics 55 | 56 | ### MCP Integration and Tool Usage 57 | 58 | The workflow integrates Model Context Protocol (MCP) servers to provide agents with access to external tools. We've implemented two approaches for MCP integration: 59 | 60 | #### 1. Direct MCP Client Integration (Legacy) 61 | 62 | Initially, the project used the MCP client library directly, which encountered issues with session ID handling and authentication. Common issues included: 63 | 64 | - Session IDs returned as `None` despite being created successfully on the server side 65 | - Authentication failures (401 errors) even with properly configured API keys 66 | - Protocol compatibility issues between client and server 67 | 68 | #### 2. 
Langchain MCP Adapters Integration (Current) 69 | 70 | To overcome these limitations, we've integrated the `langchain-mcp-adapters` library which provides better compatibility with Langchain and LangGraph: 71 | 72 | ```python 73 | from langchain_mcp_adapters.client import MultiServerMCPClient 74 | 75 | # Create an MCP client with all available servers from the database 76 | mcp_servers = {} 77 | for server in db_mcp_servers: 78 | mcp_servers[server.name] = server.url 79 | 80 | # Create the client with all available servers 81 | mcp_client = MultiServerMCPClient(servers=mcp_servers) 82 | 83 | # Asynchronously fetch all available tools 84 | tools = await mcp_client.get_tools() 85 | ``` 86 | 87 | Key advantages of this approach include: 88 | 89 | - Better session management and authentication handling 90 | - Support for async tool invocation required by LangGraph 91 | - Simplified tool creation and management 92 | - Integration with LangGraph's agent execution framework 93 | - Dynamic tool discovery and loading at runtime 94 | 95 | ### Context Management and Token Optimization 96 | 97 | To prevent token limit errors and improve performance, we've implemented a context management system that: 98 | 99 | 1. **Monitors message history growth**: Tracks message count in agent conversations 100 | 2. **Applies intelligent summarization**: Summarizes historical messages when context size exceeds thresholds 101 | 3. **Preserves critical context**: Always keeps the initial user query and most recent messages 102 | 4. 
**Reduces LLM token consumption**: Prevents OpenAI API token limit errors by condensing repeated context 103 | 104 | The implementation uses a pre-processing node in the agent graph that manages state before it reaches any agent: 105 | 106 | ```python 107 | def summarize_messages(messages, max_keep=3): 108 | """Summarize message history to reduce token consumption.""" 109 | if len(messages) <= max_keep: 110 | return messages 111 | 112 | # Keep the first message (user query) and the last max_keep messages 113 | keep_messages = [messages[0]] + messages[-max_keep:] 114 | 115 | # Create a summary of the dropped messages 116 | dropped_messages = messages[1:-max_keep] 117 | if dropped_messages: 118 | summary_content = f"Summary of {len(dropped_messages)} previous messages: " 119 | summary_content += "Agents discussed the query and exchanged information." 120 | keep_messages.insert(1, SystemMessage(content=summary_content)) 121 | 122 | return keep_messages 123 | ``` 124 | 125 | This system helps prevent recursion errors and token overflow issues that previously affected workflow execution. 126 | 127 | ### Authentication Requirements 128 | 129 | To run the complete workflow with tool execution, you need to configure the following: 130 | 131 | 1. **MCP Server Authentication**: The MCP server at `https://searchapi-mcp.prod.diginext.site/mcp` may require API credentials depending on the server configuration. 132 | 133 | 2. **API Keys Configuration**: To set up authentication if required: 134 | - Add your API keys to the `.env` file in the project root 135 | - The keys will be automatically loaded by the environment configuration 136 | - The Langchain MCP adapters will handle authentication based on the loaded environment 137 | 138 | 3. **Asynchronous Execution**: The workflow uses async execution for MCP tools, which requires proper async/await handling throughout the codebase. 
139 | 140 | ### Running the Demonstration 141 | 142 | To run the AI crew workflow demonstration: 143 | 144 | ```bash 145 | python tests/test_ai_crew_simple_demo.py 146 | ``` 147 | 148 | Without proper API credentials, the test will still create all necessary components and attempt to execute the workflow, but will report an authentication error during the tool execution phase. 149 | 150 | ## Project Structure 151 | 152 | ``` 153 | . 154 | ├── AGENT_INSTRUCTIONS.md 155 | ├── app 156 | │ ├── __init__.py 157 | │ ├── api 158 | │ │ ├── __init__.py 159 | │ │ ├── agents.py 160 | │ │ ├── conversations.py 161 | │ │ ├── crews.py 162 | │ │ ├── mcp_servers.py 163 | │ │ └── tools.py 164 | │ ├── core 165 | │ │ ├── __init__.py 166 | │ │ ├── agents.py 167 | │ │ ├── database.py 168 | │ │ ├── graph.py 169 | │ │ ├── logging.py 170 | │ │ └── tools.py 171 | │ ├── main.py 172 | │ ├── models 173 | │ │ ├── __init__.py 174 | │ │ ├── agent_tool.py 175 | │ │ ├── agent.py 176 | │ │ ├── base.py 177 | │ │ ├── conversation.py 178 | │ │ ├── crew.py 179 | │ │ ├── mcp_server.py 180 | │ │ ├── setup_relationships.py 181 | │ │ └── tool.py 182 | │ ├── schemas 183 | │ │ ├── agent.py 184 | │ │ ├── conversation.py 185 | │ │ ├── crew.py 186 | │ │ ├── mcp_server.py 187 | │ │ ├── prompt.py 188 | │ │ └── tool.py 189 | │ └── services 190 | │ ├── __init__.py 191 | │ ├── agent.py 192 | │ ├── conversation.py 193 | │ ├── crew.py 194 | │ ├── mcp_server.py 195 | │ └── tool.py 196 | ├── create_tables.py 197 | ├── docs 198 | │ └── UUID_MIGRATION.md 199 | ├── example_ai_crew_chat.py 200 | ├── example.py 201 | ├── GEMINI.md 202 | ├── LICENSE 203 | ├── PROJECT_OVERVIEW.md 204 | ├── README.md 205 | ├── requirements.txt 206 | ├── server.log 207 | ├── test_uuid_models.py 208 | ├── tests 209 | │ ├── conftest.py 210 | │ ├── test_agents.py 211 | │ ├── test_ai_crew_chat.py 212 | │ ├── test_ai_crew_simple_demo.py 213 | │ ├── test_conversations.py 214 | │ ├── test_crews.py 215 | │ ├── test_mcp_servers.py 216 | │ └── test_tools.py 
217 | ``` 218 | 219 | **Directories:** 220 | 221 | * `app`: The main application directory. 222 | * `app/api`: Contains the API endpoints for the application. 223 | * `app/core`: Houses the core logic of the application, including LangGraph setup and agent definitions. 224 | * `app/models`: Defines the database models for PostgreSQL. 225 | * `app/schemas`: Contains the Pydantic schemas for the API. 226 | * `app/services`: Contains services that interact with the database and other external resources. 227 | * `docs`: Contains project documentation files. 228 | * `tests`: Contains the tests for the application. 229 | * `.env`: Stores environment variables. 230 | * `create_tables.py`: A script to create the database tables. 231 | * `example.py`: An example script to test the LangGraph implementation. 232 | * `example_ai_crew_chat.py`: An example script demonstrating AI crew chat functionality. 233 | * `test_uuid_models.py`: A test script for UUID-based models and relationships. 234 | * `PROJECT_OVERVIEW.md`: This file. 235 | * `requirements.txt`: Lists the Python dependencies for the project. 236 | 237 | ## Features 238 | 239 | * [x] Create & manage AI crews 240 | * [x] Create & manage AI agents 241 | * [x] Create & manage MCP servers 242 | * [x] Create & manage Tools 243 | * [x] Integrate MCP servers for tool usage 244 | * [ ] Monitor activity logs (save/read/retention in database) 245 | * [x] Create & manage conversations with AI crews / AI agents 246 | * [x] Expose Swagger API Docs for frontend integration instructions 247 | * [x] Continuous integration with GitHub Actions 248 | * [x] UUID-based primary keys for all database models 249 | 250 | ## Dependencies 251 | 252 | * langchain 253 | * langgraph 254 | * fastapi 255 | * uvicorn 256 | * psycopg2-binary 257 | * sqlalchemy 258 | * python-dotenv 259 | * langchain-openai 260 | * tavily-python 261 | * langchain-community 262 | * pytest 263 | 264 | ## API Routes 265 | 266 | * `GET /`: Welcome message. 
267 | * `POST /crews/`: Create a new crew. 268 | * `GET /crews/`: Get all crews. 269 | * `GET /crews/{crew_id}`: Get a crew by ID. 270 | * `PUT /crews/{crew_id}`: Update a crew. 271 | * `DELETE /crews/{crew_id}`: Delete a crew. 272 | * `POST /agents/`: Create a new agent. 273 | * `GET /agents/`: Get all agents. 274 | * `GET /agents/{agent_id}`: Get an agent by ID. 275 | * `PUT /agents/{agent_id}`: Update an agent. 276 | * `DELETE /agents/{agent_id}`: Delete an agent. 277 | * `POST /agents/{agent_id}/tools/{tool_id}`: Add a tool to an agent. 278 | * `POST /mcp_servers/`: Create a new MCP server. 279 | * `GET /mcp_servers/`: Get all MCP servers. 280 | * `GET /mcp_servers/{mcp_server_id}`: Get an MCP server by ID. 281 | * `PUT /mcp_servers/{mcp_server_id}`: Update an MCP server. 282 | * `DELETE /mcp_servers/{mcp_server_id}`: Delete an MCP server. 283 | * `POST /tools/`: Create a new tool. 284 | * `GET /tools/`: Get all tools. 285 | * `GET /tools/{tool_id}`: Get a tool by ID. 286 | * `PUT /tools/{tool_id}`: Update a tool. 287 | * `DELETE /tools/{tool_id}`: Delete a tool. 288 | * `POST /conversations/`: Create a new conversation. 289 | * `GET /conversations/`: Get all conversations. 290 | * `GET /conversations/{conversation_id}`: Get a conversation by ID. 291 | 292 | ## API Documentation 293 | 294 | The API documentation is automatically generated by FastAPI and is available at the following URLs: 295 | 296 | * **Swagger UI:** `/docs` 297 | * **ReDoc:** `/redoc` 298 | 299 | ## Testing 300 | 301 | To run the tests, you will need to install `pytest`: 302 | 303 | ```bash 304 | pip install pytest 305 | ``` 306 | 307 | Then, you can run the tests using the following command: 308 | 309 | ```bash 310 | pytest 311 | ``` 312 | 313 | ## Todos 314 | 315 | * [ ] Implement true A2A communication between agents. 316 | * [ ] Modify the `AgentGraph` to allow agents to directly communicate with each other. 
317 | * [ ] Update the supervisor logic to delegate tasks to agents and manage the overall workflow. 318 | * [ ] Create a new agent type, "supervisor", to distinguish it from other agents. 319 | * [ ] Update the API to allow creating crews with a supervisor and multiple agents. 320 | * [ ] Update the `AGENT_INSTRUCTIONS.md` file with instructions on how to create a crew with a supervisor and multiple agents. 321 | 322 | ## Changelog 323 | 324 | * **2025-07-03:** 325 | * Migrated all database models from integer primary keys to UUIDs for improved scalability and security. 326 | * Implemented late-binding relationship setup to resolve circular dependencies between models. 327 | * Added comprehensive UUID model tests. 328 | * Added documentation for UUID migration in `docs/UUID_MIGRATION.md`. 329 | * Enhanced error handling and validation for UUID operations in API routes and services. 330 | 331 | * **2025-07-02:** 332 | * Added tests for all endpoints and workflows. 333 | * Added a GitHub Actions workflow to run the tests automatically. 334 | * Added A2A implementation to the project's roadmap. 335 | * Initial project setup. 336 | * Created database models for `Crew`, `Agent`, `McpServer`, and `Tool`. 337 | * Set up the database connection and created the tables. 338 | * Implemented the core LangGraph logic for agent interaction. 339 | * Created API endpoints for managing crews. 340 | * Created API endpoints for managing agents. 341 | * Created API endpoints for managing MCP servers. 342 | * Created API endpoints for managing tools. 343 | * Integrated MCP servers for tool usage. 344 | * Updated the database schema to include a `description` field for tools. 345 | * Updated the API to handle the new `description` field. 346 | * Updated the agent service to fetch tools from MCP servers. 347 | * Updated the API for adding tools to agents. 348 | * Implemented logging for monitoring. 349 | * Implemented conversation management. 
350 | * Added API documentation section to `PROJECT_OVERVIEW.md`. -------------------------------------------------------------------------------- /docs/ai-crews-management.md: -------------------------------------------------------------------------------- 1 | # AI Crews Management 2 | 3 | This guide covers how to create, manage, and work with AI crews in the A2A LangGraph Boilerplate. 4 | 5 | ## What is an AI Crew? 6 | 7 | An AI crew is a collection of AI agents that work together to solve complex problems. Each crew has: 8 | - **A supervisor agent** that coordinates tasks and delegates work 9 | - **Multiple specialized agents** that handle specific types of tasks 10 | - **Shared tools and resources** accessible to all agents in the crew 11 | - **Conversation history** that maintains context across interactions 12 | 13 | ## Crew Architecture 14 | 15 | ### Supervisor-Agent Model 16 | 17 | The system uses a supervisor-agent architecture where: 18 | - **Supervisor**: Orchestrates the workflow, delegates tasks, and synthesizes results 19 | - **Agents**: Specialized workers that perform specific tasks (research, coding, analysis, etc.) 20 | - **Tools**: External capabilities accessible through MCP (Model Context Protocol) servers 21 | 22 | ### Workflow Process 23 | 24 | 1. **User submits a prompt** to the crew 25 | 2. **Supervisor analyzes** the request and creates a plan 26 | 3. **Supervisor delegates** subtasks to appropriate agents 27 | 4. **Agents execute** their assigned tasks using available tools 28 | 5. 
**Supervisor synthesizes** results and provides a final response 29 | 30 | ## Creating AI Crews 31 | 32 | ### Basic Crew Creation 33 | 34 | ```python 35 | import requests 36 | 37 | # Create a new crew 38 | crew_data = { 39 | "name": "Research Team", 40 | "description": "A team specialized in research and analysis" 41 | } 42 | 43 | response = requests.post("http://localhost:8000/crews/", json=crew_data) 44 | crew = response.json() 45 | print(f"Created crew: {crew['id']}") 46 | ``` 47 | 48 | ### Crew with Custom Configuration 49 | 50 | ```python 51 | # Create a crew with specific settings 52 | crew_data = { 53 | "name": "Development Team", 54 | "description": "Full-stack development crew with coding and testing capabilities" 55 | } 56 | 57 | response = requests.post("http://localhost:8000/crews/", json=crew_data) 58 | crew = response.json() 59 | ``` 60 | 61 | ### Automatic Supervisor Creation 62 | 63 | When you create a crew, the system automatically creates a supervisor agent with: 64 | - **Name**: "supervisor" 65 | - **Role**: "supervisor" 66 | - **Default instructions**: Basic coordination and delegation tasks 67 | - **Crew assignment**: Automatically linked to the new crew 68 | 69 | ## Managing Crew Agents 70 | 71 | ### Adding Agents to a Crew 72 | 73 | ```python 74 | # Create specialized agents for the crew 75 | agents = [ 76 | { 77 | "name": "Research Agent", 78 | "description": "Specialized in information gathering and analysis", 79 | "role": "researcher", 80 | "system_instructions": "You are a research specialist. Focus on finding accurate, relevant information and providing detailed analysis.", 81 | "crew_id": crew_id 82 | }, 83 | { 84 | "name": "Code Agent", 85 | "description": "Specialized in software development and coding", 86 | "role": "developer", 87 | "system_instructions": "You are a software developer. 
Write clean, efficient code and follow best practices.", 88 | "crew_id": crew_id 89 | }, 90 | { 91 | "name": "Summarizer Agent", 92 | "description": "Specialized in content summarization", 93 | "role": "summarizer", 94 | "system_instructions": "You are a content summarizer. Create concise, accurate summaries of complex information.", 95 | "crew_id": crew_id 96 | } 97 | ] 98 | 99 | # Create each agent 100 | for agent_data in agents: 101 | response = requests.post("http://localhost:8000/agents/", json=agent_data) 102 | agent = response.json() 103 | print(f"Created agent: {agent['name']}") 104 | ``` 105 | 106 | ### Agent Specialization 107 | 108 | Each agent can be specialized for specific tasks: 109 | 110 | #### Research Agent 111 | ```python 112 | research_agent = { 113 | "name": "Research Specialist", 114 | "role": "researcher", 115 | "system_instructions": """You are a research specialist with expertise in: 116 | - Information gathering and verification 117 | - Data analysis and interpretation 118 | - Source evaluation and citation 119 | - Trend identification and reporting 120 | 121 | Always provide accurate, well-sourced information.""", 122 | "crew_id": crew_id 123 | } 124 | ``` 125 | 126 | #### Development Agent 127 | ```python 128 | developer_agent = { 129 | "name": "Full Stack Developer", 130 | "role": "developer", 131 | "system_instructions": """You are a full-stack developer with expertise in: 132 | - Frontend and backend development 133 | - Database design and optimization 134 | - API development and integration 135 | - Testing and debugging 136 | 137 | Write clean, maintainable code with proper documentation.""", 138 | "crew_id": crew_id 139 | } 140 | ``` 141 | 142 | #### Analysis Agent 143 | ```python 144 | analyst_agent = { 145 | "name": "Data Analyst", 146 | "role": "analyst", 147 | "system_instructions": """You are a data analyst with expertise in: 148 | - Statistical analysis and modeling 149 | - Data visualization and reporting 150 | - Pattern 
recognition and insights 151 | - Predictive analytics 152 | 153 | Provide clear, actionable insights from data.""", 154 | "crew_id": crew_id 155 | } 156 | ``` 157 | 158 | ## Crew Configuration 159 | 160 | ### Updating Crew Settings 161 | 162 | ```python 163 | # Update crew information 164 | update_data = { 165 | "name": "Advanced Research Team", 166 | "description": "Enhanced research team with AI and ML capabilities" 167 | } 168 | 169 | response = requests.put(f"http://localhost:8000/crews/{crew_id}", json=update_data) 170 | updated_crew = response.json() 171 | ``` 172 | 173 | ### Customizing Supervisor Instructions 174 | 175 | ```python 176 | # Update supervisor agent with custom instructions 177 | supervisor_data = { 178 | "name": "Team Leader", 179 | "role": "supervisor", 180 | "system_instructions": """You are a team leader managing a crew of specialized AI agents. 181 | 182 | Your responsibilities: 183 | 1. Analyze user requests and break them into subtasks 184 | 2. Delegate appropriate tasks to specialized agents 185 | 3. Coordinate between agents and manage workflow 186 | 4. Synthesize results into comprehensive responses 187 | 5. 
Ensure quality and accuracy of final outputs 188 | 189 | Available agents and their specializations: 190 | - Research Agent: Information gathering and analysis 191 | - Code Agent: Software development and technical tasks 192 | - Summarizer Agent: Content summarization and synthesis 193 | 194 | Always provide clear, actionable responses that address the user's needs.""", 195 | "crew_id": crew_id 196 | } 197 | 198 | response = requests.put(f"http://localhost:8000/agents/{supervisor_id}", json=supervisor_data) 199 | ``` 200 | 201 | ## Adding Tools to Crews 202 | 203 | ### MCP Server Integration 204 | 205 | ```python 206 | # Create MCP server for tools 207 | mcp_server_data = { 208 | "name": "Search API", 209 | "url": "https://searchapi-mcp.prod.diginext.site/mcp", 210 | "description": "Web search and information retrieval capabilities" 211 | } 212 | 213 | response = requests.post("http://localhost:8000/mcp_servers/", json=mcp_server_data) 214 | mcp_server = response.json() 215 | mcp_server_id = mcp_server['id'] 216 | ``` 217 | 218 | ### Creating Tools 219 | 220 | ```python 221 | # Create tools for agents 222 | tools = [ 223 | { 224 | "name": "web_search", 225 | "description": "Search the web for current information", 226 | "api_name": "search", 227 | "mcp_server_id": mcp_server_id 228 | }, 229 | { 230 | "name": "code_analysis", 231 | "description": "Analyze code for quality and best practices", 232 | "api_name": "analyze_code", 233 | "mcp_server_id": mcp_server_id 234 | } 235 | ] 236 | 237 | # Create each tool 238 | for tool_data in tools: 239 | response = requests.post("http://localhost:8000/tools/", json=tool_data) 240 | tool = response.json() 241 | print(f"Created tool: {tool['name']}") 242 | ``` 243 | 244 | ### Assigning Tools to Agents 245 | 246 | ```python 247 | # Add tools to specific agents 248 | # Research agent gets search capabilities 249 | requests.post(f"http://localhost:8000/agents/{research_agent_id}/tools/{search_tool_id}") 250 | 251 | # Developer 
agent gets code analysis capabilities 252 | requests.post(f"http://localhost:8000/agents/{developer_agent_id}/tools/{code_tool_id}") 253 | ``` 254 | 255 | ## Executing Prompts with Crews 256 | 257 | ### Basic Prompt Execution 258 | 259 | ```python 260 | # Execute a simple prompt (the request body uses the `prompt` field defined in app/schemas/prompt.py) 261 | prompt_data = { 262 | "prompt": "Research the latest trends in artificial intelligence and provide a comprehensive summary" 263 | } 264 | 265 | response = requests.post(f"http://localhost:8000/crews/{crew_id}/execute", json=prompt_data) 266 | result = response.json() 267 | print(f"Execution result: {result}") 268 | ``` 269 | 270 | ### Complex Multi-Agent Tasks 271 | 272 | ```python 273 | # Complex task requiring multiple agents 274 | complex_prompt = { 275 | "prompt": """Please help me with the following project: 276 | 277 | 1. Research the current state of AI-powered web applications 278 | 2. Analyze the most popular frameworks and technologies 279 | 3. Create a technical specification for a new AI web app 280 | 4. 
Provide implementation recommendations 281 | 282 | I need a comprehensive report with actionable insights.""" 283 | } 284 | 285 | response = requests.post(f"http://localhost:8000/crews/{crew_id}/execute", json=complex_prompt) 286 | result = response.json() 287 | ``` 288 | 289 | ## Monitoring Crew Performance 290 | 291 | ### Execution Metrics 292 | 293 | The system provides execution metrics including: 294 | - **Execution time**: Total time taken for task completion 295 | - **Agent utilization**: Which agents were involved 296 | - **Tool usage**: What tools were accessed 297 | - **Message count**: Number of interactions between agents 298 | 299 | ### Debugging Workflow Issues 300 | 301 | ```python 302 | # Check crew configuration 303 | response = requests.get(f"http://localhost:8000/crews/{crew_id}") 304 | crew_details = response.json() 305 | 306 | # List all agents in the crew 307 | response = requests.get(f"http://localhost:8000/agents/") 308 | agents = response.json() 309 | crew_agents = [agent for agent in agents if agent['crew_id'] == crew_id] 310 | 311 | print(f"Crew has {len(crew_agents)} agents:") 312 | for agent in crew_agents: 313 | print(f"- {agent['name']} ({agent['role']})") 314 | ``` 315 | 316 | ## Best Practices 317 | 318 | ### Crew Design 319 | 320 | 1. **Specialized Agents**: Create agents with specific roles and expertise 321 | 2. **Clear Instructions**: Provide detailed system instructions for each agent 322 | 3. **Appropriate Tools**: Assign relevant tools to agents based on their roles 323 | 4. **Balanced Teams**: Include complementary skills in your crew 324 | 325 | ### Supervisor Configuration 326 | 327 | 1. **Delegation Strategy**: Configure supervisor to delegate effectively 328 | 2. **Quality Control**: Include quality checks in supervisor instructions 329 | 3. **Coordination**: Ensure supervisor manages agent interactions properly 330 | 4. 
**Result Synthesis**: Train supervisor to combine results effectively 331 | 332 | ### Performance Optimization 333 | 334 | 1. **Task Complexity**: Match task complexity to crew capabilities 335 | 2. **Agent Specialization**: Use specialized agents for specific tasks 336 | 3. **Tool Efficiency**: Optimize tool usage for performance 337 | 4. **Workflow Management**: Monitor and optimize workflow patterns 338 | 339 | ## Common Use Cases 340 | 341 | ### Research and Analysis Crew 342 | 343 | ```python 344 | crew_config = { 345 | "name": "Research & Analysis Team", 346 | "agents": [ 347 | {"role": "researcher", "specialization": "information_gathering"}, 348 | {"role": "analyst", "specialization": "data_analysis"}, 349 | {"role": "summarizer", "specialization": "content_synthesis"} 350 | ], 351 | "tools": ["web_search", "data_analysis", "document_processing"] 352 | } 353 | ``` 354 | 355 | ### Development Crew 356 | 357 | ```python 358 | crew_config = { 359 | "name": "Development Team", 360 | "agents": [ 361 | {"role": "architect", "specialization": "system_design"}, 362 | {"role": "developer", "specialization": "implementation"}, 363 | {"role": "tester", "specialization": "quality_assurance"} 364 | ], 365 | "tools": ["code_analysis", "testing_framework", "documentation"] 366 | } 367 | ``` 368 | 369 | ### Content Creation Crew 370 | 371 | ```python 372 | crew_config = { 373 | "name": "Content Creation Team", 374 | "agents": [ 375 | {"role": "writer", "specialization": "content_creation"}, 376 | {"role": "editor", "specialization": "content_editing"}, 377 | {"role": "reviewer", "specialization": "quality_review"} 378 | ], 379 | "tools": ["content_analysis", "grammar_check", "style_guide"] 380 | } 381 | ``` 382 | 383 | ## Error Handling 384 | 385 | ### Common Issues 386 | 387 | 1. **Agent Not Found**: Ensure all agents are properly created and assigned 388 | 2. **Tool Access Errors**: Verify MCP server connectivity and tool permissions 389 | 3. 
**Workflow Timeouts**: Optimize agent instructions and tool usage 390 | 4. **Result Quality**: Review and improve agent specializations 391 | 392 | ### Troubleshooting Steps 393 | 394 | 1. **Check Crew Configuration**: Verify all agents and tools are properly set up 395 | 2. **Review Logs**: Check server logs for detailed error information 396 | 3. **Test Components**: Test individual agents and tools separately 397 | 4. **Optimize Instructions**: Improve agent system instructions for better results 398 | 399 | ## Advanced Features 400 | 401 | ### Dynamic Agent Management 402 | 403 | ```python 404 | # Add agents to existing crew dynamically 405 | new_agent = { 406 | "name": "Specialized Expert", 407 | "role": "domain_expert", 408 | "system_instructions": "Custom expertise instructions", 409 | "crew_id": crew_id 410 | } 411 | 412 | response = requests.post("http://localhost:8000/agents/", json=new_agent) 413 | ``` 414 | 415 | ### Tool Management 416 | 417 | ```python 418 | # Update tool configurations 419 | tool_update = { 420 | "name": "enhanced_search", 421 | "description": "Enhanced search with AI filtering", 422 | "api_name": "enhanced_search", 423 | "mcp_server_id": mcp_server_id 424 | } 425 | 426 | response = requests.put(f"http://localhost:8000/tools/{tool_id}", json=tool_update) 427 | ``` 428 | 429 | ### Workflow Customization 430 | 431 | The system supports customizing workflow patterns through: 432 | - **Agent system instructions**: Define how agents should behave 433 | - **Tool configurations**: Configure how tools should be used 434 | - **Supervisor logic**: Customize coordination and delegation patterns 435 | 436 | This flexibility allows you to create highly specialized crews for specific domains and use cases. --------------------------------------------------------------------------------