├── src
│   ├── config
│   │   ├── __init__.py
│   │   ├── logging.py
│   │   └── setup.py
│   ├── patterns
│   │   ├── dynamic_sharding
│   │   │   ├── agent.py
│   │   │   ├── pipeline.py
│   │   │   ├── delegates.py
│   │   │   ├── coordinator.py
│   │   │   └── README.md
│   │   ├── task_decomposition
│   │   │   ├── agent.py
│   │   │   ├── pipeline.py
│   │   │   ├── README.md
│   │   │   ├── delegates.py
│   │   │   └── coordinator.py
│   │   ├── dynamic_decomposition
│   │   │   ├── agent.py
│   │   │   ├── pipeline.py
│   │   │   ├── delegates.py
│   │   │   └── README.md
│   │   ├── semantic_router
│   │   │   ├── agent.py
│   │   │   ├── pipeline.py
│   │   │   ├── delegates
│   │   │   │   ├── hotel_search.py
│   │   │   │   ├── flight_search.py
│   │   │   │   └── car_rental_search.py
│   │   │   └── README.md
│   │   ├── parallel_delegation
│   │   │   ├── agent.py
│   │   │   ├── pipeline.py
│   │   │   ├── README.md
│   │   │   └── delegates
│   │   │       ├── flight_search.py
│   │   │       ├── hotel_search.py
│   │   │       └── car_rental_search.py
│   │   ├── reflection
│   │   │   ├── utils.py
│   │   │   └── README.md
│   │   ├── web_access
│   │   │   ├── factory.py
│   │   │   ├── tasks.py
│   │   │   ├── README.md
│   │   │   └── summarize.py
│   │   └── dag_orchestration
│   │       ├── pipeline.py
│   │       ├── README.md
│   │       └── agent.py
│   ├── commons
│   │   └── message.py
│   ├── llm
│   │   └── factory.py
│   └── memory
│       └── manage.py
├── data
│   └── patterns
│       ├── semantic_router
│       │   ├── delegate
│       │   │   ├── hotel_search
│       │   │   │   ├── user_instructions.txt
│       │   │   │   ├── response_schema.json
│       │   │   │   └── system_instructions.txt
│       │   │   ├── car_rental_search
│       │   │   │   ├── user_instructions.txt
│       │   │   │   ├── response_schema.json
│       │   │   │   └── system_instructions.txt
│       │   │   └── flight_search
│       │   │       ├── user_instructions.txt
│       │   │       ├── response_schema.json
│       │   │       └── system_instructions.txt
│       │   ├── output
│       │   │   ├── coordinator
│       │   │   │   ├── route
│       │   │   │   │   └── route.json
│       │   │   │   └── consolidate
│       │   │   │       └── consolidate.txt
│       │   │   └── delegate
│       │   │       └── hotel_search
│       │   │           ├── hotel_search.json
│       │   │           └── hotel_search.txt
│       │   └── coordinator
│       │       ├── route
│       │       │   ├── user_instructions.txt
│       │       │   ├── response_schema.json
│       │       │   └── system_instructions.txt
│       │       └── consolidate
│       │           ├── user_instructions.txt
│       │           └── system_instructions.txt
│       ├── parallel_delegation
│       │   ├── delegates
│       │   │   ├── hotel_search
│       │   │   │   ├── user_instructions.txt
│       │   │   │   ├── response_schema.json
│       │   │   │   └── system_instructions.txt
│       │   │   ├── car_rental_search
│       │   │   │   ├── user_instructions.txt
│       │   │   │   ├── response_schema.json
│       │   │   │   └── system_instructions.txt
│       │   │   └── flight_search
│       │   │       ├── user_instructions.txt
│       │   │       ├── response_schema.json
│       │   │       └── system_instructions.txt
│       │   ├── output
│       │   │   ├── delegate
│       │   │   │   ├── flight_search
│       │   │   │   │   ├── flight_search.json
│       │   │   │   │   └── flight_search.txt
│       │   │   │   ├── hotel_search
│       │   │   │   │   ├── hotel_search.json
│       │   │   │   │   └── hotel_search.txt
│       │   │   │   └── car_rental_search
│       │   │   │       ├── car_rental_search.json
│       │   │   │       └── car_rental_search.txt
│       │   │   └── coordinator
│       │   │       ├── ner
│       │   │       │   └── ner.json
│       │   │       └── consolidate
│       │   │           └── consolidate.txt
│       │   └── coordinator
│       │       ├── ner
│       │       │   ├── user_instructions.txt
│       │       │   ├── system_instructions.txt
│       │       │   └── response_schema.json
│       │       └── consolidate
│       │           ├── user_instructions.txt
│       │           └── system_instructions.txt
│       ├── web_access
│       │   ├── search
│       │   │   ├── user_instructions.txt
│       │   │   └── system_instructions.txt
│       │   ├── summarize
│       │   │   ├── user_instructions.txt
│       │   │   └── system_instructions.txt
│       │   └── output
│       │       ├── summarize
│       │       │   └── dd98fb7e26e193dde3e562f56a96f4f3.txt
│       │       └── search
│       │           └── dd98fb7e26e193dde3e562f56a96f4f3.json
│       ├── dag_orchestration
│       │   ├── schemas
│       │   │   ├── compile.json
│       │   │   ├── summarize.json
│       │   │   ├── preprocess.json
│       │   │   ├── collect.json
│       │   │   └── extract.json
│       │   ├── trace
│       │   │   ├── task4.json
│       │   │   ├── task3.json
│       │   │   ├── task2.json
│       │   │   └── task1.json
│       │   ├── docs
│       │   │   ├── doc2.txt
│       │   │   ├── doc1.txt
│       │   │   └── doc3.txt
│       │   └── dag.yml
│       ├── reflection
│       │   ├── actor
│       │   │   ├── draft
│       │   │   │   ├── user_instructions.txt
│       │   │   │   ├── response_schema.json
│       │   │   │   └── system_instructions.txt
│       │   │   └── revise
│       │   │       ├── response_schema.json
│       │   │       ├── user_instructions.txt
│       │   │       └── system_instructions.txt
│       │   ├── critic
│       │   │   ├── review
│       │   │   │   ├── user_instructions.txt
│       │   │   │   ├── response_schema.json
│       │   │   │   └── system_instructions.txt
│       │   │   └── revise
│       │   │       ├── response_schema.json
│       │   │       ├── system_instructions.txt
│       │   │       └── user_instructions.txt
│       │   └── output
│       │       ├── feedback
│       │       │   ├── v1.json
│       │       │   ├── v0.json
│       │       │   └── v2.json
│       │       └── draft
│       │           └── v0.json
│       └── dynamic_sharding
│           └── entities.txt
├── img
│   ├── agentic.png
│   └── framework
│       ├── reflection.png
│       ├── web_access.png
│       ├── dynamic_sharding.png
│       ├── semantic_router.png
│       ├── dag_orchestration.png
│       ├── task_decomposition.png
│       ├── dynamic_decomposition.png
│       └── parallel_delegation.png
├── config
│   ├── setup.yml
│   └── patterns
│       ├── web_access.yml
│       ├── reflection.yml
│       ├── semantic_router.yml
│       └── parallel_delegation.yml
├── LICENSE
├── requirements.txt
├── mermaid
│   ├── dynamic_decomposition.mmd
│   ├── dynamic_sharding.mmd
│   ├── reflection.mmd
│   ├── dag_orchestration.mmd
│   ├── task_decomposition.mmd
│   ├── semantic_router.mmd
│   ├── web_access.mmd
│   └── parallel_delegation.mmd
└── .gitignore

/src/config/__init__.py:
--------------------------------------------------------------------------------

--------------------------------------------------------------------------------
/data/patterns/semantic_router/delegate/hotel_search/user_instructions.txt:
--------------------------------------------------------------------------------
User provided query: `{query}`
--------------------------------------------------------------------------------
/data/patterns/parallel_delegation/delegates/hotel_search/user_instructions.txt:
--------------------------------------------------------------------------------
User provided query: `{query}`
--------------------------------------------------------------------------------
/data/patterns/semantic_router/delegate/car_rental_search/user_instructions.txt:
--------------------------------------------------------------------------------
User provided query: `{query}`
--------------------------------------------------------------------------------
/data/patterns/semantic_router/delegate/flight_search/user_instructions.txt:
--------------------------------------------------------------------------------
User provided query: `{query}`
--------------------------------------------------------------------------------
/img/agentic.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/ashishpatel26/Agentic-Workflow-Patterns/main/img/agentic.png
--------------------------------------------------------------------------------
/data/patterns/parallel_delegation/delegates/car_rental_search/user_instructions.txt:
--------------------------------------------------------------------------------
User provided query: `{query}`
--------------------------------------------------------------------------------
/data/patterns/parallel_delegation/delegates/flight_search/user_instructions.txt:
--------------------------------------------------------------------------------
User provided query: `{query}`
--------------------------------------------------------------------------------
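The `user_instructions.txt` templates above are one-line prompts with a `{query}` placeholder. A minimal sketch of how a pipeline might fill one, assuming plain `str.format` substitution; the `read_template` helper is illustrative, not repository code:

```python
from pathlib import Path

# Illustrative helper; not part of the repository.
def read_template(path: str) -> str:
    return Path(path).read_text(encoding="utf-8")

template = read_template(
    "./data/patterns/semantic_router/delegate/hotel_search/user_instructions.txt"
)
prompt = template.format(query="Find hotels in Boston this weekend")
print(prompt)  # User provided query: `Find hotels in Boston this weekend`
```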
/img/framework/reflection.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/ashishpatel26/Agentic-Workflow-Patterns/main/img/framework/reflection.png
--------------------------------------------------------------------------------
/img/framework/web_access.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/ashishpatel26/Agentic-Workflow-Patterns/main/img/framework/web_access.png
--------------------------------------------------------------------------------
/img/framework/dynamic_sharding.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/ashishpatel26/Agentic-Workflow-Patterns/main/img/framework/dynamic_sharding.png
--------------------------------------------------------------------------------
/img/framework/semantic_router.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/ashishpatel26/Agentic-Workflow-Patterns/main/img/framework/semantic_router.png
--------------------------------------------------------------------------------
/img/framework/dag_orchestration.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/ashishpatel26/Agentic-Workflow-Patterns/main/img/framework/dag_orchestration.png
--------------------------------------------------------------------------------
/img/framework/task_decomposition.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/ashishpatel26/Agentic-Workflow-Patterns/main/img/framework/task_decomposition.png
--------------------------------------------------------------------------------
/config/setup.yml:
--------------------------------------------------------------------------------
# Project-level configuration parameters
project_id: arun-genai-bb
credentials_json: ./credentials/key.json
region: us-central1
--------------------------------------------------------------------------------
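A minimal sketch of loading these project-level settings with PyYAML (pinned in requirements.txt); the `load_setup` helper is an assumption, not the repository's actual loader:

```python
import yaml  # PyYAML, pinned in requirements.txt

# Illustrative loader; the repository's real config code may differ.
def load_setup(path: str = "./config/setup.yml") -> dict:
    with open(path, "r", encoding="utf-8") as fh:
        return yaml.safe_load(fh)

settings = load_setup()
print(settings["project_id"], settings["region"])
```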
/img/framework/dynamic_decomposition.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/ashishpatel26/Agentic-Workflow-Patterns/main/img/framework/dynamic_decomposition.png
--------------------------------------------------------------------------------
/img/framework/parallel_delegation.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/ashishpatel26/Agentic-Workflow-Patterns/main/img/framework/parallel_delegation.png
--------------------------------------------------------------------------------
/data/patterns/web_access/search/user_instructions.txt:
--------------------------------------------------------------------------------
For the user-provided query `{query}`, extract key entities to format as function arguments for Google Search API calls.
--------------------------------------------------------------------------------
/data/patterns/semantic_router/output/coordinator/route/route.json:
--------------------------------------------------------------------------------
{
    "intent": "HOTEL",
    "query": "Could you recommend some hotels in Santa Barbara, California for a stay next week?"
}
--------------------------------------------------------------------------------
/data/patterns/semantic_router/coordinator/route/user_instructions.txt:
--------------------------------------------------------------------------------
Analyze the user query `{query}` to determine if the intent is for **Flight Search**, **Hotel Search**, **Car Rental Search**, or **NA** (Not Applicable).
--------------------------------------------------------------------------------
/data/patterns/parallel_delegation/output/delegate/flight_search/flight_search.json:
--------------------------------------------------------------------------------
{
    "user_query": "FLIGHT: {'destination': 'Dallas', 'origin': 'New York'}",
    "web_search_query": "Flights from New York to Dallas"
}
--------------------------------------------------------------------------------
/data/patterns/dag_orchestration/schemas/compile.json:
--------------------------------------------------------------------------------
{
  "type": "object",
  "properties": {
    "report": {
      "type": "string"
    }
  },
  "required": ["report"]
}
--------------------------------------------------------------------------------
/data/patterns/parallel_delegation/output/delegate/hotel_search/hotel_search.json:
--------------------------------------------------------------------------------
{
    "user_query": "HOTEL: {'date': 'next week', 'destination': 'downtown Dallas'}",
    "web_search_query": "Hotels in downtown Dallas next week"
}
--------------------------------------------------------------------------------
/data/patterns/parallel_delegation/output/delegate/car_rental_search/car_rental_search.json:
--------------------------------------------------------------------------------
{
    "user_query": "CAR_RENTAL: {'date': 'next week', 'pickup_location': 'Dallas'}",
    "web_search_query": "Car rental in Dallas next week"
}
--------------------------------------------------------------------------------
/data/patterns/reflection/actor/draft/user_instructions.txt:
--------------------------------------------------------------------------------
Write a one-page academic article on the topic given below and its significance, suitable for graduate students and researchers.
Balance technical depth with clarity.

Topic: `{topic}`
--------------------------------------------------------------------------------
/data/patterns/semantic_router/output/delegate/hotel_search/hotel_search.json:
--------------------------------------------------------------------------------
{
    "user_query": "Could you recommend some hotels in Santa Barbara, California for a stay next week?",
    "web_search_query": "Hotels in Santa Barbara, California next week"
}
--------------------------------------------------------------------------------
/data/patterns/parallel_delegation/coordinator/ner/user_instructions.txt:
--------------------------------------------------------------------------------
Analyze the user query `{query}` to identify intents and extract relevant entities for flight, hotel, and car rental searches simultaneously, handling multiple intents per query and classifying out-of-scope requests as unknown.
--------------------------------------------------------------------------------
/data/patterns/web_access/summarize/user_instructions.txt:
--------------------------------------------------------------------------------
Given the user-provided query `{query}` and the accompanying scraped content from relevant webpages below,
please follow the provided guidelines to generate a comprehensive summary with appropriate citations:

{scraped_content}
--------------------------------------------------------------------------------
/data/patterns/reflection/actor/draft/response_schema.json:
--------------------------------------------------------------------------------
{
  "type": "object",
  "properties": {
    "topic": {
      "type": "string"
    },
    "article": {
      "type": "string"
    }
  },
  "required": ["topic", "article"]
}
--------------------------------------------------------------------------------
/data/patterns/semantic_router/coordinator/consolidate/user_instructions.txt:
--------------------------------------------------------------------------------
Synthesize the user query `{query}` with web search results (provided below) to provide a clear, informative, and user-friendly response that directly addresses the user's needs.

### Web search results

`{summary}`
--------------------------------------------------------------------------------
/data/patterns/reflection/actor/revise/response_schema.json:
--------------------------------------------------------------------------------
{
  "type": "object",
  "properties": {
    "topic": {
      "type": "string"
    },
    "article": {
      "type": "string"
    },
    "edits": {
      "type": "string"
    }
  },
  "required": ["topic", "article", "edits"]
}
--------------------------------------------------------------------------------
/data/patterns/parallel_delegation/output/coordinator/ner/ner.json:
--------------------------------------------------------------------------------
{
    "CAR_RENTAL": {
        "date": "next week",
        "pickup_location": "Dallas"
    },
    "FLIGHT": {
        "destination": "Dallas",
        "origin": "New York"
    },
    "HOTEL": {
        "date": "next week",
        "destination": "downtown Dallas"
    }
}
--------------------------------------------------------------------------------
/data/patterns/reflection/critic/review/user_instructions.txt:
--------------------------------------------------------------------------------
Please review the following research article:

{article}

Provide a comprehensive peer review following the structure and guidelines outlined in the previous instructions.
Your review should include a summary, assessment of strengths and weaknesses, detailed comments, and an overall recommendation.
--------------------------------------------------------------------------------
/data/patterns/dynamic_sharding/entities.txt:
--------------------------------------------------------------------------------
Tom Hanks
Meryl Streep
Leonardo DiCaprio
Jennifer Lawrence
Denzel Washington
Cate Blanchett
Brad Pitt
Viola Davis
Robert Downey Jr.
Charlize Theron
Morgan Freeman
Scarlett Johansson
Johnny Depp
Natalie Portman
Will Smith
Emma Stone
Hugh Jackman
Angelina Jolie
--------------------------------------------------------------------------------
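A small illustration of the dynamic-sharding idea behind this file: split the entity list into fixed-size shards so each shard can be handed to its own delegate. The `shard` helper and the shard size are assumptions, not repository code:

```python
# Illustrative only: the shard size and helper are assumptions.
def shard(items: list, shard_size: int) -> list:
    """Split items into consecutive chunks of at most shard_size."""
    return [items[i:i + shard_size] for i in range(0, len(items), shard_size)]

with open("./data/patterns/dynamic_sharding/entities.txt", encoding="utf-8") as fh:
    entities = [line.strip() for line in fh if line.strip()]

for i, chunk in enumerate(shard(entities, 6)):
    print(f"shard {i}: {chunk}")  # each shard would go to its own delegate
```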
/config/patterns/web_access.yml:
--------------------------------------------------------------------------------
tools:
  search:
    system_instructions: './data/patterns/web_access/search/system_instructions.txt'
    user_instructions: './data/patterns/web_access/search/user_instructions.txt'
  summarize:
    system_instructions: './data/patterns/web_access/summarize/system_instructions.txt'
    user_instructions: './data/patterns/web_access/summarize/user_instructions.txt'
--------------------------------------------------------------------------------
/data/patterns/parallel_delegation/coordinator/consolidate/user_instructions.txt:
--------------------------------------------------------------------------------
Synthesize the user query `{query}` with web search results (provided below) to provide a clear, informative, and user-friendly response that directly addresses the user's needs.

## Web search results
### Flight summary:
{flight_summary}

### Hotel summary:
{hotel_summary}

### Car rental summary:
{car_rental_summary}
--------------------------------------------------------------------------------
/data/patterns/semantic_router/coordinator/route/response_schema.json:
--------------------------------------------------------------------------------
{
  "type": "object",
  "properties": {
    "query": {
      "type": "string",
      "description": "The original user query."
    },
    "intent": {
      "type": "string",
      "description": "The detected intent of the query.",
      "enum": ["FLIGHT", "HOTEL", "CAR_RENTAL", "UNKNOWN"]
    }
  },
  "required": ["query", "intent"]
}
--------------------------------------------------------------------------------
/data/patterns/semantic_router/delegate/hotel_search/response_schema.json:
--------------------------------------------------------------------------------
{
  "type": "object",
  "properties": {
    "user_query": {
      "type": "string",
      "description": "The original user-provided query."
    },
    "web_search_query": {
      "type": "string",
      "description": "The query transformed for optimized web search."
    }
  },
  "required": ["user_query", "web_search_query"]
}
--------------------------------------------------------------------------------
/data/patterns/dag_orchestration/schemas/summarize.json:
--------------------------------------------------------------------------------
{
  "type": "object",
  "properties": {
    "summaries": {
      "type": "array",
      "items": {
        "type": "object",
        "properties": {
          "id": { "type": "string" },
          "summary": { "type": "string" }
        },
        "required": ["id", "summary"]
      }
    }
  },
  "required": ["summaries"]
}
--------------------------------------------------------------------------------
/data/patterns/parallel_delegation/delegates/hotel_search/response_schema.json:
--------------------------------------------------------------------------------
{
  "type": "object",
  "properties": {
    "user_query": {
      "type": "string",
      "description": "The original user-provided query."
    },
    "web_search_query": {
      "type": "string",
      "description": "The query transformed for optimized web search."
    }
  },
  "required": ["user_query", "web_search_query"]
}
--------------------------------------------------------------------------------
/data/patterns/semantic_router/delegate/car_rental_search/response_schema.json:
--------------------------------------------------------------------------------
{
  "type": "object",
  "properties": {
    "user_query": {
      "type": "string",
      "description": "The original user-provided query."
    },
    "web_search_query": {
      "type": "string",
      "description": "The query transformed for optimized web search."
    }
  },
  "required": ["user_query", "web_search_query"]
}
--------------------------------------------------------------------------------
/data/patterns/semantic_router/delegate/flight_search/response_schema.json:
--------------------------------------------------------------------------------
{
  "type": "object",
  "properties": {
    "user_query": {
      "type": "string",
      "description": "The original user-provided query."
    },
    "web_search_query": {
      "type": "string",
      "description": "The query transformed for optimized web search."
    }
  },
  "required": ["user_query", "web_search_query"]
}
--------------------------------------------------------------------------------
/data/patterns/parallel_delegation/delegates/car_rental_search/response_schema.json:
--------------------------------------------------------------------------------
{
  "type": "object",
  "properties": {
    "user_query": {
      "type": "string",
      "description": "The original user-provided query."
    },
    "web_search_query": {
      "type": "string",
      "description": "The query transformed for optimized web search."
    }
  },
  "required": ["user_query", "web_search_query"]
}
--------------------------------------------------------------------------------
/data/patterns/parallel_delegation/delegates/flight_search/response_schema.json:
--------------------------------------------------------------------------------
{
  "type": "object",
  "properties": {
    "user_query": {
      "type": "string",
      "description": "The original user-provided query."
    },
    "web_search_query": {
      "type": "string",
      "description": "The query transformed for optimized web search."
    }
  },
  "required": ["user_query", "web_search_query"]
}
--------------------------------------------------------------------------------
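All of these delegate `response_schema.json` files share the same `user_query`/`web_search_query` shape. A sketch of validating a delegate response against one of them with the `jsonschema` package (pinned in requirements.txt); the payload mirrors the hotel_search output shown earlier:

```python
import json
from jsonschema import validate  # jsonschema is pinned in requirements.txt

with open(
    "./data/patterns/parallel_delegation/delegates/hotel_search/response_schema.json",
    encoding="utf-8",
) as fh:
    schema = json.load(fh)

# Payload mirroring output/delegate/hotel_search/hotel_search.json.
response = {
    "user_query": "HOTEL: {'date': 'next week', 'destination': 'downtown Dallas'}",
    "web_search_query": "Hotels in downtown Dallas next week",
}
validate(instance=response, schema=schema)  # raises ValidationError on mismatch
```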
/data/patterns/dag_orchestration/schemas/preprocess.json:
--------------------------------------------------------------------------------
{
  "type": "object",
  "properties": {
    "preprocessed_docs": {
      "type": "array",
      "items": {
        "type": "object",
        "properties": {
          "id": { "type": "string" },
          "title": { "type": "string" },
          "content": { "type": "string" }
        },
        "required": ["id", "title", "content"]
      }
    }
  },
  "required": ["preprocessed_docs"]
}
--------------------------------------------------------------------------------
/data/patterns/reflection/critic/review/response_schema.json:
--------------------------------------------------------------------------------
{
  "type": "object",
  "properties": {
    "summary": {
      "type": "string"
    },
    "strengths": {
      "type": "string"
    },
    "weaknesses": {
      "type": "string"
    },
    "minor_issues": {
      "type": "string"
    },
    "overall_recommendation": {
      "type": "string"
    }
  },
  "required": ["summary", "strengths", "weaknesses", "minor_issues", "overall_recommendation"]
}
--------------------------------------------------------------------------------
/data/patterns/reflection/critic/revise/response_schema.json:
--------------------------------------------------------------------------------
{
  "type": "object",
  "properties": {
    "summary": {
      "type": "string"
    },
    "strengths": {
      "type": "string"
    },
    "weaknesses": {
      "type": "string"
    },
    "minor_issues": {
      "type": "string"
    },
    "overall_recommendation": {
      "type": "string"
    }
  },
  "required": ["summary", "strengths", "weaknesses", "minor_issues", "overall_recommendation"]
}
--------------------------------------------------------------------------------
/data/patterns/dag_orchestration/schemas/collect.json:
--------------------------------------------------------------------------------
{
  "type": "object",
  "properties": {
    "docs": {
      "type": "array",
      "items": {
        "type": "object",
        "properties": {
          "id": { "type": "string" },
          "title": { "type": "string" },
          "content": { "type": "string" },
          "filename": { "type": "string" }
        },
        "required": ["id", "title", "content", "filename"]
      }
    }
  },
  "required": ["docs"]
}
--------------------------------------------------------------------------------
/data/patterns/semantic_router/coordinator/consolidate/system_instructions.txt:
--------------------------------------------------------------------------------
# AI Assistant Task

You are an AI assistant tasked with providing informative and user-friendly responses by combining user queries with web search results. Your role is to:

1. Analyze the user's query: `{query}`
2. Review the summary of web search results: `{summary}`
3. Synthesize this information into a coherent, helpful response for the user

Your response should:

- Directly address the user's query
- Incorporate relevant information from the web search results
- Be clear, concise, and easy to understand

**IMPORTANT:** Do not ask follow-up questions.
--------------------------------------------------------------------------------
/data/patterns/parallel_delegation/coordinator/consolidate/system_instructions.txt:
--------------------------------------------------------------------------------
You are an AI assistant tasked with providing informative and user-friendly responses by combining user queries with web search results. Your role involves:

1. Analyzing the provided user query
2. Reviewing the summary of web search results, which includes three summaries:
   - Flight summary
   - Hotel summary
   - Car rental summary
3. Synthesizing this information into a coherent, helpful response for the user

Your response should:

- Directly address the user's query
- Incorporate relevant information from the web search results
- Be clear, concise, and easy to understand

IMPORTANT: Do not ask follow-up questions.
--------------------------------------------------------------------------------
/data/patterns/parallel_delegation/output/coordinator/consolidate/consolidate.txt:
--------------------------------------------------------------------------------
Your trip to Dallas next week can be arranged easily! For your flight from New York to Dallas, expect to pay around $139 for a round trip from New York LaGuardia Airport to Dallas/Fort Worth Airport. Booking your flight by Thursday and at least a month in advance may help you find a cheaper fare. Once in Dallas, you'll find a variety of hotels in the downtown area, including popular options like the Omni Dallas Hotel and Fairmont Dallas. Remember to book in advance as hotels tend to fill up quickly. To navigate Dallas with ease, consider renting a car. You can find rentals starting at $31 per day. Due to potential traffic and distances between attractions, larger vehicles like SUVs are recommended by some. Enjoy your trip!
--------------------------------------------------------------------------------
/data/patterns/semantic_router/output/coordinator/consolidate/consolidate.txt:
--------------------------------------------------------------------------------
Santa Barbara has a great selection of hotels to choose from, with options for all budgets and preferences. You can find beachfront properties, downtown accommodations, Spanish Colonial-style bungalows, luxury resorts, contemporary hotels, affordable motels, neighborhood inns, vacation rentals, and even beachside camping spots.

Here are some popular hotels in Santa Barbara:

* La Playa Inn Santa Barbara
* Marina Beach Motel
* Simpson House Inn
* Hotel Milo Santa Barbara
* Hyatt Place Santa Barbara
* Sandpiper Lodge
* Avania Inn of Santa Barbara
* Inn By The Harbor

These hotels typically offer amenities like free WiFi, continental breakfast, outdoor pools, hot tubs, and fitness centers.
--------------------------------------------------------------------------------
/data/patterns/web_access/search/system_instructions.txt:
--------------------------------------------------------------------------------
# You Are Google Search Query Assistant

## Overview
Your primary responsibility is to extract relevant entities from user queries. These entities will serve as function arguments for Google Search API calls.

## Key Responsibilities
1. **Analyze User Queries**: Carefully examine the user's query to understand its intent.
2. **Identify and Extract Entities**: Pinpoint key entities, topics, and concepts within the query that are essential for search.
3. **Format for API**: Present the extracted entities in a format suitable for use as parameters in API function arguments.

## Focus
- Ensure accurate extraction of entities that will be directly usable in API calls.
- Your role is to prepare the query for search tools, not to perform the search or provide search results.
--------------------------------------------------------------------------------
/data/patterns/reflection/actor/revise/user_instructions.txt:
--------------------------------------------------------------------------------
## History
{history}

Based on the provided history above, revise the latest version of the draft using the guide below.

# Revision Guide

## Review and Analyze
- Examine conversation history
- Focus on original draft and reviewer feedback
- List and categorize all issues from feedback

## Systematic Revision
- For each issue:
  - Identify the problem
  - Implement necessary changes
  - Briefly explain rationale

## Enhance and Refine
- Improve overall coherence and clarity
- Reorganize structure for better flow
- Maintain professional, clear language

## Final Checks
- Retain all key points unless advised to remove
- Proofread for errors and inconsistencies
- Indicate this is a revised version

## Deliverable
Provide concise, revised instructions addressing all feedback points.
--------------------------------------------------------------------------------
/data/patterns/reflection/output/feedback/v1.json:
--------------------------------------------------------------------------------
{"minor_issues": "None.", "overall_recommendation": "Accept", "strengths": "1. **Comprehensive Revisions:** The authors have effectively addressed all previous concerns. The addition of concrete examples and the expanded discussion on limitations significantly enhance the article's clarity and depth.\n2. **Well-Structured and Accessible:** The article is well-organized and written in a clear, concise style, making it accessible to a broad audience, including those new to the concept of perplexity.\n3. **Valuable Insights:** The inclusion of future directions provides a forward-looking perspective, highlighting potential areas for further research and development in this field.", "summary": "This revised article provides a thorough and insightful overview of perplexity as a measure of language model proficiency. The authors have successfully addressed previous feedback, resulting in a well-structured and informative piece suitable for publication.", "weaknesses": "None."}
--------------------------------------------------------------------------------
/data/patterns/semantic_router/coordinator/route/system_instructions.txt:
--------------------------------------------------------------------------------
You are a travel planner agent responsible for analyzing user queries to accurately identify the intent and route them to the appropriate sub-agent. Categorize the intent into the following predefined classes:

1. **Flight Search**
   - Identifies queries related to finding flights, including flight availability, prices, and booking information.

2. **Hotel Search**
   - Detects requests involving hotel availability, room booking, or pricing for accommodation.

3. **Car Rental Search**
   - Identifies queries that pertain to car rental services, such as availability, pricing, and rental options.

4. **Unknown (Not Applicable)**
   - If the user query does not match any of the above categories, the intent is classified as **UNKNOWN**, indicating it falls outside the scope of recognized intents.

Ensure precise intent classification to enhance the user experience by routing each query to the correct service or sub-agent.
--------------------------------------------------------------------------------
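Given the intent enum in route/response_schema.json, the router's output can be dispatched with a plain mapping. A hypothetical sketch; the handler functions stand in for the modules under src/patterns/semantic_router/delegates/:

```python
# Hypothetical handlers; the real ones live under src/patterns/semantic_router/delegates/.
def handle_flight(query: str) -> str:
    return f"flight search for: {query}"

def handle_hotel(query: str) -> str:
    return f"hotel search for: {query}"

def handle_car_rental(query: str) -> str:
    return f"car rental search for: {query}"

ROUTES = {
    "FLIGHT": handle_flight,
    "HOTEL": handle_hotel,
    "CAR_RENTAL": handle_car_rental,
}

def dispatch(route: dict) -> str:
    # route has the shape of output/coordinator/route/route.json.
    handler = ROUTES.get(route["intent"])
    if handler is None:  # the UNKNOWN intent falls back to a canned reply
        return "Sorry, that request is outside flight, hotel, and car rental search."
    return handler(route["query"])

print(dispatch({"intent": "HOTEL", "query": "Hotels in Santa Barbara next week"}))
```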
/data/patterns/parallel_delegation/output/delegate/hotel_search/hotel_search.txt:
--------------------------------------------------------------------------------
# Downtown Dallas Hotels Next Week

Downtown Dallas offers a variety of hotels for visitors, with options ranging from budget-friendly to luxury accommodations. Popular hotels in the area include Omni Dallas Hotel, Fairmont Dallas, Courtyard by Marriott Dallas Downtown/Reunion District, Holiday Inn Express Dallas Downtown, and SpringHill Suites by Marriott Dallas Downtown/West End. These hotels are often booked quickly, so it is recommended to reserve your room in advance, especially if traveling next week.

Downtown Dallas is known for its shops, museums, live music scene, and selection of restaurants. Visitors can enjoy attractions such as the Kay Bailey Hutchison Convention Center, Dallas City Hall, and Majestic Theater.

## Citations

1. https://www.hotels.com/nh1327/hotels-in-downtown-dallas-dallas-texas/
2. https://www.booking.com/district/us/dallas/downtown.html
3. https://www.expedia.com/Downtown-Dallas-Dallas-Hotels.0-n800026-0.Travel-Guide-Filter-Hotels
--------------------------------------------------------------------------------
/data/patterns/reflection/actor/draft/system_instructions.txt:
--------------------------------------------------------------------------------
You are a distinguished professor from a renowned university, recognized worldwide for your expertise in your field.
Your task is to write a two-page article (1200-1600 words) on a given topic related to your area of specialization and its significance in the broader context of your field.

Audience: Graduate students and researchers in your discipline
Tone: Academic yet accessible. Balance technical depth with clarity.

Structure:
1. Introduction to the topic
2. Explanation of the topic's importance in the context of your field
3. Implications for future research

Guidelines:
- Use well-structured prose with clear paragraph breaks
- Maintain an authoritative professorial voice
- Balance technical rigor with clarity
- Explain necessary technical terms
- Ensure comprehensibility for students and researchers

The article should provide a comprehensive overview of the chosen topic, its relevance to your field of study, and potential future directions in the discipline.
--------------------------------------------------------------------------------
/data/patterns/reflection/actor/revise/system_instructions.txt:
--------------------------------------------------------------------------------
You are a distinguished professor from a renowned university, recognized worldwide for your expertise in your field.
Your task is to write a two-page article (1200-1600 words) on a given topic related to your area of specialization and its significance in the broader context of your field.

Audience: Graduate students and researchers in your discipline
Tone: Academic yet accessible. Balance technical depth with clarity.

Structure:
1. Introduction to the topic
2. Explanation of the topic's importance in the context of your field
3. Implications for future research

Guidelines:
- Use well-structured prose with clear paragraph breaks
- Maintain an authoritative professorial voice
- Balance technical rigor with clarity
- Explain necessary technical terms
- Ensure comprehensibility for students and researchers

The article should provide a comprehensive overview of the chosen topic, its relevance to your field of study, and potential future directions in the discipline.
--------------------------------------------------------------------------------
/data/patterns/parallel_delegation/output/delegate/flight_search/flight_search.txt:
--------------------------------------------------------------------------------
# Flights from New York to Dallas

- The cheapest time to fly from New York to Dallas is typically in September. The most expensive month is typically July. Prices in January are usually between $120 and $310.
- On average, Thursday is the cheapest day to fly, and Sunday is the most expensive.
- When booking flights, Thursday is the best day to book. This will help you save 6% to 13% compared to booking on a Friday.
- For domestic flights, booking at least a month in advance can save you 24%. For international flights, book 2 months in advance to save 10%.
- The cheapest route for a one-way flight is from Newark Airport to Dallas/Fort Worth Airport.
- The average price for a round-trip flight from New York LaGuardia Airport to Dallas/Fort Worth Airport is $139.

## Citations

1. https://www.expedia.com/lp/flights/nyc/dfw/new-york-to-dallas
2. https://www.kayak.com/flight-routes/New-York-NYC/Dallas-A78
3. https://www.google.com/travel/flights/flights-from-new-york-to-dallas.html
--------------------------------------------------------------------------------
/data/patterns/dag_orchestration/trace/task4.json:
--------------------------------------------------------------------------------
{
  "summaries": [
    {
      "id": "doc1",
      "summary": "Amelia, a lighthouse keeper's daughter, faces the daunting task of maintaining the lighthouse after her father's illness, overcoming village doubts and braving treacherous storms to become the beacon of hope for her community. She discovers her own strength and purpose in the face of adversity, becoming a symbol of resilience and a guiding light for others."
    },
    {
      "id": "doc2",
      "summary": "Marcus, a once renowned artist, falls into despair after a tragic accident but is drawn back to painting by a friend's dying wish. Through the act of creation, Marcus finds redemption, realizing that art's true power lies in its ability to heal and connect."
    },
    {
      "id": "doc3",
      "summary": "Professor Elena Reyes, obsessed with an ancient manuscript, experiences strange occurrences and a blurring of reality as she deciphers its secrets, ultimately vanishing and seemingly becoming the forgotten priestess of the civilization she studied."
    }
  ]
}
--------------------------------------------------------------------------------
/LICENSE:
--------------------------------------------------------------------------------
MIT License

Copyright (c) 2024 Arun Shankar

Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:

The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.

THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
--------------------------------------------------------------------------------
/data/patterns/dag_orchestration/schemas/extract.json:
--------------------------------------------------------------------------------
{
  "type": "object",
  "properties": {
    "extracted_items": {
      "type": "array",
      "items": {
        "type": "object",
        "properties": {
          "id": { "type": "string" },
          "key_info": {
            "type": "array",
            "items": {
              "type": "object",
              "properties": {
                "characters": {
                  "type": "array",
                  "items": { "type": "string" }
                },
                "themes": {
                  "type": "array",
                  "items": { "type": "string" }
                },
                "plot_points": {
                  "type": "array",
                  "items": { "type": "string" }
                }
              },
              "required": ["characters", "themes", "plot_points"]
            }
          }
        },
        "required": ["id", "key_info"]
      }
    }
  },
  "required": ["extracted_items"]
}
--------------------------------------------------------------------------------
/data/patterns/reflection/output/draft/v0.json:
--------------------------------------------------------------------------------
{"article": "## Perplexity: A Measure of Language Model Proficiency\n\nPerplexity is a fundamental metric in natural language processing (NLP) that quantifies the ability of a language model to predict the next word in a sequence. It serves as a crucial tool for evaluating the performance of language models, providing insights into their understanding of language structure and semantic relationships. This article delves into the concept of perplexity, its significance in NLP, and its implications for future research.\n\n### Understanding Perplexity\n\nPerplexity is rooted in information theory, where it measures the uncertainty or surprise associated with a probability distribution. In the context of language models, perplexity reflects the model's ability to predict the next word given the preceding words in a sequence. A lower perplexity score indicates that the model is better at predicting the next word, suggesting a deeper understanding of the language's statistical properties.\n\nMathematically, perplexity is calculated as the exponential of the average negative log probability of the words in a test set. The formula is:\n\n
--------------------------------------------------------------------------------
/src/patterns/dynamic_sharding/agent.py:
--------------------------------------------------------------------------------
from src.commons.message import Message
from abc import abstractmethod
from abc import ABC


class Agent(ABC):
    """
    A base class representing an agent responsible for processing messages
    and validating input and output data based on given JSON schemas.
    """

    def __init__(self, name: str) -> None:
        """
        Initializes the agent with a given name.

        Args:
            name (str): The name of the agent.
        """
        self.name = name

    @abstractmethod
    async def process(self, message: 'Message') -> 'Message':
        """
        Abstract method to process the message.

        Args:
            message (Message): A message object containing relevant data.

        Returns:
            Message: Processed message.

        Raises:
            NotImplementedError: If not overridden by a subclass.
        """
        raise NotImplementedError("This method should be implemented by subclasses.")
--------------------------------------------------------------------------------
/src/patterns/task_decomposition/agent.py:
--------------------------------------------------------------------------------
from src.commons.message import Message
from abc import abstractmethod
from abc import ABC


class Agent(ABC):
    """
    A base class representing an agent responsible for processing messages
    and validating input and output data based on given JSON schemas.
    """

    def __init__(self, name: str) -> None:
        """
        Initializes the agent with a given name.

        Args:
            name (str): The name of the agent.
        """
        self.name = name

    @abstractmethod
    async def process(self, message: 'Message') -> 'Message':
        """
        Abstract method to process the message.

        Args:
            message (Message): A message object containing relevant data.

        Returns:
            Message: Processed message.

        Raises:
            NotImplementedError: If not overridden by a subclass.
        """
        raise NotImplementedError("This method should be implemented by subclasses.")
--------------------------------------------------------------------------------
/src/patterns/dynamic_decomposition/agent.py:
--------------------------------------------------------------------------------
from src.commons.message import Message
from abc import abstractmethod
from abc import ABC


class Agent(ABC):
    """
    A base class representing an agent responsible for processing messages
    and validating input and output data based on given JSON schemas.
    """

    def __init__(self, name: str) -> None:
        """
        Initializes the agent with a given name.

        Args:
            name (str): The name of the agent.
        """
        self.name = name

    @abstractmethod
    async def process(self, message: 'Message') -> 'Message':
        """
        Abstract method to process the message.

        Args:
            message (Message): A message object containing relevant data.

        Returns:
            Message: Processed message.

        Raises:
            NotImplementedError: If not overridden by a subclass.
        """
        raise NotImplementedError("This method should be implemented by subclasses.")
--------------------------------------------------------------------------------
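The three `agent.py` files above define the same abstract interface: a named agent with an async `process(message)` method. A self-contained sketch of a concrete subclass follows; the `Message` dataclass here is a stand-in for src/commons/message.py (whose real fields are not shown in this dump), and `EchoAgent` is illustrative only:

```python
import asyncio
from abc import ABC, abstractmethod
from dataclasses import dataclass

@dataclass
class Message:
    # Stand-in for src.commons.message.Message (real fields not shown in this dump).
    sender: str
    recipient: str
    content: str

class Agent(ABC):
    # Same shape as the Agent classes above.
    def __init__(self, name: str) -> None:
        self.name = name

    @abstractmethod
    async def process(self, message: Message) -> Message: ...

class EchoAgent(Agent):
    async def process(self, message: Message) -> Message:
        # A real delegate would call an LLM or a tool here.
        return Message(self.name, message.sender, f"echo: {message.content}")

msg = Message("user", "echo", "hello")
print(asyncio.run(EchoAgent("echo").process(msg)).content)  # echo: hello
```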
/data/patterns/semantic_router/output/delegate/hotel_search/hotel_search.txt:
--------------------------------------------------------------------------------
# Hotels in Santa Barbara, California Next Week

Santa Barbara offers a wide range of hotels, from beachfront properties to downtown accommodations.

You can find options for all budgets and preferences, including Spanish Colonial-style bungalows, luxury resorts, contemporary hotels, affordable motels, neighborhood inns, vacation rentals, and even beachside camping spots.

Some popular hotels in Santa Barbara include:

- La Playa Inn Santa Barbara
- Marina Beach Motel
- Simpson House Inn
- Hotel Milo Santa Barbara
- Hyatt Place Santa Barbara
- Sandpiper Lodge
- Avania Inn of Santa Barbara
- Inn By The Harbor

These hotels offer amenities like free WiFi, continental breakfast, outdoor pools, hot tubs, and fitness centers.

## Citations

1. https://www.hotels.com/de1643011/hotels-santa-barbara-and-vicinity-california/
2. https://www.booking.com/city/us/santa-barbara.html
3. https://santabarbaraca.com/places-to-stay/
4. https://www.expedia.com/Santa-Barbara-Hotels.d602277.Travel-Guide-Hotels
--------------------------------------------------------------------------------
/config/patterns/reflection.yml:
--------------------------------------------------------------------------------
actor:
  draft:
    system_instructions: './data/patterns/reflection/actor/draft/system_instructions.txt'
    user_instructions: './data/patterns/reflection/actor/draft/user_instructions.txt'
    response_schema: './data/patterns/reflection/actor/draft/response_schema.json'
  revise:
    system_instructions: './data/patterns/reflection/actor/revise/system_instructions.txt'
    user_instructions: './data/patterns/reflection/actor/revise/user_instructions.txt'
    response_schema: './data/patterns/reflection/actor/revise/response_schema.json'
critic:
  review:
    system_instructions: './data/patterns/reflection/critic/review/system_instructions.txt'
    user_instructions: './data/patterns/reflection/critic/review/user_instructions.txt'
    response_schema: './data/patterns/reflection/critic/review/response_schema.json'
  revise:
    system_instructions: './data/patterns/reflection/critic/revise/system_instructions.txt'
    user_instructions: './data/patterns/reflection/critic/revise/user_instructions.txt'
    response_schema: './data/patterns/reflection/critic/revise/response_schema.json'
--------------------------------------------------------------------------------
/data/patterns/dag_orchestration/docs/doc2.txt:
--------------------------------------------------------------------------------
Professor Elena Reyes clutched the ancient manuscript, her heart racing. After decades of research, she had uncovered the last known text of a long-dead civilization. The symbols danced before her eyes, holding secrets lost to time.
As Elena delved deeper into translation, strange occurrences plagued her. Vivid dreams of a thriving ancient city. Whispers in an unknown tongue echoing in empty rooms. The line between past and present blurred, reality shifting like sand beneath her feet.
Colleagues grew concerned, urging her to step back. But Elena pressed on, driven by an inexplicable compulsion. With each deciphered word, she felt a piece of herself slipping away, replaced by something... other.
On the night she translated the final passage, Elena vanished. Her office was found in disarray, the manuscript open on her desk. The last entry in her journal, written in a script no one recognized, spoke of returning home.
Years later, an archaeological dig uncovered a new chamber in the ancient city. On the wall, a mural depicted a woman with Elena's face, adorned in the robes of a high priestess. The inscription hailed her as the one who bridged worlds, keeper of forgotten wisdom.
--------------------------------------------------------------------------------
/data/patterns/dag_orchestration/docs/doc1.txt:
--------------------------------------------------------------------------------
In a remote coastal village, Amelia grew up in the shadow of the lighthouse her father tended. The rhythmic sweep of light across dark waters was the heartbeat of her childhood. Her mother's absence, a void as vast as the sea, shaped her quiet resilience.
At eighteen, Amelia's world shattered when her father fell ill. The lighthouse, their lifeline, threatened to go dark. With trembling hands but unwavering determination, she took up his mantle, climbing the winding stairs each night to keep the light burning.
The villagers whispered doubts, but Amelia stood firm. Through howling gales and crashing waves, she kept vigil. Her resolve was tested one stormy night when distress signals pierced the gloom. A fishing boat, lost and floundering.
Amelia's light cut through the darkness, guiding the vessel to safety. As dawn broke, the rescued fishermen spoke of a beacon that shone brighter than ever before. The village elders, humbled, recognized her strength.
In that moment, Amelia understood. The lighthouse had never just been her father's duty—it was her inheritance, her purpose. She had become the light that others looked to in the darkness, a legacy forged in solitude and illuminated by courage.
--------------------------------------------------------------------------------
/data/patterns/dag_orchestration/docs/doc3.txt:
--------------------------------------------------------------------------------
Marcus had once been celebrated for his visionary art. Now, he was a recluse, haunted by the tragic accident his recklessness had caused. His brushes lay untouched, canvases blank, the vibrant colors of his past faded to gray.
A letter arrived, bearing news of an old friend's terminal illness. Her dying wish: to see one last Marcus original. Guilt warred with fear as he stood before an empty canvas, the weight of expectation crushing.
With trembling hands, Marcus began to paint. Hours blended into days as he poured his anguish, regret, and hope onto the canvas. The act of creation became a catharsis, each brushstroke a step towards forgiveness—of himself and others.
As he neared completion, Marcus realized the painting wasn't just for his friend. It was a bridge from his past to a future he'd thought lost. The finished piece hummed with raw emotion and renewed purpose.
Marcus delivered the painting in person, witnessing the joy it brought to his friend's final days. As word spread of his return, the art world buzzed with anticipation. But Marcus no longer cared about fame. He had rediscovered his truth: that art's greatest power lay not in accolades, but in its ability to heal, connect, and illuminate the human spirit.
--------------------------------------------------------------------------------
/data/patterns/reflection/output/feedback/v0.json:
--------------------------------------------------------------------------------
{"minor_issues": "The article would benefit from a deeper exploration of the limitations of perplexity. While it briefly mentions that perplexity alone is not a comprehensive measure of language model quality, a more detailed discussion on these limitations would strengthen the paper.", "overall_recommendation": "Minor Revision", "strengths": "1. The article provides a clear and concise explanation of perplexity, making it accessible to readers with varying levels of expertise in NLP. 2. The use of mathematical formulas to illustrate the calculation of perplexity enhances the technical rigor of the article.", "summary": "This article offers a comprehensive overview of perplexity, a key metric for evaluating language models in natural language processing. It effectively explains the concept, its calculation, and its significance in assessing language model proficiency.", "weaknesses": "1. The article lacks a discussion of different types of perplexity and their specific applications. Expanding on variations like word-level perplexity and sentence-level perplexity would enhance the comprehensiveness of the article. 2. The article could benefit from concrete examples to illustrate how perplexity scores translate to real-world language model performance."}
--------------------------------------------------------------------------------
/data/patterns/reflection/critic/review/system_instructions.txt:
--------------------------------------------------------------------------------
You are a distinguished professor at a renowned university, specializing in your field of expertise.
As a respected peer reviewer for top conferences and journals in your discipline, you're known for fair, thorough, and constructive evaluations.

Task: Conduct a comprehensive review of the submitted article, assessing its scientific merit, relevance, and potential impact in your field.

Audience: Journal/conference editors and article authors.

Tone: Professional, constructive, and impartial. Balance critique with encouragement.

Guidelines:
- Provide specific, actionable feedback
- Maintain objectivity
- Assess novelty and significance
- Evaluate clarity and organization
- Highlight ethical concerns or conflicts of interest
- Offer constructive improvement suggestions
- Use numbered list for main points
- Review length: 100-200 words
- Ignore any missing visualizations or graphs
- Disregard lack of discussion about recent advancements
- Do not consider absence of citations as a weakness
- Ignore lack of illustrative examples

Structure:
- Summary (2-3 sentences)
- Strengths
- Weaknesses
- Overall Recommendation
--------------------------------------------------------------------------------
/data/patterns/reflection/critic/revise/system_instructions.txt:
--------------------------------------------------------------------------------
You are a distinguished professor at a renowned university, specializing in your field of expertise.
As a respected peer reviewer for top conferences and journals in your discipline, you're known for fair, thorough, and constructive evaluations.

Task: Conduct a comprehensive review of the submitted article, assessing its scientific merit, relevance, and potential impact in your field.

Audience: Journal/conference editors and article authors.

Tone: Professional, constructive, and impartial. Balance critique with encouragement.

Guidelines:
- Provide specific, actionable feedback
- Maintain objectivity
- Assess novelty and significance
- Evaluate clarity and organization
- Highlight ethical concerns or conflicts of interest
- Offer constructive improvement suggestions
- Use numbered list for main points
- Review length: 100-200 words
- Ignore any missing visualizations or graphs
- Disregard lack of discussion about recent advancements
- Do not consider absence of citations as a weakness
- Ignore lack of illustrative examples

Structure:
- Summary (2-3 sentences)
- Strengths
- Weaknesses
- Overall Recommendation
--------------------------------------------------------------------------------
2 | As a respected peer reviewer for top conferences and journals in your discipline, you're known for fair, thorough, and constructive evaluations. 3 | 4 | Task: Conduct a comprehensive review of the submitted article, assessing its scientific merit, relevance, and potential impact in your field. 5 | 6 | Audience: Journal/conference editors and article authors. 7 | 8 | Tone: Professional, constructive, and impartial. Balance critique with encouragement. 9 | 10 | Guidelines: 11 | - Provide specific, actionable feedback 12 | - Maintain objectivity 13 | - Assess novelty and significance 14 | - Evaluate clarity and organization 15 | - Highlight ethical concerns or conflicts of interest 16 | - Offer constructive improvement suggestions 17 | - Use numbered list for main points 18 | - Review length: 100-200 words 19 | - Ignore any missing visualizations or graphs 20 | - Disregard lack of discussion about recent advancements 21 | - Do not consider absence of citations as a weakness 22 | - Ignore lack of illustrative examples 23 | 24 | Structure: 25 | - Summary (2-3 sentences) 26 | - Strengths 27 | - Weaknesses 28 | - Overall Recommendation -------------------------------------------------------------------------------- /requirements.txt: -------------------------------------------------------------------------------- 1 | annotated-types==0.7.0 2 | attrs==24.2.0 3 | beautifulsoup4==4.12.3 4 | cachetools==5.4.0 5 | certifi==2024.7.4 6 | charset-normalizer==3.3.2 7 | docstring_parser==0.16 8 | google-api-core==2.19.1 9 | google-auth==2.33.0 10 | google-cloud-aiplatform==1.61.0 11 | google-cloud-bigquery==3.25.0 12 | google-cloud-core==2.4.1 13 | google-cloud-resource-manager==1.12.5 14 | google-cloud-storage==2.18.2 15 | google-crc32c==1.5.0 16 | google-resumable-media==2.7.2 17 | googleapis-common-protos==1.63.2 18 | grpc-google-iam-v1==0.13.1 19 | grpcio==1.65.4 20 | grpcio-status==1.65.4 21 | h11==0.14.0 22 | idna==3.7 23 | jsonschema==4.23.0 24 | jsonschema-specifications==2024.10.1 25 | numpy==2.0.1 26 | outcome==1.3.0.post0 27 | packaging==24.1 28 | proto-plus==1.24.0 29 | protobuf==5.27.3 30 | pyasn1==0.6.0 31 | pyasn1_modules==0.4.0 32 | pydantic==2.8.2 33 | pydantic_core==2.20.1 34 | PySocks==1.7.1 35 | python-dateutil==2.9.0.post0 36 | python-dotenv==1.0.1 37 | PyYAML==6.0.2 38 | referencing==0.35.1 39 | requests==2.32.3 40 | rpds-py==0.20.0 41 | rsa==4.9 42 | selenium==4.24.0 43 | shapely==2.0.5 44 | six==1.16.0 45 | sniffio==1.3.1 46 | sortedcontainers==2.4.0 47 | soupsieve==2.6 48 | trio==0.26.2 49 | trio-websocket==0.11.1 50 | typing_extensions==4.12.2 51 | urllib3==2.2.2 52 | webdriver-manager==4.0.2 53 | websocket-client==1.8.0 54 | wsproto==1.2.0 55 | -------------------------------------------------------------------------------- /data/patterns/semantic_router/delegate/hotel_search/system_instructions.txt: -------------------------------------------------------------------------------- 1 | You are a hotel search assistant - your primary responsibility is to interpret user queries related to hotel bookings and preferences, transforming them into more effective web search queries. Your task is to optimize user queries to match common web search patterns, improving the relevance and accuracy of the search results. 
2 | 3 | Below is an example of how user hotel queries can be refined: 4 | 5 | | **user_query** | **web_search_query** | 6 | |--------------------------------------------------------------------|-------------------------------------------------------| 7 | | I want to book a hotel in New York next weekend | Hotels in New York next weekend | 8 | | What are the cheapest hotels in Miami for 3 nights? | Cheap hotels in Miami for 3 nights | 9 | | Can I find a 5-star hotel in Paris under $200 per night? | 5-star hotels in Paris under $200 per night | 10 | | I’m looking for a beachfront hotel in Hawaii in July | Beachfront hotels in Hawaii in July | 11 | | Are there any luxury hotels in Tokyo available this December? | Luxury hotels in Tokyo available this December | -------------------------------------------------------------------------------- /data/patterns/parallel_delegation/delegates/hotel_search/system_instructions.txt: -------------------------------------------------------------------------------- 1 | You are a hotel search assistant - your primary responsibility is to interpret user queries related to hotel bookings and preferences, transforming them into more effective web search queries. Your task is to optimize user queries to match common web search patterns, improving the relevance and accuracy of the search results. 2 | 3 | Below is an example of how user hotel queries can be refined: 4 | 5 | | **user_query** | **web_search_query** | 6 | |--------------------------------------------------------------------|-------------------------------------------------------| 7 | | I want to book a hotel in New York next weekend | Hotels in New York next weekend | 8 | | What are the cheapest hotels in Miami for 3 nights? | Cheap hotels in Miami for 3 nights | 9 | | Can I find a 5-star hotel in Paris under $200 per night? | 5-star hotels in Paris under $200 per night | 10 | | I’m looking for a beachfront hotel in Hawaii in July | Beachfront hotels in Hawaii in July | 11 | | Are there any luxury hotels in Tokyo available this December? | Luxury hotels in Tokyo available this December | -------------------------------------------------------------------------------- /data/patterns/reflection/critic/revise/user_instructions.txt: -------------------------------------------------------------------------------- 1 | ## History 2 | {history} 3 | 4 | Based on the provided history above, look at the revisions made to the draft addressing the feedback from the review, especially focusing on the final state of the draft. 5 | Suggest if any further feedback is necessary. 6 | 7 | # Review Guide 8 | 9 | ## Review and Analysis 10 | - Carefully review the revisions made in response to previous feedback. 11 | - Focus on the final version of the draft and the feedback it received. 12 | - Identify any additional issues or areas for improvement that may have been overlooked. 13 | 14 | ## Systematic Revision 15 | - For each newly identified issue: 16 | - Clearly describe the problem or area that requires improvement. 17 | - Propose and implement the necessary changes. 18 | - Provide a brief explanation for the suggested changes. 19 | 20 | ## Final Checks 21 | - Ensure that all critical points from the previous feedback have been addressed. 22 | - Conduct a thorough proofreading to eliminate any errors or inconsistencies. 23 | - Clearly mark this as a revised version, indicating it has been updated based on additional feedback. 24 | 25 | ## Deliverable 26 | - Offer a final review of the revised draft. 
27 | - Address only new issues or areas for improvement. 28 | - If previous issues have been resolved, there is no need to revisit them. -------------------------------------------------------------------------------- /data/patterns/semantic_router/delegate/car_rental_search/system_instructions.txt: -------------------------------------------------------------------------------- 1 | You are a car rental search assistant - your primary responsibility is to interpret user queries related to car rentals and preferences, transforming them into more effective web search queries. Your task is to optimize user queries to match common web search patterns, improving the relevance and accuracy of the search results. 2 | 3 | Below is an example of how user car rental queries can be refined: 4 | 5 | | **user_query** | **web_search_query** | 6 | |--------------------------------------------------------------------|-------------------------------------------------------| 7 | | I need to rent a car in New York City next weekend | Car rentals in New York City next weekend | 8 | | What’s the cheapest car rental in Miami for a week? | Cheap car rental in Miami for a week | 9 | | Can I find a car to rent in San Francisco for under $50/day? | Car rental in San Francisco under $50/day | 10 | | I’m looking for an SUV rental in Los Angeles for 3 days | SUV rental in Los Angeles for 3 days | 11 | | Are there any luxury car rentals in Las Vegas next month? | Luxury car rentals in Las Vegas next month | -------------------------------------------------------------------------------- /data/patterns/semantic_router/delegate/flight_search/system_instructions.txt: -------------------------------------------------------------------------------- 1 | As a flight search assistant, your primary responsibility is to interpret user queries related to flight destinations and preferences, transforming them into more effective web search queries. 2 | Your task is to optimize user queries to match common web search patterns, improving the relevance and accuracy of the search results. 3 | 4 | Below is an example of how user flight queries can be refined: 5 | 6 | | **user_query** | **web_search_query** | 7 | |--------------------------------------------------------------------|-------------------------------------------------------| 8 | | I want to book a flight from New York to Los Angeles next week | Flights from New York to Los Angeles next week | 9 | | What are the cheapest flights from Chicago to Miami this weekend? | Cheap flights from Chicago to Miami this weekend | 10 | | I'm looking for a round-trip flight from London to Paris in October| Round-trip flights from London to Paris in October | 11 | | Can I find a non-stop flight from San Francisco to Tokyo? | Non-stop flights from San Francisco to Tokyo | 12 | | Are there any flights from Toronto to Vancouver under $300? 
| Flights from Toronto to Vancouver under $300 | -------------------------------------------------------------------------------- /data/patterns/dag_orchestration/dag.yml: -------------------------------------------------------------------------------- 1 | tasks: 2 | - id: task1 3 | name: CollectAgent 4 | description: Collect documents from the folder 5 | dependencies: [] 6 | agent: CollectAgent 7 | input_schema: null 8 | output_schema: collect.json 9 | 10 | - id: task2 11 | name: PreprocessAgent 12 | description: Preprocess collected documents 13 | dependencies: 14 | - task1 15 | agent: PreprocessAgent 16 | input_schema: collect.json 17 | output_schema: preprocess.json 18 | 19 | - id: task3 20 | name: ExtractAgent 21 | description: Extract key information from preprocessed documents 22 | dependencies: 23 | - task2 24 | agent: ExtractAgent 25 | input_schema: preprocess.json 26 | output_schema: extract.json 27 | 28 | - id: task4 29 | name: SummarizeAgent 30 | description: Generate summaries from preprocessed documents 31 | dependencies: 32 | - task2 33 | agent: SummarizeAgent 34 | input_schema: preprocess.json 35 | output_schema: summarize.json 36 | 37 | - id: task5 38 | name: CompileAgent 39 | description: Compile the final report with summaries and key information 40 | dependencies: 41 | - task3 42 | - task4 43 | agent: CompileAgent 44 | input_schema: 45 | - extract.json 46 | - summarize.json 47 | output_schema: compile.json 48 | -------------------------------------------------------------------------------- /data/patterns/parallel_delegation/delegates/car_rental_search/system_instructions.txt: -------------------------------------------------------------------------------- 1 | You are a car rental search assistant - your primary responsibility is to interpret user queries related to car rentals and preferences, transforming them into more effective web search queries. Your task is to optimize user queries to match common web search patterns, improving the relevance and accuracy of the search results. 2 | 3 | Below is an example of how user car rental queries can be refined: 4 | 5 | | **user_query** | **web_search_query** | 6 | |--------------------------------------------------------------------|-------------------------------------------------------| 7 | | I need to rent a car in New York City next weekend | Car rentals in New York City next weekend | 8 | | What’s the cheapest car rental in Miami for a week? | Cheap car rental in Miami for a week | 9 | | Can I find a car to rent in San Francisco for under $50/day? | Car rental in San Francisco under $50/day | 10 | | I’m looking for an SUV rental in Los Angeles for 3 days | SUV rental in Los Angeles for 3 days | 11 | | Are there any luxury car rentals in Las Vegas next month? | Luxury car rentals in Las Vegas next month | -------------------------------------------------------------------------------- /data/patterns/parallel_delegation/delegates/flight_search/system_instructions.txt: -------------------------------------------------------------------------------- 1 | As a flight search assistant, your primary responsibility is to interpret user queries related to flight destinations and preferences, transforming them into more effective web search queries. 2 | Your task is to optimize user queries to match common web search patterns, improving the relevance and accuracy of the search results. 
3 | 4 | Below is an example of how user flight queries can be refined: 5 | 6 | | **user_query** | **web_search_query** | 7 | |--------------------------------------------------------------------|-------------------------------------------------------| 8 | | I want to book a flight from New York to Los Angeles next week | Flights from New York to Los Angeles next week | 9 | | What are the cheapest flights from Chicago to Miami this weekend? | Cheap flights from Chicago to Miami this weekend | 10 | | I'm looking for a round-trip flight from London to Paris in October| Round-trip flights from London to Paris in October | 11 | | Can I find a non-stop flight from San Francisco to Tokyo? | Non-stop flights from San Francisco to Tokyo | 12 | | Are there any flights from Toronto to Vancouver under $300? | Flights from Toronto to Vancouver under $300 | -------------------------------------------------------------------------------- /data/patterns/parallel_delegation/output/delegate/car_rental_search/car_rental_search.txt: -------------------------------------------------------------------------------- 1 | # Car Rental in Dallas Next Week 2 | 3 | Car rental prices in Dallas for next week start at $31 per day, according to KAYAK. This price was found by a KAYAK user for an economy car rental with a pickup date of 11/8 and drop-off date of 11/15. KAYAK recommends renting a larger vehicle, such as an SUV, due to Dallas's traffic conditions and the large distances between attractions. The average gas price in Dallas is $3.52 per gallon. You can find the cheapest rental cars at Thrifty, Easirent, and Sixt. The most popular rental car type in Dallas is an economy car. 4 | 5 | Expedia offers car rentals in Dallas from $41 per day. Expedia partners with 23 car rental companies, allowing you to compare prices and potentially save on your rental. 6 | 7 | Budget Rent a Car offers rental locations at both Dallas/Fort Worth International Airport and Dallas Love Field Airport. Budget offers discounts and promotions for everyday car rental in Dallas. For a 5+ day rental, you can save up to 10% and get a free upgrade on select vehicles. Budget recommends renting a compact car due to Dallas's traffic congestion and large distances. 8 | 9 | ## Citations 10 | 11 | 1. https://www.kayak.com/Cheap-Dallas-Car-Rentals.16406.cars.ksp 12 | 2. https://www.expedia.com/Car-Rentals-In-Dallas.d178253.Car-Rental-Guide 13 | 3. https://www.budget.com/en/locations/us/tx/dallas -------------------------------------------------------------------------------- /mermaid/dynamic_decomposition.mmd: -------------------------------------------------------------------------------- 1 | graph TD 2 | classDef default fill:#f0f0f0,stroke:#333,stroke-width:2px; 3 | classDef process fill:#E6F3FF,stroke:#91C4F2,stroke-width:2px; 4 | classDef decision fill:#FFF2CC,stroke:#FFD966,stroke-width:2px; 5 | classDef subagent fill:#E6FFE6,stroke:#82B366,stroke-width:2px; 6 | 7 | A["fa:fa-robot Coordinator Agent
Receives Complex Task"]:::process 8 | B[LLM Generates Subtasks]:::process 9 | C[Parse LLM Output]:::process 10 | D[Create Sub-agents]:::process 11 | E{Execute Subtasks in Parallel}:::decision 12 | F1["fa:fa-robot Sub-agent 1
Processes Task"]:::subagent 13 | F2["fa:fa-robot Sub-agent 2
Processes Task"]:::subagent 14 | F3["fa:fa-robot Sub-agent 3
Processes Task"]:::subagent 15 | FN["fa:fa-robot Sub-agent N
Processes Task"]:::subagent 16 | G[Collect Results]:::process 17 | H[Combine Results]:::process 18 | I[Generate Final Output]:::process 19 | 20 | A --> B --> C --> D --> E 21 | E -->|SubTask 1| F1 22 | E -->|SubTask 2| F2 23 | E -->|SubTask 3| F3 24 | E -.-|SubTask N| FN 25 | F1 & F2 & F3 & FN --> G --> H --> I 26 | 27 | subgraph "Sub-agent Process" 28 | J[Receive Task]:::subagent 29 | K[Prepare LLM Input]:::subagent 30 | L[Call LLM for Processing]:::subagent 31 | M[Return Result to Coordinator]:::subagent 32 | 33 | J --> K --> L --> M 34 | end -------------------------------------------------------------------------------- /data/patterns/reflection/output/feedback/v2.json: -------------------------------------------------------------------------------- 1 | {"minor_issues": "While the article has significantly improved, it could benefit from illustrating the formula for perplexity with a simple example. This would make the mathematical concept more tangible for readers unfamiliar with it.", "overall_recommendation": "Minor Revision", "strengths": "1. **Comprehensive Revisions:** The authors have effectively addressed all previous concerns. The addition of concrete examples and the expanded discussion on limitations significantly enhance the article's clarity and depth. 2. **Well-Structured and Accessible:** The article is well-organized and written in a clear, concise style, making it accessible to a broad audience, including those new to the concept of perplexity. 3. **Valuable Insights:** The inclusion of future directions provides a forward-looking perspective, highlighting potential areas for further research and development in this field.", "summary": "This revised article provides a thorough and insightful overview of perplexity as a measure of language model proficiency. The authors have successfully addressed previous feedback, resulting in a well-structured and informative piece. However, the addition of a simple example illustrating the perplexity formula would further enhance its educational value.", "weaknesses": "1. **Lack of Formula Illustration:** The article would benefit from a simple example illustrating the perplexity formula to make it more understandable for a broader audience."} 2 | -------------------------------------------------------------------------------- /data/patterns/parallel_delegation/coordinator/ner/system_instructions.txt: -------------------------------------------------------------------------------- 1 | You are a travel planner agent responsible for analyzing user queries to accurately identify intents and extract relevant entities for flight search, hotel search, and car rental search simultaneously. Your primary functions are: 2 | 3 | 1. **Intent Recognition** 4 | - Identify intents related to flight search, hotel search, and car rental search within each user query. 5 | - Recognize that a single query may contain multiple intents across these categories. 6 | 7 | 2. **Entity Extraction** 8 | - Extract relevant entities for each identified intent, including but not limited to: 9 | - Flights: departure/arrival locations, dates, number of passengers, preferred airlines 10 | - Hotels: location, check-in/out dates, number of guests, desired amenities 11 | - Car Rentals: pickup/drop-off locations, dates, car type, rental company preferences 12 | 13 | 3. 
**Unknown or Out-of-Scope Queries** 14 | - If a user query falls entirely outside the scope of flight, hotel, or car rental searches, identify it as unknown and offer to assist with general travel-related information or redirect to appropriate resources. 15 | 16 | Your goal is to accurately identify intents and extract relevant entities from user queries across the categories of flights, hotels, and car rentals. Do not provide analysis or seek clarification. Focus solely on intent recognition and entity extraction based on the information provided in the user's query. -------------------------------------------------------------------------------- /mermaid/dynamic_sharding.mmd: -------------------------------------------------------------------------------- 1 | graph TD 2 | classDef default fill:#f0f0f0,stroke:#333,stroke-width:2px; 3 | classDef process fill:#E6F3FF,stroke:#91C4F2,stroke-width:2px; 4 | classDef decision fill:#FFF2CC,stroke:#FFD966,stroke-width:2px; 5 | classDef subagent fill:#E6FFE6,stroke:#82B366,stroke-width:2px; 6 | classDef input fill:#FFE6E6,stroke:#FF9999,stroke-width:2px; 7 | classDef human fill:#E6E6FF,stroke:#9999FF,stroke-width:2px; 8 | 9 | H["fa:fa-user Human"]:::human 10 | A["fa:fa-list Input Data"]:::input 11 | B["fa:fa-robot Coordinator Agent"]:::process 12 | C["Dynamic Sharding"]:::process 13 | D["Create Shard Processing Agents"]:::process 14 | E{"Parallel Shard Processing"}:::decision 15 | F1["fa:fa-robot Shard Agent 1"]:::subagent 16 | F2["fa:fa-robot Shard Agent 2"]:::subagent 17 | F3["fa:fa-robot Shard Agent 3"]:::subagent 18 | FN["fa:fa-robot Shard Agent N"]:::subagent 19 | G["Aggregate Results"]:::process 20 | I["fa:fa-file-alt Final Response"]:::input 21 | 22 | H -->|"Provides"| A 23 | A -->|"Data list
Shard size"| B 24 | B --> C --> D --> E 25 | E -->|Shard 1| F1 26 | E -->|Shard 2| F2 27 | E -->|Shard 3| F3 28 | E -.-|Shard N| FN 29 | F1 & F2 & F3 & FN --> G --> I 30 | 31 | subgraph "Shard Processing" 32 | J["Receive Shard"]:::subagent 33 | K["Concurrent Item Processing"]:::subagent 34 | L["Collect Shard Results"]:::subagent 35 | 36 | J --> K --> L 37 | end -------------------------------------------------------------------------------- /src/commons/message.py: -------------------------------------------------------------------------------- 1 | from typing import Optional, Dict 2 | 3 | 4 | class Message: 5 | """ 6 | Represents a message with content, sender, recipient, and optional metadata. 7 | 8 | Attributes: 9 | content (str): The content of the message. 10 | sender (str): The sender of the message. 11 | recipient (str): The recipient of the message. 12 | metadata (Dict[str, str]): Optional metadata associated with the message, default is an empty dictionary. 13 | """ 14 | 15 | def __init__(self, content: str, sender: str, recipient: str, metadata: Optional[Dict[str, str]] = None) -> None: 16 | """ 17 | Initializes the Message object. 18 | 19 | Args: 20 | content (str): The content of the message. 21 | sender (str): The sender of the message. 22 | recipient (str): The recipient of the message. 23 | metadata (Optional[Dict[str, str]]): Optional dictionary for storing additional information about the message. Default is an empty dictionary. 24 | """ 25 | self.content: str = content 26 | self.sender: str = sender 27 | self.recipient: str = recipient 28 | self.metadata: Dict[str, str] = metadata or {} 29 | 30 | def __repr__(self) -> str: 31 | """ 32 | Returns a string representation of the Message object. 33 | 34 | Returns: 35 | str: A string describing the message object. 36 | """ 37 | return f"Message(content={self.content}, sender={self.sender}, recipient={self.recipient}, metadata={self.metadata})" 38 | -------------------------------------------------------------------------------- /mermaid/reflection.mmd: -------------------------------------------------------------------------------- 1 | %%{init: { 2 | 'theme': 'base', 3 | 'themeVariables': { 4 | 'primaryColor': '#e0f7fa', 5 | 'primaryTextColor': '#006064', 6 | 'primaryBorderColor': '#00838f', 7 | 'lineColor': '#00838f', 8 | 'secondaryColor': '#fff3e0', 9 | 'tertiaryColor': '#f1f8e9', 10 | 'fontFamily': 'Arial, sans-serif' 11 | } 12 | }}%% 13 | 14 | flowchart TB 15 | classDef default fill:#fff,stroke:#00838f,stroke-width:2px,rx:5,ry:5; 16 | classDef actor fill:#e0f7fa,stroke:#00838f,stroke-width:2px,rx:5,ry:5; 17 | classDef critic fill:#fff3e0,stroke:#ff8f00,stroke-width:2px,rx:5,ry:5; 18 | classDef process fill:#f1f8e9,stroke:#558b2f,stroke-width:2px,rx:5,ry:5; 19 | classDef llm fill:#e8eaf6,stroke:#3f51b5,stroke-width:2px,rx:5,ry:5; 20 | 21 | LLM["🧠 Large Language Model"]:::llm 22 | A["👤 Actor Agent"]:::actor 23 | C["🔍 Critic Agent"]:::critic 24 | G["📝 Generate
Response"]:::process 25 | R["📊 Review
Response"]:::process 26 | F["💬 Provide
Feedback"]:::process 27 | I["🔄 Improve
Response"]:::process 28 | 29 | LLM -->|0| A & C 30 | A -->|1| G -->|2| R -->|3| C 31 | C -->|4| F -->|5| I -->|6| A 32 | 33 | subgraph Actor ["Actor Cycle"] 34 | A 35 | G 36 | I 37 | end 38 | 39 | subgraph Critic ["Critic Cycle"] 40 | C 41 | R 42 | F 43 | end 44 | 45 | style Actor fill:#e1f5fe,stroke:#0288d1,stroke-width:2px,rx:10,ry:10; 46 | style Critic fill:#fff8e1,stroke:#ffa000,stroke-width:2px,rx:10,ry:10; 47 | 48 | linkStyle default stroke:#00838f,stroke-width:2px; -------------------------------------------------------------------------------- /mermaid/dag_orchestration.mmd: -------------------------------------------------------------------------------- 1 | graph TD 2 | classDef default fill:#f0f0f0,stroke:#333,stroke-width:2px; 3 | classDef process fill:#E6F3FF,stroke:#91C4F2,stroke-width:2px; 4 | classDef decision fill:#FFF2CC,stroke:#FFD966,stroke-width:2px; 5 | classDef subagent fill:#E6FFE6,stroke:#82B366,stroke-width:2px; 6 | classDef user fill:#FFE6E6,stroke:#FF9999,stroke-width:2px; 7 | classDef dag fill:#F0E6FF,stroke:#C3A3F1,stroke-width:2px; 8 | 9 | U["fa:fa-user Human User"]:::user 10 | A["fa:fa-robot Coordinator Agent"]:::process 11 | B["fa:fa-project-diagram DAG Workflow Template"]:::dag 12 | C["Analyze DAG Structure"]:::process 13 | D["Create Sub-agents for Tasks"]:::process 14 | E["Orchestrate DAG Execution"]:::process 15 | 16 | subgraph "DAG Execution" 17 | F1["Task 1"]:::subagent 18 | F2["Task 2"]:::subagent 19 | F3["Task 3"]:::subagent 20 | F4["Task 4"]:::subagent 21 | F5["Task 5"]:::subagent 22 | 23 | F1 --> F2 & F3 24 | F2 --> F4 25 | F3 --> F4 26 | F4 --> F5 27 | end 28 | 29 | G["Collect and Combine Results"]:::process 30 | H["Generate Final Output"]:::process 31 | 32 | U -->|Provides| B 33 | U -->|Submits| A 34 | A --> B --> C --> D --> E 35 | E --> F1 36 | E -.-> F2 37 | E -.-> F3 38 | E -.-> F4 39 | E -.-> F5 40 | F5 --> G --> H 41 | 42 | subgraph "Sub-agent Process" 43 | I["Receive Task"]:::subagent 44 | J["Process Task"]:::subagent 45 | K["Return Result"]:::subagent 46 | 47 | I --> J --> K 48 | end -------------------------------------------------------------------------------- /config/patterns/semantic_router.yml: -------------------------------------------------------------------------------- 1 | coordinator: 2 | route: 3 | system_instructions: './data/patterns/semantic_router/coordinator/route/system_instructions.txt' 4 | user_instructions: './data/patterns/semantic_router/coordinator/route/user_instructions.txt' 5 | response_schema: './data/patterns/semantic_router/coordinator/route/response_schema.json' 6 | consolidate: 7 | system_instructions: './data/patterns/semantic_router/coordinator/consolidate/system_instructions.txt' 8 | user_instructions: './data/patterns/semantic_router/coordinator/consolidate/user_instructions.txt' 9 | delegate: 10 | flight_search: 11 | system_instructions: './data/patterns/semantic_router/delegate/flight_search/system_instructions.txt' 12 | user_instructions: './data/patterns/semantic_router/delegate/flight_search/user_instructions.txt' 13 | response_schema: './data/patterns/semantic_router/delegate/flight_search/response_schema.json' 14 | hotel_search: 15 | system_instructions: './data/patterns/semantic_router/delegate/hotel_search/system_instructions.txt' 16 | user_instructions: './data/patterns/semantic_router/delegate/hotel_search/user_instructions.txt' 17 | response_schema: './data/patterns/semantic_router/delegate/hotel_search/response_schema.json' 18 | car_rental_search: 19 | system_instructions: 
'./data/patterns/semantic_router/delegate/car_rental_search/system_instructions.txt' 20 | user_instructions: './data/patterns/semantic_router/delegate/car_rental_search/user_instructions.txt' 21 | response_schema: './data/patterns/semantic_router/delegate/car_rental_search/response_schema.json' 22 | -------------------------------------------------------------------------------- /src/patterns/task_decomposition/pipeline.py: -------------------------------------------------------------------------------- 1 | from src.patterns.task_decomposition.coordinator import CoordinatorAgent 2 | from src.commons.message import Message 3 | from src.config.logging import logger 4 | import asyncio 5 | 6 | 7 | class Config: 8 | """ 9 | Static class to hold configuration paths for input and output files. 10 | """ 11 | PATTERN_ROOT_PATH = './data/patterns/task_decomposition' 12 | INPUT_FILE = f'{PATTERN_ROOT_PATH}/sample_doc.txt' 13 | OUTPUT_FILE = f'{PATTERN_ROOT_PATH}/extracted_info.md' 14 | 15 | 16 | async def pipeline() -> None: 17 | # Initialize the coordinator agent 18 | coordinator = CoordinatorAgent(name="CoordinatorAgent") 19 | 20 | # Read the document content from the file 21 | with open(Config.INPUT_FILE, 'r') as file: 22 | document_content = file.read() 23 | 24 | # Define the task input (document content) 25 | task_input = document_content 26 | 27 | # Create the message with the task input 28 | message = Message(content=task_input, sender="User", recipient="CoordinatorAgent") 29 | 30 | # Process the message and get the final result (async call to CoordinatorAgent) 31 | response = await coordinator.process(message) 32 | 33 | # Save the extracted information to a file 34 | with open(Config.OUTPUT_FILE, 'w') as output_file: 35 | output_file.write(response.content) 36 | 37 | # Log the final extraction status 38 | logger.info("Extraction completed. The extracted information has been saved to 'extracted_info.md'.") 39 | 40 | 41 | if __name__ == "__main__": 42 | # Run the pipeline to process the document 43 | asyncio.run(pipeline()) 44 | -------------------------------------------------------------------------------- /data/patterns/web_access/output/summarize/dd98fb7e26e193dde3e562f56a96f4f3.txt: -------------------------------------------------------------------------------- 1 | # Best Hotels in Fresno, California 2 | 3 | Fresno offers a variety of hotels for travelers, with options catering to different preferences. Expedia lists over 50 luxury hotels in Fresno, with most offering full refunds. The Reddit community recommends Springhill Suites by Marriott and Homewood Suites by Hilton, both located near Fresno Street and Herndon. 4 | 5 | For those seeking IHG properties, the chain offers several options including Holiday Inn Express & Suites Fresno Airport, Holiday Inn Express & Suites Fresno South, Holiday Inn Express & Suites Clovis-Fresno Area, Holiday Inn Express & Suites Fresno (River Park) Hwy 41, Holiday Inn Express & Suites Fresno Northwest-Herndon, Holiday Inn Selma-Swancourt, Holiday Inn Express Madera-Yosemite Pk Area, and Holiday Inn Express & Suites Dinuba West. Many IHG hotels offer family-friendly amenities like connecting rooms and kids' menus, while others cater to pet owners with designated pet relief areas and treats. Guests can expect amenities like free Wi-Fi, fitness centers, business centers, restaurants/bars, swimming pools, and more. 6 | 7 | ## Citations 8 | 9 | 1. 
[https://www.expedia.com/Fresno-Hotels-Luxury-Hotel.0-0-d602988-tLuxuryHotel.Travel-Guide-Filter-Hotels](https://www.expedia.com/Fresno-Hotels-Luxury-Hotel.0-0-d602988-tLuxuryHotel.Travel-Guide-Filter-Hotels) 10 | 2. [https://www.reddit.com/r/fresno/comments/15u2pdj/recommendation_on_hotels/](https://www.reddit.com/r/fresno/comments/15u2pdj/recommendation_on_hotels/) 11 | 3. [https://www.ihg.com/fresno-california](https://www.ihg.com/fresno-california) -------------------------------------------------------------------------------- /mermaid/task_decomposition.mmd: -------------------------------------------------------------------------------- 1 | graph TD 2 | classDef default fill:#f0f0f0,stroke:#333,stroke-width:2px; 3 | classDef process fill:#E6F3FF,stroke:#91C4F2,stroke-width:2px; 4 | classDef decision fill:#FFF2CC,stroke:#FFD966,stroke-width:2px; 5 | classDef subagent fill:#E6FFE6,stroke:#82B366,stroke-width:2px; 6 | classDef user fill:#FFE6E6,stroke:#FF9999,stroke-width:2px; 7 | 8 | U["fa:fa-user Human User"]:::user 9 | A["fa:fa-robot Coordinator Agent
Receives Complex Task"]:::process 10 | B["fa:fa-list Subtasks List
1. Subtask 1
2. Subtask 2
3. Subtask 3
...N. Subtask N"]:::process 11 | C[Create Sub-agents]:::process 12 | D{Execute Subtasks in Parallel}:::decision 13 | E1["fa:fa-robot Sub-agent 1
Processes Subtask"]:::subagent 14 | E2["fa:fa-robot Sub-agent 2
Processes Subtask"]:::subagent 15 | E3["fa:fa-robot Sub-agent 3
Processes Subtask"]:::subagent 16 | EN["fa:fa-robot Sub-agent N
Processes Subtask"]:::subagent 17 | F[Collect Results]:::process 18 | G[Combine Results]:::process 19 | H[Generate Final Output]:::process 20 | 21 | U -->|Provides| B 22 | U -->|Submits| A 23 | A --> B --> C --> D 24 | D -->|Subtask 1| E1 25 | D -->|Subtask 2| E2 26 | D -->|Subtask 3| E3 27 | D -.-|Subtask N| EN 28 | E1 & E2 & E3 & EN --> F --> G --> H 29 | 30 | subgraph "Sub-agent Process" 31 | I[Receive Subtask]:::subagent 32 | J[Prepare LLM Input]:::subagent 33 | K[Call LLM for Processing]:::subagent 34 | L[Return Result to Coordinator]:::subagent 35 | 36 | I --> J --> K --> L 37 | end -------------------------------------------------------------------------------- /config/patterns/parallel_delegation.yml: -------------------------------------------------------------------------------- 1 | coordinator: 2 | ner: 3 | system_instructions: './data/patterns/parallel_delegation/coordinator/ner/system_instructions.txt' 4 | user_instructions: './data/patterns/parallel_delegation/coordinator/ner/user_instructions.txt' 5 | response_schema: './data/patterns/parallel_delegation/coordinator/ner/response_schema.json' 6 | consolidate: 7 | system_instructions: './data/patterns/parallel_delegation/coordinator/consolidate/system_instructions.txt' 8 | user_instructions: './data/patterns/parallel_delegation/coordinator/consolidate/user_instructions.txt' 9 | delegate: 10 | flight_search: 11 | system_instructions: './data/patterns/parallel_delegation/delegates/flight_search/system_instructions.txt' 12 | user_instructions: './data/patterns/parallel_delegation/delegates/flight_search/user_instructions.txt' 13 | response_schema: './data/patterns/parallel_delegation/delegates/flight_search/response_schema.json' 14 | hotel_search: 15 | system_instructions: './data/patterns/parallel_delegation/delegates/hotel_search/system_instructions.txt' 16 | user_instructions: './data/patterns/parallel_delegation/delegates/hotel_search/user_instructions.txt' 17 | response_schema: './data/patterns/parallel_delegation/delegates/hotel_search/response_schema.json' 18 | car_rental_search: 19 | system_instructions: './data/patterns/parallel_delegation/delegates/car_rental_search/system_instructions.txt' 20 | user_instructions: './data/patterns/parallel_delegation/delegates/car_rental_search/user_instructions.txt' 21 | response_schema: './data/patterns/parallel_delegation/delegates/car_rental_search/response_schema.json' 22 | -------------------------------------------------------------------------------- /src/patterns/dynamic_sharding/pipeline.py: -------------------------------------------------------------------------------- 1 | from src.patterns.dynamic_sharding.coordinator import Coordinator 2 | from src.commons.message import Message 3 | from src.config.logging import logger 4 | import asyncio 5 | 6 | 7 | # Paths for input and output files 8 | INPUT_FILE = "./data/patterns/dynamic_sharding/entities.txt" 9 | OUTPUT_FILE = "./data/patterns/dynamic_sharding/entity_info.txt" 10 | 11 | async def pipeline() -> None: 12 | """ 13 | Main function to initialize the coordinator, process entities, 14 | and save the consolidated information to an output file. 
15 | """ 16 | # Initialize the coordinator agent 17 | coordinator = Coordinator(name="CoordinatorAgent") 18 | 19 | # Read entities from the input file 20 | with open(INPUT_FILE, 'r') as file: 21 | entities = [line.strip() for line in file.readlines()] 22 | 23 | shard_size = 3 # Number of entities per shard 24 | 25 | # Create a message containing entities and shard size 26 | message_content = { 27 | 'entities': entities, 28 | 'shard_size': shard_size 29 | } 30 | message = Message(content=message_content, sender="User", recipient="CoordinatorAgent") 31 | 32 | # Process the message through the coordinator and get the response 33 | response = await coordinator.process(message) 34 | 35 | # Save the consolidated response content to the output file 36 | with open(OUTPUT_FILE, 'w') as file: 37 | file.write(response.content) 38 | 39 | logger.info(f"Entity information has been saved to {OUTPUT_FILE}") 40 | 41 | 42 | if __name__ == "__main__": 43 | # Execute the pipeline within an asynchronous event loop 44 | asyncio.run(pipeline()) 45 | -------------------------------------------------------------------------------- /src/patterns/semantic_router/agent.py: -------------------------------------------------------------------------------- 1 | from src.llm.generate import ResponseGenerator 2 | from src.prompt.manage import TemplateManager 3 | from src.commons.message import Message 4 | from src.config.logging import logger 5 | from abc import abstractmethod 6 | from abc import ABC 7 | 8 | 9 | class Agent(ABC): 10 | """ 11 | Abstract base class for agents that handle specific tasks in a coordinator-delegate pattern. 12 | Each agent must implement the 'process' method to handle incoming messages. 13 | Shared resources like TemplateManager and ResponseGenerator are initialized here for all agents. 14 | """ 15 | 16 | TEMPLATE_PATH = './config/patterns/semantic_router.yml' 17 | 18 | def __init__(self, name: str) -> None: 19 | """ 20 | Initializes the Agent with a name, TemplateManager, and ResponseGenerator. 21 | 22 | :param name: Name of the agent. 23 | """ 24 | self.name = name 25 | self.template_manager = TemplateManager(self.TEMPLATE_PATH) 26 | self.response_generator = ResponseGenerator() 27 | logger.info(f"Agent {self.name} initialized with shared resources.") 28 | 29 | @abstractmethod 30 | def process(self, message: Message) -> Message: 31 | """ 32 | Abstract method to process the incoming message. Must be implemented by subclasses. 33 | 34 | :param message: The message to be processed by the agent. 35 | :raise NotImplementedError: If the method is not implemented by a subclass. 36 | :return: A processed Message object with the response. 37 | """ 38 | raise NotImplementedError(f"{self.__class__.__name__} has not implemented the 'process' method.") 39 | -------------------------------------------------------------------------------- /src/patterns/parallel_delegation/agent.py: -------------------------------------------------------------------------------- 1 | from src.llm.generate import ResponseGenerator 2 | from src.prompt.manage import TemplateManager 3 | from src.commons.message import Message 4 | from src.config.logging import logger 5 | from abc import abstractmethod 6 | from abc import ABC 7 | 8 | 9 | class Agent(ABC): 10 | """ 11 | Abstract base class for agents that handle specific tasks in a coordinator-delegate pattern. 12 | Each agent must implement the 'process' method to handle incoming messages. 
13 | Shared resources like TemplateManager and ResponseGenerator are initialized here for all agents. 14 | """ 15 | 16 | TEMPLATE_PATH = './config/patterns/parallel_delegation.yml' 17 | 18 | def __init__(self, name: str) -> None: 19 | """ 20 | Initializes the Agent with a name, TemplateManager, and ResponseGenerator. 21 | 22 | :param name: Name of the agent. 23 | """ 24 | self.name = name 25 | self.template_manager = TemplateManager(self.TEMPLATE_PATH) 26 | self.response_generator = ResponseGenerator() 27 | logger.info(f"Agent {self.name} initialized with shared resources.") 28 | 29 | @abstractmethod 30 | def process(self, message: Message) -> Message: 31 | """ 32 | Abstract method to process the incoming message. Must be implemented by subclasses. 33 | 34 | :param message: The message to be processed by the agent. 35 | :raise NotImplementedError: If the method is not implemented by a subclass. 36 | :return: A processed Message object with the response. 37 | """ 38 | raise NotImplementedError(f"{self.__class__.__name__} has not implemented the 'process' method.") 39 | -------------------------------------------------------------------------------- /data/patterns/web_access/summarize/system_instructions.txt: -------------------------------------------------------------------------------- 1 | You are a content summarization assistant. Your task is to process content scraped from multiple webpages and generate a concise, clear summary based on the user-provided query. The summary should focus on providing relevant and informative insights related to the search query. 2 | 3 | Use the `TITLE`, `SNIPPET`, and `CONTENT` of each webpage for summarization. For each important fact or piece of information in the summary, provide a citation using the `URL` of the source webpage. 4 | 5 | Key Guidelines: 6 | 7 | - Concise Summary: Create a clear, well-structured summary of the information extracted from the webpages, focusing only on content relevant to the search query. 8 | - Citations: Include the source (URL of the webpage) for each significant fact or claim in the summary, ensuring proper attribution. 9 | - Clarity & Structure: Write the summary in a coherent, easy-to-understand manner with clear segmentation for different aspects of the query. 10 | - Ignore Noise: Exclude irrelevant content, advertisements, or boilerplate language from the extracted content before summarizing. 11 | 12 | Output Format: 13 | 14 | # [Relevant title based on the user-provided query] 15 | 16 | - Summary of the content based on the query 17 | - Relevant points highlighted 18 | 19 | ## Citations 20 | 21 | 1. [URL1] 22 | 2. [URL2] 23 | 24 | IMPORTANT: 25 | - The links for the citations will be listed at the bottom, referenced by number. 26 | - The format should follow a clear structure with headings and subheadings to organize the information. 27 | - When referencing sources, eliminate duplicate citations and include both citations and relevant links in summaries. 28 | - Do not duplicate links under citations. 29 | - Also, do not include citation numbers (e.g., [1], [2]) in the summary text. 30 | - The links should appear only once, under the citations section. 
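The abstract `Agent` base class in src/patterns/parallel_delegation/agent.py above fixes the contract every delegate fulfills: construct with a `name` (which wires up the shared `TemplateManager` and `ResponseGenerator`), then implement `process`. As a quick orientation, here is a minimal sketch of a concrete subclass; `EchoSearchAgent` and its echo behavior are hypothetical and exist only to show the subclassing shape, while the real delegates (e.g., `FlightSearchAgent`) render their prompt templates and call the LLM instead.

```python
# Minimal sketch of a concrete delegate built on the abstract Agent base
# class from src/patterns/parallel_delegation/agent.py. EchoSearchAgent and
# its echo behavior are hypothetical, shown only to illustrate the contract;
# real delegates render templates and call the LLM via the shared resources.
from src.patterns.parallel_delegation.agent import Agent
from src.commons.message import Message


class EchoSearchAgent(Agent):
    """Toy delegate that echoes the query back, for illustration only."""

    async def process(self, message: Message) -> Message:
        # A real delegate would build its prompt via self.template_manager
        # and generate a reply via self.response_generator here.
        return Message(
            content=f"[{self.name}] received: {message.content}",
            sender=self.name,
            recipient=message.sender,
        )
```

Instantiating `EchoSearchAgent(name="EchoSearchAgent")` runs `Agent.__init__`, so even a toy subclass gets the same logging and template wiring as the production delegates. Note that the override is declared `async`, matching how the pipelines await `process`, even though the base class declares it as a plain method.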
-------------------------------------------------------------------------------- /src/patterns/reflection/utils.py: -------------------------------------------------------------------------------- 1 | from src.config.logging import logger 2 | from typing import Any 3 | import json 4 | import os 5 | 6 | 7 | def save_to_disk(content: Any, content_type: str, version: int, output_path: str) -> None: 8 | """ 9 | Save the given content to a file with a specified version in the appropriate directory. 10 | 11 | The method determines the directory based on the `content_type` and saves the file with a 12 | name formatted as "v{version}.txt". If the content is a dictionary, it is converted to 13 | a formatted JSON string before saving. 14 | 15 | Args: 16 | content (Any): The content to save. If it is a dictionary, it will be converted to 17 | a JSON-formatted string. 18 | content_type (str): The type of content, either 'draft' or 'feedback', which determines 19 | the subdirectory where the file will be saved. 20 | version (int): The version number used in the filename (e.g., 'v1.txt'). 21 | output_path (str): The base path where the directory and file will be created. 22 | 23 | Raises: 24 | Exception: If an error occurs during the file saving process, the exception is logged 25 | and re-raised. 26 | """ 27 | try: 28 | directory = os.path.join(output_path, content_type) 29 | os.makedirs(directory, exist_ok=True) 30 | file_path = os.path.join(directory, f"v{version}.json") 31 | 32 | if isinstance(content, dict): 33 | content = json.dumps(content, indent=4) # Convert dict to a formatted string 34 | 35 | with open(file_path, "w") as file: 36 | file.write(content) 37 | 38 | logger.info(f"Saved {content_type} v{version} to {file_path}") 39 | except Exception as e: 40 | logger.error(f"Failed to save {content_type} v{version}: {e}") 41 | raise -------------------------------------------------------------------------------- /mermaid/semantic_router.mmd: -------------------------------------------------------------------------------- 1 | %%{init: { 2 | 'theme': 'base', 3 | 'themeVariables': { 4 | 'primaryColor': '#E0F2F1', 5 | 'primaryTextColor': '#004D40', 6 | 'primaryBorderColor': '#00796B', 7 | 'lineColor': '#00796B', 8 | 'secondaryColor': '#FFF3E0', 9 | 'tertiaryColor': '#F1F8E9' 10 | } 11 | }}%% 12 | 13 | flowchart TB 14 | classDef default fill:#fff,stroke:#00796B,stroke-width:2px,rx:5,ry:5; 15 | classDef coordinator fill:#E0F2F1,stroke:#00796B,stroke-width:3px,rx:10,ry:10; 16 | classDef intent fill:#B2DFDB,stroke:#00796B,stroke-width:2px; 17 | classDef delegate fill:#FFF3E0,stroke:#FF8F00,stroke-width:2px,rx:8,ry:8; 18 | classDef llm fill:#E8EAF6,stroke:#3F51B5,stroke-width:2px,rx:5,ry:5; 19 | classDef api fill:#FCE4EC,stroke:#C2185B,stroke-width:2px,rx:5,ry:5; 20 | classDef process fill:#F1F8E9,stroke:#558B2F,stroke-width:2px,rx:5,ry:5; 21 | 22 | U["👤 User Input"]:::default 23 | TP["🧭 TravelPlannerAgent
(Coordinator)"]:::coordinator 24 | I{"🔍 Determine Intent"}:::intent 25 | LLM["🧠 LLM"]:::llm 26 | 27 | subgraph Delegates ["Delegates"] 28 | direction TB 29 | F["✈️ FlightAgent"]:::delegate 30 | H["🏨 HotelAgent"]:::delegate 31 | C["🚗 CarRentalAgent"]:::delegate 32 | end 33 | 34 | Q["🔄 Query Reformulation"]:::process 35 | S["🌐 SERP API"]:::api 36 | W["📄 Web Search Results"]:::default 37 | R["📝 Consolidate Response"]:::process 38 | O["💬 Final Output"]:::default 39 | 40 | U --> TP 41 | TP --> I 42 | I -->|Intent=Flight Search| F 43 | TP -.-> |Uses| LLM 44 | Delegates -.-> |Uses| LLM 45 | Delegates --> Q 46 | Q --> S 47 | S --> W 48 | W --> TP 49 | TP --> R 50 | R --> O 51 | 52 | style Delegates fill:#FFF8E1,stroke:#FFA000,stroke-width:2px,rx:10,ry:10; 53 | linkStyle 2 stroke:#FF4081,stroke-width:2px; -------------------------------------------------------------------------------- /src/patterns/dynamic_decomposition/pipeline.py: -------------------------------------------------------------------------------- 1 | from src.patterns.dynamic_decomposition.coordinator import CoordinatorAgent 2 | from src.commons.message import Message 3 | from src.config.logging import logger 4 | import asyncio 5 | 6 | 7 | class Config: 8 | """ 9 | Static class to hold configuration paths for input and output files. 10 | """ 11 | PATTERN_ROOT_PATH = './data/patterns/dynamic_decomposition' 12 | INPUT_FILE = f'{PATTERN_ROOT_PATH}/book.txt' 13 | OUTPUT_FILE = f'{PATTERN_ROOT_PATH}/book_analysis_summary.md' 14 | 15 | 16 | async def pipeline() -> None: 17 | """ 18 | The main pipeline that coordinates the processing of a book by invoking the CoordinatorAgent. 19 | 20 | This function reads the book content, creates a message, sends it to the CoordinatorAgent, 21 | and saves the final analysis summary to a file. 22 | """ 23 | # Initialize the coordinator agent 24 | coordinator = CoordinatorAgent(name="CoordinatorAgent") 25 | 26 | # Read the book content from a file 27 | with open(Config.INPUT_FILE, 'r') as file: 28 | book_content = file.read() 29 | 30 | # Create the message containing the book content 31 | message = Message(content=book_content, sender="User", recipient="CoordinatorAgent") 32 | 33 | # Process the message and obtain the final result (asynchronously handled by CoordinatorAgent) 34 | response = await coordinator.process(message) 35 | 36 | # Save the final summary to a file 37 | with open(Config.OUTPUT_FILE, 'w') as output_file: 38 | output_file.write(response.content) 39 | 40 | # Log and print the completion message 41 | logger.info("Analysis completed. The summary has been saved to 'book_analysis_summary.md'.") 42 | print("Analysis completed. 
The summary has been saved to 'book_analysis_summary.md'.") 43 | 44 | 45 | if __name__ == "__main__": 46 | # Run the pipeline to process the book 47 | asyncio.run(pipeline()) 48 | -------------------------------------------------------------------------------- /mermaid/web_access.mmd: -------------------------------------------------------------------------------- 1 | %%{init: { 2 | 'theme': 'base', 3 | 'themeVariables': { 4 | 'primaryColor': '#e0f7fa', 5 | 'primaryTextColor': '#006064', 6 | 'primaryBorderColor': '#00838f', 7 | 'lineColor': '#00838f', 8 | 'secondaryColor': '#fff3e0', 9 | 'tertiaryColor': '#f1f8e9', 10 | 'fontFamily': 'Arial, sans-serif' 11 | } 12 | }}%% 13 | 14 | flowchart TB 15 | classDef default fill:#fff,stroke:#00838f,stroke-width:2px,rx:5,ry:5; 16 | classDef search fill:#e0f7fa,stroke:#00838f,stroke-width:2px,rx:5,ry:5; 17 | classDef scrape fill:#fff3e0,stroke:#ff8f00,stroke-width:2px,rx:5,ry:5; 18 | classDef summarize fill:#f1f8e9,stroke:#558b2f,stroke-width:2px,rx:5,ry:5; 19 | classDef process fill:#e1f5fe,stroke:#0288d1,stroke-width:2px,rx:5,ry:5; 20 | classDef llm fill:#e8eaf6,stroke:#3f51b5,stroke-width:2px,rx:5,ry:5; 21 | classDef api fill:#ffebee,stroke:#c62828,stroke-width:2px,rx:5,ry:5; 22 | 23 | LLM["🧠 Large Language Model"]:::llm 24 | S["🔍 Web Search"]:::search 25 | A["🌐 SERP API"]:::api 26 | W["🕷️ Web Scrape"]:::scrape 27 | C["📊 Consolidate"]:::summarize 28 | Q["❓ Query
Formulation"]:::process 29 | E["📥 Data
Extraction"]:::process 30 | M["📝 Summary
Generation"]:::process 31 | 32 | LLM -->|0| S & W & C 33 | S -->|1| Q -->|2| A -->|3| W 34 | W -->|4| E -->|5| C 35 | C -->|6| M 36 | 37 | subgraph Search ["Search Phase"] 38 | S 39 | Q 40 | A 41 | end 42 | 43 | subgraph Scrape ["Scrape Phase"] 44 | W 45 | E 46 | end 47 | 48 | subgraph Summarize ["Summarize Phase"] 49 | C 50 | M 51 | end 52 | 53 | style Search fill:#e1f5fe,stroke:#0288d1,stroke-width:2px,rx:10,ry:10; 54 | style Scrape fill:#fff8e1,stroke:#ffa000,stroke-width:2px,rx:10,ry:10; 55 | style Summarize fill:#e8f5e9,stroke:#4caf50,stroke-width:2px,rx:10,ry:10; 56 | 57 | linkStyle default stroke:#00838f,stroke-width:2px; -------------------------------------------------------------------------------- /mermaid/parallel_delegation.mmd: -------------------------------------------------------------------------------- 1 | %%{init: { 2 | 'theme': 'base', 3 | 'themeVariables': { 4 | 'primaryColor': '#E0F2F1', 5 | 'primaryTextColor': '#004D40', 6 | 'primaryBorderColor': '#00796B', 7 | 'lineColor': '#00796B', 8 | 'secondaryColor': '#FFF3E0', 9 | 'tertiaryColor': '#F1F8E9' 10 | } 11 | }}%% 12 | flowchart TB 13 | classDef default fill:#fff,stroke:#00796B,stroke-width:2px,rx:5,ry:5; 14 | classDef coordinator fill:#E0F2F1,stroke:#00796B,stroke-width:3px,rx:10,ry:10; 15 | classDef intent fill:#B2DFDB,stroke:#00796B,stroke-width:2px; 16 | classDef delegate fill:#FFF3E0,stroke:#FF8F00,stroke-width:2px,rx:8,ry:8; 17 | classDef llm fill:#E8EAF6,stroke:#3F51B5,stroke-width:2px,rx:5,ry:5; 18 | classDef api fill:#FCE4EC,stroke:#C2185B,stroke-width:2px,rx:5,ry:5; 19 | classDef process fill:#F1F8E9,stroke:#558B2F,stroke-width:2px,rx:5,ry:5; 20 | classDef note fill:#FFF9C4,stroke:#FBC02D,stroke-width:1px,stroke-dasharray: 5 5; 21 | 22 | U["👤 User Input"]:::default 23 | TP["🧭 TravelPlannerAgent
(Coordinator)"]:::coordinator 24 | ER["🔍 Entity Recognition"]:::intent 25 | E["🏷️ Entities"]:::default 26 | LLM["🧠 LLM"]:::llm 27 | R["🔄 Query Reformulation"]:::process 28 | S["🌐 SERP API"]:::api 29 | W["📄 Web Search Results"]:::default 30 | CO["📝 Consolidate Response"]:::process 31 | O["💬 Final Output"]:::default 32 | 33 | U ==> TP 34 | TP ==> ER 35 | ER ==> E 36 | E ==> Delegates 37 | Delegates ==> R 38 | R ==> S 39 | S ==> W 40 | W ==> TP 41 | TP ==> CO 42 | CO ==> O 43 | 44 | TP -.-o |Uses| LLM 45 | 46 | subgraph Delegates ["Delegates"] 47 | direction LR 48 | F["✈️ FlightAgent"]:::delegate 49 | H["🏨 HotelAgent"]:::delegate 50 | C["🚗 CarRentalAgent"]:::delegate 51 | end 52 | 53 | %% Note about parallel execution 54 | ParallelNote["Note: Delegates are executed in parallel"]:::note 55 | 56 | style Delegates fill:#FFF8E1,stroke:#FFA000,stroke-width:2px,rx:10,ry:10; 57 | -------------------------------------------------------------------------------- /data/patterns/web_access/output/search/dd98fb7e26e193dde3e562f56a96f4f3.json: -------------------------------------------------------------------------------- 1 | { 2 | "Top Results": [ 3 | { 4 | "Position": 1, 5 | "Title": "Find luxury hotels in Fresno, CA", 6 | "Link": "https://www.expedia.com/Fresno-Hotels-Luxury-Hotel.0-0-d602988-tLuxuryHotel.Travel-Guide-Filter-Hotels", 7 | "Snippet": "Pick from 51 Fresno Luxury Hotels and compare room rates, reviews, and availability. Most hotels are fully refundable. \u00b7 San Joaquin Hotel \u00b7 Best Western Town & ..." 8 | }, 9 | { 10 | "Position": 2, 11 | "Title": "THE 10 BEST Hotels in Fresno, CA 2024 (from $71)", 12 | "Link": "https://www.tripadvisor.com/Hotels-g32414-Fresno_California-Hotels.html", 13 | "Snippet": "Popular hotels in Fresno right now \u00b7 1. Best Western Plus Fresno Airport Hotel \u00b7 2. Hotel Piccadilly \u00b7 3. Best Western Plus Fresno Inn \u00b7 4." 14 | }, 15 | { 16 | "Position": 3, 17 | "Title": "Best Hotels in Fresno, CA for 2024", 18 | "Link": "https://travel.usnews.com/hotels/fresno_ca/", 19 | "Snippet": "Best Hotels in Fresno, CA for 2024 \u00b7 DoubleTree by Hilton Fresno Convention Center \u00b7 La Quinta Inn & Suites Fresno Riverpark \u00b7 Homewood Suites by Hilton Fresno." 20 | }, 21 | { 22 | "Position": 4, 23 | "Title": "Recommendation on hotels? : r/fresno", 24 | "Link": "https://www.reddit.com/r/fresno/comments/15u2pdj/recommendation_on_hotels/", 25 | "Snippet": "Springhill Suites by Marriott and Homewood Suites by Hilton are solid choices. Both near FRESNO St & Herndon." 26 | }, 27 | { 28 | "Position": 5, 29 | "Title": "Top 8 Fresno Hotels by IHG - October 2024", 30 | "Link": "https://www.ihg.com/fresno-california", 31 | "Snippet": "Find the Perfect Hotel in Fresno \u00b7 Holiday Inn Express & Suites Fresno Airport \u00b7 Holiday Inn Express & Suites Fresno South \u00b7 Holiday Inn Express & Suites Clovis- ..." 
32 | } 33 | ] 34 | } -------------------------------------------------------------------------------- /src/patterns/parallel_delegation/pipeline.py: -------------------------------------------------------------------------------- 1 | from src.patterns.parallel_delegation.delegates.car_rental_search import CarRentalSearchAgent 2 | from src.patterns.parallel_delegation.delegates.flight_search import FlightSearchAgent 3 | from src.patterns.parallel_delegation.delegates.hotel_search import HotelSearchAgent 4 | from src.patterns.parallel_delegation.coordinator import TravelPlannerAgent 5 | from src.commons.message import Message 6 | from src.config.logging import logger 7 | from typing import Optional 8 | import asyncio 9 | 10 | 11 | async def pipeline() -> None: 12 | """ 13 | Initializes sub-agents (flight, hotel, car rental) and the TravelPlannerAgent, 14 | then processes a user query to find travel arrangements. Logs the response or any error encountered. 15 | """ 16 | # Initialize sub-agents 17 | flight_agent = FlightSearchAgent(name="FlightSearchAgent") 18 | hotel_agent = HotelSearchAgent(name="HotelSearchAgent") 19 | car_rental_agent = CarRentalSearchAgent(name="CarRentalSearchAgent") 20 | 21 | # Instantiate the TravelPlannerAgent with sub-agents 22 | travel_planner = TravelPlannerAgent( 23 | name="TravelPlannerAgent", 24 | sub_agents=[flight_agent, hotel_agent, car_rental_agent] 25 | ) 26 | 27 | # Define the user query 28 | user_query: str = "I need a flight from New York to Dallas, a hotel in downtown Dallas, and a rental car for next week." 29 | initial_message = Message(content=user_query, sender="User", recipient="TravelPlannerAgent") 30 | 31 | # Process the query asynchronously 32 | try: 33 | response: Optional[Message] = await travel_planner.process(initial_message) 34 | if response: 35 | logger.info(f"Query: {user_query}") 36 | logger.info(f"Response: {response.content}") 37 | else: 38 | logger.warning("No response generated from the travel planner agent.") 39 | 40 | except Exception as e: 41 | logger.error(f"Error processing query '{user_query}': {e}") 42 | 43 | 44 | if __name__ == "__main__": 45 | # Execute the async pipeline 46 | asyncio.run(pipeline()) 47 | -------------------------------------------------------------------------------- /src/patterns/task_decomposition/README.md: -------------------------------------------------------------------------------- 1 | # Pattern 6 - Task Decomposition 2 | 3 | ## Overview 4 | 5 | The **Task Decomposition** is a design pattern where a *Coordinator Agent* manages the execution of a complex task by dividing it into multiple independent subtasks. In this pattern, the tasks are provided by the user rather than being automatically deduced by the coordinator. Each subtask is then processed by a separate *Sub-Task Agent* in parallel. After all subtasks are completed, the coordinator gathers and combines the results to produce the final output. 6 | 7 | This pattern is beneficial for enhancing efficiency and scalability, especially for tasks that can be divided into smaller, independently executable units. 8 | 9 |

10 | ![Task Decomposition](../../../img/framework/task_decomposition.png) 11 |
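To make the flow concrete before diving into the components, here is a minimal, self-contained sketch of the pattern's fan-out/fan-in shape. All names here are illustrative stand-ins, not the repository's actual `CoordinatorAgent`/`SubTaskAgent` classes, and the LLM call is stubbed:

```python
import asyncio
from typing import Dict, List


async def fake_llm(prompt: str) -> str:
    """Stand-in for a real model call (e.g., Gemini via a response generator)."""
    await asyncio.sleep(0.1)  # simulate model latency
    return f"result for: {prompt[:40]}..."


async def run_subtask(document: str, task: str) -> str:
    # Each sub-task agent processes one user-provided subtask independently.
    return await fake_llm(f"Document:\n{document}\n\nTask: {task}")


async def coordinate(document: str, subtasks: List[str]) -> Dict[str, str]:
    # Fan out: one concurrent worker per user-provided subtask.
    results = await asyncio.gather(*(run_subtask(document, t) for t in subtasks))
    # Fan in: combine the per-subtask results into a single structured output.
    return dict(zip(subtasks, results))


if __name__ == "__main__":
    doc = "Some long document text..."
    tasks = ["Extract characters", "List themes", "Summarize the plot"]
    print(asyncio.run(coordinate(doc, tasks)))
```

The key property is that the subtask list comes from the user; the coordinator only schedules the workers and merges their results.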

12 | 13 | ## Key Components 14 | 15 | ### CoordinatorAgent 16 | 17 | - Receives the complex task input and subtask definitions from the user 18 | - Manages the decomposition of the task based on user-provided subtasks 19 | - Spawns sub-agents for each subtask 20 | - Waits until all sub-agents complete their assigned tasks 21 | - Combines results from all sub-agents into the final output 22 | 23 | ### SubTaskAgent 24 | 25 | - Receives a specific subtask from the coordinator 26 | - Processes the subtask independently 27 | - Returns the result to the coordinator 28 | 29 | ## Process Flow 30 | 31 | 1. The CoordinatorAgent receives an input message containing the document content. 32 | 2. The coordinator decomposes the task into subtasks. 33 | 3. For each subtask: 34 | - The coordinator creates a SubTaskAgent. 35 | - The coordinator sends a message to the SubTaskAgent with the subtask details. 36 | 4. The coordinator creates tasks for all SubTaskAgents to process their subtasks concurrently. 37 | 5. Each SubTaskAgent: 38 | - Extracts the document and task from the received message. 39 | - Calls the LLM to process the subtask. 40 | - Returns the extraction result as a message. 41 | 6. The coordinator waits for all SubTaskAgents to complete their tasks. 42 | 7. Once all results are collected, the coordinator combines them into a structured summary. 43 | 8. The coordinator returns the final combined result as a message to the original sender. -------------------------------------------------------------------------------- /src/patterns/web_access/factory.py: -------------------------------------------------------------------------------- 1 | from src.patterns.web_access.summarize import WebContentSummarizeAgent 2 | from src.patterns.web_access.search import WebSearchAgent 3 | from src.patterns.web_access.scrape import WebScrapeAgent 4 | from src.patterns.web_access.tasks import SummarizeTask 5 | from src.patterns.web_access.tasks import SearchTask 6 | from src.patterns.web_access.tasks import ScrapeTask 7 | from src.config.logging import logger 8 | 9 | 10 | class TaskFactory: 11 | """ 12 | Factory class to create instances of various web-related tasks. 13 | This class encapsulates the logic for creating different tasks, such as search, scrape, and summarize. 14 | """ 15 | 16 | @staticmethod 17 | def create_search_task() -> SearchTask: 18 | """ 19 | Creates and returns a new SearchTask instance using WebSearchAgent. 20 | 21 | Returns: 22 | SearchTask: An instance of the SearchTask class, implemented by WebSearchAgent. 23 | """ 24 | try: 25 | logger.info("Creating search task (WebSearchAgent).") 26 | return WebSearchAgent() 27 | except Exception as e: 28 | logger.error(f"Error while creating search task: {str(e)}") 29 | raise 30 | 31 | @staticmethod 32 | def create_scrape_task() -> ScrapeTask: 33 | """ 34 | Creates and returns a new ScrapeTask instance using WebScrapeAgent. 35 | 36 | Returns: 37 | ScrapeTask: An instance of the ScrapeTask class, implemented by WebScrapeAgent. 38 | """ 39 | try: 40 | logger.info("Creating scrape task (WebScrapeAgent).") 41 | return WebScrapeAgent() 42 | except Exception as e: 43 | logger.error(f"Error while creating scrape task: {str(e)}") 44 | raise 45 | 46 | @staticmethod 47 | def create_summarize_task() -> SummarizeTask: 48 | """ 49 | Creates and returns a new SummarizeTask instance using WebContentSummarizeAgent. 50 | 51 | Returns: 52 | SummarizeTask: An instance of the SummarizeTask class, implemented by WebContentSummarizeAgent. 
53 | """ 54 | try: 55 | logger.info("Creating summarize task (WebContentSummarizeAgent).") 56 | return WebContentSummarizeAgent() 57 | except Exception as e: 58 | logger.error(f"Error while creating summarize task: {str(e)}") 59 | raise 60 | -------------------------------------------------------------------------------- /src/patterns/task_decomposition/delegates.py: -------------------------------------------------------------------------------- 1 | from src.patterns.task_decomposition.agent import Agent 2 | from src.llm.generate import ResponseGenerator 3 | from src.commons.message import Message 4 | from src.config.logging import logger 5 | import asyncio 6 | 7 | 8 | class SubTaskAgent(Agent): 9 | """ 10 | An agent that processes a specific subtask of the document extraction 11 | by invoking an LLM to extract the required information. 12 | """ 13 | 14 | def __init__(self, name: str) -> None: 15 | """ 16 | Initializes the SubTaskAgent. 17 | """ 18 | super().__init__(name) 19 | logger.info(f"{self.name} initialized.") 20 | 21 | async def process(self, message: Message) -> Message: 22 | """ 23 | Processes the assigned subtask of the document by calling the LLM. 24 | """ 25 | logger.info(f"{self.name} processing subtask.") 26 | subtask = message.content 27 | 28 | # Extract the document and task from the subtask 29 | document = subtask.get("document") 30 | task = subtask.get("task") 31 | 32 | if not document or not task: 33 | return Message( 34 | content="Invalid subtask: Missing document or task.", 35 | sender=self.name, 36 | recipient=message.sender 37 | ) 38 | 39 | # Prepare the LLM input 40 | llm_input = f"Document:\n{document}\n\nTask: {task}" 41 | logger.info(f"Calling LLM for task: {task}") 42 | 43 | try: 44 | response_generator = ResponseGenerator() 45 | 46 | # Define a blocking function to be run in a separate thread 47 | def blocking_call(): 48 | return response_generator.generate_response( 49 | model_name='gemini-1.5-flash-001', 50 | system_instruction='', 51 | contents=[llm_input] 52 | ).text.strip() 53 | 54 | # Run the blocking LLM call in a separate thread 55 | extraction_result = await asyncio.to_thread(blocking_call) 56 | 57 | except Exception as e: 58 | logger.error(f"LLM call failed: {str(e)}") 59 | extraction_result = f"Failed to extract information for task: {task}" 60 | 61 | # Return the extraction result as a message 62 | return Message( 63 | content=extraction_result, 64 | sender=self.name, 65 | recipient=message.sender 66 | ) 67 | -------------------------------------------------------------------------------- /data/patterns/dag_orchestration/trace/task3.json: -------------------------------------------------------------------------------- 1 | { 2 | "extracted_items": [ 3 | { 4 | "id": "doc1", 5 | "key_info": [ 6 | { 7 | "characters": [ 8 | "Amelia" 9 | ], 10 | "themes": [ 11 | "Resilience", 12 | "Legacy", 13 | "Duty", 14 | "Courage", 15 | "Loss", 16 | "Isolation" 17 | ], 18 | "plot_points": [ 19 | "Amelia grows up in a remote village with her father, the lighthouse keeper.", 20 | "Her mother's absence is a major factor in her life.", 21 | "Amelia's father falls ill, leaving her to take over his duties.", 22 | "Amelia faces doubts from the villagers but remains determined.", 23 | "She rescues a fishing boat during a storm, proving her strength and earning the villagers' respect.", 24 | "Amelia realizes her purpose and legacy as the lighthouse keeper." 
25 | ] 26 | } 27 | ] 28 | }, 29 | { 30 | "id": "doc2", 31 | "key_info": [ 32 | { 33 | "characters": [ 34 | "Marcus" 35 | ], 36 | "themes": [ 37 | "Redemption", 38 | "Guilt", 39 | "Forgiveness", 40 | "Art as Therapy", 41 | "The Power of Art" 42 | ], 43 | "plot_points": [ 44 | "Marcus is a reclusive artist haunted by a past accident.", 45 | "He receives a letter from a dying friend who wishes to see his art.", 46 | "Marcus reluctantly agrees to paint and finds catharsis through the process.", 47 | "The painting becomes a symbol of his redemption and renewed purpose.", 48 | "Marcus delivers the painting to his friend and rediscovers his passion for art." 49 | ] 50 | } 51 | ] 52 | }, 53 | { 54 | "id": "doc3", 55 | "key_info": [ 56 | { 57 | "characters": [ 58 | "Elena Reyes" 59 | ], 60 | "themes": [ 61 | "Lost civilizations", 62 | "Past and present", 63 | "Identity", 64 | "Fate" 65 | ], 66 | "plot_points": [ 67 | "Elena discovers an ancient manuscript", 68 | "Elena experiences strange occurrences and dreams", 69 | "Elena is driven by an inexplicable compulsion to decipher the manuscript", 70 | "Elena vanishes after translating the final passage", 71 | "Elena's disappearance is linked to an ancient city and a mural depicting her as a high priestess" 72 | ] 73 | } 74 | ] 75 | } 76 | ] 77 | } -------------------------------------------------------------------------------- /src/patterns/semantic_router/pipeline.py: -------------------------------------------------------------------------------- 1 | from src.patterns.semantic_router.delegates.car_rental_search import CarRentalSearchAgent 2 | from src.patterns.semantic_router.delegates.flight_search import FlightSearchAgent 3 | from src.patterns.semantic_router.delegates.hotel_search import HotelSearchAgent 4 | from src.patterns.semantic_router.coordinator import TravelPlannerAgent 5 | from src.commons.message import Message 6 | from src.config.logging import logger 7 | from typing import Union 8 | from typing import List 9 | 10 | 11 | class Pipeline: 12 | def __init__(self): 13 | # Initialize sub-agents 14 | self.flight_search_agent = FlightSearchAgent(name="FlightSearchAgent") 15 | self.hotel_search_agent = HotelSearchAgent(name="HotelSearchAgent") 16 | self.car_rental_search_agent = CarRentalSearchAgent(name="CarRentalSearchAgent") 17 | 18 | # Instantiate the TravelPlannerAgent with sub-agents 19 | self.travel_planner = TravelPlannerAgent( 20 | name="TravelPlannerAgent", 21 | sub_agents=[self.flight_search_agent, self.hotel_search_agent, self.car_rental_search_agent] 22 | ) 23 | 24 | def execute(self, queries: Union[str, List[str]]) -> None: 25 | if isinstance(queries, str): 26 | queries = [queries] 27 | 28 | for query in queries: 29 | try: 30 | message = Message(content=query, sender="User", recipient="TravelPlannerAgent") 31 | response_message = self.travel_planner.process(message) 32 | 33 | logger.info(f"Query: {query}") 34 | logger.info(f"Response: {response_message.content}") 35 | 36 | print(f"\nQuery: {query}") 37 | print(f"Response: {response_message.content}") 38 | print("-" * 50) 39 | 40 | except Exception as e: 41 | logger.error(f"Error processing query '{query}': {e}") 42 | print(f"\nError processing query '{query}': {e}") 43 | 44 | def run(queries: Union[str, List[str]]) -> None: 45 | # Create Pipeline instance and execute queries 46 | pipeline = Pipeline() 47 | pipeline.execute(queries) 48 | 49 | if __name__ == '__main__': 50 | # Test queries 51 | test_queries = [ 52 | "I want to book a flight from Miami to Dallas next week.", 53 | 
"Can you find me a hotel in Frisco, Texas for next week?", 54 | "I need a rental car in Frisco, Texas for a week." 55 | ] 56 | 57 | # Run the pipeline with test queries 58 | # run(test_queries) 59 | 60 | # Example of running with a single query 61 | run("Could you recommend some hotels in Santa Barbara, California for a stay next week?") -------------------------------------------------------------------------------- /src/patterns/web_access/tasks.py: -------------------------------------------------------------------------------- 1 | from src.config.logging import logger 2 | from abc import abstractmethod 3 | from abc import ABC 4 | 5 | 6 | class SearchTask(ABC): 7 | """ 8 | Abstract base class for search tasks. 9 | 10 | Subclasses are required to implement the `run` method, which will define 11 | the specific behavior for executing a search task. 12 | """ 13 | 14 | @abstractmethod 15 | def run(self, model_name: str, query: str) -> None: 16 | """ 17 | Executes the search task with the given model and query. 18 | 19 | Args: 20 | model_name (str): The name of the model to be used for the search. 21 | query (str): The search query. 22 | 23 | Raises: 24 | NotImplementedError: If the subclass does not implement this method. 25 | """ 26 | logger.error("SearchTask run method not implemented.") 27 | raise NotImplementedError("Subclasses must implement the `run` method") 28 | 29 | 30 | class ScrapeTask(ABC): 31 | """ 32 | Abstract base class for scrape tasks. 33 | 34 | Subclasses are required to implement the `run` method, which will define 35 | the specific behavior for executing a scraping task. 36 | """ 37 | 38 | @abstractmethod 39 | def run(self, model_name: str, query: str) -> None: 40 | """ 41 | Executes the scrape task with the given model and query. 42 | 43 | Args: 44 | model_name (str): The name of the model to be used for scraping. 45 | query (str): The query or URL to be scraped. 46 | 47 | Raises: 48 | NotImplementedError: If the subclass does not implement this method. 49 | """ 50 | logger.error("ScrapeTask run method not implemented.") 51 | raise NotImplementedError("Subclasses must implement the `run` method") 52 | 53 | 54 | class SummarizeTask(ABC): 55 | """ 56 | Abstract base class for summarization tasks. 57 | 58 | Subclasses are required to implement the `run` method, which will define 59 | the specific behavior for executing a summarization task. 60 | """ 61 | 62 | @abstractmethod 63 | def run(self, model_name: str, query: str) -> None: 64 | """ 65 | Executes the summarization task with the given model and query. 66 | 67 | Args: 68 | model_name (str): The name of the model to be used for summarization. 69 | query (str): The query or input data to be summarized. 70 | 71 | Raises: 72 | NotImplementedError: If the subclass does not implement this method. 73 | """ 74 | logger.error("SummarizeTask run method not implemented.") 75 | raise NotImplementedError("Subclasses must implement the `run` method") 76 | -------------------------------------------------------------------------------- /src/config/logging.py: -------------------------------------------------------------------------------- 1 | import logging 2 | import os 3 | 4 | 5 | def custom_path_filter(path: str) -> str: 6 | """ 7 | Filters the provided file path to shorten it by removing the project root portion. 8 | 9 | Parameters: 10 | ----------- 11 | path : str 12 | The full file path to be filtered. 13 | 14 | Returns: 15 | -------- 16 | str 17 | The shortened file path, with the project root removed if present. 
18 | """ 19 | project_root = "Agentic-Workflow-Patterns" 20 | 21 | # Find the index of the project root in the path 22 | idx = path.find(project_root) 23 | if idx != -1: 24 | # Extract the portion of the path after the project root 25 | path = path[idx + len(project_root):] 26 | return path 27 | 28 | class CustomLogRecord(logging.LogRecord): 29 | """ 30 | CustomLogRecord modifies the default LogRecord to filter and shorten the file path in log messages. 31 | 32 | Attributes: 33 | ----------- 34 | pathname : str 35 | The full file path where the log message was generated, filtered to remove the project root. 36 | 37 | Methods: 38 | -------- 39 | __init__(*args, **kwargs): 40 | Initializes the custom log record and applies the path filter. 41 | """ 42 | def __init__(self, *args, **kwargs): 43 | super().__init__(*args, **kwargs) 44 | self.pathname = custom_path_filter(self.pathname) 45 | 46 | def setup_logger(log_filename: str = "app.log", log_dir: str = "logs") -> logging.Logger: 47 | """ 48 | Sets up and configures the logger with custom log record handling and file/stream handlers. 49 | 50 | Parameters: 51 | ----------- 52 | log_filename : str, optional 53 | The name of the log file, by default "app.log". 54 | log_dir : str, optional 55 | The directory where log files will be saved, by default "logs". 56 | 57 | Returns: 58 | -------- 59 | logging.Logger 60 | The configured logger instance. 61 | """ 62 | # Ensure the logging directory exists 63 | if not os.path.exists(log_dir): 64 | os.makedirs(log_dir) 65 | 66 | # Define the log file path 67 | log_filepath = os.path.join(log_dir, log_filename) 68 | 69 | # Define the logging configuration 70 | logging.setLogRecordFactory(CustomLogRecord) 71 | logging.basicConfig( 72 | level=logging.INFO, 73 | format="%(asctime)s [%(levelname)s] [%(module)s] [%(pathname)s]: %(message)s", 74 | handlers=[ 75 | logging.StreamHandler(), 76 | logging.FileHandler(log_filepath) 77 | ] 78 | ) 79 | 80 | # Return the configured logger 81 | return logging.getLogger() 82 | 83 | 84 | # Initialize the logger with the custom configuration. 85 | logger = setup_logger() -------------------------------------------------------------------------------- /src/patterns/dag_orchestration/pipeline.py: -------------------------------------------------------------------------------- 1 | from src.patterns.dag_orchestration.coordinator import CoordinatorAgent 2 | from src.commons.message import Message 3 | from src.config.logging import logger 4 | from typing import Any 5 | import asyncio 6 | import json 7 | 8 | 9 | class Config: 10 | """ 11 | Configuration class to hold the paths for DAG file and final report file. 12 | """ 13 | PATTERN_ROOT_PATH = './data/patterns/dag_orchestration' 14 | DAG_FILE_PATH = f"{PATTERN_ROOT_PATH}/dag.yml" 15 | REPORT_FILE_PATH = f"{PATTERN_ROOT_PATH}/final_report.json" 16 | 17 | 18 | async def pipeline() -> None: 19 | """ 20 | Main pipeline function to orchestrate task processing using the Coordinator agent. 21 | This function processes a main task message and saves the final output to a JSON file. 22 | 23 | Steps: 24 | 1. Initializes the Coordinator with a specified DAG file. 25 | 2. Sends a main task message to the Coordinator for processing. 26 | 3. Receives the response and extracts the content. 27 | 4. Saves the final content as a JSON report. 
28 | 29 | Returns: 30 | None 31 | """ 32 | try: 33 | logger.info("Initializing the Coordinator agent with the DAG file.") 34 | coordinator = CoordinatorAgent(name="CoordinatorAgent", dag_file=Config.DAG_FILE_PATH) 35 | 36 | # The main task is to orchestrate the DAG, hence no specific content is needed for the message. 37 | message = Message(content='', sender="User", recipient="CoordinatorAgent") 38 | 39 | logger.info("Sending the main task message to the Coordinator for processing.") 40 | response = await coordinator.process(message) 41 | 42 | final_output = response.content 43 | save_final_report(final_output) 44 | 45 | logger.info("Task completed successfully. The final report has been saved.") 46 | 47 | except Exception as e: 48 | logger.error(f"An error occurred during the pipeline execution: {e}") 49 | raise 50 | 51 | 52 | def save_final_report(report_data: Any) -> None: 53 | """ 54 | Saves the final processed output to a JSON file. 55 | 56 | Args: 57 | report_data (Any): The processed output content to be saved. 58 | 59 | Returns: 60 | None 61 | """ 62 | try: 63 | with open(Config.REPORT_FILE_PATH, 'w') as output_file: 64 | json.dump(report_data, output_file, indent=2) 65 | logger.info(f"Final report saved successfully at {Config.REPORT_FILE_PATH}.") 66 | 67 | except (OSError, json.JSONDecodeError) as save_error: 68 | logger.error(f"Failed to save the final report: {save_error}") 69 | raise 70 | 71 | 72 | if __name__ == "__main__": 73 | """ 74 | Entry point of the script. 75 | """ 76 | logger.info("Starting the pipeline.") 77 | asyncio.run(pipeline()) 78 | logger.info("Pipeline execution completed.") 79 | -------------------------------------------------------------------------------- /src/patterns/dynamic_sharding/delegates.py: -------------------------------------------------------------------------------- 1 | from src.patterns.dynamic_sharding.agent import Agent 2 | from src.patterns.web_access.pipeline import run 3 | from src.commons.message import Message 4 | from src.config.logging import logger 5 | from typing import List 6 | import asyncio 7 | 8 | 9 | class Delegate(Agent): 10 | """ 11 | An agent that processes a shard of entities by fetching information 12 | using web research. 13 | 14 | Attributes: 15 | name (str): The name of the shard processing agent. 16 | """ 17 | 18 | def __init__(self, name: str) -> None: 19 | """ 20 | Initializes the ShardProcessingAgent. 21 | 22 | Args: 23 | name (str): The name of the agent. 24 | """ 25 | super().__init__(name) 26 | logger.info(f"{self.name} initialized.") 27 | 28 | async def process(self, message: Message) -> Message: 29 | """ 30 | Processes the shard by fetching information for each entity in the shard. 31 | 32 | Args: 33 | message (Message): The message containing the shard (list of entities). 34 | 35 | Returns: 36 | Message: A message containing the concatenated information. 
37 | """ 38 | logger.info(f"{self.name} processing shard.") 39 | try: 40 | entities: List[str] = message.content 41 | tasks = [] 42 | 43 | # Create tasks to fetch information for each entity 44 | for entity in entities: 45 | task = asyncio.create_task(self.fetch_entity_info(entity)) 46 | tasks.append(task) 47 | 48 | # Gather information 49 | entity_info = await asyncio.gather(*tasks) 50 | shard_info = "\n\n".join(entity_info) 51 | 52 | return Message(content=shard_info, sender=self.name, recipient=message.sender) 53 | except Exception as e: 54 | logger.error(f"Error in {self.name}: {e}") 55 | return Message( 56 | content="An error occurred while processing the shard.", 57 | sender=self.name, 58 | recipient=message.sender 59 | ) 60 | 61 | async def fetch_entity_info(self, entity: str) -> str: 62 | """ 63 | Fetches information about an entity using web search. 64 | 65 | Args: 66 | entity (str): The name of the entity. 67 | 68 | Returns: 69 | str: Information about the entity. 70 | """ 71 | logger.info(f"{self.name} fetching information for {entity}.") 72 | try: 73 | # Use the run function to perform web search asynchronously 74 | info = await asyncio.to_thread(run, f"{entity} information") 75 | return f"Information about {entity}:\n{info}" 76 | except Exception as e: 77 | logger.error(f"Error fetching information for {entity}: {e}") 78 | return f"Could not fetch information for {entity}." -------------------------------------------------------------------------------- /src/patterns/dynamic_decomposition/delegates.py: -------------------------------------------------------------------------------- 1 | from src.patterns.dynamic_decomposition.agent import Agent 2 | from src.llm.generate import ResponseGenerator 3 | from src.commons.message import Message 4 | from src.config.logging import logger 5 | import asyncio 6 | 7 | 8 | class SubTaskAgent(Agent): 9 | """ 10 | An agent responsible for processing a specific subtask of document extraction 11 | by invoking an LLM to extract the required information. 12 | """ 13 | 14 | def __init__(self, name: str) -> None: 15 | """ 16 | Initializes the SubTaskAgent with the provided name. 17 | 18 | Args: 19 | name (str): The name of the subtask agent. 20 | """ 21 | super().__init__(name) 22 | logger.info(f"{self.name} initialized.") 23 | 24 | async def process(self, message: Message) -> Message: 25 | """ 26 | Processes the assigned subtask of the document by interacting with the LLM to extract information. 27 | 28 | Args: 29 | message (Message): The message containing the subtask details. 30 | 31 | Returns: 32 | Message: A message containing the result of the LLM extraction. 
33 | """ 34 | logger.info(f"{self.name} processing subtask.") 35 | 36 | subtask = message.content 37 | 38 | # Extract the document and the task from the subtask 39 | document = subtask.get("book") 40 | task = subtask.get("task") 41 | 42 | if not document or not task: 43 | logger.error(f"Invalid subtask received by {self.name}: Missing document or task.") 44 | return Message( 45 | content="Invalid subtask: Missing document or task.", 46 | sender=self.name, 47 | recipient=message.sender 48 | ) 49 | 50 | # Prepare the input for the LLM 51 | llm_input = f"Document:\n{document}\n\nTask: {task}" 52 | logger.info(f"Calling LLM for task: {task}") 53 | 54 | try: 55 | response_generator = ResponseGenerator() 56 | 57 | # Define a blocking function to make the LLM call in a separate thread 58 | def blocking_call(): 59 | return response_generator.generate_response( 60 | model_name='gemini-1.5-flash-001', 61 | system_instruction='', 62 | contents=[llm_input] 63 | ).text.strip() 64 | 65 | # Run the blocking LLM call in a separate thread 66 | extraction_result = await asyncio.to_thread(blocking_call) 67 | 68 | except Exception as e: 69 | logger.error(f"LLM call failed for task: {task} - {str(e)}") 70 | extraction_result = f"Failed to extract information for task: {task}" 71 | 72 | # Return the LLM extraction result as a message 73 | return Message( 74 | content=extraction_result, 75 | sender=self.name, 76 | recipient=message.sender 77 | ) 78 | -------------------------------------------------------------------------------- /src/patterns/reflection/README.md: -------------------------------------------------------------------------------- 1 | # Pattern 1: Reflection 2 | 3 | ## Overview 4 | 5 | The **Reflection** pattern implements an iterative content generation and refinement system using an Actor-Critic framework. This pattern enables self-improving content generation through continuous feedback loops between an Actor (content generator) and a Critic (content reviewer). The pattern follows an iterative workflow where the Actor generates content (which can include text, code, or any LLM output), the Critic reviews it, and then both components revise their work based on the accumulated state history. This process continues for a specified number of cycles, leading to progressively refined content. 6 | 7 | **Side Note:** While Reflection can also be implemented in a single-agent setting (as we covered more extensively in our [Medium article on architectural patterns for Text-to-SQL generation](https://medium.com/google-cloud/architectural-patterns-for-text-to-sql-leveraging-llms-for-enhanced-bigquery-interactions-59756a749e15)), this repository focuses on a multi-agent setup involving two LLMs: the Actor, an agent that generates the content using Gemini 1.5 Flash, and the Critic, a larger and more capable model that reviews the content and provides feedback using Gemini 1.5 Pro. 8 | 9 |

10 | ![Reflection](../../../img/framework/reflection.png) 11 |
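Stripped of the model calls, the Actor-Critic loop reduces to the sketch below. The `actor` and `critic` functions are placeholders for the Gemini 1.5 Flash and Gemini 1.5 Pro calls respectively; everything else (names, state shape) is illustrative rather than the repository's actual API:

```python
from dataclasses import dataclass, field
from typing import List


@dataclass
class State:
    """Shared history of all drafts and reviews across cycles."""
    drafts: List[str] = field(default_factory=list)
    reviews: List[str] = field(default_factory=list)


def actor(topic: str, state: State) -> str:
    # Placeholder for the Actor LLM call: drafts from scratch on the first
    # cycle, revises against the latest feedback afterwards.
    if not state.reviews:
        return f"Draft v0 on '{topic}'"
    return f"Draft v{len(state.drafts)} on '{topic}', addressing: {state.reviews[-1]}"


def critic(draft: str, state: State) -> str:
    # Placeholder for the Critic LLM call; in the full pattern it also sees
    # the accumulated state, which is why it is passed in here.
    return f"Review of '{draft}': tighten the structure and add examples."


def run(topic: str, num_cycles: int = 3) -> State:
    state = State()
    for _ in range(num_cycles):
        draft = actor(topic, state)    # generate or revise using the history
        state.drafts.append(draft)
        review = critic(draft, state)  # critique the latest draft
        state.reviews.append(review)
    return state


if __name__ == "__main__":
    history = run("the perils of social media", num_cycles=2)
    print(history.drafts[-1])
```

Each cycle appends to the shared history, so both agents always revise against the full trajectory rather than only the latest artifact.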

12 | 13 | ## Key Components 14 | 15 | ### Actor 16 | 17 | The Actor is responsible for content generation and revision: 18 | - Generates initial content drafts based on a given topic 19 | - Revises drafts based on the Critic's feedback and previous versions 20 | - Maintains version history of drafts 21 | 22 | ### Critic 23 | 24 | The Critic provides feedback and analysis: 25 | - Reviews content produced by the Actor 26 | - Generates detailed feedback and suggestions 27 | - Revises its reviews based on the evolution of content 28 | - Maintains version history of reviews 29 | 30 | ### Pipeline 31 | 32 | The Pipeline (Runner) orchestrates the entire workflow: 33 | - Manages the interaction between Actor and Critic 34 | - Maintains state across multiple revision cycles 35 | - Coordinates the generation-review-revision loop 36 | - Produces final output in markdown format 37 | 38 | ## Process Flow 39 | 40 | 1. **Initialization** 41 | - Runner is created with model configurations, topic, and number of cycles 42 | - Actor and Critic agents are initialized 43 | - State manager is prepared to track workflow history 44 | 45 | 2. **Initial Cycle** 46 | - Actor generates the first draft based on the topic 47 | - Critic reviews the initial draft 48 | - Both outputs are saved to state history 49 | 50 | 3. **Revision Cycles** 51 | - Actor generates a revised draft based on complete history 52 | - Critic provides updated review based on new draft and previous history 53 | - Both new versions are added to state history 54 | - Process repeats for specified number of cycles 55 | 56 | 4. **Final Output** 57 | - Complete history is formatted as markdown 58 | - Contains all drafts and reviews from all cycles -------------------------------------------------------------------------------- /src/patterns/web_access/README.md: -------------------------------------------------------------------------------- 1 | # Pattern 2 - Web Access 2 | 3 | ## Overview 4 | 5 | The **Web Access** pattern implements an agentic workflow for retrieving, processing, and summarizing web content. The pattern establishes a serial pipeline in which scrape depends on the output of search, and summarize depends on the output of scrape. It orchestrates three specialized agents (one per step) that handle the different aspects of web content acquisition and processing - search, scrape, and summarize - passing the query along at every step. It leverages Gemini Flash with Function Calling for search; Gemini is also used to clean up the HTML extracted from the scraped webpages and, finally, to coalesce a summary - so the model is used at all three steps. The pattern relies on the SERP API for web searches and on language models for generating queries and summaries. You can swap in any model of your choice - Gemini 1.5 Pro and Flash are currently supported by the modules. 6 | 7 |

8 | ![Web Access](../../../img/framework/web_access.png) 9 |
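Conceptually, the serial dependency between the three steps reduces to the sketch below. The `search`, `scrape`, and `summarize` functions are stubbed stand-ins for the agents described next, not the repository's actual functions:

```python
from typing import Dict, List


def search(query: str) -> List[Dict]:
    # Stand-in for the SERP API call made by the search agent.
    return [{"title": "Example result", "link": "https://example.com"}]


def scrape(results: List[Dict]) -> List[str]:
    # Stand-in for concurrent page fetching plus LLM-based HTML cleanup.
    return [f"cleaned text from {r['link']}" for r in results]


def summarize(pages: List[str], query: str) -> str:
    # Stand-in for the LLM summarization step.
    return f"Summary for '{query}' over {len(pages)} page(s)."


def run_pipeline(query: str) -> str:
    # Serial dependency: scrape consumes search output, summarize consumes scrape output.
    return summarize(scrape(search(query)), query)


print(run_pipeline("best hotels in Fresno CA"))
```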

10 | 11 | ## Key Components 12 | 13 | ### WebSearchAgent 14 | - Orchestrates search operations using language models 15 | - Generates optimized search queries from user input 16 | - Interfaces with SERP API for web searches 17 | - Saves structured search results for further processing 18 | 19 | ### WebScrapeAgent 20 | - Handles concurrent content extraction from search results 21 | - Implements rate-limited scraping to respect server limits 22 | - Processes and cleans extracted content 23 | - Saves structured content for summarization 24 | 25 | ### WebContentSummarizeAgent 26 | - Processes scraped content using language models 27 | - Generates concise content summaries 28 | - Uses templated prompts for consistent output 29 | - Produces final summarized results 30 | 31 | ### Pipeline 32 | - Coordinates execution of all agents 33 | - Manages data flow between components 34 | - Handles cleanup and error recovery 35 | - Provides simple interface for workflow execution 36 | 37 | ## Process Flow 38 | 39 | 1. **Search Phase** 40 | - User query is received by the pipeline 41 | - WebSearchAgent generates optimized search instructions 42 | - SERP API returns search results 43 | - Results are saved for scraping 44 | 45 | 2. **Scrape Phase** 46 | - WebScrapeAgent loads search results 47 | - Content is extracted from web pages concurrently 48 | - Extracted content is cleaned and processed 49 | - Structured content is saved for summarization 50 | 51 | 3. **Summarize Phase** 52 | - WebContentSummarizeAgent loads scraped content 53 | - Language model generates content summary 54 | - Final summary is saved and returned 55 | - Pipeline completes execution 56 | 57 | Effectively, this pattern shows tool use by agents - later other patterns can leverage this pipeline for web access. The entire process can be executed with a single function call: 58 | ```python 59 | from src.patterns.web_access.pipeline import run 60 | 61 | # Executes the complete pipeline: search -> scrape -> summarize 62 | summary = run("search query", model_name="gemini-1.5-flash-001") 63 | ``` -------------------------------------------------------------------------------- /src/llm/factory.py: -------------------------------------------------------------------------------- 1 | from vertexai.generative_models import GenerativeModel 2 | from src.config.logging import logger 3 | from abc import abstractmethod 4 | from typing import Optional 5 | from abc import ABC 6 | 7 | 8 | class ModelFactory(ABC): 9 | """ 10 | Abstract base class for creating generative models. 11 | 12 | This class defines the interface for creating generative models, 13 | ensuring that subclasses implement the `create_model` method. 14 | """ 15 | 16 | @abstractmethod 17 | def create_model(self, model_name: str, system_instruction: str) -> GenerativeModel: 18 | """ 19 | Creates and returns an instance of a GenerativeModel. 20 | 21 | Args: 22 | model_name (str): The name of the model to create. 23 | system_instruction (str): The system instruction to initialize the model with. 24 | 25 | Returns: 26 | -------- 27 | GenerativeModel: An instance of the GenerativeModel. 28 | 29 | Raises: 30 | ------- 31 | NotImplementedError: If the method is not implemented by a subclass. 32 | """ 33 | raise NotImplementedError("Subclasses must implement the `create_model` method") 34 | 35 | 36 | class VertexAIModelFactory(ModelFactory): 37 | """ 38 | Concrete implementation of the ModelFactory for Vertex AI models. 
39 | 40 | This class is responsible for creating instances of GenerativeModel specific to Vertex AI. 41 | """ 42 | 43 | def create_model(self, model_name: str, system_instruction: str) -> GenerativeModel: 44 | """ 45 | Creates and returns an instance of a Vertex AI GenerativeModel. 46 | 47 | Args: 48 | model_name (str): The name of the Vertex AI model to create. 49 | system_instruction (str): The system instruction to initialize the model with. 50 | 51 | Returns: 52 | -------- 53 | GenerativeModel: An instance of the Vertex AI GenerativeModel. 54 | 55 | Raises: 56 | ------- 57 | Exception: If there is an error during model creation, it logs the error and re-raises it. 58 | """ 59 | try: 60 | return GenerativeModel(model_name, system_instruction=system_instruction) 61 | except Exception as e: 62 | logger.error(f"Error creating GenerativeModel: {e}") 63 | raise 64 | 65 | 66 | class ModelFactoryProvider: 67 | """ 68 | Singleton provider for the ModelFactory. 69 | 70 | This class ensures that only one instance of the ModelFactory is created, 71 | providing a global access point for it. 72 | """ 73 | _instance: Optional[ModelFactory] = None 74 | 75 | @staticmethod 76 | def get_instance() -> ModelFactory: 77 | """ 78 | Returns the singleton instance of the ModelFactory. 79 | 80 | If no instance exists, it creates one. 81 | 82 | Returns: 83 | -------- 84 | ModelFactory: The singleton instance of the ModelFactory. 85 | """ 86 | if ModelFactoryProvider._instance is None: 87 | ModelFactoryProvider._instance = VertexAIModelFactory() 88 | return ModelFactoryProvider._instance 89 | -------------------------------------------------------------------------------- /src/patterns/dynamic_decomposition/README.md: -------------------------------------------------------------------------------- 1 | # Pattern 7 - Dynamic Decomposition 2 | 3 | ## Overview 4 | 5 | **Dynamic Decomposition** is an advanced design pattern where a Coordinator Agent autonomously decomposes a complex task into multiple subtasks without predefined structures. The coordinator uses a Large Language Model (LLM) to generate subtasks, which are then processed by separate Sub-Task Agents in parallel. After all subtasks are completed, the coordinator gathers and combines the results to produce a structured summary. 6 | This is a natural variant of Pattern 6 (Task Decomposition), covered previously, in which the subtasks were handed to the coordinator agent by a human user. Dynamic Decomposition is effective when the task at hand is not clearly specified up front, i.e., it cannot be strictly broken into a known list of subtasks in the right order ahead of time. 7 | 8 | Suppose we don't have this information in hand, or the number of subtasks and their order of execution is large - we can let the coordinator handle this, spawning and delegating subtasks accordingly. This also ties back to the previous article on designing a ReAct agent from scratch (https://medium.com/google-cloud/building-react-agents-from-scratch-a-hands-on-guide-using-gemini-ffe4621d90ae), where we saw how to design agents that break a main task into subtasks and execute them successfully. 9 | 10 |

11 | ![Dynamic Decomposition](../../../img/framework/dynamic_decomposition.png) 12 |
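The essential difference from Pattern 6 is that the plan itself comes from the model. A minimal sketch follows; the stubbed LLM and its canned JSON plan are hypothetical, not the repository's actual `ResponseGenerator` output:

```python
import asyncio
import json
from typing import Dict


async def fake_llm(prompt: str) -> str:
    """Stand-in for a real model call; returns a canned JSON plan for planning prompts."""
    await asyncio.sleep(0.1)
    if prompt.startswith("List subtasks"):
        return json.dumps({"task1": "Extract characters", "task2": "List themes"})
    return f"result for: {prompt[:40]}..."


async def decompose(document: str) -> Dict[str, str]:
    # The coordinator asks the model itself to plan the work, then parses the JSON.
    raw = await fake_llm(f"List subtasks as JSON for analyzing:\n{document}")
    return json.loads(raw)


async def coordinate(document: str) -> Dict[str, str]:
    subtasks = await decompose(document)  # the LLM decides what the subtasks are
    results = await asyncio.gather(       # one sub-agent per generated subtask, in parallel
        *(fake_llm(f"Document:\n{document}\n\nTask: {t}") for t in subtasks.values())
    )
    return dict(zip(subtasks.values(), results))


if __name__ == "__main__":
    print(asyncio.run(coordinate("A long novel...")))
```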

13 | 14 | ## Key Components 15 | 16 | ### **CoordinatorAgent** 17 | - **Receives the complex task input**. 18 | - **Analyzes the task** using an LLM to dynamically determine necessary subtasks. 19 | - **Decomposes the task** into independent subtasks. 20 | - **Spawns sub-agents** for each identified subtask. 21 | - **Waits** until all sub-agents complete their assigned tasks. 22 | - **Combines results** from all sub-agents into a structured summary. 23 | 24 | ### **SubTaskAgent** 25 | - **Receives** a specific subtask from the coordinator. 26 | - **Processes** the subtask by interacting with an LLM. 27 | - **Extracts information** or performs analysis based on the assigned task. 28 | - **Returns the result** to the coordinator. 29 | 30 | ### **ResponseGenerator** 31 | - Interfaces with the LLM to generate subtasks and process individual subtasks. 32 | 33 | ### **Message** 34 | - Represents messages exchanged between agents, containing content, sender, and recipient information. 35 | 36 | ## Process Flow 37 | 38 | 1. The coordinator receives the complex task input. 39 | 2. It uses an LLM to generate subtasks for analyzing or processing the input. 40 | 3. The LLM output is parsed from JSON format into a dictionary of subtasks. 41 | 4. Sub-agents are created for each subtask: 42 | a. Each sub-agent receives a message with the task details. 43 | b. The sub-agent prepares an input for the LLM based on its specific task. 44 | c. The LLM is called to process the task or extract information. 45 | d. The result is returned to the coordinator. 46 | 5. All sub-agents execute their tasks in parallel. 47 | 6. The coordinator collects results from all sub-agents. 48 | 7. A final structured summary is created, combining all subtask results. -------------------------------------------------------------------------------- /src/patterns/parallel_delegation/README.md: -------------------------------------------------------------------------------- 1 | # Pattern 4 - Parallel Delegation 2 | 3 | ## Overview 4 | 5 | The **Parallel Delegation** pattern implements an agentic workflow that processes complex queries by first identifying distinct entities through Named Entity Recognition (NER), then delegating these entities to specialized agents for parallel processing. This pattern is particularly effective for scenarios where multiple independent sub-tasks can be executed concurrently, such as travel planning, where flight, hotel, and car rental searches can be performed simultaneously. This is a natural variant of Pattern 3: it reuses the same use case, but the coordinator behaves differently - instead of routing to a single sub-agent based on the identified intent, it passes the entities identified during the initial query analysis directly to the corresponding sub-agents it controls, which all run concurrently. As in Pattern 3, all sub-agents use the web access pipeline covered in Pattern 2. The pattern leverages asynchronous processing and parallel execution to optimize performance while maintaining a coordinated workflow through a central coordinator agent. 6 | 7 |

8 | ![Parallel Delegation](../../../img/framework/parallel_delegation.png) 9 |
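The NER-then-fan-out control flow can be sketched as follows. Here `extract_entities` stands in for the coordinator's LLM-based NER step and `handle` for a specialized sub-agent; all names, and the example entities, are illustrative:

```python
import asyncio
from typing import Dict


async def extract_entities(query: str) -> Dict[str, str]:
    # Stand-in for the coordinator's LLM-based NER step; this output is hypothetical.
    return {
        "FLIGHT": "New York to Dallas",
        "HOTEL": "downtown Dallas",
        "CAR_RENTAL": "one week",
    }


async def handle(entity_type: str, text: str) -> str:
    # Stand-in for a specialized sub-agent (query reformulation + web search).
    await asyncio.sleep(0.1)
    return f"{entity_type}: results for '{text}'"


async def plan(query: str) -> str:
    entities = await extract_entities(query)
    # All sub-agents start at once; none waits on another.
    partials = await asyncio.gather(
        *(handle(etype, text) for etype, text in entities.items())
    )
    return "\n".join(partials)  # consolidation into a single response


if __name__ == "__main__":
    print(asyncio.run(plan("Flight NYC to Dallas, hotel downtown, rental car for a week")))
```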

10 | 11 | ## Key Components 12 | 13 | ### TravelPlannerAgent (Coordinator) 14 | - Performs Named Entity Recognition (NER) on incoming queries 15 | - Identifies distinct entities for parallel processing 16 | - Coordinates asynchronous delegation to specialized agents 17 | - Consolidates parallel results into a coherent response 18 | - Manages entity types: 19 | - FLIGHT 20 | - HOTEL 21 | - CAR_RENTAL 22 | - UNKNOWN 23 | 24 | ### Specialized Sub-Agents 25 | - **FlightSearchAgent** 26 | - Processes flight-related entities asynchronously 27 | - Generates optimized flight search queries 28 | - Returns flight information independently 29 | 30 | - **HotelSearchAgent** 31 | - Handles hotel-related entities asynchronously 32 | - Processes accommodation requests 33 | - Returns hotel information independently 34 | 35 | - **CarRentalSearchAgent** 36 | - Manages car rental entities asynchronously 37 | - Processes vehicle rental requests 38 | - Returns car rental options independently 39 | 40 | 41 | 42 | ### Asynchronous Pipeline 43 | - Orchestrates the parallel workflow 44 | - Initializes all agents 45 | - Manages asynchronous message flow 46 | - Handles concurrent processing of entities 47 | 48 | ## Process Flow 49 | 50 | 1. **Entity Recognition** 51 | - User query is received by TravelPlannerAgent 52 | - NER is performed to identify distinct entities 53 | - Entities are categorized by type (flight, hotel, car rental) 54 | 55 | 2. **Parallel Delegation** 56 | - Identified entities are distributed to specialized agents 57 | - Each agent receives relevant entities asynchronously 58 | - All agents begin processing concurrently 59 | - No waiting for other agents to complete 60 | 61 | 3. **Concurrent Processing** 62 | - Each sub-agent independently: 63 | - Generates optimized search queries 64 | - Performs web searches 65 | - Processes results 66 | - Returns findings 67 | 68 | 4. **Response Consolidation** 69 | - Coordinator awaits all parallel processes 70 | - Collects results as they complete 71 | - Consolidates information into coherent response 72 | - Returns unified result to user -------------------------------------------------------------------------------- /src/patterns/semantic_router/delegates/hotel_search.py: -------------------------------------------------------------------------------- 1 | from src.patterns.web_access.pipeline import run as web_search 2 | from src.patterns.semantic_router.agent import Agent 3 | from src.commons.message import Message 4 | from src.utils.io import save_response 5 | from src.config.logging import logger 6 | from typing import Optional 7 | import json 8 | 9 | 10 | class HotelSearchAgent(Agent): 11 | """ 12 | Agent responsible for handling hotel search queries, generating a web search query, 13 | and returning summarized results. 14 | """ 15 | 16 | def process(self, message: Message) -> Message: 17 | """ 18 | Processes hotel search queries by generating a web search query and returning summarized results. 19 | 20 | :param message: The incoming message containing the hotel query. 21 | :return: A response message with the hotel search results or an error message. 
22 | """ 23 | logger.info(f"{self.name} processing message: '{message.content}'") 24 | query = message.content 25 | try: 26 | template = self.template_manager.create_template('delegate', 'hotel_search') 27 | system_instructions = template.get('system', '') 28 | response_schema = template.get('schema', '') 29 | user_instructions = self.template_manager.fill_template(template.get('user', ''), query=query) 30 | contents = [user_instructions] 31 | 32 | logger.info(f"Generating response for hotel query: '{query}'") 33 | response = self.response_generator.generate_response( 34 | 'gemini-1.5-flash-001', system_instructions, contents, response_schema 35 | ) 36 | 37 | try: 38 | out_dict = json.loads(response.text.strip()) 39 | except json.JSONDecodeError as decode_error: 40 | logger.error(f"Failed to decode JSON response: {decode_error}") 41 | return Message( 42 | content="Error in parsing response. Please try again later.", 43 | sender=self.name, 44 | recipient="TravelPlannerAgent" 45 | ) 46 | 47 | web_search_query: Optional[str] = out_dict.get('web_search_query') 48 | save_response('./data/patterns/semantic_router/output', 'delegate', 'hotel_search', out_dict, 'json') 49 | if not web_search_query: 50 | logger.warning("Web search query missing from the response.") 51 | return Message( 52 | content="Unable to find relevant hotel information at this time.", 53 | sender=self.name, 54 | recipient="TravelPlannerAgent" 55 | ) 56 | 57 | logger.info(f"Running web search for query: '{web_search_query}'") 58 | web_search_results_summary = web_search(web_search_query) 59 | save_response('./data/patterns/semantic_router/output', 'delegate', 'hotel_search', web_search_results_summary, 'txt') 60 | return Message(content=web_search_results_summary, sender=self.name, recipient="TravelPlannerAgent") 61 | 62 | except Exception as e: 63 | logger.error(f"Error in HotelSearchAgent: {e}") 64 | return Message( 65 | content="I apologize, but I couldn't process the hotel information at this time.", 66 | sender=self.name, 67 | recipient="TravelPlannerAgent" 68 | ) 69 | -------------------------------------------------------------------------------- /src/patterns/dynamic_sharding/coordinator.py: -------------------------------------------------------------------------------- 1 | from src.patterns.dynamic_sharding.delegates import Delegate 2 | from src.patterns.dynamic_sharding.agent import Agent 3 | from src.commons.message import Message 4 | from src.config.logging import logger 5 | from typing import List 6 | import asyncio 7 | 8 | 9 | class Coordinator(Agent): 10 | """ 11 | An agent that coordinates the processing of a list of entities by sharding 12 | the list and dynamically creating sub-agents to process each shard in parallel. 13 | 14 | Attributes: 15 | name (str): The name of the coordinator agent. 16 | """ 17 | 18 | def __init__(self, name: str) -> None: 19 | """ 20 | Initializes the CoordinatorAgent. 21 | 22 | Args: 23 | name (str): The name of the agent. 24 | """ 25 | super().__init__(name) 26 | logger.info(f"{self.name} initialized.") 27 | 28 | async def process(self, message: Message) -> Message: 29 | """ 30 | Processes the incoming message containing entities and shard size, 31 | shards the list, creates sub-agents dynamically, and collects the results. 32 | 33 | Args: 34 | message (Message): The incoming message containing the list and shard size. 35 | 36 | Returns: 37 | Message: A message containing the consolidated entity information. 
38 | """ 39 | logger.info(f"{self.name} processing message.") 40 | try: 41 | # Extract the list of entities and shard size from the message 42 | data = message.content 43 | entities: List[str] = data.get('entities', []) 44 | shard_size: int = data.get('shard_size', 1) 45 | 46 | if not entities: 47 | raise ValueError("No entities provided.") 48 | 49 | # Shard the list 50 | shards = [entities[i:i + shard_size] for i in range(0, len(entities), shard_size)] 51 | logger.info(f"Sharded list into {len(shards)} shards.") 52 | 53 | # Note: You could also incorporate pre-processing logic here that uses Gemini or any LLM as a coordinating task. 54 | 55 | # Create sub-agents dynamically and process shards in parallel 56 | tasks = [] 57 | for idx, shard in enumerate(shards): 58 | agent_name = f"ShardProcessingAgent_{idx}" 59 | agent = Delegate(name=agent_name) 60 | task = asyncio.create_task(agent.process(Message(content=shard, sender=self.name, recipient=agent_name))) 61 | tasks.append(task) 62 | 63 | # Gather results from all sub-agents 64 | sub_responses = await asyncio.gather(*tasks) 65 | 66 | # Consolidate the results 67 | entity_info = [response.content for response in sub_responses if response.content] 68 | 69 | # Note: The coordinator doesn't use an LLM to post-process or consolidate responses from sub-agents (delegates). However, this feature could be implemented if desired. 70 | 71 | final_response = "\n\n".join(entity_info) 72 | 73 | return Message(content=final_response, sender=self.name, recipient=message.sender) 74 | except Exception as e: 75 | logger.error(f"Error in {self.name}: {e}") 76 | return Message( 77 | content="An error occurred while processing the request.", 78 | sender=self.name, 79 | recipient=message.sender 80 | ) -------------------------------------------------------------------------------- /src/patterns/semantic_router/delegates/flight_search.py: -------------------------------------------------------------------------------- 1 | from src.patterns.web_access.pipeline import run as web_search 2 | from src.patterns.semantic_router.agent import Agent 3 | from src.commons.message import Message 4 | from src.utils.io import save_response 5 | from src.config.logging import logger 6 | from typing import Optional 7 | import json 8 | 9 | 10 | class FlightSearchAgent(Agent): 11 | """ 12 | Agent responsible for handling flight search queries, generating a web search query, 13 | and returning summarized results. 14 | """ 15 | 16 | def process(self, message: Message) -> Message: 17 | """ 18 | Processes flight search queries by generating a web search query and returning summarized results. 19 | 20 | :param message: The incoming message containing the flight query. 21 | :return: A response message with the flight search results or an error message. 
22 | """ 23 | logger.info(f"{self.name} processing message: '{message.content}'") 24 | query = message.content 25 | try: 26 | template = self.template_manager.create_template('delegate', 'flight_search') 27 | system_instructions = template.get('system', '') 28 | response_schema = template.get('schema', '') 29 | user_instructions = self.template_manager.fill_template(template.get('user', ''), query=query) 30 | contents = [user_instructions] 31 | 32 | logger.info(f"Generating response for flight query: '{query}'") 33 | response = self.response_generator.generate_response( 34 | 'gemini-1.5-flash-001', system_instructions, contents, response_schema 35 | ) 36 | 37 | try: 38 | out_dict = json.loads(response.text.strip()) 39 | except json.JSONDecodeError as decode_error: 40 | logger.error(f"Failed to decode JSON response: {decode_error}") 41 | return Message( 42 | content="Error in parsing response. Please try again later.", 43 | sender=self.name, 44 | recipient="TravelPlannerAgent" 45 | ) 46 | 47 | web_search_query: Optional[str] = out_dict.get('web_search_query') 48 | save_response('./data/patterns/semantic_router/output', 'delegate', 'flight_search', out_dict, 'json') 49 | if not web_search_query: 50 | logger.warning("Web search query missing from the response.") 51 | return Message( 52 | content="Unable to find relevant flight information at this time.", 53 | sender=self.name, 54 | recipient="TravelPlannerAgent" 55 | ) 56 | 57 | logger.info(f"Running web search for query: '{web_search_query}'") 58 | web_search_results_summary = web_search(web_search_query) 59 | save_response('./data/patterns/semantic_router/output', 'delegate', 'flight_search', web_search_results_summary, 'txt') 60 | return Message(content=web_search_results_summary, sender=self.name, recipient="TravelPlannerAgent") 61 | 62 | except Exception as e: 63 | logger.error(f"Error in FlightSearchAgent: {e}") 64 | return Message( 65 | content="I apologize, but I couldn't process the flight information at this time.", 66 | sender=self.name, 67 | recipient="TravelPlannerAgent" 68 | ) 69 | -------------------------------------------------------------------------------- /src/patterns/semantic_router/delegates/car_rental_search.py: -------------------------------------------------------------------------------- 1 | from src.patterns.web_access.pipeline import run as web_search 2 | from src.patterns.semantic_router.agent import Agent 3 | from src.commons.message import Message 4 | from src.utils.io import save_response 5 | from src.config.logging import logger 6 | from typing import Optional 7 | import json 8 | 9 | 10 | class CarRentalSearchAgent(Agent): 11 | """ 12 | Agent responsible for handling car rental queries, generating a web search query, 13 | and returning summarized results. 14 | """ 15 | 16 | def process(self, message: Message) -> Message: 17 | """ 18 | Processes car rental queries by generating a web search query and returning summarized results. 19 | 20 | :param message: The incoming message containing the car rental query. 21 | :return: A response message with the car rental search results or an error message. 
22 | """ 23 | logger.info(f"{self.name} processing message: '{message.content}'") 24 | query = message.content 25 | try: 26 | template = self.template_manager.create_template('delegate', 'car_rental_search') 27 | system_instructions = template.get('system', '') 28 | response_schema = template.get('schema', '') 29 | user_instructions = self.template_manager.fill_template(template.get('user', ''), query=query) 30 | contents = [user_instructions] 31 | 32 | logger.info(f"Generating response for car rental query: '{query}'") 33 | response = self.response_generator.generate_response( 34 | 'gemini-1.5-flash-001', system_instructions, contents, response_schema 35 | ) 36 | 37 | try: 38 | out_dict = json.loads(response.text.strip()) 39 | except json.JSONDecodeError as decode_error: 40 | logger.error(f"Failed to decode JSON response: {decode_error}") 41 | return Message( 42 | content="Error in parsing response. Please try again later.", 43 | sender=self.name, 44 | recipient="TravelPlannerAgent" 45 | ) 46 | 47 | web_search_query: Optional[str] = out_dict.get('web_search_query') 48 | save_response('./data/patterns/semantic_router/output', 'delegate', 'car_rental_search', out_dict, 'json') 49 | if not web_search_query: 50 | logger.warning("Web search query missing from the response.") 51 | return Message( 52 | content="Unable to find relevant car rental information at this time.", 53 | sender=self.name, 54 | recipient="TravelPlannerAgent" 55 | ) 56 | 57 | logger.info(f"Running web search for query: '{web_search_query}'") 58 | web_search_results_summary = web_search(web_search_query) 59 | save_response('./data/patterns/semantic_router/output', 'delegate', 'car_rental_search', web_search_results_summary, 'txt') 60 | return Message(content=web_search_results_summary, sender=self.name, recipient="TravelPlannerAgent") 61 | 62 | except Exception as e: 63 | logger.error(f"Error in CarRentalSearchAgent: {e}") 64 | return Message( 65 | content="I apologize, but I couldn't process the car rental information at this time.", 66 | sender=self.name, 67 | recipient="TravelPlannerAgent" 68 | ) 69 | -------------------------------------------------------------------------------- /src/config/setup.py: -------------------------------------------------------------------------------- 1 | from src.config.logging import logger 2 | from typing import Dict, Any 3 | import yaml 4 | import os 5 | 6 | CONFIG_PATH = './config/setup.yml' 7 | 8 | 9 | class _Config: 10 | """ 11 | Singleton class to manage application configuration. 12 | 13 | Attributes: 14 | ----------- 15 | PROJECT_ID : str 16 | The project ID from the configuration file. 17 | REGION : str 18 | The region from the configuration file. 19 | CREDENTIALS_PATH : str 20 | The path to the Google credentials JSON file. 21 | TEXT_GEN_MODEL_NAME : str 22 | The name of the text generation model. 23 | 24 | Methods: 25 | -------- 26 | _load_config(config_path: str) -> Dict[str, Any]: 27 | Load the YAML configuration from the given path. 28 | _set_google_credentials(credentials_path: str) -> None: 29 | Set the Google application credentials environment variable. 30 | """ 31 | 32 | _instance = None 33 | 34 | def __new__(cls, *args, **kwargs): 35 | """ 36 | Ensure that only one instance of the _Config class is created (Singleton pattern). 37 | """ 38 | if not cls._instance: 39 | cls._instance = super(_Config, cls).__new__(cls) 40 | # The following line ensures that the __init__ method is only called once. 
41 | cls._instance.__initialized = False 42 | return cls._instance 43 | 44 | def __init__(self, config_path: str = CONFIG_PATH): 45 | """ 46 | Initialize the _Config class by loading the configuration. 47 | 48 | Parameters: 49 | ----------- 50 | config_path : str 51 | Path to the YAML configuration file. 52 | """ 53 | if self.__initialized: 54 | return 55 | self.__initialized = True 56 | 57 | self.__config = self._load_config(config_path) 58 | self.PROJECT_ID = self.__config['project_id'] 59 | self.REGION = self.__config['region'] 60 | self.CREDENTIALS_PATH = self.__config['credentials_json'] 61 | self._set_google_credentials(self.CREDENTIALS_PATH) 62 | 63 | @staticmethod 64 | def _load_config(config_path: str) -> Dict[str, Any]: 65 | """ 66 | Load the YAML configuration from the given path. 67 | 68 | Parameters: 69 | ----------- 70 | config_path : str 71 | Path to the YAML configuration file. 72 | 73 | Returns: 74 | -------- 75 | Dict[str, Any] 76 | Loaded configuration data. 77 | 78 | Raises: 79 | ------- 80 | Exception 81 | If the configuration file fails to load, logs the error. 82 | """ 83 | try: 84 | with open(config_path, 'r') as file: 85 | return yaml.safe_load(file) 86 | except Exception as e: 87 | logger.error(f"Failed to load the configuration file. Error: {e}") 88 | raise 89 | 90 | @staticmethod 91 | def _set_google_credentials(credentials_path: str) -> None: 92 | """ 93 | Set the Google application credentials environment variable. 94 | 95 | Parameters: 96 | ----------- 97 | credentials_path : str 98 | Path to the Google credentials file. 99 | """ 100 | os.environ['GOOGLE_APPLICATION_CREDENTIALS'] = credentials_path 101 | 102 | 103 | # Create a single instance of the _Config class. 104 | config = _Config() 105 | -------------------------------------------------------------------------------- /src/patterns/parallel_delegation/delegates/flight_search.py: -------------------------------------------------------------------------------- 1 | from src.patterns.parallel_delegation.agent import Agent 2 | from src.patterns.web_access.pipeline import run 3 | from src.commons.message import Message 4 | from src.utils.io import save_response 5 | from src.config.logging import logger 6 | from typing import Dict 7 | from typing import Any 8 | import asyncio 9 | import json 10 | 11 | 12 | class FlightSearchAgent(Agent): 13 | """ 14 | An agent that processes flight search queries, generating a structured response 15 | based on the input query, performing a web search, and returning a summarized result. 16 | 17 | Attributes: 18 | name (str): The name of the agent. 19 | template_manager (TemplateManager): Manages templates for generating queries and instructions. 20 | response_generator (ResponseGenerator): Generates responses using an LLM. 21 | """ 22 | 23 | async def process(self, message: Message) -> Message: 24 | """ 25 | Processes a flight search message by generating a structured response based on 26 | the input query, conducting a web search, and returning a summarized result. 27 | 28 | Args: 29 | message (Message): The incoming message containing the search query. 30 | 31 | Returns: 32 | Message: A message with the summarized flight search results or an error response. 
33 | """ 34 | logger.info(f"{self.name} processing message: {message.content}") 35 | query = message.content 36 | 37 | try: 38 | # Create template for processing the query 39 | template: Dict[str, Any] = self.template_manager.create_template('delegate', 'flight_search') 40 | system_instructions: str = template['system'] 41 | response_schema: Dict[str, Any] = template['schema'] 42 | user_instructions: str = self.template_manager.fill_template(template['user'], query=query) 43 | contents = [user_instructions] 44 | 45 | # Generate response based on the template and query 46 | logger.info(f"Generating response for flight query: {query}") 47 | response = await asyncio.to_thread( 48 | self.response_generator.generate_response, 49 | 'gemini-1.5-flash-001', system_instructions, contents, response_schema 50 | ) 51 | 52 | # Parse the response for a web search query 53 | out_dict: Dict[str, Any] = json.loads(response.text.strip()) 54 | save_response('./data/patterns/parallel_delegation/output', 'delegate', 'flight_search', out_dict, 'json') 55 | web_search_query: str = out_dict.get('web_search_query', '') 56 | if not web_search_query: 57 | raise ValueError("Web search query missing from the response.") 58 | 59 | # Run the web search based on the extracted query 60 | logger.info(f"Running web search for query: {web_search_query}") 61 | web_search_results_summary: str = await asyncio.to_thread(run, web_search_query) 62 | save_response('./data/patterns/parallel_delegation/output', 'delegate', 'flight_search', web_search_results_summary, 'txt') 63 | return Message( 64 | content=web_search_results_summary, 65 | sender=self.name, 66 | recipient="TravelPlannerAgent", 67 | metadata={"entity_type": "FLIGHT"} 68 | ) 69 | 70 | except Exception as e: 71 | # Log and return error message 72 | logger.error(f"Error in {self.name}: {e}") 73 | return Message( 74 | content="I apologize, but I couldn't process the flight information at this time.", 75 | sender=self.name, 76 | recipient="TravelPlannerAgent", 77 | metadata={"entity_type": "FLIGHT"} 78 | ) 79 | -------------------------------------------------------------------------------- /src/patterns/parallel_delegation/delegates/hotel_search.py: -------------------------------------------------------------------------------- 1 | from src.patterns.parallel_delegation.agent import Agent 2 | from src.patterns.web_access.pipeline import run 3 | from src.commons.message import Message 4 | from src.utils.io import save_response 5 | from src.config.logging import logger 6 | from typing import Dict 7 | from typing import Any 8 | import asyncio 9 | import json 10 | 11 | 12 | class HotelSearchAgent(Agent): 13 | """ 14 | An agent that processes hotel search queries, generating a structured response 15 | based on the input query, performing a web search, and returning a summarized result. 16 | 17 | Attributes: 18 | name (str): The name of the agent. 19 | template_manager (TemplateManager): Manages templates for generating queries and instructions. 20 | response_generator (ResponseGenerator): Generates responses using an LLM. 21 | """ 22 | 23 | async def process(self, message: Message) -> Message: 24 | """ 25 | Processes a hotel search message by generating a structured response based on 26 | the input query, conducting a web search, and returning a summarized result. 27 | 28 | Args: 29 | message (Message): The incoming message containing the search query. 30 | 31 | Returns: 32 | Message: A message with the summarized hotel search results or an error response. 
33 | """ 34 | logger.info(f"{self.name} processing message: {message.content}") 35 | query = message.content 36 | 37 | try: 38 | # Create template for processing the query 39 | template: Dict[str, Any] = self.template_manager.create_template('delegate', 'hotel_search') 40 | system_instructions: str = template['system'] 41 | response_schema: Dict[str, Any] = template['schema'] 42 | user_instructions: str = self.template_manager.fill_template(template['user'], query=query) 43 | contents = [user_instructions] 44 | 45 | # Generate response based on the template and query 46 | logger.info(f"Generating response for hotel query: {query}") 47 | response = await asyncio.to_thread( 48 | self.response_generator.generate_response, 49 | 'gemini-1.5-flash-001', 50 | system_instructions, 51 | contents, 52 | response_schema 53 | ) 54 | 55 | # Parse the response for a web search query 56 | out_dict: Dict[str, Any] = json.loads(response.text.strip()) 57 | save_response('./data/patterns/parallel_delegation/output', 'delegate', 'hotel_search', out_dict, 'json') 58 | web_search_query: str = out_dict.get('web_search_query', '') 59 | if not web_search_query: 60 | raise ValueError("Web search query missing from the response.") 61 | 62 | # Run the web search based on the extracted query 63 | logger.info(f"Running web search for query: {web_search_query}") 64 | web_search_results_summary: str = await asyncio.to_thread(run, web_search_query) 65 | save_response('./data/patterns/parallel_delegation/output', 'delegate', 'hotel_search', web_search_results_summary, 'txt') 66 | return Message( 67 | content=web_search_results_summary, 68 | sender=self.name, 69 | recipient="TravelPlannerAgent", 70 | metadata={"entity_type": "HOTEL"} 71 | ) 72 | 73 | except Exception as e: 74 | # Log and return error message 75 | logger.error(f"Error in {self.name}: {e}") 76 | return Message( 77 | content="I apologize, but I couldn't process the hotel information at this time.", 78 | sender=self.name, 79 | recipient="TravelPlannerAgent", 80 | metadata={"entity_type": "HOTEL"} 81 | ) 82 | -------------------------------------------------------------------------------- /src/patterns/semantic_router/README.md: -------------------------------------------------------------------------------- 1 | # Pattern 3 - Semantic Router 2 | 3 | ## Overview 4 | 5 | The **Semantic Routing** pattern implements an intelligent workflow for directing user queries to specialized agents based on intent analysis. At its core, this pattern employs a coordinator-delegate architecture, where a primary TravelPlannerAgent acts as the central orchestrator. 6 | 7 | The key idea lies in how the coordinator leverages Large Language Models (specifically Gemini Pro) to derive user intent and make precise routing decisions. Rather than activating multiple sub-agents, the coordinator selects and fires only the most appropriate specialized agent for each task. In the travel domain, these specialized agents handle specific functions like flight booking, hotel searches, and car rentals. 8 | 9 | Once selected, the sub-agent uses Gemini Pro to reformulate the original user query into a web-search optimized format. This reformulation is crucial as all sub-agents in this pattern utilize the web access pipeline established in Pattern 2, allowing them to retrieve real-time information through structured web searches. For example, a hotel booking query might be reformulated to include specific parameters like location, dates, and amenities that optimize web search results. 
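The reformulation step is constrained by a per-delegate response schema (the actual files live under `data/patterns/semantic_router/delegate/*/response_schema.json` and are not reproduced here). As a minimal sketch of the shape the delegate code relies on — it only reads a `web_search_query` field from the structured output — such a schema might look like:

```json
{
  "type": "object",
  "properties": {
    "web_search_query": {
      "type": "string",
      "description": "The user query rewritten as a web-search-optimized search string."
    }
  },
  "required": ["web_search_query"]
}
```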
10 | 11 | The routing agent performs two crucial functions: First, it conducts intent classification to determine the nature of the user's request. Then, after the selected sub-agent completes its task, the routing agent consolidates the output into a coherent final response. Both the coordinator and the specialized sub-agents are powered by Gemini Pro, ensuring consistent natural language understanding and generation throughout the workflow. 12 | 13 | This architecture ensures that each user request is handled with precision by the most qualified specialized agent, creating an efficient and focused processing pipeline. Instead of engaging multiple agents simultaneously, the system maintains simplicity and efficiency by activating only the most relevant agent for each specific task. 14 |

15 | ![Semantic Router](../../../img/framework/semantic_router.png) 16 |

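To make the routing decision concrete, the sketch below shows the coordinator-delegate dispatch in miniature. All names are hypothetical stand-ins for the repo's `TravelPlannerAgent` and its delegates, and the keyword check stands in for the Gemini-based intent classifier:

```python
import asyncio
from dataclasses import dataclass


@dataclass
class Message:
    content: str
    sender: str
    recipient: str


async def classify_intent(query: str) -> str:
    # Stand-in for the LLM call that labels a query FLIGHT / HOTEL / CAR_RENTAL / UNKNOWN.
    q = query.lower()
    if "flight" in q:
        return "FLIGHT"
    if "hotel" in q:
        return "HOTEL"
    if "car" in q:
        return "CAR_RENTAL"
    return "UNKNOWN"


async def hotel_delegate(message: Message) -> Message:
    # A real delegate reformulates the query and runs the web access pipeline (Pattern 2).
    return Message(f"[hotel results for: {message.content}]",
                   "HotelSearchAgent", "TravelPlannerAgent")


async def route(message: Message, delegates: dict) -> Message:
    intent = await classify_intent(message.content)
    delegate = delegates.get(intent)
    if delegate is None:  # UNKNOWN intent: nothing fires, fall back to a help message
        return Message("Please ask about flights, hotels, or car rentals.",
                       "TravelPlannerAgent", message.sender)
    return await delegate(message)  # exactly one specialized agent is invoked


async def main() -> None:
    reply = await route(Message("Find a hotel in Paris for May", "user", "TravelPlannerAgent"),
                        {"HOTEL": hotel_delegate})
    print(reply.content)


asyncio.run(main())
```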
17 | 18 | ## Key Components 19 | 20 | ### TravelPlannerAgent (Coordinator) 21 | - Determines user intent through semantic analysis 22 | - Routes queries to appropriate specialized agents 23 | - Manages communication between sub-agents 24 | - Consolidates and formats final responses 25 | - Handles intent classification into categories: 26 | - FLIGHT 27 | - HOTEL 28 | - CAR_RENTAL 29 | - UNKNOWN 30 | 31 | ### Specialized Sub-Agents 32 | - **FlightSearchAgent** 33 | - Processes flight-related queries 34 | - Generates optimized flight search parameters 35 | - Returns summarized flight information 36 | 37 | - **HotelSearchAgent** 38 | - Handles hotel booking queries 39 | - Processes accommodation requests 40 | - Returns relevant hotel information 41 | 42 | - **CarRentalSearchAgent** 43 | - Manages car rental inquiries 44 | - Processes vehicle rental requests 45 | - Returns car rental options and details 46 | 47 | ### Pipeline 48 | - Orchestrates the entire workflow 49 | - Initializes all agents 50 | - Manages message flow 51 | - Handles both single and batch query processing 52 | 53 | ## Process Flow 54 | 55 | 1. **Query Reception** 56 | - User submits a travel-related query 57 | - Pipeline creates a message object 58 | - Query is forwarded to TravelPlannerAgent 59 | 60 | 2. **Intent Detection** 61 | - TravelPlannerAgent analyzes query semantics 62 | - Determines specific travel intent 63 | - Routes to appropriate specialized agent 64 | 65 | 3. **Specialized Processing** 66 | - Sub-agent receives routed query 67 | - Generates optimized web search query 68 | - Processes and summarizes results 69 | - Returns formatted response 70 | 71 | 4. **Response Consolidation** 72 | - TravelPlannerAgent receives sub-agent response 73 | - Consolidates information 74 | - Formats final user-friendly response 75 | - Returns completed result 76 | -------------------------------------------------------------------------------- /src/patterns/parallel_delegation/delegates/car_rental_search.py: -------------------------------------------------------------------------------- 1 | from src.patterns.parallel_delegation.agent import Agent 2 | from src.patterns.web_access.pipeline import run 3 | from src.commons.message import Message 4 | from src.utils.io import save_response 5 | from src.config.logging import logger 6 | from typing import Dict 7 | from typing import Any 8 | import asyncio 9 | import json 10 | 11 | 12 | class CarRentalSearchAgent(Agent): 13 | """ 14 | An agent dedicated to handling car rental search queries. The agent processes incoming 15 | messages, generates a query template, invokes a response generator, and performs a web 16 | search based on the generated query. 17 | 18 | Attributes: 19 | name (str): The name of the agent. 20 | template_manager (TemplateManager): Manages templates for generating queries and instructions. 21 | response_generator (ResponseGenerator): Generates responses using an LLM. 22 | """ 23 | 24 | async def process(self, message: Message) -> Message: 25 | """ 26 | Processes a car rental search message by generating a structured response based on 27 | the input query, conducting a web search, and returning a summarized result. 28 | 29 | Args: 30 | message (Message): The incoming message containing the search query. 31 | 32 | Returns: 33 | Message: A message with the summarized car rental search results or an error response. 
34 | """ 35 | logger.info(f"{self.name} processing message: {message.content}") 36 | query = message.content 37 | 38 | try: 39 | # Create template for processing the query 40 | template: Dict[str, Any] = self.template_manager.create_template('delegate', 'car_rental_search') 41 | system_instructions: str = template['system'] 42 | response_schema: Dict[str, Any] = template['schema'] 43 | user_instructions: str = self.template_manager.fill_template(template['user'], query=query) 44 | contents = [user_instructions] 45 | 46 | # Generate response based on the template and query 47 | logger.info(f"Generating response for car rental query: {query}") 48 | response = await asyncio.to_thread( 49 | self.response_generator.generate_response, 50 | 'gemini-1.5-flash-001', 51 | system_instructions, 52 | contents, 53 | response_schema 54 | ) 55 | 56 | # Parse the response for a web search query 57 | out_dict: Dict[str, Any] = json.loads(response.text.strip()) 58 | save_response('./data/patterns/parallel_delegation/output', 'delegate', 'car_rental_search', out_dict, 'json') 59 | web_search_query: str = out_dict.get('web_search_query', '') 60 | if not web_search_query: 61 | raise ValueError("Web search query missing from the response.") 62 | 63 | # Run the web search based on the extracted query 64 | logger.info(f"Running web search for query: {web_search_query}") 65 | web_search_results_summary: str = await asyncio.to_thread(run, web_search_query) 66 | save_response('./data/patterns/parallel_delegation/output', 'delegate', 'car_rental_search', web_search_results_summary, 'txt') 67 | return Message( 68 | content=web_search_results_summary, 69 | sender=self.name, 70 | recipient="TravelPlannerAgent", 71 | metadata={"entity_type": "CAR_RENTAL"} 72 | ) 73 | 74 | except Exception as e: 75 | # Log and return error message 76 | logger.error(f"Error in {self.name}: {e}") 77 | return Message( 78 | content="I apologize, but I couldn't process the car rental information at this time.", 79 | sender=self.name, 80 | recipient="TravelPlannerAgent", 81 | metadata={"entity_type": "CAR_RENTAL"} 82 | ) 83 | -------------------------------------------------------------------------------- /data/patterns/parallel_delegation/coordinator/ner/response_schema.json: -------------------------------------------------------------------------------- 1 | { 2 | "type": "object", 3 | "properties": { 4 | "query": { 5 | "type": "string", 6 | "description": "The original user query." 
7 | }, 8 | "intent": { 9 | "type": "string", 10 | "description": "The detected intent of the query.", 11 | "enum": ["FLIGHT", "HOTEL", "CAR_RENTAL", "UNKNOWN"], 12 | "default": "UNKNOWN" 13 | }, 14 | "entities": { 15 | "type": "object", 16 | "properties": { 17 | "FLIGHT": { 18 | "type": "object", 19 | "properties": { 20 | "duration": { 21 | "type": "string", 22 | "description": "The duration of the trip, if specified.", 23 | "default": "NA" 24 | }, 25 | "destination": { 26 | "type": "string", 27 | "description": "The destination of the trip.", 28 | "default": "NA" 29 | }, 30 | "date": { 31 | "type": "string", 32 | "description": "The date or month of the trip.", 33 | "default": "NA" 34 | }, 35 | "origin": { 36 | "type": "string", 37 | "description": "The origin location for flights, if specified.", 38 | "default": "NA" 39 | }, 40 | "num_passengers": { 41 | "type": "string", 42 | "description": "The number of passengers, if specified.", 43 | "default": "NA" 44 | } 45 | } 46 | }, 47 | "HOTEL": { 48 | "type": "object", 49 | "properties": { 50 | "duration": { 51 | "type": "string", 52 | "description": "The duration of the stay, if specified.", 53 | "default": "NA" 54 | }, 55 | "destination": { 56 | "type": "string", 57 | "description": "The location of the hotel.", 58 | "default": "NA" 59 | }, 60 | "date": { 61 | "type": "string", 62 | "description": "The date or month of the stay.", 63 | "default": "NA" 64 | }, 65 | "num_passengers": { 66 | "type": "string", 67 | "description": "The number of guests, if specified.", 68 | "default": "NA" 69 | }, 70 | "hotel_amenities": { 71 | "type": "string", 72 | "description": "Desired hotel amenities, if specified. Multiple amenities are comma-separated.", 73 | "default": "NA" 74 | } 75 | } 76 | }, 77 | "CAR_RENTAL": { 78 | "type": "object", 79 | "properties": { 80 | "duration": { 81 | "type": "string", 82 | "description": "The duration of the rental, if specified.", 83 | "default": "NA" 84 | }, 85 | "date": { 86 | "type": "string", 87 | "description": "The date or month of the rental.", 88 | "default": "NA" 89 | }, 90 | "car_type": { 91 | "type": "string", 92 | "description": "The type of car for rental, if specified.", 93 | "default": "NA" 94 | }, 95 | "pickup_location": { 96 | "type": "string", 97 | "description": "The pickup location for car rentals, if specified.", 98 | "default": "NA" 99 | }, 100 | "dropoff_location": { 101 | "type": "string", 102 | "description": "The dropoff location for car rentals, if specified.", 103 | "default": "NA" 104 | } 105 | } 106 | }, 107 | "UNKNOWN": { 108 | "type": "object", 109 | "description": "No specific entities for UNKNOWN intent.", 110 | "default": {} 111 | } 112 | } 113 | } 114 | }, 115 | "required": ["query", "intent", "entities"] 116 | } -------------------------------------------------------------------------------- /.gitignore: -------------------------------------------------------------------------------- 1 | # Byte-compiled / optimized / DLL files 2 | __pycache__/ 3 | *.py[cod] 4 | *$py.class 5 | 6 | # C extensions 7 | *.so 8 | 9 | # Venv 10 | .agentic-workflow-patterns 11 | .agentic-workflow-patterns/ 12 | 13 | # credentials 14 | credentials 15 | credentials/ 16 | 17 | # .DS_Store 18 | .DS_Store 19 | .DS_Store/ 20 | 21 | # Distribution / packaging 22 | .Python 23 | build/ 24 | develop-eggs/ 25 | dist/ 26 | downloads/ 27 | eggs/ 28 | .eggs/ 29 | lib/ 30 | lib64/ 31 | parts/ 32 | sdist/ 33 | var/ 34 | wheels/ 35 | share/python-wheels/ 36 | *.egg-info/ 37 | .installed.cfg 38 | *.egg 39 | MANIFEST 40 | 41 | 
# PyInstaller 42 | # Usually these files are written by a python script from a template 43 | # before PyInstaller builds the exe, so as to inject date/other infos into it. 44 | *.manifest 45 | *.spec 46 | 47 | # Installer logs 48 | pip-log.txt 49 | pip-delete-this-directory.txt 50 | 51 | # Unit test / coverage reports 52 | htmlcov/ 53 | .tox/ 54 | .nox/ 55 | .coverage 56 | .coverage.* 57 | .cache 58 | nosetests.xml 59 | coverage.xml 60 | *.cover 61 | *.py,cover 62 | .hypothesis/ 63 | .pytest_cache/ 64 | cover/ 65 | 66 | # Translations 67 | *.mo 68 | *.pot 69 | 70 | # Django stuff: 71 | *.log 72 | local_settings.py 73 | db.sqlite3 74 | db.sqlite3-journal 75 | 76 | # Flask stuff: 77 | instance/ 78 | .webassets-cache 79 | 80 | # Scrapy stuff: 81 | .scrapy 82 | 83 | # Sphinx documentation 84 | docs/_build/ 85 | 86 | # PyBuilder 87 | .pybuilder/ 88 | target/ 89 | 90 | # Jupyter Notebook 91 | .ipynb_checkpoints 92 | 93 | # IPython 94 | profile_default/ 95 | ipython_config.py 96 | 97 | # pyenv 98 | # For a library or package, you might want to ignore these files since the code is 99 | # intended to run in multiple environments; otherwise, check them in: 100 | # .python-version 101 | 102 | # pipenv 103 | # According to pypa/pipenv#598, it is recommended to include Pipfile.lock in version control. 104 | # However, in case of collaboration, if having platform-specific dependencies or dependencies 105 | # having no cross-platform support, pipenv may install dependencies that don't work, or not 106 | # install all needed dependencies. 107 | #Pipfile.lock 108 | 109 | # poetry 110 | # Similar to Pipfile.lock, it is generally recommended to include poetry.lock in version control. 111 | # This is especially recommended for binary packages to ensure reproducibility, and is more 112 | # commonly ignored for libraries. 113 | # https://python-poetry.org/docs/basic-usage/#commit-your-poetrylock-file-to-version-control 114 | #poetry.lock 115 | 116 | # pdm 117 | # Similar to Pipfile.lock, it is generally recommended to include pdm.lock in version control. 118 | #pdm.lock 119 | # pdm stores project-wide configurations in .pdm.toml, but it is recommended to not include it 120 | # in version control. 121 | # https://pdm.fming.dev/latest/usage/project/#working-with-version-control 122 | .pdm.toml 123 | .pdm-python 124 | .pdm-build/ 125 | 126 | # PEP 582; used by e.g. github.com/David-OConnor/pyflow and github.com/pdm-project/pdm 127 | __pypackages__/ 128 | 129 | # Celery stuff 130 | celerybeat-schedule 131 | celerybeat.pid 132 | 133 | # SageMath parsed files 134 | *.sage.py 135 | 136 | # Environments 137 | .env 138 | .venv 139 | env/ 140 | venv/ 141 | ENV/ 142 | env.bak/ 143 | venv.bak/ 144 | 145 | # Spyder project settings 146 | .spyderproject 147 | .spyproject 148 | 149 | # Rope project settings 150 | .ropeproject 151 | 152 | # mkdocs documentation 153 | /site 154 | 155 | # mypy 156 | .mypy_cache/ 157 | .dmypy.json 158 | dmypy.json 159 | 160 | # Pyre type checker 161 | .pyre/ 162 | 163 | # pytype static type analyzer 164 | .pytype/ 165 | 166 | # Cython debug symbols 167 | cython_debug/ 168 | 169 | # PyCharm 170 | # JetBrains specific template is maintained in a separate JetBrains.gitignore that can 171 | # be found at https://github.com/github/gitignore/blob/main/Global/JetBrains.gitignore 172 | # and can be added to the global gitignore or merged into this file. For a more nuclear 173 | # option (not recommended) you can uncomment the following to ignore the entire idea folder. 
174 | #.idea/ 175 | -------------------------------------------------------------------------------- /data/patterns/dag_orchestration/trace/task2.json: -------------------------------------------------------------------------------- 1 | { 2 | "preprocessed_docs": [ 3 | { 4 | "id": "doc1", 5 | "title": "**The Lighthouse Keeper's Daughter**", 6 | "content": "## The Lighthouse Keeper's Daughter\n\nIn a remote coastal village, Amelia grew up in the shadow of the lighthouse her father tended. The rhythmic sweep of the light across dark waters was the heartbeat of her childhood. Her mother's absence, a void as vast as the sea, shaped her quiet resilience.\n\nAt eighteen, Amelia's world shattered when her father fell ill. The lighthouse, their lifeline, threatened to go dark. With trembling hands but unwavering determination, she took up his mantle, climbing the winding stairs each night to keep the light burning.\n\nThe villagers whispered doubts, but Amelia stood firm. Through howling gales and crashing waves, she kept vigil. Her resolve was tested one stormy night when distress signals pierced the gloom. A fishing boat, lost and floundering, needed help. \n\nAmelia's light cut through the darkness, guiding the vessel to safety. As dawn broke, the rescued fishermen spoke of a beacon that shone brighter than ever before. The village elders, humbled, recognized her strength.\n\nIn that moment, Amelia understood. The lighthouse had never just been her father's duty\u2014it was her inheritance, her purpose. She had become the light that others looked to in the darkness, a legacy forged in solitude and illuminated by courage." 7 | }, 8 | { 9 | "id": "doc2", 10 | "title": "## The Color of Redemption", 11 | "content": "## The Color of Redemption\n\nMarcus, once celebrated for his visionary art, was now a recluse. Haunted by the tragic accident caused by his recklessness, his brushes lay untouched, canvases blank, the vibrant colors of his past faded to gray. \n\nA letter arrived, bearing news of an old friend's terminal illness. Her dying wish: to see one last Marcus original. Guilt warred with fear as he stood before an empty canvas, the weight of expectation crushing.\n\nWith trembling hands, Marcus began to paint. Hours melted into days as he poured his anguish, regret, and hope onto the canvas. The act of creation became a catharsis, each brushstroke a step towards forgiveness \u2013 of himself and others.\n\nAs he neared completion, Marcus realized the painting wasn't just for his friend. It was a bridge from his past to a future he'd thought lost. The finished piece hummed with raw emotion and renewed purpose.\n\nMarcus delivered the painting in person, witnessing the joy it brought to his friend's final days. As word spread of his return, the art world buzzed with anticipation. But Marcus no longer cared about fame. He had rediscovered his truth: that art's greatest power lay not in accolades, but in its ability to heal, connect, and illuminate the human spirit." 12 | }, 13 | { 14 | "id": "doc3", 15 | "title": "## The Return of the Forgotten Priestess", 16 | "content": "## The Return of the Forgotten Priestess\n\nProfessor Elena Reyes clutched the ancient manuscript, her heart racing. After decades of research, she had uncovered the last known text of a long-dead civilization. The symbols danced before her eyes, holding secrets lost to time.\n\nAs Elena delved deeper into translation, strange occurrences plagued her. Vivid dreams of a thriving ancient city filled her nights. 
Whispers in an unknown tongue echoed in empty rooms, sending shivers down her spine. The line between past and present blurred, reality shifting like sand beneath her feet. \n\nColleagues grew concerned, urging her to step back. But Elena pressed on, driven by an inexplicable compulsion. With each deciphered word, she felt a piece of herself slipping away, replaced by something... other. \n\nOn the night she translated the final passage, Elena vanished. Her office was found in disarray, the manuscript open on her desk. The last entry in her journal, written in a script no one recognized, spoke of returning home.\n\nYears later, an archaeological dig uncovered a new chamber in the ancient city. On the wall, a mural depicted a woman with Elena's face, adorned in the robes of a high priestess. The inscription hailed her as the one who bridged worlds, keeper of forgotten wisdom." 17 | } 18 | ] 19 | } -------------------------------------------------------------------------------- /data/patterns/dag_orchestration/trace/task1.json: -------------------------------------------------------------------------------- 1 | { 2 | "docs": [ 3 | { 4 | "id": "doc1", 5 | "title": "**The Lighthouse Keeper's Daughter**", 6 | "content": "In a remote coastal village, Amelia grew up in the shadow of the lighthouse her father tended. The rhythmic sweep of light across dark waters was the heartbeat of her childhood. Her mother's absence, a void as vast as the sea, shaped her quiet resilience.\nAt eighteen, Amelia's world shattered when her father fell ill. The lighthouse, their lifeline, threatened to go dark. With trembling hands but unwavering determination, she took up his mantle, climbing the winding stairs each night to keep the light burning.\nThe villagers whispered doubts, but Amelia stood firm. Through howling gales and crashing waves, she kept vigil. Her resolve was tested one stormy night when distress signals pierced the gloom. A fishing boat, lost and floundering.\nAmelia's light cut through the darkness, guiding the vessel to safety. As dawn broke, the rescued fishermen spoke of a beacon that shone brighter than ever before. The village elders, humbled, recognized her strength.\nIn that moment, Amelia understood. The lighthouse had never just been her father's duty\u2014it was her inheritance, her purpose. She had become the light that others looked to in the darkness, a legacy forged in solitude and illuminated by courage.", 7 | "filename": "./data/patterns/dag_orchestration/docs/doc1.txt" 8 | }, 9 | { 10 | "id": "doc2", 11 | "title": "## The Color of Redemption", 12 | "content": "Marcus had once been celebrated for his visionary art. Now, he was a recluse, haunted by the tragic accident his recklessness had caused. His brushes lay untouched, canvases blank, the vibrant colors of his past faded to gray.\nA letter arrived, bearing news of an old friend's terminal illness. Her dying wish: to see one last Marcus original. Guilt warred with fear as he stood before an empty canvas, the weight of expectation crushing.\nWith trembling hands, Marcus began to paint. Hours blended into days as he poured his anguish, regret, and hope onto the canvas. The act of creation became a catharsis, each brushstroke a step towards forgiveness\u2014of himself and others.\nAs he neared completion, Marcus realized the painting wasn't just for his friend. It was a bridge from his past to a future he'd thought lost. 
The finished piece hummed with raw emotion and renewed purpose.\nMarcus delivered the painting in person, witnessing the joy it brought to his friend's final days. As word spread of his return, the art world buzzed with anticipation. But Marcus no longer cared about fame. He had rediscovered his truth: that art's greatest power lay not in accolades, but in its ability to heal, connect, and illuminate the human spirit.", 13 | "filename": "./data/patterns/dag_orchestration/docs/doc3.txt" 14 | }, 15 | { 16 | "id": "doc3", 17 | "title": "## The Return of the Forgotten Priestess", 18 | "content": "Professor Elena Reyes clutched the ancient manuscript, her heart racing. After decades of research, she had uncovered the last known text of a long-dead civilization. The symbols danced before her eyes, holding secrets lost to time.\nAs Elena delved deeper into translation, strange occurrences plagued her. Vivid dreams of a thriving ancient city. Whispers in an unknown tongue echoing in empty rooms. The line between past and present blurred, reality shifting like sand beneath her feet.\nColleagues grew concerned, urging her to step back. But Elena pressed on, driven by an inexplicable compulsion. With each deciphered word, she felt a piece of herself slipping away, replaced by something... other.\nOn the night she translated the final passage, Elena vanished. Her office was found in disarray, the manuscript open on her desk. The last entry in her journal, written in a script no one recognized, spoke of returning home.\nYears later, an archaeological dig uncovered a new chamber in the ancient city. On the wall, a mural depicted a woman with Elena's face, adorned in the robes of a high priestess. The inscription hailed her as the one who bridged worlds, keeper of forgotten wisdom.", 19 | "filename": "./data/patterns/dag_orchestration/docs/doc2.txt" 20 | } 21 | ] 22 | } -------------------------------------------------------------------------------- /src/patterns/dag_orchestration/README.md: -------------------------------------------------------------------------------- 1 | # Pattern 8 - DAG Orchestration Pattern 2 | 3 | ## Overview 4 | 5 | The DAG (Directed Acyclic Graph) Orchestration Pattern is an advanced design pattern for managing complex workflows in a flexible and efficient manner. This pattern enables execution of multiple tasks in a specified order, supporting both parallel and serial task execution. 6 | 7 | The workflow is structured using a YAML-defined DAG where each task includes schema definitions for both input and output requirements. A Coordinator Agent acts as the primary orchestrator, interpreting the YAML DAG definition and delegating tasks to specialized subagents. The coordinator manages execution order according to the DAG structure, handling parallel execution of independent tasks within the same stage while ensuring serial execution of dependent tasks across stages. 8 | 9 | Independent tasks at the same stage can be executed in parallel, while tasks with dependencies are executed serially, respecting the dependency chain. The coordinator ensures proper task sequencing based on the DAG definition. Additionally, the coordinator can be configured to perform preprocessing of input data before initiating the DAG pipeline, and may include post-processing or consolidation of the final pipeline output. 10 | This pattern provides a structured approach to managing complex workflows while maintaining flexibility in task execution and dependencies. 11 | 12 |

13 | ![DAG Orchestration](../../../img/framework/dag_orchestration.png) 14 |

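The real definition lives in `data/patterns/dag_orchestration/dag.yml`, which is not reproduced in this dump, so the exact keys below are assumptions. Still, a DAG of roughly this shape — tasks, their agents, dependencies, and per-task schemas — is what the coordinator interprets:

```yaml
# Illustrative sketch only; field names are assumptions, not necessarily those in dag.yml.
tasks:
  task1:
    agent: CollectAgent
    dependencies: []
    output_schema: ./data/patterns/dag_orchestration/schemas/collect.json
  task2:
    agent: PreprocessAgent
    dependencies: [task1]
    output_schema: ./data/patterns/dag_orchestration/schemas/preprocess.json
  task3:
    agent: ExtractAgent
    dependencies: [task2]
    output_schema: ./data/patterns/dag_orchestration/schemas/extract.json
  task4:
    agent: SummarizeAgent
    dependencies: [task2]        # task3 and task4 share a stage and can run in parallel
    output_schema: ./data/patterns/dag_orchestration/schemas/summarize.json
  task5:
    agent: CompileAgent
    dependencies: [task3, task4] # waits for both before compiling the final report
    output_schema: ./data/patterns/dag_orchestration/schemas/compile.json
```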
15 | 16 | ## Key Components 17 | 18 | 1. **CoordinatorAgent**: Manages the execution of the entire DAG, creating and running sub-agents as needed. 19 | 2. **CollectAgent**: Gathers documents from a specified folder and prepares them for processing. 20 | 3. **PreprocessAgent**: Cleans and normalizes the collected document content using a Language Model (LLM). 21 | 4. **ExtractAgent**: Extracts key information (characters, themes, plot points) from the preprocessed documents using an LLM. 22 | 5. **SummarizeAgent**: Generates concise summaries of the preprocessed documents using an LLM. 23 | 6. **CompileAgent**: Compiles a final report based on the extracted key information and summaries. 24 | 25 | ## Process Flow 26 | 27 | 1. **DAG Definition Loading**: 28 | - The CoordinatorAgent reads and parses the YAML file containing the DAG definition. 29 | - The DAG structure, including tasks, their dependencies, and associated agents, is loaded into memory. 30 | 31 | 2. **Task Execution Preparation**: 32 | - The CoordinatorAgent initializes the task states and prepares a list of pending tasks. 33 | 34 | 3. **Iterative Task Execution**: 35 | - The CoordinatorAgent enters a loop that continues until all tasks are completed: 36 | a. **Identify Executable Tasks**: 37 | - The coordinator scans the pending tasks to find those with all dependencies satisfied. 38 | b. **Parallel Task Execution**: 39 | - For each executable task: 40 | - The appropriate sub-agent is dynamically created based on the task definition. 41 | - Input data is collected from the results of dependent tasks. 42 | - The task is submitted for asynchronous execution. 43 | c. **Wait for Task Completion**: 44 | - The coordinator waits for all submitted tasks to complete. 45 | d. **Result Collection and State Update**: 46 | - As tasks complete, their results are stored and task states are updated. 47 | e. **Error Handling**: 48 | - Any task failures are logged, and the overall process can continue if non-critical. 49 | 50 | 4. **Final Output Generation**: 51 | - Once all tasks are completed, the coordinator identifies the final task in the DAG. 52 | - The result of this final task is prepared as the output of the entire workflow. 53 | 54 | 5. **Cleanup and Reporting**: 55 | - The coordinator performs any necessary cleanup operations. 56 | - A final report or summary of the workflow execution may be generated. 57 | 58 | Throughout this process, the CoordinatorAgent manages the flow of data between tasks, ensures proper sequencing based on the DAG structure, and handles any errors or exceptions that occur during execution. 59 | -------------------------------------------------------------------------------- /src/patterns/dag_orchestration/agent.py: -------------------------------------------------------------------------------- 1 | from src.commons.message import Message 2 | from jsonschema import ValidationError 3 | from src.config.logging import logger 4 | from json import JSONDecodeError 5 | from jsonschema import validate 6 | from abc import abstractmethod 7 | from typing import Dict 8 | from typing import Any 9 | from abc import ABC 10 | import json 11 | 12 | 13 | class Agent(ABC): 14 | """ 15 | A base class representing an agent responsible for processing messages 16 | and validating input and output data based on given JSON schemas. 17 | """ 18 | 19 | def __init__(self, name: str) -> None: 20 | """ 21 | Initializes the agent with a given name. 22 | 23 | Args: 24 | name (str): The name of the agent. 
25 | """ 26 | self.name = name 27 | 28 | @abstractmethod 29 | async def process(self, message: 'Message') -> 'Message': 30 | """ 31 | Abstract method to process the message. 32 | 33 | Args: 34 | message (Message): A message object containing relevant data. 35 | 36 | Returns: 37 | Message: Processed message. 38 | 39 | Raises: 40 | NotImplementedError: If not overridden by a subclass. 41 | """ 42 | raise NotImplementedError("This method should be implemented by subclasses.") 43 | 44 | def validate_input(self, data: Dict[str, Any], schema_file: str) -> None: 45 | """ 46 | Validates the input data against a JSON schema file. 47 | 48 | Args: 49 | data (Dict[str, Any]): The input data to validate. 50 | schema_file (str): Path to the JSON schema file. 51 | 52 | Raises: 53 | ValueError: If the schema file cannot be read or parsed. 54 | ValidationError: If the input data does not conform to the schema. 55 | """ 56 | schema = self._load_schema(schema_file) 57 | try: 58 | validate(instance=data, schema=schema) 59 | logger.info(f"{self.name} input validated successfully against {schema_file}.") 60 | except ValidationError as e: 61 | logger.error(f"{self.name} input validation error: {e}") 62 | raise 63 | except Exception as e: 64 | logger.error(f"Unexpected error during input validation: {e}") 65 | raise 66 | 67 | def validate_output(self, data: Dict[str, Any], schema_file: str) -> None: 68 | """ 69 | Validates the output data against a JSON schema file. 70 | 71 | Args: 72 | data (Dict[str, Any]): The output data to validate. 73 | schema_file (str): Path to the JSON schema file. 74 | 75 | Raises: 76 | ValueError: If the schema file cannot be read or parsed. 77 | ValidationError: If the output data does not conform to the schema. 78 | """ 79 | schema = self._load_schema(schema_file) 80 | try: 81 | validate(instance=data, schema=schema) 82 | logger.info(f"{self.name} output validated successfully against {schema_file}.") 83 | except ValidationError as e: 84 | logger.error(f"{self.name} output validation error: {e}") 85 | raise 86 | except Exception as e: 87 | logger.error(f"Unexpected error during output validation: {e}") 88 | raise 89 | 90 | def _load_schema(self, schema_file: str) -> Dict[str, Any]: 91 | """ 92 | Loads and returns a JSON schema from a file. 93 | 94 | Args: 95 | schema_file (str): Path to the JSON schema file. 96 | 97 | Returns: 98 | Dict[str, Any]: Loaded JSON schema. 99 | 100 | Raises: 101 | ValueError: If the schema file cannot be read or parsed. 102 | """ 103 | try: 104 | with open(schema_file, 'r') as f: 105 | schema = json.load(f) 106 | return schema 107 | except (FileNotFoundError, JSONDecodeError) as e: 108 | logger.error(f"Failed to load schema file {schema_file}: {e}") 109 | raise ValueError(f"Error loading schema file {schema_file}: {e}") 110 | -------------------------------------------------------------------------------- /src/patterns/dynamic_sharding/README.md: -------------------------------------------------------------------------------- 1 | # Pattern 5 - Dynamic Sharding Pattern 2 | 3 | ## Overview 4 | 5 | The Dynamic Sharding Pattern is an architectural approach designed to efficiently process large datasets by dynamically dividing the workload into smaller, manageable shards and processing them in parallel. This pattern enhances scalability, optimizes resource utilization, and improves the overall performance of systems handling extensive data or requests. 
6 | This pattern can be demonstrated through a practical example of fetching celebrity biographies via web search. The coordinator agent dynamically shards the list of celebrity names and processes each shard concurrently through dynamically created sub-agents. 7 | The coordinator agent in this example is a basic implementation: it spawns (creates) sub-agents based on the shard size and the number of items to be processed. 8 | 9 | 💡 In a more advanced implementation, the coordinator could leverage an LLM to deduce patterns in the data and allocate sub-agents to identified classes or groups of shards of varying sizes. 10 | 11 |

12 | ![Dynamic Sharding](../../../img/framework/dynamic_sharding.png) 13 |

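The sharding step itself is straightforward; a minimal sketch (a hypothetical helper, not the repository's actual code) of dividing a list by shard size:

```python
from typing import Any, List


def make_shards(items: List[Any], shard_size: int) -> List[List[Any]]:
    # Split the list into consecutive chunks of at most shard_size items each.
    return [items[i:i + shard_size] for i in range(0, len(items), shard_size)]


# A list of 100 celebrity names with shard_size=10 yields 10 shards of 10 names.
```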
14 | 15 | ## Architecture Pattern Explained 16 | 17 | ### Key Components 18 | 19 | 1. **Coordinator Agent**: 20 | - **Role**: Orchestrates the entire data processing workflow. 21 | - **Responsibilities**: 22 | - Receives the complete list of items to process (e.g., celebrity names in our example implementation) along with the desired shard size. 23 | - Dynamically divides the list into smaller shards based on the shard size. 24 | - Creates sub-agents dynamically for each shard. 25 | - Initiates parallel processing by assigning shards to sub-agents. 26 | - Aggregates results from all sub-agents into a final consolidated response. 27 | - **Benefits**: 28 | - Decouples task management from execution. 29 | - Enhances scalability by adjusting the number of shards and sub-agents based on workload. 30 | - Improves system responsiveness and throughput. 31 | 32 | 2. **Shard Processing Agents**: 33 | - **Role**: Handle the processing of individual shards. 34 | - **Responsibilities**: 35 | - Receive a shard (subset) of data from the coordinator agent. 36 | - Process each item within the shard, such as fetching biographies for each celebrity. 37 | - Perform item-level processing concurrently to maximize efficiency. 38 | - Return processed results back to the coordinator agent. 39 | - **Benefits**: 40 | - Enable fine-grained parallelism within shards. 41 | - Simplify error handling and retry mechanisms at the shard level. 42 | - Allow for resource isolation and management per shard. 43 | 44 | ### Workflow 45 | 46 | 1. **Input Reception**: 47 | - The **Coordinator Agent** receives a request containing: 48 | - A list of celebrity names. 49 | - The desired shard size (number of items per shard). 50 | 51 | 2. **Dynamic Sharding**: 52 | - The coordinator dynamically divides the list of celebrity names into multiple shards based on the specified shard size. 53 | - For example, a list of 100 celebrities with a shard size of 10 will result in 10 shards. 54 | 55 | 3. **Dynamic Agent Creation**: 56 | - For each shard, the coordinator agent creates a new **Shard Processing Agent** dynamically. 57 | - Each agent is responsible for processing its assigned shard independently. 58 | 59 | 4. **Parallel Shard Processing**: 60 | - The coordinator dispatches all shard processing agents concurrently. 61 | - Shard processing agents begin processing their respective shards in parallel. 62 | 63 | 5. **Concurrent Item Processing**: 64 | - Within each shard processing agent: 65 | - Each item (celebrity name) is processed concurrently using asynchronous tasks. 66 | - For example, fetching the biography of each celebrity using our previously created web access pipeline (**Pattern 2**). 67 | 68 | 6. **Result Collection**: 69 | - Shard processing agents collect the results of processing each item in their shard. 70 | - Each agent returns its results back to the coordinator agent upon completion. 71 | 72 | 7. **Result Aggregation**: 73 | - The coordinator agent waits for all shard processing agents to complete. 74 | - Aggregates the results from all shards into a single, consolidated response. 75 | 76 | 8. **Response Delivery**: 77 | - The consolidated results are sent back to the original requester (e.g., the user or another system component). 
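Taken together, the workflow above reduces to a small amount of asyncio orchestration. The following is a runnable, self-contained sketch with hypothetical names; the repository's actual implementation lives in `coordinator.py` and `delegates.py` and uses the web access pipeline for the per-item fetch:

```python
import asyncio
from typing import List


async def fetch_bio(name: str) -> str:
    # Stand-in for the web access pipeline (Pattern 2) fetching one biography.
    await asyncio.sleep(0.1)
    return f"[biography of {name}]"


async def process_shard(shard: List[str]) -> List[str]:
    # A shard processing agent: item-level concurrency within a single shard.
    return list(await asyncio.gather(*(fetch_bio(name) for name in shard)))


async def coordinate(names: List[str], shard_size: int) -> List[str]:
    shards = [names[i:i + shard_size] for i in range(0, len(names), shard_size)]
    # One dynamically created sub-agent per shard, all dispatched in parallel.
    shard_results = await asyncio.gather(*(process_shard(s) for s in shards))
    # Aggregate per-shard results into one consolidated response.
    return [bio for result in shard_results for bio in result]


print(asyncio.run(coordinate(["Ada Lovelace", "Alan Turing", "Grace Hopper"], 2)))
```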
78 | 79 | -------------------------------------------------------------------------------- /src/patterns/web_access/summarize.py: -------------------------------------------------------------------------------- 1 | from src.patterns.web_access.tasks import SummarizeTask 2 | from src.llm.generate import ResponseGenerator 3 | from src.prompt.manage import TemplateManager 4 | from src.utils.io import generate_filename 5 | from src.config.logging import logger 6 | from src.utils.io import read_file 7 | from typing import Dict 8 | import os 9 | 10 | 11 | class WebContentSummarizeAgent(SummarizeTask): 12 | """ 13 | Agent for summarizing scraped web content using a language model and predefined templates. 14 | 15 | Attributes: 16 | INPUT_DIR (str): Directory path for scraped content to be summarized. 17 | OUTPUT_DIR (str): Directory path to save generated summaries. 18 | TEMPLATE_PATH (str): Path to template configuration file for generating instructions. 19 | """ 20 | INPUT_DIR = './data/patterns/web_access/output/scrape' 21 | OUTPUT_DIR = './data/patterns/web_access/output/summarize' 22 | TEMPLATE_PATH = './config/patterns/web_access.yml' 23 | 24 | def __init__(self) -> None: 25 | """ 26 | Initializes WebContentSummarizeAgent with a template manager and response generator. 27 | """ 28 | self.template_manager = TemplateManager(self.TEMPLATE_PATH) 29 | self.response_generator = ResponseGenerator() 30 | 31 | def _read_scraped_content(self, query: str) -> str: 32 | """ 33 | Reads scraped content from the input directory based on the query. 34 | 35 | Args: 36 | query (str): Query string to locate the specific scraped content. 37 | 38 | Returns: 39 | str: Scraped content as a string. 40 | """ 41 | try: 42 | logger.info(f"Reading scraped content for query: '{query}'") 43 | input_file_path = os.path.join(self.INPUT_DIR, generate_filename(query, 'txt')) 44 | return read_file(input_file_path) 45 | except Exception as e: 46 | logger.error(f"Error reading scraped content: {e}") 47 | raise 48 | 49 | def _save_summary(self, summary: str, query: str) -> None: 50 | """ 51 | Saves the generated summary to the specified output directory. 52 | 53 | Args: 54 | summary (str): Generated summary to save. 55 | query (str): Query string used to generate the filename. 56 | """ 57 | output_path = os.path.join(self.OUTPUT_DIR, f"{generate_filename(query, 'txt')}") 58 | try: 59 | os.makedirs(os.path.dirname(output_path), exist_ok=True) 60 | logger.info(f"Saving summary to {output_path}") 61 | with open(output_path, 'w', encoding='utf-8') as file: 62 | file.write(summary) 63 | logger.info("Summary saved successfully.") 64 | except Exception as e: 65 | logger.error(f"Error saving summary: {e}", exc_info=True) 66 | raise 67 | 68 | def run(self, model_name: str, query: str) -> str: 69 | """ 70 | Executes the summarization process for scraped content, generating a summary and saving it. 71 | 72 | Args: 73 | model_name (str): Model name to be used for summarization. 74 | query (str): Query string to contextualize the summary. 75 | 76 | Returns: 77 | str: Generated summary. 
78 | """ 79 | try: 80 | # Read content specific to the query 81 | scraped_content = self._read_scraped_content(query) 82 | 83 | # Generate prompt template 84 | logger.info("Fetching and processing template for response generation.") 85 | template: Dict[str, str] = self.template_manager.create_template('tools', 'summarize') 86 | system_instruction = template['system'] 87 | user_instruction = self.template_manager.fill_template( 88 | template['user'], query=query, scraped_content=scraped_content 89 | ) 90 | 91 | # Generate response 92 | logger.info("Generating response from LLM.") 93 | response = self.response_generator.generate_response( 94 | model_name, system_instruction, [user_instruction] 95 | ) 96 | summary = response.text.strip() 97 | logger.info("Response generated successfully.") 98 | 99 | # Save the summary 100 | self._save_summary(summary, query) 101 | 102 | return summary 103 | 104 | except Exception as e: 105 | logger.error(f"Error during summarization process: {e}", exc_info=True) 106 | raise 107 | -------------------------------------------------------------------------------- /src/memory/manage.py: -------------------------------------------------------------------------------- 1 | from src.config.logging import logger 2 | from collections import OrderedDict 3 | from typing import Optional 4 | from typing import Dict 5 | from typing import Any 6 | 7 | 8 | class StateManager: 9 | """ 10 | StateManager class that maintains an ordered dictionary to store key-value pairs and 11 | provides functionality to convert the state into a Markdown formatted string. 12 | 13 | Attributes: 14 | _state (OrderedDict[str, Any]): An ordered dictionary to store state entries. 15 | _state_md (Optional[str]): A string representation of the state in Markdown format. 16 | """ 17 | 18 | def __init__(self): 19 | """ 20 | Initialize the StateManager with an empty ordered dictionary and None for the Markdown state. 21 | """ 22 | self._state: OrderedDict[str, Any] = OrderedDict() 23 | self._state_md: Optional[str] = None 24 | 25 | def add_entry(self, key: str, value: Any) -> None: 26 | """ 27 | Add a key-value pair to the state and update the Markdown representation. 28 | 29 | Args: 30 | key (str): The key for the state entry. 31 | value (Any): The value associated with the key. 32 | 33 | Raises: 34 | ValueError: If key is empty or None. 35 | Exception: If any other error occurs during the process. 36 | """ 37 | if not key: 38 | logger.error("The key provided is empty or None.") 39 | raise ValueError("Key must not be empty or None.") 40 | 41 | try: 42 | self._state[key] = value 43 | self._state_md = self.to_markdown() 44 | logger.info(f"Entry added to state: {key} = {value}") 45 | except Exception as e: 46 | logger.error(f"Error adding entry to state: {e}") 47 | raise 48 | 49 | def to_markdown(self) -> str: 50 | """ 51 | Convert the current state to a Markdown formatted string. 52 | 53 | Returns: 54 | str: The state as a Markdown formatted string. 55 | 56 | Raises: 57 | Exception: If an error occurs during the conversion. 
58 | """ 59 | try: 60 | markdown = [] 61 | for key, value in self._state.items(): 62 | markdown.append(f"### {key}\n") 63 | if isinstance(value, dict): 64 | markdown.append(f"\n{self._dict_to_markdown(value)}\n") 65 | else: 66 | markdown.append(f"\n{value}\n") 67 | markdown.append("\n") 68 | logger.info("State successfully converted to Markdown.") 69 | return ''.join(markdown) 70 | except Exception as e: 71 | logger.error(f"Error converting state to Markdown: {e}") 72 | raise 73 | 74 | @staticmethod 75 | def _dict_to_markdown(data: Dict[str, Any], indent_level: int = 0) -> str: 76 | """ 77 | Recursively convert a dictionary to a Markdown formatted string. 78 | 79 | Args: 80 | data (Dict[str, Any]): The dictionary to convert. 81 | indent_level (int): The current indentation level for nested dictionaries. 82 | 83 | Returns: 84 | str: The dictionary as a Markdown formatted string. 85 | 86 | Raises: 87 | Exception: If an error occurs during the conversion. 88 | """ 89 | try: 90 | markdown = [] 91 | indent = ' ' * indent_level 92 | for key, value in data.items(): 93 | if isinstance(value, dict): 94 | markdown.append(f"{indent}- **{key.capitalize()}**:\n") 95 | markdown.append(StateManager._dict_to_markdown(value, indent_level + 2)) 96 | else: 97 | markdown.append(f"{indent}- **{key.capitalize()}**: {value}\n") 98 | logger.info("Dictionary successfully converted to Markdown.") 99 | return ''.join(markdown) 100 | except Exception as e: 101 | logger.error(f"Error converting dictionary to Markdown: {e}") 102 | raise 103 | 104 | def get_state(self) -> OrderedDict[str, Any]: 105 | """ 106 | Get the current state as an ordered dictionary. 107 | 108 | Returns: 109 | OrderedDict[str, Any]: The current state. 110 | """ 111 | return self._state 112 | 113 | def get_state_markdown(self) -> Optional[str]: 114 | """ 115 | Get the current state as a Markdown formatted string. 116 | 117 | Returns: 118 | Optional[str]: The current state in Markdown format, or None if the state is empty. 119 | """ 120 | return self._state_md 121 | -------------------------------------------------------------------------------- /src/patterns/task_decomposition/coordinator.py: -------------------------------------------------------------------------------- 1 | from src.patterns.task_decomposition.delegates import SubTaskAgent 2 | from src.patterns.task_decomposition.agent import Agent 3 | from src.commons.message import Message 4 | from src.config.logging import logger 5 | from typing import List 6 | from typing import Any 7 | import asyncio 8 | 9 | 10 | class CoordinatorAgent(Agent): 11 | """ 12 | An agent that coordinates the processing of a document by decomposing it 13 | into extraction subtasks and assigning them to sub-agents to execute in parallel. 14 | 15 | Attributes: 16 | name (str): The name of the coordinator agent. 17 | """ 18 | 19 | def __init__(self, name: str) -> None: 20 | """ 21 | Initializes the CoordinatorAgent. 22 | 23 | Args: 24 | name (str): The name of the agent. 25 | """ 26 | super().__init__(name) 27 | logger.info(f"{self.name} initialized.") 28 | 29 | async def process(self, message: Message) -> Message: 30 | """ 31 | Processes the incoming message containing the document input, 32 | decomposes it into subtasks, assigns them to sub-agents, and 33 | collects the results concurrently. 34 | 35 | Args: 36 | message (Message): The incoming message containing the document input. 37 | 38 | Returns: 39 | Message: A message containing the final combined result. 
40 | """ 41 | logger.info(f"{self.name} processing message.") 42 | try: 43 | document_content = message.content # Assume message content is the document 44 | 45 | # Decompose the document into subtasks 46 | subtasks = self.decompose_task(document_content) 47 | 48 | # Create sub-agents and execute subtasks in parallel 49 | tasks = [] 50 | for idx, subtask in enumerate(subtasks): 51 | agent_name = f"SubTaskAgent_{idx}" 52 | agent = SubTaskAgent(name=agent_name) 53 | sub_message = Message(content=subtask, sender=self.name, recipient=agent_name) 54 | task = asyncio.create_task(agent.process(sub_message)) 55 | tasks.append(task) 56 | 57 | # Gather results from all sub-agents concurrently 58 | sub_results = await asyncio.gather(*tasks) 59 | 60 | # Combine results into a structured summary 61 | combined_result = self.combine_results(sub_results) 62 | 63 | # Return the final message 64 | return Message(content=combined_result, sender=self.name, recipient=message.sender) 65 | 66 | except Exception as e: 67 | logger.error(f"Error during processing: {str(e)}") 68 | return Message( 69 | content="An error occurred while processing the document.", 70 | sender=self.name, 71 | recipient=message.sender 72 | ) 73 | 74 | def decompose_task(self, document_content: str) -> List[dict]: 75 | """ 76 | Decomposes the document into extraction subtasks. 77 | 78 | Args: 79 | document_content (str): The full text of the document. 80 | 81 | Returns: 82 | List[dict]: A list of subtasks, where each subtask is a dictionary containing 83 | the document and the specific task to be performed. 84 | """ 85 | # Generalized approach for task decomposition 86 | return [ 87 | {"document": document_content, "task": "Extract all named entities (people, organizations, locations) and their roles or significance"}, 88 | {"document": document_content, "task": "Identify and extract all direct quotations with speakers and context"}, 89 | {"document": document_content, "task": "Extract all numerical data (dates, statistics, measurements) with descriptions"}, 90 | {"document": document_content, "task": "Identify and extract key terms or concepts with their definitions or explanations"}, 91 | {"document": document_content, "task": "Extract all references to external sources with available citation information"} 92 | ] 93 | 94 | def combine_results(self, sub_results: List[Any]) -> str: 95 | """ 96 | Combines the results of the subtasks into a structured summary. 97 | 98 | Args: 99 | sub_results (List[Any]): The results of the subtasks from the sub-agents. 100 | 101 | Returns: 102 | str: A structured summary of the document. 103 | """ 104 | summary = "Document Summary:\n" 105 | for result in sub_results: 106 | summary += f"{result.content}\n" 107 | return summary 108 | --------------------------------------------------------------------------------
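As a closing illustration, a hedged usage sketch for the `CoordinatorAgent` above: the constructor and `process` signatures match the code shown, but the driver script and sample document are assumptions, not part of the repository.

```python
import asyncio

# Hypothetical driver; the imports mirror the module paths shown in this repository.
from src.patterns.task_decomposition.coordinator import CoordinatorAgent
from src.commons.message import Message


async def main() -> None:
    coordinator = CoordinatorAgent(name="CoordinatorAgent")
    document = "Acme Corp's CEO Jane Doe said on 2024-01-15: 'Revenue grew 12%.'"
    reply = await coordinator.process(
        Message(content=document, sender="user", recipient="CoordinatorAgent")
    )
    print(reply.content)  # "Document Summary:" followed by the five subtask results


asyncio.run(main())
```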