├── utils
│   ├── __pycache__
│   │   ├── config.cpython-312.pyc
│   │   ├── helpers.cpython-312.pyc
│   │   └── rate_limiter.cpython-312.pyc
│   ├── rate_limiter.py
│   ├── helpers.py
│   └── config.py
├── testing
│   ├── test_nvidia_model.py
│   ├── test_sambanova.py
│   ├── test_langchain.py
│   ├── test_ollama.py
│   └── test_nvidia_langchain.py
├── .env.example
├── pyproject.toml
├── .gitignore
├── config.py
├── session1
│   ├── basics.py
│   └── crewai_intro.py
├── ai_agent_workshop_curriculum.md
├── session2
│   ├── agent_roles.py
│   ├── content_crew.py
│   └── agent_roles_gui.py
├── GIT_SETUP.md
├── architecture.md
├── session3
│   ├── stateful_workflow_langchain_nvidia.py
│   ├── stateful_workflow.py
│   ├── langgraph_basics.py
│   └── langgraph_basics_nvidia.py
├── CODE_REVIEW_REPORT.md
└── README.md
/utils/__pycache__/config.cpython-312.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/ashishpatel26/AIAgentWorkshop-New/master/utils/__pycache__/config.cpython-312.pyc
--------------------------------------------------------------------------------
/utils/__pycache__/helpers.cpython-312.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/ashishpatel26/AIAgentWorkshop-New/master/utils/__pycache__/helpers.cpython-312.pyc
--------------------------------------------------------------------------------
/testing/test_nvidia_model.py:
--------------------------------------------------------------------------------
1 | # NVIDIA API notes:
2 | # Set NVIDIA_API_KEY in your local .env file; never hardcode or commit real keys.
3 | # Available models are listed at https://build.nvidia.com/models
4 | # See testing/test_nvidia_langchain.py for a runnable connectivity check.
--------------------------------------------------------------------------------
/utils/__pycache__/rate_limiter.cpython-312.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/ashishpatel26/AIAgentWorkshop-New/master/utils/__pycache__/rate_limiter.cpython-312.pyc
--------------------------------------------------------------------------------
/testing/test_sambanova.py:
--------------------------------------------------------------------------------
1 | from openai import OpenAI
2 | import os
3 | from dotenv import load_dotenv
4 | 
5 | load_dotenv()
6 | 
7 | client = OpenAI(
8 |     api_key=os.getenv("SAMBA_API_KEY"),  # set in .env; never hardcode keys
9 |     base_url="https://api.sambanova.ai/v1",
10 | )
11 | 
12 | response = client.chat.completions.create(
13 |     model="gpt-oss-120b",
14 |     messages=[{"role": "user", "content": "Explain the importance of fast language models"}]
15 | )
16 | 
17 | print(response.choices[0].message.content)
--------------------------------------------------------------------------------
/.env.example:
--------------------------------------------------------------------------------
1 | # AI Provider Selection (choose 'sambanova' or 'ollama')
2 | AI_PROVIDER=ollama
3 | 
4 | # SambaNova API Configuration
5 | # SAMBA_API_KEY=
6 | 
7 | # Optional: Specify SambaNova model (default is gpt-oss-120b)
8 | # SAMBA_MODEL=gpt-oss-120b
9 | 
10 | # Ollama Configuration (for local models)
11 | OLLAMA_MODEL=gemma3:4b
12 | 
13 | # NVIDIA API Configuration (optional)
14 | # NVIDIA_API_KEY=
15 | 
16 | # Workshop Configuration
17 | WORKSHOP_DEBUG=false
18 | MAX_TOKENS=4000
19 | TEMPERATURE=0.7
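# To use these settings, copy this file to .env and add your own keys
# (the workshop scripts load .env automatically via python-dotenv):
#   cp .env.example .env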
description = "AI Agent Workshop for Beginners" 5 | readme = "README.md" 6 | requires-python = ">=3.11" 7 | dependencies = [ 8 | "crewai==1.6.1", 9 | "langchain>=0.3.27", 10 | "langchain-openai", 11 | "langgraph", 12 | "litellm==1.60.2", 13 | "openai", 14 | "python-dotenv", 15 | "streamlit", 16 | ] 17 | 18 | [tool.hatch.build.targets.wheel] 19 | packages = ["."] 20 | 21 | [build-system] 22 | requires = ["hatchling"] 23 | build-backend = "hatchling.build" 24 | -------------------------------------------------------------------------------- /testing/test_langchain.py: -------------------------------------------------------------------------------- 1 | from langchain_openai import ChatOpenAI 2 | from dotenv import load_dotenv 3 | import os 4 | 5 | load_dotenv() 6 | 7 | llm = ChatOpenAI( 8 | temperature=0.7, 9 | model="gpt-oss-120b", 10 | api_key="e601d1fe-2b94-489d-b6d8-2b6f853e4bfe", 11 | base_url="https://api.sambanova.ai/v1" 12 | ) 13 | 14 | messages = [ 15 | {"role": "system", "content": "You are a helpful AI assistant."}, 16 | {"role": "user", "content": "Explain the importance of fast language models."} 17 | ] 18 | 19 | response = llm.invoke(messages) 20 | 21 | print(response.content) -------------------------------------------------------------------------------- /.gitignore: -------------------------------------------------------------------------------- 1 | # Environment variables 2 | .env 3 | 4 | # Python 5 | __pycache__/ 6 | *.py[cod] 7 | *$py.class 8 | *.so 9 | .Python 10 | build/ 11 | develop-eggs/ 12 | dist/ 13 | downloads/ 14 | eggs/ 15 | .eggs/ 16 | lib/ 17 | lib64/ 18 | parts/ 19 | sdist/ 20 | var/ 21 | wheels/ 22 | *.egg-info/ 23 | .installed.cfg 24 | *.egg 25 | MANIFEST 26 | 27 | # Virtual environments 28 | .venv/ 29 | venv/ 30 | ENV/ 31 | env/ 32 | 33 | # IDE 34 | .vscode/ 35 | .idea/ 36 | *.swp 37 | *.swo 38 | *~ 39 | 40 | # OS 41 | .DS_Store 42 | Thumbs.db 43 | 44 | # Logs 45 | *.log 46 | logs/ 47 | 48 | # Temporary files 49 | *.tmp 50 | *.temp -------------------------------------------------------------------------------- /testing/test_ollama.py: -------------------------------------------------------------------------------- 1 | from openai import OpenAI 2 | import os 3 | from dotenv import load_dotenv 4 | 5 | load_dotenv() 6 | 7 | client = OpenAI( 8 | api_key="ollama", # Ollama doesn't need a real API key 9 | base_url="http://localhost:11434/v1", 10 | ) 11 | 12 | try: 13 | response = client.chat.completions.create( 14 | model="gemma3:4b", 15 | messages=[{"role": "user", "content": "Say hello in one word"}] 16 | ) 17 | print("Ollama is working!") 18 | print("Response:", response.choices[0].message.content) 19 | except Exception as e: 20 | print("Ollama connection failed:", str(e)) 21 | print("Make sure Ollama is running: ollama serve") 22 | print("And the model is pulled: ollama pull llama3.2:3b") -------------------------------------------------------------------------------- /testing/test_nvidia_langchain.py: -------------------------------------------------------------------------------- 1 | """ 2 | Test NVIDIA API with LangChain 3 | """ 4 | 5 | from langchain_openai import ChatOpenAI 6 | from langchain_core.messages import HumanMessage, SystemMessage 7 | import os 8 | from dotenv import load_dotenv 9 | 10 | load_dotenv() 11 | 12 | # NVIDIA API configuration 13 | NVIDIA_API_KEY = "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" 14 | 15 | def test_nvidia_langchain(): 16 | """Test NVIDIA with LangChain.""" 17 | 18 | # Configure NVIDIA LLM 19 | 
19 |     llm = ChatOpenAI(
20 |         model="meta/llama3-8b-instruct",
21 |         api_key=NVIDIA_API_KEY,
22 |         base_url="https://integrate.api.nvidia.com/v1",
23 |         temperature=0.7
24 |     )
25 | 
26 |     # Create messages
27 |     messages = [
28 |         SystemMessage(content="You are a helpful AI assistant."),
29 |         HumanMessage(content="Explain what artificial intelligence is in simple terms.")
30 |     ]
31 | 
32 |     try:
33 |         response = llm.invoke(messages)
34 |         print("✅ LangChain with NVIDIA successful!")
35 |         print(f"Response: {response.content}")
36 |         return True
37 |     except Exception as e:
38 |         print(f"❌ LangChain with NVIDIA failed: {e}")
39 |         return False
40 | 
41 | if __name__ == "__main__":
42 |     test_nvidia_langchain()
--------------------------------------------------------------------------------
/config.py:
--------------------------------------------------------------------------------
1 | """
2 | Simple configuration for AI Agent Workshop.
3 | Automatically loads environment variables and provides easy imports.
4 | """
5 | 
6 | import os
7 | from dotenv import load_dotenv
8 | 
9 | # Load environment variables
10 | load_dotenv()
11 | 
12 | # API Configuration - Choose provider
13 | PROVIDER = os.getenv('AI_PROVIDER', 'ollama')  # 'sambanova' or 'ollama'
14 | 
15 | if PROVIDER == 'sambanova':
16 |     API_KEY = os.getenv('SAMBA_API_KEY', '')  # set in .env; never hardcode keys
17 |     MODEL = os.getenv('SAMBA_MODEL', 'gpt-oss-120b')
18 |     API_BASE = 'https://api.sambanova.ai/v1'
19 | elif PROVIDER == 'ollama':
20 |     API_KEY = 'ollama'  # Ollama doesn't need a real API key
21 |     MODEL = os.getenv('OLLAMA_MODEL', 'gemma3:4b')
22 |     API_BASE = 'http://localhost:11434/v1'
23 | else:
24 |     # Default to SambaNova
25 |     API_KEY = os.getenv('SAMBA_API_KEY', '')  # set in .env; never hardcode keys
26 |     MODEL = os.getenv('SAMBA_MODEL', 'gpt-oss-120b')
27 |     API_BASE = 'https://api.sambanova.ai/v1'
28 | 
29 | # Workshop Configuration
30 | DEBUG = os.getenv('WORKSHOP_DEBUG', 'false').lower() == 'true'
31 | MAX_TOKENS = int(os.getenv('MAX_TOKENS', '4000'))
32 | TEMPERATURE = float(os.getenv('TEMPERATURE', '0.7'))
33 | 
34 | # Agent Configuration
35 | MAX_RETRIES = 3
36 | RETRY_DELAY = 1.0
37 | 
38 | # LLM Configuration based on provider
39 | if PROVIDER == 'ollama':
40 |     LLM_STRING = f"ollama/{MODEL}"  # Ollama uses ollama/model format
41 | else:
42 |     LLM_STRING = f"{PROVIDER}/{MODEL}"  # SambaNova uses provider/model format
--------------------------------------------------------------------------------
/session1/basics.py:
--------------------------------------------------------------------------------
1 | """
2 | Session 1: Basic AI Agent Examples
3 | This file shows simple examples of AI agents for beginners.
4 | We will learn how AI can chat and use tools to solve problems.
5 | """
6 | 
7 | from langchain_openai import ChatOpenAI
8 | from config import API_KEY, MODEL, API_BASE, TEMPERATURE, MAX_TOKENS, MAX_RETRIES, RETRY_DELAY, LLM_STRING, PROVIDER
9 | 
10 | def basic_chat_example():
11 |     """Example 1: Simple chat with AI"""
12 |     print("=== Example 1: Basic Chat with AI ===")
13 |     print("This shows how to talk to an AI assistant.")
14 |     print()
15 | 
16 |     # Ask user what they want to ask
17 |     try:
18 |         user_question = input("What would you like to ask the AI? (press Enter for default): ").strip()
19 |         if not user_question:
20 |             user_question = "Explain what an AI agent is in simple terms."
21 |     except EOFError:
22 |         # Handle non-interactive environments
23 |         user_question = "Explain what an AI agent is in simple terms."
24 | print("What would you like to ask the AI? (press Enter for default): ") 25 | print("(Using default question in non-interactive environment)") 26 | 27 | # Create the AI model (like choosing which AI to talk to) 28 | llm = ChatOpenAI( 29 | temperature=TEMPERATURE, # How creative the AI should be 30 | model=MODEL, # Which AI model to use 31 | api_key=API_KEY, # Our secret key to use the AI 32 | base_url=API_BASE # Where to connect to the AI 33 | ) 34 | 35 | # Prepare our message to the AI 36 | messages = [ 37 | {"role": "system", "content": "You are a helpful AI assistant."}, # Tell AI how to behave 38 | {"role": "user", "content": user_question} # User's question 39 | ] 40 | 41 | print(f"Sending your question to AI: '{user_question}'") 42 | print("AI is thinking...") 43 | response = llm.invoke(messages) # Send message and get response 44 | 45 | print("\nAI says:") 46 | print(response.content) 47 | print() 48 | 49 | def simple_math_helper(): 50 | """Example 2: AI Math Helper (simplified version)""" 51 | print("=== Example 2: AI Math Helper ===") 52 | print("This shows how AI can help with simple math.") 53 | print() 54 | 55 | # Ask user for their math question 56 | try: 57 | math_question = input("What math question would you like to ask? (press Enter for default): ").strip() 58 | if not math_question: 59 | math_question = "If you have 15 apples and buy 27 more, then give away 3, how many do you have left?" 60 | except EOFError: 61 | # Handle non-interactive environments 62 | math_question = "If you have 15 apples and buy 27 more, then give away 3, how many do you have left?" 63 | print("What math question would you like to ask? (press Enter for default): ") 64 | print("(Using default question in non-interactive environment)") 65 | 66 | # Create the AI brain 67 | llm = ChatOpenAI( 68 | temperature=TEMPERATURE, 69 | model=MODEL, 70 | api_key=API_KEY, 71 | base_url=API_BASE 72 | ) 73 | 74 | # Create a helpful message for the AI 75 | messages = [ 76 | {"role": "system", "content": "You are a helpful math tutor. Explain your answers clearly step by step."}, 77 | {"role": "user", "content": math_question} 78 | ] 79 | 80 | print(f"Asking AI: {math_question}") 81 | print("AI is thinking...") 82 | response = llm.invoke(messages) 83 | 84 | print("\nAI Math Helper says:") 85 | print(response.content) 86 | print() 87 | 88 | def main(): 89 | """Run all the basic examples.""" 90 | print("AI Agent Workshop - Session 1: Learning the Basics") 91 | print("=" * 60) 92 | print("Welcome! Today we'll learn about AI agents.") 93 | print("An AI agent is like a smart helper that can think and use tools.") 94 | print() 95 | 96 | try: 97 | # Run the first example 98 | basic_chat_example() 99 | 100 | # Run the second example 101 | simple_math_helper() 102 | 103 | print("Great job! You completed all the basic examples!") 104 | print("You now know how AI agents can chat and use tools.") 105 | 106 | except Exception as e: 107 | print(f"Oops! 
Something went wrong: {e}") 108 | if PROVIDER == 'sambanova': 109 | print("Make sure your SAMBA_API_KEY is set correctly in the .env file.") 110 | elif PROVIDER == 'ollama': 111 | print("Make sure Ollama is running locally on http://localhost:11434") 112 | print("Install Ollama from https://ollama.ai and run: ollama serve") 113 | print("Check the README.md for setup instructions.") 114 | 115 | if __name__ == "__main__": 116 | main() -------------------------------------------------------------------------------- /ai_agent_workshop_curriculum.md: -------------------------------------------------------------------------------- 1 | # AI Agent Workshop Curriculum: Building Multi-Agent Systems with CrewAI and LangGraph 2 | 3 | ## Workshop Overview 4 | **Target Audience:** College engineering students with basic programming knowledge 5 | **Duration:** 3 hours (3 sessions of 1 hour each) 6 | **Format:** Hands-on workshop with theory sessions, live coding demonstrations, and individual/group projects 7 | **Prerequisites:** 8 | - Basic Python programming (variables, functions, classes) 9 | - Familiarity with command line/terminal 10 | - Basic understanding of AI/ML concepts (optional but helpful) 11 | - Laptop with internet connection 12 | 13 | **Learning Objectives:** 14 | By the end of this workshop, students will be able to: 15 | - Understand the concepts of AI agents and multi-agent systems 16 | - Implement basic autonomous agents using CrewAI framework 17 | - Build simple multi-agent workflows with CrewAI 18 | - Get an introduction to stateful workflows with LangGraph 19 | - Design basic collaborative agent systems for real-world problems 20 | 21 | ## Required Software Setup 22 | **Before the Workshop:** 23 | 1. Install Python 3.8+ 24 | 2. Install required packages using UV: 25 | ```bash 26 | uv pip install crewai langchain langgraph openai python-dotenv 27 | ``` 28 | 3. Set up OpenAI API key (for LLM access) 29 | 4. Install VS Code or preferred IDE 30 | 5. Git for version control 31 | 32 | ## Workshop Schedule 33 | 34 | ### Session 1: Introduction to AI Agents and CrewAI Basics (1 hour) 35 | **Objectives:** Understand agent fundamentals and get started with CrewAI 36 | 37 | **Topics Covered:** 38 | - What are AI agents? Definition and characteristics 39 | - Single vs Multi-agent systems 40 | - Overview of CrewAI framework 41 | - Brief introduction to LangGraph 42 | 43 | **Hands-on Activities:** 44 | 1. **Environment Setup (20 mins)** 45 | - Install required packages 46 | - Configure API keys 47 | - Test basic setup 48 | 49 | 2. 
**CrewAI Basics (40 mins)** 50 | - Create simple agents with roles 51 | - Define basic tasks 52 | - Run a simple crew workflow 53 | 54 | ### Session 2: Hands-on CrewAI Project (1 hour) 55 | **Objectives:** Build a functional multi-agent system with CrewAI 56 | 57 | **Topics Covered:** 58 | - Agent roles and responsibilities 59 | - Task delegation and collaboration 60 | - Tool integration basics 61 | 62 | **Hands-on Activities:** 63 | - **Collaborative Project: Content Creation Crew (1 hour)** 64 | - Build a 3-agent system: Researcher, Writer, Editor 65 | - Assign tasks for topic research and content generation 66 | - Test the workflow and iterate 67 | 68 | ### Session 3: LangGraph Introduction and Integration (1 hour) 69 | **Objectives:** Learn basics of LangGraph and combine with CrewAI concepts 70 | 71 | **Topics Covered:** 72 | - LangGraph concepts: nodes, edges, state 73 | - Basic stateful workflows 74 | - Integrating CrewAI with LangGraph ideas 75 | 76 | **Hands-on Activities:** 77 | 1. **LangGraph Fundamentals (30 mins)** 78 | - Build a simple graph-based workflow 79 | - Implement basic state management 80 | 81 | 2. **Integration Project (30 mins)** 82 | - Create a hybrid simple agent system 83 | - Discuss real-world applications and next steps 84 | 85 | ## Detailed Module Breakdown 86 | 87 | ### Module 1: Agent Fundamentals and Setup 88 | **Duration:** 20 minutes 89 | **Content:** 90 | - Agent = AI + Tools + Memory + Reasoning 91 | - Single vs Multi-agent systems 92 | - Overview of CrewAI and LangGraph 93 | 94 | **Activity:** Environment setup and basic agent creation 95 | 96 | ### Module 2: CrewAI Hands-on 97 | **Duration:** 1 hour 98 | **Content:** 99 | - Agent creation with roles 100 | - Task definition and delegation 101 | - Basic collaboration patterns 102 | 103 | **Activity:** Build a content creation crew (Researcher, Writer, Editor) 104 | 105 | ### Module 3: LangGraph Introduction 106 | **Duration:** 1 hour 107 | **Content:** 108 | - Graph concepts for agents 109 | - Basic stateful workflows 110 | - Integration possibilities 111 | 112 | **Activity:** Create a simple stateful agent workflow 113 | 114 | ## Assessment and Evaluation 115 | - **Formative:** Code reviews during hands-on sessions 116 | - **Summative:** Final project presentation (optional) 117 | - **Peer Learning:** Code sharing and debugging sessions 118 | 119 | ## Resources and Materials 120 | - Workshop GitHub repository with starter code 121 | - Official documentation links: 122 | - CrewAI: https://www.crewai.com/ 123 | - LangGraph: https://langchain-ai.github.io/langgraph/ 124 | - Additional reading: "Human Compatible" by Stuart Russell 125 | 126 | ## Instructor Preparation Notes 127 | - Prepare demo environments for each session 128 | - Have backup solutions for API rate limits 129 | - Prepare troubleshooting guides for common issues 130 | - Arrange for adequate computing resources (cloud credits if needed) 131 | 132 | ## Expected Challenges and Solutions 133 | - API key management: Provide sandbox environments 134 | - Complex debugging: Include error handling examples 135 | - Time management: Have modular exercises that can be shortened 136 | - Varying skill levels: Offer advanced extensions for experienced students 137 | 138 | This curriculum provides a comprehensive, hands-on introduction to AI agent development suitable for engineering students, balancing theoretical concepts with practical implementation. 
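
## Appendix: Minimal LangGraph Sketch

For instructors who want a concrete reference for Module 3, the sketch below shows the node/edge/state ideas in code. It is a minimal illustration rather than the workshop solution: the state fields, node name, and stub update are placeholder choices, and it assumes the `langgraph` package installed during setup.

```python
from typing_extensions import TypedDict
from langgraph.graph import StateGraph, END

# Shared state that flows through the graph
class State(TypedDict):
    topic: str
    summary: str

def research(state: State) -> dict:
    # A real node would call an LLM here; this stub just returns a state update
    return {"summary": f"Notes about {state['topic']}"}

graph = StateGraph(State)
graph.add_node("research", research)  # node
graph.set_entry_point("research")     # where execution starts
graph.add_edge("research", END)       # edge to the terminal node

app = graph.compile()
print(app.invoke({"topic": "AI agents", "summary": ""}))
```

Running it prints the final state dict, which is the pattern the Session 3 scripts build on.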
139 | -------------------------------------------------------------------------------- /utils/rate_limiter.py: -------------------------------------------------------------------------------- 1 | """ 2 | Rate limiting and retry utilities for API calls. 3 | Implements exponential backoff and intelligent rate limit handling. 4 | """ 5 | 6 | import time 7 | import random 8 | from typing import Any, Callable, Optional 9 | import logging 10 | 11 | logger = logging.getLogger(__name__) 12 | 13 | class RateLimiter: 14 | """Intelligent rate limiter with exponential backoff and jitter.""" 15 | 16 | def __init__(self, max_retries: int = 3, base_delay: float = 1.0, max_delay: float = 60.0): 17 | self.max_retries = max_retries 18 | self.base_delay = base_delay 19 | self.max_delay = max_delay 20 | self.last_request_time = 0 21 | self.request_count = 0 22 | 23 | def _calculate_delay(self, attempt: int, error_message: str = "") -> float: 24 | """Calculate delay with exponential backoff and jitter.""" 25 | # Extract retry-after from error message if available 26 | retry_after = self._extract_retry_after(error_message) 27 | 28 | if retry_after: 29 | delay = retry_after 30 | else: 31 | # Exponential backoff: base_delay * (2 ^ attempt) 32 | delay = self.base_delay * (2 ** attempt) 33 | 34 | # Add jitter (±25%) to prevent thundering herd 35 | jitter = delay * 0.25 * (random.random() * 2 - 1) 36 | delay += jitter 37 | 38 | # Cap at max_delay 39 | delay = min(delay, self.max_delay) 40 | 41 | return max(delay, 0.1) # Minimum 100ms delay 42 | 43 | def _extract_retry_after(self, error_message: str) -> Optional[float]: 44 | """Extract retry-after time from error message.""" 45 | import re 46 | 47 | # Look for X-RateLimit-Reset header pattern 48 | reset_match = re.search(r'X-RateLimit-Reset["\']:\s*["\']([^"\']+)["\']', error_message) 49 | if reset_match: 50 | try: 51 | reset_timestamp = int(reset_match.group(1)) 52 | current_time = int(time.time() * 1000) 53 | if reset_timestamp > current_time: 54 | return (reset_timestamp - current_time) / 1000.0 55 | except (ValueError, IndexError): 56 | pass 57 | 58 | return None 59 | 60 | def _is_rate_limit_error(self, error: Exception) -> bool: 61 | """Check if error is a rate limit error.""" 62 | error_str = str(error).lower() 63 | return any(keyword in error_str for keyword in [ 64 | 'rate limit', '429', 'too many requests', 'quota exceeded' 65 | ]) 66 | 67 | def call_with_retry(self, func: Callable, *args, **kwargs) -> Any: 68 | """ 69 | Call function with intelligent retry logic. 70 | 71 | Args: 72 | func: Function to call 73 | *args: Positional arguments for func 74 | **kwargs: Keyword arguments for func 75 | 76 | Returns: 77 | Result of successful function call 78 | 79 | Raises: 80 | Last exception encountered after all retries exhausted 81 | """ 82 | last_exception = None 83 | 84 | for attempt in range(self.max_retries + 1): 85 | try: 86 | # Rate limiting: ensure minimum time between requests 87 | current_time = time.time() 88 | time_since_last = current_time - self.last_request_time 89 | min_interval = 0.2 # 200ms minimum between requests 90 | 91 | if time_since_last < min_interval: 92 | time.sleep(min_interval - time_since_last) 93 | 94 | self.last_request_time = time.time() 95 | self.request_count += 1 96 | 97 | # Make the API call 98 | result = func(*args, **kwargs) 99 | 100 | # Success! 
Reset rate limiting state 101 | if attempt > 0: 102 | logger.info(f"Request succeeded after {attempt} retries") 103 | 104 | return result 105 | 106 | except Exception as e: 107 | last_exception = e 108 | 109 | if not self._is_rate_limit_error(e): 110 | # Not a rate limit error, don't retry 111 | logger.error(f"Non-rate-limit error: {e}") 112 | raise e 113 | 114 | if attempt == self.max_retries: 115 | # All retries exhausted 116 | logger.error(f"All {self.max_retries} retries exhausted. Last error: {e}") 117 | raise e 118 | 119 | # Calculate delay and wait 120 | delay = self._calculate_delay(attempt, str(e)) 121 | logger.warning(f"Rate limit hit (attempt {attempt + 1}/{self.max_retries + 1}), " 122 | f"retrying in {delay:.2f} seconds: {e}") 123 | 124 | time.sleep(delay) 125 | 126 | # This should never be reached, but just in case 127 | raise last_exception 128 | 129 | def create_rate_limited_llm(config: dict) -> Any: 130 | """ 131 | Create a rate-limited LLM instance. 132 | 133 | This is a factory function that wraps LLM creation with rate limiting. 134 | """ 135 | from langchain_openai import ChatOpenAI 136 | 137 | rate_limiter = RateLimiter( 138 | max_retries=config.get('max_retries', 3), 139 | base_delay=config.get('retry_delay', 1.0) 140 | ) 141 | 142 | def create_llm(): 143 | return ChatOpenAI( 144 | temperature=config['temperature'], 145 | model=config['model'], 146 | api_key=config['api_key'], 147 | base_url=config['api_base'] 148 | ) 149 | 150 | # For now, return the LLM directly. In production, you'd wrap the call method 151 | # with rate_limiter.call_with_retry 152 | return create_llm() 153 | 154 | # Global rate limiter instance 155 | _default_rate_limiter = RateLimiter() 156 | 157 | def get_rate_limiter() -> RateLimiter: 158 | """Get the global rate limiter instance.""" 159 | return _default_rate_limiter 160 | 161 | def set_global_rate_limits(max_retries: int = 3, base_delay: float = 1.0, max_delay: float = 60.0): 162 | """Configure global rate limiting parameters.""" 163 | global _default_rate_limiter 164 | _default_rate_limiter = RateLimiter(max_retries, base_delay, max_delay) -------------------------------------------------------------------------------- /utils/helpers.py: -------------------------------------------------------------------------------- 1 | """ 2 | Utility functions and helpers for the AI Agent Workshop. 
3 | """ 4 | 5 | import os 6 | from typing import Dict, Any, Optional 7 | from dotenv import load_dotenv 8 | 9 | def load_environment_variables() -> Dict[str, str]: 10 | """Load and validate environment variables.""" 11 | load_dotenv() 12 | 13 | required_vars = ['GROQ_API_KEY'] 14 | optional_vars = ['GROQ_MODEL', 'WORKSHOP_DEBUG', 'MAX_TOKENS', 'TEMPERATURE'] 15 | 16 | env_vars = {} 17 | 18 | # Check required variables 19 | for var in required_vars: 20 | value = os.getenv(var) 21 | if not value: 22 | raise ValueError(f"Required environment variable {var} is not set") 23 | env_vars[var] = value 24 | 25 | # Load optional variables 26 | for var in optional_vars: 27 | value = os.getenv(var) 28 | if value: 29 | env_vars[var] = value 30 | 31 | return env_vars 32 | 33 | def validate_api_key(api_key: str) -> bool: 34 | """Validate Groq API key format.""" 35 | if not api_key or not isinstance(api_key, str): 36 | return False 37 | 38 | # Basic validation - Groq keys start with 'gsk_' 39 | return api_key.startswith('gsk_') and len(api_key) > 20 40 | 41 | def format_agent_response(response: Any) -> str: 42 | """Format agent response for consistent output.""" 43 | if isinstance(response, str): 44 | return response.strip() 45 | elif hasattr(response, '__str__'): 46 | return str(response).strip() 47 | else: 48 | return f"Response: {response}" 49 | 50 | def create_progress_indicator(current: int, total: int, prefix: str = "Progress") -> str: 51 | """Create a simple progress indicator string.""" 52 | percentage = int((current / total) * 100) if total > 0 else 0 53 | bar_length = 20 54 | filled_length = int(bar_length * current / total) if total > 0 else 0 55 | 56 | bar = '█' * filled_length + '░' * (bar_length - filled_length) 57 | 58 | return f"{prefix}: [{bar}] {percentage}% ({current}/{total})" 59 | 60 | def safe_get_nested_value(data: Dict, keys: list, default: Any = None) -> Any: 61 | """Safely get nested dictionary value.""" 62 | try: 63 | for key in keys: 64 | data = data[key] 65 | return data 66 | except (KeyError, TypeError, IndexError): 67 | return default 68 | 69 | def truncate_text(text: str, max_length: int = 500, suffix: str = "...") -> str: 70 | """Truncate text to maximum length with suffix.""" 71 | if len(text) <= max_length: 72 | return text 73 | return text[:max_length - len(suffix)] + suffix 74 | 75 | def calculate_token_estimate(text: str) -> int: 76 | """Rough estimate of token count for text (approximation).""" 77 | # Rough approximation: 1 token ≈ 4 characters for English text 78 | return len(text) // 4 79 | 80 | def format_workflow_summary(state: Dict[str, Any]) -> str: 81 | """Format a workflow state summary for display.""" 82 | summary_lines = [] 83 | 84 | # Basic info 85 | if 'status' in state: 86 | summary_lines.append(f"Status: {state['status']}") 87 | 88 | if 'timestamp' in state: 89 | summary_lines.append(f"Timestamp: {state['timestamp']}") 90 | 91 | # Task information 92 | crew_tasks = safe_get_nested_value(state, ['crew_tasks'], []) 93 | if crew_tasks: 94 | summary_lines.append(f"Tasks Completed: {len(crew_tasks)}") 95 | 96 | crew_results = safe_get_nested_value(state, ['crew_results'], []) 97 | if crew_results: 98 | summary_lines.append(f"Results Generated: {len(crew_results)}") 99 | 100 | # Content summaries 101 | for key in ['user_request', 'analysis_result', 'final_synthesis']: 102 | value = safe_get_nested_value(state, [key]) 103 | if value: 104 | truncated = truncate_text(str(value), 100) 105 | summary_lines.append(f"{key.replace('_', ' ').title()}: {truncated}") 106 
| 107 | return "\n".join(summary_lines) 108 | 109 | def create_error_message(error: Exception, context: str = "") -> str: 110 | """Create a formatted error message.""" 111 | error_type = type(error).__name__ 112 | error_msg = str(error) 113 | 114 | message = f"Error in {context}: {error_type}" 115 | if error_msg: 116 | message += f" - {error_msg}" 117 | 118 | return message 119 | 120 | def validate_workflow_state(state: Dict[str, Any], required_keys: list) -> tuple[bool, list]: 121 | """Validate that workflow state contains required keys.""" 122 | missing_keys = [] 123 | 124 | for key in required_keys: 125 | if key not in state: 126 | missing_keys.append(key) 127 | 128 | is_valid = len(missing_keys) == 0 129 | return is_valid, missing_keys 130 | 131 | def merge_agent_contexts(*contexts) -> Dict[str, Any]: 132 | """Merge multiple agent contexts into one.""" 133 | merged = {} 134 | 135 | for context in contexts: 136 | if isinstance(context, dict): 137 | merged.update(context) 138 | 139 | return merged 140 | 141 | def log_workflow_step(step_name: str, data: Optional[Dict] = None): 142 | """Log a workflow step with optional data.""" 143 | import datetime 144 | 145 | timestamp = datetime.datetime.now().isoformat() 146 | print(f"[{timestamp}] {step_name}") 147 | 148 | if data: 149 | for key, value in data.items(): 150 | if isinstance(value, str) and len(value) > 100: 151 | value = value[:100] + "..." 152 | print(f" {key}: {value}") 153 | 154 | def get_available_models() -> list: 155 | """Get list of available Groq models for agents.""" 156 | return [ 157 | "gemma2-9b-it", # Free 158 | "llama3-8b-8192", # Free 159 | "llama3-70b-8192", # Free 160 | "mixtral-8x7b-32768", # Free 161 | "llama-3.1-8b-instant", 162 | "llama-3.1-70b-versatile", 163 | "llama-3.1-405b-inference" 164 | ] 165 | 166 | def estimate_cost(tokens_used: int, model: str = "gemma2-9b-it") -> float: 167 | """Estimate API cost based on tokens used via Groq (rough approximation).""" 168 | # Approximate costs per 1K tokens via Groq (as of 2024) 169 | # Note: Many models have free tiers or very low costs 170 | costs = { 171 | "gemma2-9b-it": 0.0, # Free 172 | "llama3-8b-8192": 0.0, # Free 173 | "llama3-70b-8192": 0.0, # Free 174 | "mixtral-8x7b-32768": 0.0, # Free 175 | "llama-3.1-8b-instant": 0.0, # Free 176 | "llama-3.1-70b-versatile": 0.0, # Free 177 | "llama-3.1-405b-inference": 0.0, # Free 178 | } 179 | 180 | cost_per_1k = costs.get(model, 0.01) # Default fallback 181 | return (tokens_used / 1000) * cost_per_1k 182 | -------------------------------------------------------------------------------- /session2/agent_roles.py: -------------------------------------------------------------------------------- 1 | """ 2 | Session 2: Agent Roles and Responsibilities 3 | This file shows how different AI agents can have different jobs and work together. 4 | Just like in a real company, each agent has a special role and expertise. 
5 | """ 6 | 7 | from crewai import Agent, Task, Crew, LLM 8 | from config import API_KEY, MODEL, API_BASE, TEMPERATURE, MAX_TOKENS, MAX_RETRIES, RETRY_DELAY, PROVIDER 9 | 10 | # Step 3: Set up environment for LiteLLM 11 | import os 12 | if PROVIDER == 'sambanova': 13 | os.environ["SAMBANOVA_API_KEY"] = API_KEY 14 | elif PROVIDER == 'ollama': 15 | # Ollama doesn't need environment variables 16 | pass 17 | 18 | def get_llm(): 19 | """Get the appropriate LLM configuration based on provider.""" 20 | if PROVIDER == 'ollama': 21 | return LLM( 22 | model=f"ollama/{MODEL}", 23 | base_url="http://localhost:11434" 24 | ) 25 | elif PROVIDER == 'sambanova': 26 | return LLM( 27 | model=f"sambanova/{MODEL}", 28 | api_key=API_KEY, 29 | base_url=API_BASE 30 | ) 31 | else: 32 | # Default fallback 33 | return f"{PROVIDER}/{MODEL}" 34 | 35 | def demonstrate_agent_roles(): 36 | """Example: Different AI agents with different jobs working together.""" 37 | print("=== Example: Team of AI Agents with Different Roles ===") 38 | print("This shows how AI agents can be like a team where each has a special job.") 39 | print() 40 | 41 | # Use configured LLM 42 | llm = get_llm() 43 | 44 | print("Creating our AI team members...") 45 | 46 | # Agent 1: Data Analyst (like a number cruncher) 47 | analyst = Agent( 48 | role="Data Analyst", # Job title 49 | goal="Look at data and find useful patterns", # What they do 50 | backstory="I love working with numbers and finding hidden insights in data.", # Personality 51 | llm=llm, 52 | verbose=True # Show thinking 53 | ) 54 | 55 | # Agent 2: Business Strategist (like a business planner) 56 | strategist = Agent( 57 | role="Business Strategist", 58 | goal="Create plans based on data insights", 59 | backstory="I am good at making business plans and giving advice for growth.", 60 | llm=llm, 61 | verbose=True 62 | ) 63 | 64 | print("Giving jobs to our AI team...") 65 | 66 | # Job 1: Analyze some data (for the analyst) 67 | analysis_task = Task( 68 | description="Look at this simple sales data: Q1 sales were $10,000, Q2 were $12,000, Q3 were $15,000. Find trends.", 69 | expected_output="Tell me if sales are going up or down, and by how much.", 70 | agent=analyst 71 | ) 72 | 73 | # Job 2: Make a business plan (for the strategist - they can see the analyst's work) 74 | strategy_task = Task( 75 | description="Based on the sales analysis, suggest 2 ways to increase sales next quarter.", 76 | expected_output="Two simple suggestions for growing the business.", 77 | agent=strategist, 78 | context=[analysis_task] # Strategist can read analyst's results 79 | ) 80 | 81 | print("Starting the team to work...") 82 | # Create the team (crew) with both agents and their jobs 83 | business_crew = Crew( 84 | agents=[analyst, strategist], # Team members 85 | tasks=[analysis_task, strategy_task], # Jobs to do (in order) 86 | verbose=True, # Show progress 87 | memory=True, # Enable memory 88 | cache=True, # Enable caching 89 | max_rpm=1 # Rate limiting 90 | ) 91 | 92 | # Start the work! 
93 | result = business_crew.kickoff() 94 | 95 | print("\nFinal Team Result:") 96 | print(result) 97 | print() 98 | 99 | def show_simple_roles(): 100 | """Simple example showing just two different roles.""" 101 | print("=== Simple Example: Two Different Jobs ===") 102 | print("Let's see how two agents with different skills work together.") 103 | print() 104 | 105 | # Use configured LLM 106 | llm = get_llm() 107 | 108 | # Create two simple agents 109 | chef = Agent( 110 | role="Chef", 111 | goal="Create and describe recipes", 112 | backstory="I am a creative chef who loves making delicious food.", 113 | llm=llm, 114 | verbose=True 115 | ) 116 | 117 | nutritionist = Agent( 118 | role="Nutritionist", 119 | goal="Check if food is healthy", 120 | backstory="I am a health expert who makes sure food is good for you.", 121 | llm=llm, 122 | verbose=True 123 | ) 124 | 125 | # Tasks 126 | recipe_task = Task( 127 | description="Create a simple recipe for chocolate chip cookies.", 128 | expected_output="List ingredients and basic steps.", 129 | agent=chef 130 | ) 131 | 132 | health_task = Task( 133 | description="Check if this cookie recipe is healthy and suggest improvements.", 134 | expected_output="Say if it's healthy and give one healthy tip.", 135 | agent=nutritionist, 136 | context=[recipe_task] # Can see the recipe 137 | ) 138 | 139 | # Create and run crew 140 | food_crew = Crew( 141 | agents=[chef, nutritionist], 142 | tasks=[recipe_task, health_task], 143 | verbose=True, 144 | memory=True, # Enable memory 145 | cache=True, # Enable caching 146 | max_rpm=1 # Rate limiting 147 | ) 148 | 149 | result = food_crew.kickoff() 150 | print(f"Food Team Result:\n{result}") 151 | print() 152 | 153 | def main(): 154 | """Run the agent roles examples.""" 155 | print("AI Agent Workshop - Session 2: Agent Roles and Team Work") 156 | print("=" * 70) 157 | print("Welcome! Today we'll learn about different AI agent roles.") 158 | print("Just like people in a company, AI agents can have different jobs.") 159 | print("Let's see how they work together as a team!") 160 | print() 161 | 162 | try: 163 | # Run the business example 164 | demonstrate_agent_roles() 165 | 166 | # Run the simple food example 167 | show_simple_roles() 168 | 169 | print("Great work! You learned about AI agent roles!") 170 | print("Each agent has special skills, just like people in a team.") 171 | 172 | except Exception as e: 173 | print(f"Oops! Something went wrong: {e}") 174 | if PROVIDER == 'sambanova': 175 | print("Make sure your SAMBA_API_KEY is set correctly in the .env file.") 176 | elif PROVIDER == 'ollama': 177 | print("Make sure Ollama is running locally on http://localhost:11434") 178 | print("Install Ollama from https://ollama.ai and run: ollama serve") 179 | print("Check the README.md for help.") 180 | 181 | if __name__ == "__main__": 182 | main() 183 | -------------------------------------------------------------------------------- /GIT_SETUP.md: -------------------------------------------------------------------------------- 1 | # Git Setup Guide for AI Agent Workshop 2 | 3 | This guide will help you install and configure Git on Windows 11, macOS, and Linux systems. 
4 | 5 | ## Table of Contents 6 | - [Windows 11 Setup](#windows-11-setup) 7 | - [macOS Setup](#macos-setup) 8 | - [Linux Setup](#linux-setup) 9 | - [Git Configuration](#git-configuration) 10 | - [Cloning the Repository](#cloning-the-repository) 11 | - [Troubleshooting](#troubleshooting) 12 | 13 | --- 14 | 15 | ## Windows 11 Setup 16 | 17 | ### Option 1: Git for Windows (Recommended) 18 | 19 | 1. **Download Git for Windows** 20 | - Visit: https://gitforwindows.org/ 21 | - Click "Download" to get the latest version 22 | 23 | 2. **Run the Installer** 24 | - Run the downloaded `.exe` file 25 | - Click "Next" through the default options 26 | - Choose "Use Git from Git Bash only" when asked 27 | - Choose "Use Windows' default console window" for the terminal emulator 28 | - Click "Install" 29 | 30 | 3. **Verify Installation** 31 | ```bash 32 | git --version 33 | ``` 34 | 35 | ### Option 2: Windows Terminal + Git 36 | 37 | 1. **Install Windows Terminal** (if not already installed) 38 | - Open Microsoft Store 39 | - Search for "Windows Terminal" 40 | - Click "Get" to install 41 | 42 | 2. **Install Git via Winget** 43 | ```powershell 44 | winget install --id Git.Git -e --source winget 45 | ``` 46 | 47 | 3. **Verify Installation** 48 | ```bash 49 | git --version 50 | ``` 51 | 52 | --- 53 | 54 | ## macOS Setup 55 | 56 | ### Option 1: Xcode Command Line Tools (Recommended) 57 | 58 | 1. **Install Xcode Command Line Tools** 59 | ```bash 60 | xcode-select --install 61 | ``` 62 | - Click "Install" when prompted 63 | - Wait for installation to complete 64 | 65 | 2. **Verify Installation** 66 | ```bash 67 | git --version 68 | ``` 69 | 70 | ### Option 2: Homebrew Installation 71 | 72 | 1. **Install Homebrew** (if not already installed) 73 | ```bash 74 | /bin/bash -c "$(curl -fsSL https://raw.githubusercontent.com/Homebrew/install/HEAD/install.sh)" 75 | ``` 76 | 77 | 2. **Install Git** 78 | ```bash 79 | brew install git 80 | ``` 81 | 82 | 3. **Verify Installation** 83 | ```bash 84 | git --version 85 | ``` 86 | 87 | ### Option 3: GitHub Desktop 88 | 89 | 1. **Download GitHub Desktop** 90 | - Visit: https://desktop.github.com/ 91 | - Download and install the macOS version 92 | 93 | 2. 
**Git will be installed automatically with GitHub Desktop** 94 | 95 | --- 96 | 97 | ## Linux Setup 98 | 99 | ### Ubuntu/Debian 100 | 101 | ```bash 102 | # Update package list 103 | sudo apt update 104 | 105 | # Install Git 106 | sudo apt install git 107 | 108 | # Verify installation 109 | git --version 110 | ``` 111 | 112 | ### CentOS/RHEL/Fedora 113 | 114 | ```bash 115 | # For CentOS/RHEL 116 | sudo yum install git 117 | 118 | # For Fedora 119 | sudo dnf install git 120 | 121 | # Verify installation 122 | git --version 123 | ``` 124 | 125 | ### Arch Linux 126 | 127 | ```bash 128 | # Install Git 129 | sudo pacman -S git 130 | 131 | # Verify installation 132 | git --version 133 | ``` 134 | 135 | ### Universal Linux Installation 136 | 137 | If your distribution isn't listed above, you can compile Git from source: 138 | 139 | ```bash 140 | # Install dependencies 141 | sudo apt update && sudo apt install -y dh-autoreconf libcurl4-gnutls-dev libexpat1-dev gettext libz-dev libssl-dev 142 | 143 | # Download and extract Git source 144 | wget https://github.com/git/git/archive/refs/tags/v2.40.0.tar.gz 145 | tar -xzf v2.40.0.tar.gz 146 | cd git-2.40.0 147 | 148 | # Compile and install 149 | make configure 150 | ./configure --prefix=/usr 151 | make all 152 | sudo make install 153 | 154 | # Verify installation 155 | git --version 156 | ``` 157 | 158 | --- 159 | 160 | ## Git Configuration 161 | 162 | After installing Git, configure it with your information: 163 | 164 | ### Basic Configuration 165 | 166 | ```bash 167 | # Set your name 168 | git config --global user.name "Your Full Name" 169 | 170 | # Set your email 171 | git config --global user.email "your.email@example.com" 172 | 173 | # Set default branch name to main 174 | git config --global init.defaultBranch main 175 | 176 | # Enable colored output 177 | git config --global color.ui auto 178 | ``` 179 | 180 | ### Verify Configuration 181 | 182 | ```bash 183 | # Check your settings 184 | git config --global --list 185 | 186 | # Check Git version 187 | git --version 188 | ``` 189 | 190 | ### SSH Key Setup (Optional but Recommended) 191 | 192 | For GitHub authentication without passwords: 193 | 194 | 1. **Generate SSH Key** 195 | ```bash 196 | ssh-keygen -t ed25519 -C "your.email@example.com" 197 | ``` 198 | 199 | 2. **Add SSH Key to ssh-agent** 200 | ```bash 201 | # Start ssh-agent 202 | eval "$(ssh-agent -s)" 203 | 204 | # Add your key 205 | ssh-add ~/.ssh/id_ed25519 206 | ``` 207 | 208 | 3. **Copy Public Key** 209 | ```bash 210 | # Display public key 211 | cat ~/.ssh/id_ed25519.pub 212 | ``` 213 | 214 | 4. 
**Add to GitHub** 215 | - Go to GitHub.com → Settings → SSH and GPG keys 216 | - Click "New SSH key" 217 | - Paste your public key 218 | - Click "Add SSH key" 219 | 220 | --- 221 | 222 | ## Cloning the Repository 223 | 224 | Once Git is installed and configured: 225 | 226 | ```bash 227 | # Clone the AI Agent Workshop repository 228 | git clone https://github.com/your-username/ai-agent-workshop.git 229 | 230 | # Navigate to the project directory 231 | cd ai-agent-workshop 232 | 233 | # Verify the clone worked 234 | ls -la 235 | ``` 236 | 237 | ### If Using SSH (after SSH key setup) 238 | 239 | ```bash 240 | # Clone using SSH (no password needed) 241 | git clone git@github.com:your-username/ai-agent-workshop.git 242 | cd ai-agent-workshop 243 | ``` 244 | 245 | --- 246 | 247 | ## Troubleshooting 248 | 249 | ### "Command not found" Error 250 | 251 | **Windows:** 252 | - Make sure Git is in your PATH 253 | - Try restarting your terminal/command prompt 254 | - Check if you installed Git correctly 255 | 256 | **macOS/Linux:** 257 | - Check if Git is installed: `which git` 258 | - If not found, reinstall following the steps above 259 | 260 | ### Permission Denied Errors 261 | 262 | **When cloning:** 263 | ```bash 264 | # Try using HTTPS instead of SSH 265 | git clone https://github.com/username/repo.git 266 | 267 | # Or check your SSH key setup 268 | ssh -T git@github.com 269 | ``` 270 | 271 | ### Git Version Too Old 272 | 273 | **Update Git:** 274 | 275 | **Windows:** 276 | - Download latest Git for Windows installer 277 | - Run installer (it will update existing installation) 278 | 279 | **macOS:** 280 | ```bash 281 | brew upgrade git 282 | ``` 283 | 284 | **Ubuntu/Debian:** 285 | ```bash 286 | sudo apt update && sudo apt upgrade git 287 | ``` 288 | 289 | ### Network Issues 290 | 291 | If you're behind a corporate firewall: 292 | ```bash 293 | # Configure proxy (replace with your proxy settings) 294 | git config --global http.proxy http://proxy.company.com:8080 295 | git config --global https.proxy http://proxy.company.com:8080 296 | ``` 297 | 298 | ### Common Git Commands 299 | 300 | ```bash 301 | # Check status 302 | git status 303 | 304 | # Add files 305 | git add . 306 | 307 | # Commit changes 308 | git commit -m "Your message" 309 | 310 | # Push changes 311 | git push origin main 312 | 313 | # Pull latest changes 314 | git pull origin main 315 | ``` 316 | 317 | --- 318 | 319 | ## Next Steps 320 | 321 | Once Git is set up and you've cloned the repository: 322 | 323 | 1. **Follow the main README.md** for project setup 324 | 2. **Run the installation commands** as described 325 | 3. **Start with Session 1** to learn about AI agents 326 | 327 | ## Support 328 | 329 | If you encounter issues: 330 | 1. Check this guide again 331 | 2. Search online for your specific error message 332 | 3. Ask for help in the project discussions 333 | 334 | Happy coding! 🚀 -------------------------------------------------------------------------------- /session1/crewai_intro.py: -------------------------------------------------------------------------------- 1 | """ 2 | Session 1: CrewAI Introduction 3 | This file shows how to use CrewAI to create teams of AI agents that work together. 4 | CrewAI helps multiple AI agents collaborate on tasks, just like a real team! 
5 | """ 6 | 7 | import os 8 | from crewai import Agent, Task, Crew, LLM 9 | from config import API_KEY, MODEL, API_BASE, LLM_STRING, PROVIDER 10 | 11 | # Step 3: Set up environment for LiteLLM 12 | if PROVIDER == 'sambanova': 13 | os.environ["SAMBANOVA_API_KEY"] = API_KEY 14 | elif PROVIDER == 'ollama': 15 | # Ollama doesn't need environment variables 16 | pass 17 | 18 | def get_llm(): 19 | """Get the appropriate LLM configuration based on provider.""" 20 | if PROVIDER == 'ollama': 21 | return LLM( 22 | model=f"ollama/{MODEL}", 23 | base_url="http://localhost:11434" 24 | ) 25 | elif PROVIDER == 'sambanova': 26 | return LLM( 27 | model=f"sambanova/{MODEL}", 28 | api_key=API_KEY, 29 | base_url=API_BASE 30 | ) 31 | else: 32 | # Default fallback 33 | return LLM_STRING 34 | 35 | def simple_crew_example(): 36 | """Example 1: Single AI Agent (like having one team member)""" 37 | print("=== Example 1: Single Agent Crew ===") 38 | print("This shows how one AI agent can complete a task.") 39 | print() 40 | 41 | # Ask user what topic they want researched 42 | try: 43 | research_topic = input("What topic would you like the AI researcher to explain? (press Enter for default): ").strip() 44 | if not research_topic: 45 | research_topic = "AI agents" 46 | except EOFError: 47 | # Handle non-interactive environments 48 | research_topic = "AI agents" 49 | print("What topic would you like the AI researcher to explain? (press Enter for default): ") 50 | print("(Using default topic in non-interactive environment)") 51 | 52 | # Use configured LLM 53 | llm = get_llm() 54 | 55 | print("Creating an AI researcher agent...") 56 | # Create one agent (like hiring one employee) 57 | researcher = Agent( 58 | role="Researcher", # Job title 59 | goal="Find and explain information about topics", # What they should do 60 | backstory="I am a helpful researcher who loves learning and sharing knowledge.", # Their personality 61 | llm=llm, # Their brain 62 | verbose=True # Show their thinking process 63 | ) 64 | 65 | print(f"Creating a research task about: {research_topic}") 66 | # Give the agent a job to do 67 | research_task = Task( 68 | description=f"Explain what {research_topic} are/is in simple terms that anyone can understand.", # What to do 69 | expected_output="A simple explanation in 2-3 sentences.", # What the result should look like 70 | agent=researcher # Which agent does this task 71 | ) 72 | 73 | print("Starting the crew (team) to work...") 74 | # Create a "crew" (team) with our agent and task 75 | crew = Crew( 76 | agents=[researcher], # Team members 77 | tasks=[research_task], # Jobs to do 78 | verbose=True, # Show progress 79 | memory=True, # Enable memory 80 | cache=True, # Enable caching 81 | max_rpm=1 # Rate limiting (further reduced) 82 | ) 83 | 84 | # Start the work! 85 | result = crew.kickoff() 86 | 87 | print(f"\nFinal Result about {research_topic}:") 88 | print(result) 89 | print() 90 | 91 | def multi_agent_crew_example(): 92 | """Example 2: Multiple AI Agents Working Together (like a real team)""" 93 | print("=== Example 2: Multi-Agent Crew (Team Work) ===") 94 | print("This shows how multiple AI agents can work together on a bigger project.") 95 | print() 96 | 97 | # Ask user what topic they want the team to work on 98 | try: 99 | team_topic = input("What topic would you like the AI team to research and write about? 
(press Enter for default): ").strip() 100 | if not team_topic: 101 | team_topic = "AI agents" 102 | except EOFError: 103 | # Handle non-interactive environments 104 | team_topic = "AI agents" 105 | print("What topic would you like the AI team to research and write about? (press Enter for default): ") 106 | print("(Using default topic in non-interactive environment)") 107 | 108 | # Use configured LLM 109 | llm = get_llm() 110 | 111 | print("Creating two AI agents for our team...") 112 | # Create two agents (like hiring two employees) 113 | researcher = Agent( 114 | role="Researcher", 115 | goal="Find information about topics", 116 | backstory="I am a curious researcher who gathers facts and information.", 117 | llm=llm, 118 | verbose=True 119 | ) 120 | 121 | writer = Agent( 122 | role="Writer", 123 | goal="Write clear and interesting content", 124 | backstory="I am a creative writer who makes information easy to read.", 125 | llm=llm, 126 | verbose=True 127 | ) 128 | 129 | print(f"Creating tasks for the team about: {team_topic}") 130 | # Task 1: Research (first job) 131 | research_task = Task( 132 | description=f"Find 3 benefits and 2 challenges of using {team_topic}.", 133 | expected_output="A simple list of benefits and challenges.", 134 | agent=researcher 135 | ) 136 | 137 | # Task 2: Write article (second job - uses research results) 138 | writing_task = Task( 139 | description=f"Write a short paragraph about {team_topic} using the research information.", 140 | expected_output="One paragraph explaining the topic.", 141 | agent=writer, 142 | context=[research_task] # Writer can see what researcher found 143 | ) 144 | 145 | print("Starting the team to work together...") 146 | # Create crew with both agents and both tasks 147 | crew = Crew( 148 | agents=[researcher, writer], # Our team 149 | tasks=[research_task, writing_task], # Jobs in order 150 | verbose=True, 151 | memory=True, # Enable memory 152 | cache=True, # Enable caching 153 | max_rpm=1 # Rate limiting (further reduced) 154 | ) 155 | 156 | # Start the team work! 157 | result = crew.kickoff() 158 | 159 | print(f"\nFinal Team Result about {team_topic}:") 160 | print(result) 161 | print() 162 | 163 | def main(): 164 | """Run the CrewAI examples.""" 165 | print("AI Agent Workshop - Session 1: Learning About CrewAI") 166 | print("=" * 65) 167 | print("Welcome! CrewAI lets us create teams of AI agents.") 168 | print("Each agent has a special role, just like people in a company.") 169 | print("Let's see how they work alone and together!") 170 | print() 171 | 172 | try: 173 | # Run first example (single agent) 174 | simple_crew_example() 175 | 176 | # Run second example (team work) 177 | multi_agent_crew_example() 178 | 179 | print("Excellent! You learned about CrewAI!") 180 | print("Now you know how AI agents can work as a team.") 181 | 182 | except Exception as e: 183 | print(f"Oops! 
Something went wrong: {e}") 184 | if PROVIDER == 'sambanova': 185 | print("Make sure your SAMBA_API_KEY is set correctly in the .env file.") 186 | elif PROVIDER == 'ollama': 187 | print("Make sure Ollama is running locally on http://localhost:11434") 188 | print("Install Ollama from https://ollama.ai and run: ollama serve") 189 | print("Check the README.md for setup help.") 190 | 191 | if __name__ == "__main__": 192 | main() 193 | -------------------------------------------------------------------------------- /session2/content_crew.py: -------------------------------------------------------------------------------- 1 | """ 2 | Session 2: Content Creation Crew 3 | This file demonstrates a complete multi-agent system for content creation using CrewAI. 4 | """ 5 | 6 | from crewai import Agent, Task, Crew, LLM 7 | from config import API_KEY, MODEL, API_BASE, TEMPERATURE, MAX_TOKENS, MAX_RETRIES, RETRY_DELAY, PROVIDER 8 | 9 | # Step 3: Set up environment for LiteLLM 10 | import os 11 | if PROVIDER == 'sambanova': 12 | os.environ["SAMBANOVA_API_KEY"] = API_KEY 13 | elif PROVIDER == 'ollama': 14 | # Ollama doesn't need environment variables 15 | pass 16 | 17 | def get_llm(): 18 | """Get the appropriate LLM configuration based on provider.""" 19 | if PROVIDER == 'ollama': 20 | return LLM( 21 | model=f"ollama/{MODEL}", 22 | base_url="http://localhost:11434" 23 | ) 24 | elif PROVIDER == 'sambanova': 25 | return LLM( 26 | model=f"sambanova/{MODEL}", 27 | api_key=API_KEY, 28 | base_url=API_BASE 29 | ) 30 | else: 31 | # Default fallback 32 | return f"{PROVIDER}/{MODEL}" 33 | 34 | def create_content_creation_crew(topic: str = "AI Agents"): 35 | """Create a content creation crew with researcher, writer, and editor agents.""" 36 | 37 | # Use configured LLM 38 | llm = get_llm() 39 | 40 | # Create agents with specific roles 41 | researcher = Agent( 42 | role="Senior Research Analyst", 43 | goal="Conduct thorough research and gather comprehensive information on topics", 44 | backstory="""You are a senior research analyst with years of experience in gathering 45 | and synthesizing information from various sources. You excel at finding accurate, 46 | up-to-date information and organizing it in a structured way.""", 47 | llm=llm, 48 | verbose=True, 49 | allow_delegation=False 50 | ) 51 | 52 | writer = Agent( 53 | role="Content Writer", 54 | goal="Create engaging, well-structured content based on research findings", 55 | backstory="""You are a skilled content writer who transforms complex research into 56 | compelling, easy-to-understand narratives. You focus on clarity, engagement, 57 | and logical flow in your writing.""", 58 | llm=llm, 59 | verbose=True, 60 | allow_delegation=False 61 | ) 62 | 63 | editor = Agent( 64 | role="Content Editor", 65 | goal="Review, refine, and polish content for accuracy, clarity, and quality", 66 | backstory="""You are an experienced editor with a keen eye for detail. You ensure 67 | content is accurate, well-structured, grammatically correct, and engaging for 68 | the target audience.""", 69 | llm=llm, 70 | verbose=True, 71 | allow_delegation=False 72 | ) 73 | 74 | return researcher, writer, editor 75 | 76 | def create_content_tasks(topic: str, researcher: Agent, writer: Agent, editor: Agent): 77 | """Create tasks for the content creation workflow.""" 78 | 79 | # Research task 80 | research_task = Task( 81 | description=f"""Research the topic '{topic}' comprehensively. 
Include: 82 | - Key concepts and definitions 83 | - Current trends and developments 84 | - Real-world applications and examples 85 | - Benefits and challenges 86 | - Future outlook 87 | 88 | Provide detailed, accurate information with sources where possible.""", 89 | expected_output="""A comprehensive research report with: 90 | - Executive summary 91 | - Key findings organized by category 92 | - Supporting evidence and examples 93 | - References to reliable sources""", 94 | agent=researcher 95 | ) 96 | 97 | # Writing task 98 | writing_task = Task( 99 | description=f"""Write a 500-word article about '{topic}' based on the research provided. 100 | The article should: 101 | - Have a compelling introduction 102 | - Cover key concepts clearly 103 | - Include real-world examples 104 | - End with future implications 105 | - Be written in an engaging, accessible style""", 106 | expected_output="""A complete article with: 107 | - Title 108 | - Introduction paragraph 109 | - 3-4 body paragraphs 110 | - Conclusion 111 | - Word count: approximately 500 words""", 112 | agent=writer, 113 | context=[research_task] # Access to research results 114 | ) 115 | 116 | # Editing task 117 | editing_task = Task( 118 | description=f"""Review and edit the article about '{topic}'. Focus on: 119 | - Factual accuracy based on research 120 | - Clarity and readability 121 | - Grammar and style 122 | - Logical flow and structure 123 | - Engagement and appeal 124 | 125 | Provide the final polished version.""", 126 | expected_output="""A polished article with: 127 | - Final title 128 | - Edited content 129 | - Brief notes on changes made 130 | - Quality assurance checklist confirmation""", 131 | agent=editor, 132 | context=[research_task, writing_task] # Access to all previous work 133 | ) 134 | 135 | return research_task, writing_task, editing_task 136 | 137 | def run_content_creation_workflow(topic: str = "AI Agents and Multi-Agent Systems"): 138 | """Run the complete content creation workflow.""" 139 | print(f"=== Content Creation Crew for: {topic} ===") 140 | print() 141 | 142 | # Create agents 143 | researcher, writer, editor = create_content_creation_crew(topic) 144 | 145 | # Create tasks 146 | research_task, writing_task, editing_task = create_content_tasks( 147 | topic, researcher, writer, editor 148 | ) 149 | 150 | # Create and configure the crew 151 | content_crew = Crew( 152 | agents=[researcher, writer, editor], 153 | tasks=[research_task, writing_task, editing_task], 154 | verbose=True, 155 | process="sequential", # Tasks run in sequence 156 | memory=True, # Enable memory 157 | cache=True, # Enable caching 158 | max_rpm=1 # Rate limiting (further reduced) 159 | ) 160 | 161 | # Execute the workflow 162 | print("🚀 Starting content creation workflow...") 163 | print() 164 | 165 | result = content_crew.kickoff() 166 | 167 | print("\n" + "="*80) 168 | print("🎉 CONTENT CREATION COMPLETE!") 169 | print("="*80) 170 | print(f"\nFinal Result:\n{result}") 171 | 172 | return result 173 | 174 | def main(): 175 | """Run the content creation crew example.""" 176 | print("AI Agent Workshop - Session 2: Content Creation Crew") 177 | print("=" * 65) 178 | 179 | try: 180 | # Ask user for topic 181 | try: 182 | topic = input("What topic would you like the content creation team to write about? 
(press Enter for default): ").strip() 183 | if not topic: 184 | topic = "The Future of Multi-Agent AI Systems" 185 | except EOFError: 186 | # Handle non-interactive environments 187 | topic = "The Future of Multi-Agent AI Systems" 188 | print("What topic would you like the content creation team to write about? (press Enter for default): ") 189 | print("(Using default topic in non-interactive environment)") 190 | 191 | result = run_content_creation_workflow(topic) 192 | 193 | print("\n✅ Content creation workflow completed successfully!") 194 | print("\n💡 Try modifying the topic variable to create content on different subjects!") 195 | 196 | except Exception as e: 197 | print(f"❌ Error running content creation crew: {e}") 198 | if PROVIDER == 'sambanova': 199 | print("Make sure your SAMBA_API_KEY is set correctly in the .env file.") 200 | elif PROVIDER == 'ollama': 201 | print("Make sure Ollama is running locally on http://localhost:11434") 202 | print("Install Ollama from https://ollama.ai and run: ollama serve") 203 | print("Also ensure all required packages are installed.") 204 | 205 | if __name__ == "__main__": 206 | main() 207 | -------------------------------------------------------------------------------- /architecture.md: -------------------------------------------------------------------------------- 1 | # AIAgentWorkshop Code Architecture 2 | 3 | This document provides a visual representation of the codebase architecture using Mermaid diagrams. 4 | 5 | ## Project Overview 6 | 7 | The AIAgentWorkshop is a Python-based educational project demonstrating progressive AI agent development, from basic interactions to complex multi-agent workflows. It supports multiple AI providers (SambaNova cloud API, Ollama local models, and NVIDIA API) with a simple configuration system. 8 | 9 | ## Architecture Diagram 10 | 11 | ```mermaid 12 | flowchart LR 13 | subgraph subGraph0["External Dependencies"] 14 | LangChain["LangChain OpenAI
ChatOpenAI"] 15 | CrewAI["CrewAI Framework
Agent, Task, Crew"] 16 | LangGraph["LangGraph
StateGraph, TypedDict"] 17 | SambaNova["SambaNova API
Cloud LLM Models"] 18 | Ollama["Ollama
Local LLM Models"] 19 | NVIDIA["NVIDIA API
Cloud LLM Models"] 20 | DotEnv["python-dotenv
Environment Loading"] 21 | end 22 | subgraph subGraph1["Simple Configuration"] 23 | Config["config.py
Auto-Configuration
- Environment Loading
- Provider Selection
- Model Configuration"] 24 | end 25 | subgraph subGraph2["Testing & Validation"] 26 | TestScripts["testing/
Test Scripts
- API Validation
- Provider Testing
- Integration Tests"] 27 | end 28 | subgraph subGraph3["Session 1: Foundations"] 29 | S1Basics["session1/basics.py
Basic AI Interactions
- Simple Chat
- Math Helper Examples"] 30 | S1CrewAI["session1/crewai_intro.py
CrewAI Introduction
- Single Agent Crews
- Multi-Agent Teams"] 31 | end 32 | subgraph subGraph4["Session 2: Advanced Agents"] 33 | S2AgentRoles["session2/agent_roles.py
Agent Roles & Teams
- Specialized Roles
- Collaborative Tasks"] 34 | S2GUI["session2/agent_roles_gui.py
GUI Implementation
- Agent Role Visualization"] 35 | S2Content["session2/content_crew.py
Content Creation Crew
- Content Workflow"] 36 | end 37 | subgraph subGraph5["Session 3: Stateful Workflows"] 38 | S3LangGraph["session3/langgraph_basics.py
LangGraph Basics
- State Management
- Node-Based Graphs
- Conditional Routing"] 39 | S3Stateful["session3/stateful_workflow.py
CrewAI Workflows
- Complex State Logic"] 40 | S3Nvidia["session3/langgraph_basics_nvidia.py
LangGraph Basics NVIDIA
- NVIDIA API Integration"] 41 | S3LangChain["session3/stateful_workflow_langchain_nvidia.py
LangChain NVIDIA
- Pure LangChain Approach"] 42 | end 43 | subgraph subGraph6["Legacy Utils (Advanced)"] 44 | LegacyConfig["utils/config.py
Legacy Configuration
- Complex Config Class"] 45 | Helpers["utils/helpers.py
Utility Functions
- Text Processing
- Cost Estimation"] 46 | RateLimiter["utils/rate_limiter.py
Rate Limiting
- API Throttling"] 47 | end 48 | LangChain --> SambaNova & Ollama & NVIDIA & S1Basics & S3LangChain 49 | CrewAI --> LangChain & S1CrewAI & S2AgentRoles & S2Content & S3Stateful 50 | LangGraph --> LangChain & S3LangGraph & S3Nvidia 51 | DotEnv --> Config 52 | Config --> TestScripts 53 | Config -. Simple Config .-> S1Basics & S1CrewAI & S2AgentRoles & S2GUI & S2Content & S3LangGraph & S3Stateful & S3Nvidia & S3LangChain 54 | S1Basics -. Builds Upon .-> S1CrewAI 55 | S1CrewAI -. Advances To .-> S2AgentRoles 56 | S2AgentRoles -. Extends To .-> S3LangGraph 57 | S3LangGraph -. Branches To .-> S3Stateful 58 | S3Stateful -. Alternative .-> S3Nvidia 59 | S3Nvidia -. Alternative .-> S3LangChain 60 | 61 | LangChain:::external 62 | CrewAI:::external 63 | LangGraph:::external 64 | SambaNova:::external 65 | Ollama:::external 66 | NVIDIA:::external 67 | DotEnv:::external 68 | Config:::config 69 | TestScripts:::testing 70 | S1Basics:::session1 71 | S1CrewAI:::session1 72 | S2AgentRoles:::session2 73 | S2GUI:::session2 74 | S2Content:::session2 75 | S3LangGraph:::session3 76 | S3Stateful:::session3 77 | S3Nvidia:::session3 78 | S3LangChain:::session3 79 | LegacyConfig:::legacy 80 | Helpers:::legacy 81 | RateLimiter:::legacy 82 | classDef config fill:#e1f5fe,stroke:#01579b,stroke-width:2px 83 | classDef testing fill:#f3e5f5,stroke:#4a148c,stroke-width:2px 84 | classDef session1 fill:#e8f5e8,stroke:#1b5e20,stroke-width:2px 85 | classDef session2 fill:#fff3e0,stroke:#e65100,stroke-width:2px 86 | classDef session3 fill:#fce4ec,stroke:#880e4f,stroke-width:2px 87 | classDef legacy fill:#f5f5f5,stroke:#616161,stroke-width:1px 88 | classDef external fill:#fafafa,stroke:#616161,stroke-width:2px 89 | ``` 90 | 91 | ## Component Descriptions 92 | 93 | ### Simple Configuration System 94 | The `config.py` file provides automatic configuration loading: 95 | 96 | - **Provider Selection**: Choose between SambaNova (cloud) or Ollama (local) 97 | - **Auto-Loading**: Environment variables loaded automatically on import 98 | - **Easy Imports**: Direct access via `from config import API_KEY, MODEL, etc.` 99 | - **Validation**: Basic key format checking and defaults 100 | 101 | ### Testing Infrastructure 102 | The `testing/` folder contains validation and testing scripts: 103 | 104 | - **test_langchain.py**: LangChain integration tests 105 | - **test_nvidia_langchain.py**: NVIDIA API with LangChain tests 106 | - **test_nvidia_model.py**: Direct NVIDIA model tests 107 | - **test_ollama.py**: Ollama local model tests 108 | - **test_sambanova.py**: SambaNova API tests 109 | - **API Testing**: Validate connections to different providers 110 | - **Integration Tests**: End-to-end workflow testing 111 | - **Provider Validation**: Ensure API keys and models work correctly 112 | 113 | ### Session Progression 114 | The workshop follows a progressive learning path: 115 | 116 | 1. **Session 1**: Foundation concepts with basic AI interactions and CrewAI introduction 117 | 2. **Session 2**: Advanced agent design with specialized roles and team collaboration 118 | 3. 
**Session 3**: Complex state management with multiple workflow implementations 119 | 120 | ### Session 3 Variants 121 | Session 3 demonstrates different approaches to stateful workflows: 122 | 123 | - **CrewAI Version**: Traditional multi-agent workflows with state 124 | - **NVIDIA Direct**: Direct integration with NVIDIA API 125 | - **LangChain Pure**: LangChain/LangGraph-only implementation, without CrewAI 126 | 127 | ### External Dependencies 128 | - **LangChain**: Framework for building LLM-powered applications 129 | - **CrewAI**: Framework for creating multi-agent workflows 130 | - **LangGraph**: Library for building stateful agent workflows with graph-based logic 131 | - **SambaNova API**: Cloud-based LLM provider with fast inference 132 | - **Ollama**: Local LLM runtime for running models offline 133 | - **NVIDIA API**: Cloud-based LLM provider with high-performance inference 134 | - **python-dotenv**: Environment variable management 135 | 136 | ## Data Flow 137 | 138 | 1. Configuration is loaded automatically from `.env` via `config.py` 139 | 2. Sessions import configuration variables directly 140 | 3. AI providers (SambaNova/Ollama/NVIDIA) are initialized with appropriate settings 141 | 4. Agents execute tasks using external LLM APIs through LangChain/CrewAI/LangGraph 142 | 5. Results are processed and presented to users 143 | 144 | ## Design Patterns 145 | 146 | - **Simple Imports**: Direct variable imports instead of complex classes 147 | - **Provider Abstraction**: Unified interface for different AI providers 148 | - **Progressive Complexity**: Sessions build upon each other with increasing sophistication 149 | - **Multiple Implementations**: Session 3 shows different approaches to the same problem 150 | - **Configuration as Code**: Settings defined as simple Python variables 151 | 152 | ## Provider Support 153 | 154 | ### SambaNova (Cloud) 155 | - Fast inference with enterprise-grade reliability 156 | - Pay-per-use pricing with generous free tier 157 | - Access to multiple model sizes and capabilities 158 | 159 | ### Ollama (Local) 160 | - Completely free and offline-capable 161 | - Full control over models and data privacy 162 | - No API rate limits or costs 163 | - Requires local hardware resources 164 | 165 | ### NVIDIA (Cloud) 166 | - High-performance cloud inference 167 | - Access to advanced NVIDIA models 168 | - Scalable and reliable API service 169 | - Suitable for production workloads 170 | -------------------------------------------------------------------------------- /session3/stateful_workflow_langchain_nvidia.py: -------------------------------------------------------------------------------- 1 | """ 2 | Session 3: Simple Stateful Workflows with LangChain and NVIDIA API 3 | This file shows how AI agents can "remember" information and pass it between steps using LangChain and NVIDIA API. 4 | Stateful means the workflow remembers what happened in previous steps! 
5 | """ 6 | 7 | import os 8 | from langchain_openai import ChatOpenAI 9 | from langchain_core.messages import HumanMessage, SystemMessage 10 | from langchain_core.prompts import ChatPromptTemplate 11 | from langchain_core.output_parsers import StrOutputParser 12 | from langchain_core.runnables import RunnablePassthrough 13 | from langgraph.graph import StateGraph, END 14 | from typing import TypedDict, Optional 15 | 16 | # NVIDIA API Configuration 17 | NVIDIA_API_KEY = "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" 18 | 19 | # Define what information our workflow will remember (state) 20 | class WorkflowState(TypedDict): 21 | """This is like a notebook where we write down information as we go.""" 22 | user_question: str # The original question 23 | research_notes: Optional[str] # What we learned from research 24 | answer_draft: Optional[str] # First attempt at answering 25 | final_answer: Optional[str] # The polished final answer 26 | current_step: str # Where we are in the process 27 | 28 | def create_llm(): 29 | """Create NVIDIA LLM using LangChain.""" 30 | return ChatOpenAI( 31 | model="meta/llama3-8b-instruct", 32 | api_key=NVIDIA_API_KEY, 33 | base_url="https://integrate.api.nvidia.com/v1", 34 | temperature=0.7 35 | ) 36 | 37 | def research_step(state: WorkflowState) -> WorkflowState: 38 | """Step 1: Research the topic using LangChain.""" 39 | print("Step 1: Researching the topic...") 40 | 41 | question = state["user_question"] 42 | llm = create_llm() 43 | 44 | # Create research prompt 45 | research_prompt = ChatPromptTemplate.from_messages([ 46 | ("system", "You are a helpful research assistant. Provide comprehensive information about the given topic."), 47 | ("human", f"Research this question and find key facts: {question}\n\nProvide 3-4 important facts about the topic.") 48 | ]) 49 | 50 | # Create research chain 51 | research_chain = research_prompt | llm | StrOutputParser() 52 | 53 | # Do the research 54 | research_result = research_chain.invoke({}) 55 | 56 | # Save what we learned in our "notebook" (state) 57 | new_state = state.copy() 58 | new_state["research_notes"] = research_result 59 | new_state["current_step"] = "researched" 60 | 61 | print("Research complete! Saved notes for next step.") 62 | return new_state 63 | 64 | def draft_answer_step(state: WorkflowState) -> WorkflowState: 65 | """Step 2: Use research to create a draft answer using LangChain.""" 66 | print("Step 2: Writing a draft answer...") 67 | 68 | question = state["user_question"] 69 | research = state["research_notes"] 70 | llm = create_llm() 71 | 72 | # Create drafting prompt 73 | draft_prompt = ChatPromptTemplate.from_messages([ 74 | ("system", "You are a skilled writer who creates clear, helpful answers based on research information."), 75 | ("human", f"Using this research information, write a draft answer to: {question}\n\nResearch: {research}\n\nWrite a clear draft answer in 2-3 sentences.") 76 | ]) 77 | 78 | # Create drafting chain 79 | draft_chain = draft_prompt | llm | StrOutputParser() 80 | 81 | # Write the draft 82 | draft_result = draft_chain.invoke({}) 83 | 84 | # Save the draft in our notebook 85 | new_state = state.copy() 86 | new_state["answer_draft"] = draft_result 87 | new_state["current_step"] = "drafted" 88 | 89 | print("Draft written! 
Saved for final review.") 90 | return new_state 91 | 92 | def final_answer_step(state: WorkflowState) -> WorkflowState: 93 | """Step 3: Review and polish the final answer using LangChain.""" 94 | print("Step 3: Creating the final polished answer...") 95 | 96 | question = state["user_question"] 97 | research = state["research_notes"] 98 | draft = state["answer_draft"] 99 | llm = create_llm() 100 | 101 | # Create final editing prompt 102 | final_prompt = ChatPromptTemplate.from_messages([ 103 | ("system", "You are an expert editor who creates polished, accurate, and helpful final answers."), 104 | ("human", f"""Review and improve this draft answer. Make it perfect! 105 | 106 | Question: {question} 107 | Research: {research} 108 | Draft: {draft} 109 | 110 | Make the final answer clear, accurate, and helpful.""") 111 | ]) 112 | 113 | # Create final chain 114 | final_chain = final_prompt | llm | StrOutputParser() 115 | 116 | # Create the final answer 117 | final_result = final_chain.invoke({}) 118 | 119 | # Save the final answer 120 | new_state = state.copy() 121 | new_state["final_answer"] = final_result 122 | new_state["current_step"] = "complete" 123 | 124 | print("Final answer ready!") 125 | return new_state 126 | 127 | def decide_next_step(state: WorkflowState) -> str: 128 | """Decide which step to do next based on where we are.""" 129 | step = state.get("current_step", "start") 130 | 131 | if step == "start": 132 | return "research" # Start with research 133 | elif step == "researched": 134 | return "draft" # Research done, now draft 135 | elif step == "drafted": 136 | return "final" # Draft done, now finalize 137 | else: 138 | return END # All done! 139 | 140 | def create_simple_workflow(): 141 | """Create our simple 3-step workflow.""" 142 | print("Building our LangChain workflow...") 143 | 144 | # Create the workflow 145 | workflow = StateGraph(WorkflowState) 146 | 147 | # Add our three steps 148 | workflow.add_node("research", research_step) 149 | workflow.add_node("draft", draft_answer_step) 150 | workflow.add_node("final", final_answer_step) 151 | 152 | # Connect the steps (decide which way to go) 153 | workflow.add_conditional_edges( 154 | "research", 155 | decide_next_step, 156 | {"draft": "draft", END: END} 157 | ) 158 | 159 | workflow.add_conditional_edges( 160 | "draft", 161 | decide_next_step, 162 | {"final": "final", END: END} 163 | ) 164 | 165 | workflow.add_conditional_edges( 166 | "final", 167 | decide_next_step, 168 | {END: END} 169 | ) 170 | 171 | # Start with research 172 | workflow.set_entry_point("research") 173 | 174 | # Build the workflow 175 | app = workflow.compile() 176 | 177 | print("LangChain workflow ready!") 178 | return app 179 | 180 | def run_simple_workflow(): 181 | """Run our simple stateful workflow example.""" 182 | print("Running Simple Stateful Workflow (LangChain + NVIDIA API)") 183 | print("=" * 60) 184 | print("This workflow remembers information between steps!") 185 | print() 186 | 187 | # Create the workflow 188 | app = create_simple_workflow() 189 | 190 | # Our question 191 | question = "What are the benefits of eating healthy food?" 192 | 193 | # Start with empty notebook (state) 194 | starting_state = { 195 | "user_question": question, 196 | "research_notes": None, 197 | "answer_draft": None, 198 | "final_answer": None, 199 | "current_step": "start" 200 | } 201 | 202 | print(f"Question: {question}") 203 | print("Let's see how the workflow remembers information...") 204 | print() 205 | 206 | # Run the workflow! 
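    # --- Editor's aside: an illustrative sketch, not part of the original script. ---
    # Besides a single .invoke(), compiled LangGraph apps also expose .stream(),
    # which yields each node's state update as it finishes -- a handy way to watch
    # the "notebook" fill in step by step. Uncomment to try it (runs the graph once):
    # for update in app.stream(starting_state, stream_mode="updates"):
    #     for node_name, written_keys in update.items():
    #         print(f"Node '{node_name}' finished; wrote: {list(written_keys)}")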
207 | final_state = app.invoke(starting_state) 208 | 209 | print("\n" + "="*60) 210 | print("WORKFLOW COMPLETE!") 211 | print("="*60) 212 | 213 | print("\nWhat the workflow remembered:") 214 | print(f" • Research Notes: {len(final_state.get('research_notes', ''))} characters") 215 | print(f" • Draft Answer: {len(final_state.get('answer_draft', ''))} characters") 216 | print(f" • Final Answer: {len(final_state.get('final_answer', ''))} characters") 217 | 218 | print(f"\nFinal Answer:\n{final_state.get('final_answer', 'No answer available')}") 219 | 220 | return final_state 221 | 222 | def main(): 223 | """Run the simple stateful workflow example.""" 224 | print("AI Agent Workshop - Session 3: Simple Stateful Workflows (LangChain + NVIDIA API)") 225 | print("=" * 85) 226 | print("Welcome! Today we'll learn about workflows that remember information.") 227 | print("This is called 'state' - like how you remember things from step to step.") 228 | print("Using LangChain agents with NVIDIA API!") 229 | print() 230 | 231 | try: 232 | # Run our simple workflow 233 | result = run_simple_workflow() 234 | 235 | print("\nSimple workflow with LangChain + NVIDIA API completed successfully!") 236 | print("\nWhat you learned:") 237 | print(" • Workflows can remember information between steps") 238 | print(" • Each step can use what previous steps learned") 239 | print(" • LangChain provides powerful agent capabilities") 240 | print(" • NVIDIA API offers fast, reliable AI models") 241 | 242 | except Exception as e: 243 | print(f"Oops! Something went wrong: {e}") 244 | print("Make sure your NVIDIA_API_KEY is set correctly in the .env file.") 245 | print("Check the README.md for setup help.") 246 | 247 | if __name__ == "__main__": 248 | main() -------------------------------------------------------------------------------- /utils/config.py: -------------------------------------------------------------------------------- 1 | """ 2 | Configuration management for the AI Agent Workshop. 
3 | """ 4 | 5 | import os 6 | from typing import Dict, Any, Optional 7 | from pathlib import Path 8 | from .helpers import load_environment_variables, validate_api_key 9 | 10 | class WorkshopConfig: 11 | """Configuration class for workshop settings.""" 12 | 13 | def __init__(self): 14 | self._config = {} 15 | self._load_config() 16 | 17 | def _load_config(self): 18 | """Load configuration from environment and defaults.""" 19 | try: 20 | env_vars = load_environment_variables() 21 | self._config.update(env_vars) 22 | except ValueError as e: 23 | print(f"Warning: {e}") 24 | # Continue with defaults 25 | 26 | # Set defaults 27 | self._config.setdefault('SAMBA_MODEL', 'gpt-oss-120b') 28 | self._config.setdefault('WORKSHOP_DEBUG', 'false') 29 | self._config.setdefault('MAX_TOKENS', '4000') 30 | self._config.setdefault('TEMPERATURE', '0.7') 31 | 32 | # Convert string values to appropriate types 33 | self._convert_types() 34 | 35 | def _convert_types(self): 36 | """Convert string config values to appropriate types.""" 37 | # Boolean conversions 38 | bool_keys = ['WORKSHOP_DEBUG'] 39 | for key in bool_keys: 40 | if key in self._config: 41 | self._config[key] = str(self._config[key]).lower() in ('true', '1', 'yes', 'on') 42 | 43 | # Integer conversions 44 | int_keys = ['MAX_TOKENS'] 45 | for key in int_keys: 46 | if key in self._config: 47 | try: 48 | self._config[key] = int(self._config[key]) 49 | except ValueError: 50 | print(f"Warning: Invalid integer value for {key}, using default") 51 | self._config[key] = 4000 52 | 53 | # Float conversions 54 | float_keys = ['TEMPERATURE'] 55 | for key in float_keys: 56 | if key in self._config: 57 | try: 58 | self._config[key] = float(self._config[key]) 59 | except ValueError: 60 | print(f"Warning: Invalid float value for {key}, using default") 61 | self._config[key] = 0.7 62 | 63 | def get(self, key: str, default: Any = None) -> Any: 64 | """Get configuration value.""" 65 | return self._config.get(key, default) 66 | 67 | def set(self, key: str, value: Any): 68 | """Set configuration value.""" 69 | self._config[key] = value 70 | 71 | def validate(self) -> tuple[bool, list]: 72 | """Validate configuration.""" 73 | errors = [] 74 | 75 | # Check required API key 76 | api_key = self.get('SAMBA_API_KEY') 77 | if not api_key: 78 | errors.append("SAMBA_API_KEY is required") 79 | elif not validate_api_key(api_key): 80 | errors.append("SAMBA_API_KEY format is invalid") 81 | 82 | # Check model 83 | model = self.get('SAMBA_MODEL') 84 | # SambaNova supports many models, so we'll just check it's not empty 85 | if not model: 86 | errors.append("SAMBA_MODEL is required") 87 | 88 | # Check temperature range 89 | temp = self.get('TEMPERATURE') 90 | if temp is not None and not (0.0 <= temp <= 2.0): 91 | errors.append("TEMPERATURE must be between 0.0 and 2.0") 92 | 93 | # Check max tokens 94 | max_tokens = self.get('MAX_TOKENS') 95 | if max_tokens is not None and max_tokens <= 0: 96 | errors.append("MAX_TOKENS must be greater than 0") 97 | 98 | is_valid = len(errors) == 0 99 | return is_valid, errors 100 | 101 | def to_dict(self) -> Dict[str, Any]: 102 | """Convert config to dictionary.""" 103 | return self._config.copy() 104 | 105 | def save_to_env_file(self, env_file_path: str = ".env"): 106 | """Save configuration to .env file.""" 107 | env_path = Path(env_file_path) 108 | 109 | # Read existing .env file if it exists 110 | existing_content = {} 111 | if env_path.exists(): 112 | with open(env_path, 'r') as f: 113 | for line in f: 114 | line = line.strip() 115 | if line and 
not line.startswith('#'): 116 | if '=' in line: 117 | key, value = line.split('=', 1) 118 | existing_content[key.strip()] = value.strip() 119 | 120 | # Update with current config 121 | for key, value in self._config.items(): 122 | if key.startswith(('SAMBA_', 'WORKSHOP_', 'MAX_TOKENS', 'TEMPERATURE')): # Save relevant configs 123 | existing_content[key] = str(value) 124 | 125 | # Write back to file 126 | with open(env_path, 'w') as f: 127 | f.write("# AI Agent Workshop Configuration\n") 128 | f.write("# Generated automatically - do not edit manually\n\n") 129 | 130 | for key, value in sorted(existing_content.items()): 131 | f.write(f"{key}={value}\n") 132 | 133 | def get_agent_config(self) -> Dict[str, Any]: 134 | """Get configuration specifically for agents.""" 135 | model = self.get('SAMBA_MODEL') 136 | # No prefix needed for SambaNova 137 | return { 138 | 'model': model, 139 | 'temperature': self.get('TEMPERATURE'), 140 | 'max_tokens': self.get('MAX_TOKENS'), 141 | 'api_key': self.get('SAMBA_API_KEY'), 142 | 'api_base': 'https://api.sambanova.ai/v1', 143 | 'max_retries': 3, # Add retry logic 144 | 'retry_delay': 1.0, # Base delay in seconds 145 | } 146 | 147 | def get_workflow_config(self) -> Dict[str, Any]: 148 | """Get configuration for workflow execution.""" 149 | return { 150 | 'debug': self.get('WORKSHOP_DEBUG'), 151 | 'max_iterations': self.get('MAX_ITERATIONS', 10), 152 | 'timeout_seconds': self.get('TIMEOUT_SECONDS', 300), 153 | } 154 | 155 | def __str__(self) -> str: 156 | """String representation of config (without sensitive data).""" 157 | config_copy = self._config.copy() 158 | 159 | # Mask sensitive information 160 | if 'GROQ_API_KEY' in config_copy: 161 | key = config_copy['GROQ_API_KEY'] 162 | if len(key) > 10: 163 | config_copy['GROQ_API_KEY'] = key[:6] + '*' * (len(key) - 10) + key[-4:] 164 | 165 | return f"WorkshopConfig({config_copy})" 166 | 167 | def __repr__(self) -> str: 168 | """Detailed string representation.""" 169 | return self.__str__() 170 | 171 | # Global configuration instance 172 | _config_instance: Optional[WorkshopConfig] = None 173 | 174 | def get_config() -> WorkshopConfig: 175 | """Get the global configuration instance.""" 176 | global _config_instance 177 | if _config_instance is None: 178 | _config_instance = WorkshopConfig() 179 | return _config_instance 180 | 181 | def reload_config(): 182 | """Reload the global configuration.""" 183 | global _config_instance 184 | _config_instance = WorkshopConfig() 185 | 186 | def validate_config() -> tuple[bool, list]: 187 | """Validate the current configuration.""" 188 | config = get_config() 189 | return config.validate() 190 | 191 | def setup_config_interactive(): 192 | """Interactive configuration setup.""" 193 | print("AI Agent Workshop - Configuration Setup") 194 | print("=" * 45) 195 | 196 | config = get_config() 197 | 198 | # Check if API key is already set 199 | if config.get('SAMBA_API_KEY'): 200 | print("✓ SambaNova API key is already configured") 201 | else: 202 | print("SambaNova API key is required") 203 | print("Get your API key from: https://sambanova.ai") 204 | api_key = input("Enter your SambaNova API key: ").strip() 205 | if api_key: 206 | config.set('SAMBA_API_KEY', api_key) 207 | print("✓ API key configured") 208 | else: 209 | print("❌ API key is required") 210 | return False 211 | 212 | # Optional: Configure model 213 | current_model = config.get('GROQ_MODEL', 'openai/gpt-oss-20b:free') 214 | print(f"\nCurrent model: {current_model}") 215 | change_model = input("Change model? 
(y/N): ").strip().lower() 216 | if change_model == 'y': 217 | models = [ 218 | 'gemma2-9b-it', #Free 219 | 'llama3-8b-8192', # Free 220 | 'llama3-70b-8192', # Free 221 | 'mixtral-8x7b-32768', # Free 222 | 'llama-3.1-8b-instant', 223 | 'llama-3.1-70b-versatile', 224 | 'llama-3.1-405b-inference' 225 | ] 226 | print("Available models (some free, some paid):") 227 | for i, model in enumerate(models, 1): 228 | free_indicator = " (Free)" if i <= 3 else "" 229 | print(f" {i}. {model}{free_indicator}") 230 | choice = input("Select model (1-7): ").strip() 231 | try: 232 | index = int(choice) - 1 233 | if 0 <= index < len(models): 234 | config.set('GROQ_MODEL', models[index]) 235 | print(f"✓ Model set to {models[index]}") 236 | except ValueError: 237 | print("Invalid choice, keeping current model") 238 | 239 | # Validate configuration 240 | is_valid, errors = config.validate() 241 | if is_valid: 242 | print("\n✅ Configuration is valid!") 243 | save = input("Save configuration to .env file? (Y/n): ").strip().lower() 244 | if save != 'n': 245 | config.save_to_env_file() 246 | print("✓ Configuration saved to .env file") 247 | return True 248 | else: 249 | print("\n❌ Configuration validation failed:") 250 | for error in errors: 251 | print(f" - {error}") 252 | return False 253 | 254 | # Initialize config on import 255 | config = get_config() 256 | -------------------------------------------------------------------------------- /session3/stateful_workflow.py: -------------------------------------------------------------------------------- 1 | """ 2 | Session 3: Simple Stateful Workflows 3 | This file shows how AI agents can "remember" information and pass it between steps. 4 | Stateful means the workflow remembers what happened in previous steps! 5 | """ 6 | 7 | from crewai import Agent, Task, Crew, LLM 8 | from langchain_openai import ChatOpenAI 9 | from langgraph.graph import StateGraph, END 10 | from typing import TypedDict, Optional 11 | from config import API_KEY, MODEL, API_BASE, TEMPERATURE, MAX_TOKENS, MAX_RETRIES, RETRY_DELAY, PROVIDER 12 | 13 | # Step 3: Set up environment for LiteLLM 14 | import os 15 | if PROVIDER == 'sambanova': 16 | os.environ["SAMBANOVA_API_KEY"] = API_KEY 17 | elif PROVIDER == 'ollama': 18 | # Ollama doesn't need environment variables 19 | pass 20 | 21 | def get_llm(): 22 | """Get the appropriate LLM configuration based on provider.""" 23 | if PROVIDER == 'ollama': 24 | return LLM( 25 | model=f"ollama/{MODEL}", 26 | base_url="http://localhost:11434" 27 | ) 28 | elif PROVIDER == 'sambanova': 29 | return LLM( 30 | model=f"sambanova/{MODEL}", 31 | api_key=API_KEY, 32 | base_url=API_BASE 33 | ) 34 | else: 35 | # Default fallback 36 | return f"{PROVIDER}/{MODEL}" 37 | 38 | # Define what information our workflow will remember (state) 39 | class WorkflowState(TypedDict): 40 | """This is like a notebook where we write down information as we go.""" 41 | user_question: str # The original question 42 | research_notes: Optional[str] # What we learned from research 43 | answer_draft: Optional[str] # First attempt at answering 44 | final_answer: Optional[str] # The polished final answer 45 | current_step: str # Where we are in the process 46 | 47 | def research_step(state: WorkflowState) -> WorkflowState: 48 | """Step 1: Research the topic.""" 49 | print("Step 1: Researching the topic...") 50 | 51 | question = state["user_question"] 52 | 53 | # Create a researcher AI agent 54 | llm = get_llm() 55 | 56 | researcher = Agent( 57 | role="Researcher", 58 | goal="Find helpful information 
about topics", 59 | backstory="I am a curious researcher who loves learning new things.", 60 | llm=llm, 61 | verbose=True 62 | ) 63 | 64 | # Give the researcher a job 65 | research_task = Task( 66 | description=f"Research this question and find key facts: {question}", 67 | expected_output="Write down 3-4 important facts about the topic.", 68 | agent=researcher 69 | ) 70 | 71 | # Do the research 72 | crew = Crew(agents=[researcher], tasks=[research_task], verbose=True) 73 | research_result = crew.kickoff() 74 | 75 | # Save what we learned in our "notebook" (state) 76 | new_state = state.copy() 77 | new_state["research_notes"] = str(research_result) 78 | new_state["current_step"] = "researched" 79 | 80 | print("Research complete! Saved notes for next step.") 81 | return new_state 82 | 83 | def draft_answer_step(state: WorkflowState) -> WorkflowState: 84 | """Step 2: Use research to create a draft answer.""" 85 | print("Step 2: Writing a draft answer...") 86 | 87 | question = state["user_question"] 88 | research = state["research_notes"] 89 | 90 | # Create a writer AI agent 91 | llm = get_llm() 92 | 93 | writer = Agent( 94 | role="Writer", 95 | goal="Write clear and helpful answers", 96 | backstory="I am a writer who explains things in simple ways.", 97 | llm=llm, 98 | verbose=True 99 | ) 100 | 101 | # Give the writer a job (they can see the research notes!) 102 | writing_task = Task( 103 | description=f"Using this research information, write a draft answer to: {question}\n\nResearch: {research}", 104 | expected_output="Write a clear draft answer in 2-3 sentences.", 105 | agent=writer 106 | ) 107 | 108 | # Write the draft 109 | crew = Crew(agents=[writer], tasks=[writing_task], verbose=True) 110 | draft_result = crew.kickoff() 111 | 112 | # Save the draft in our notebook 113 | new_state = state.copy() 114 | new_state["answer_draft"] = str(draft_result) 115 | new_state["current_step"] = "drafted" 116 | 117 | print("Draft written! Saved for final review.") 118 | return new_state 119 | 120 | def final_answer_step(state: WorkflowState) -> WorkflowState: 121 | """Step 3: Review and polish the final answer.""" 122 | print("Step 3: Creating the final polished answer...") 123 | 124 | question = state["user_question"] 125 | research = state["research_notes"] 126 | draft = state["answer_draft"] 127 | 128 | # Create an editor AI agent 129 | llm = get_llm() 130 | 131 | editor = Agent( 132 | role="Editor", 133 | goal="Make answers clear and perfect", 134 | backstory="I am an editor who polishes writing to make it excellent.", 135 | llm=llm, 136 | verbose=True 137 | ) 138 | 139 | # Give the editor a job (they can see everything!) 140 | editing_task = Task( 141 | description=f"""Review and improve this draft answer. Make it perfect! 
142 | 143 | Question: {question} 144 | Research: {research} 145 | Draft: {draft} 146 | 147 | Make the final answer clear, accurate, and helpful.""", 148 | expected_output="Write the final polished answer.", 149 | agent=editor 150 | ) 151 | 152 | # Create the final answer 153 | crew = Crew(agents=[editor], tasks=[editing_task], verbose=True) 154 | final_result = crew.kickoff() 155 | 156 | # Save the final answer 157 | new_state = state.copy() 158 | new_state["final_answer"] = str(final_result) 159 | new_state["current_step"] = "complete" 160 | 161 | print("Final answer ready!") 162 | return new_state 163 | 164 | def decide_next_step(state: WorkflowState) -> str: 165 | """Decide which step to do next based on where we are.""" 166 | step = state.get("current_step", "start") 167 | 168 | if step == "start": 169 | return "research" # Start with research 170 | elif step == "researched": 171 | return "draft" # Research done, now draft 172 | elif step == "drafted": 173 | return "final" # Draft done, now finalize 174 | else: 175 | return END # All done! 176 | 177 | def create_simple_workflow(): 178 | """Create our simple 3-step workflow.""" 179 | print("Building our workflow...") 180 | 181 | # Create the workflow 182 | workflow = StateGraph(WorkflowState) 183 | 184 | # Add our three steps 185 | workflow.add_node("research", research_step) 186 | workflow.add_node("draft", draft_answer_step) 187 | workflow.add_node("final", final_answer_step) 188 | 189 | # Connect the steps (decide which way to go) 190 | workflow.add_conditional_edges( 191 | "research", 192 | decide_next_step, 193 | {"draft": "draft", END: END} 194 | ) 195 | 196 | workflow.add_conditional_edges( 197 | "draft", 198 | decide_next_step, 199 | {"final": "final", END: END} 200 | ) 201 | 202 | workflow.add_conditional_edges( 203 | "final", 204 | decide_next_step, 205 | {END: END} 206 | ) 207 | 208 | # Start with research 209 | workflow.set_entry_point("research") 210 | 211 | # Build the workflow 212 | app = workflow.compile() 213 | 214 | print("Workflow ready!") 215 | return app 216 | 217 | def run_simple_workflow(): 218 | """Run our simple stateful workflow example.""" 219 | print("Running Simple Stateful Workflow") 220 | print("=" * 50) 221 | print("This workflow remembers information between steps!") 222 | print() 223 | 224 | # Create the workflow 225 | app = create_simple_workflow() 226 | 227 | # Our question 228 | question = "What are the benefits of eating healthy food?" 229 | 230 | # Start with empty notebook (state) 231 | starting_state = { 232 | "user_question": question, 233 | "research_notes": None, 234 | "answer_draft": None, 235 | "final_answer": None, 236 | "current_step": "start" 237 | } 238 | 239 | print(f"Question: {question}") 240 | print("Let's see how the workflow remembers information...") 241 | print() 242 | 243 | # Run the workflow! 
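    # --- Editor's aside: an illustrative sketch, not part of the original script. ---
    # LangGraph can also persist this state between runs via a checkpointer, so the
    # "notebook" survives across invocations that share a thread_id. Assuming
    # langgraph's bundled in-memory saver, the change inside create_simple_workflow()
    # would look roughly like:
    # from langgraph.checkpoint.memory import MemorySaver
    # app = workflow.compile(checkpointer=MemorySaver())
    # ...and each call then carries a thread id:
    # final_state = app.invoke(starting_state, config={"configurable": {"thread_id": "demo-1"}})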
244 | final_state = app.invoke(starting_state) 245 | 246 | print("\n" + "="*60) 247 | print("WORKFLOW COMPLETE!") 248 | print("="*60) 249 | 250 | print("\nWhat the workflow remembered:") 251 | print(f" • Research Notes: {len(final_state.get('research_notes', ''))} characters") 252 | print(f" • Draft Answer: {len(final_state.get('answer_draft', ''))} characters") 253 | print(f" • Final Answer: {len(final_state.get('final_answer', ''))} characters") 254 | 255 | print(f"\nFinal Answer:\n{final_state.get('final_answer', 'No answer available')}") 256 | 257 | return final_state 258 | 259 | def main(): 260 | """Run the simple stateful workflow example.""" 261 | print("AI Agent Workshop - Session 3: Simple Stateful Workflows") 262 | print("=" * 70) 263 | print("Welcome! Today we'll learn about workflows that remember information.") 264 | print("This is called 'state' - like how you remember things from step to step.") 265 | print() 266 | 267 | try: 268 | # Run our simple workflow 269 | result = run_simple_workflow() 270 | 271 | print("\nSimple workflow completed successfully!") 272 | print("\nWhat you learned:") 273 | print(" • Workflows can remember information between steps") 274 | print(" • Each step can use what previous steps learned") 275 | print(" • State helps agents work together better") 276 | 277 | except Exception as e: 278 | print(f"Oops! Something went wrong: {e}") 279 | if PROVIDER == 'sambanova': 280 | print("Make sure your SAMBA_API_KEY is set correctly in the .env file.") 281 | elif PROVIDER == 'ollama': 282 | print("Make sure Ollama is running locally on http://localhost:11434") 283 | print("Install Ollama from https://ollama.ai and run: ollama serve") 284 | print("Check the README.md for setup help.") 285 | 286 | if __name__ == "__main__": 287 | main() 288 | -------------------------------------------------------------------------------- /session3/langgraph_basics.py: -------------------------------------------------------------------------------- 1 | """ 2 | Session 3: LangGraph Basics 3 | This file demonstrates fundamental LangGraph concepts for building stateful agent workflows. 
4 | """ 5 | 6 | from langchain_openai import ChatOpenAI 7 | from langgraph.graph import StateGraph, END 8 | from typing import TypedDict, Optional 9 | from config import API_KEY, MODEL, API_BASE, TEMPERATURE, MAX_TOKENS, MAX_RETRIES, RETRY_DELAY, PROVIDER 10 | 11 | # Define the state structure 12 | class AgentState(TypedDict): 13 | """State definition for our agent workflow.""" 14 | user_query: str 15 | research_result: Optional[str] 16 | analysis_result: Optional[str] 17 | final_answer: Optional[str] 18 | current_step: str 19 | 20 | def create_langgraph_workflow(): 21 | """Create a basic LangGraph workflow with nodes and edges.""" 22 | 23 | # Initialize the LLM using configuration 24 | llm = ChatOpenAI( 25 | temperature=TEMPERATURE, 26 | model=MODEL, 27 | api_key=API_KEY, 28 | base_url=API_BASE 29 | ) 30 | 31 | def research_node(state: AgentState) -> AgentState: 32 | """Research node that gathers information.""" 33 | print("🔍 Researching topic...") 34 | 35 | query = state["user_query"] 36 | research_prompt = f"""Research the following topic comprehensively: {query} 37 | 38 | Provide: 39 | - Key concepts and definitions 40 | - Current trends and developments 41 | - Real-world applications 42 | - Important considerations 43 | 44 | Be thorough but concise.""" 45 | 46 | response = llm.invoke([{"role": "user", "content": research_prompt}]) 47 | research_result = response.content 48 | 49 | # Update state 50 | new_state = state.copy() 51 | new_state["research_result"] = research_result 52 | new_state["current_step"] = "research_complete" 53 | 54 | print(f"✅ Research completed for: {query[:50]}...") 55 | return new_state 56 | 57 | def analyze_node(state: AgentState) -> AgentState: 58 | """Analysis node that processes research findings.""" 59 | print("📊 Analyzing research findings...") 60 | 61 | research = state["research_result"] 62 | analysis_prompt = f"""Analyze the following research and provide insights: 63 | 64 | RESEARCH: 65 | {research} 66 | 67 | Provide: 68 | - Key insights and patterns 69 | - Strengths and limitations 70 | - Practical implications 71 | - Recommendations""" 72 | 73 | response = llm.invoke([{"role": "user", "content": analysis_prompt}]) 74 | analysis_result = response.content 75 | 76 | # Update state 77 | new_state = state.copy() 78 | new_state["analysis_result"] = analysis_result 79 | new_state["current_step"] = "analysis_complete" 80 | 81 | print("✅ Analysis completed") 82 | return new_state 83 | 84 | def answer_node(state: AgentState) -> AgentState: 85 | """Final answer node that synthesizes everything.""" 86 | print("💡 Generating final answer...") 87 | 88 | query = state["user_query"] 89 | research = state["research_result"] 90 | analysis = state["analysis_result"] 91 | 92 | final_prompt = f"""Based on the research and analysis provided, give a comprehensive answer to: {query} 93 | 94 | RESEARCH: 95 | {research} 96 | 97 | ANALYSIS: 98 | {analysis} 99 | 100 | Provide a clear, well-structured final answer.""" 101 | 102 | response = llm.invoke([{"role": "user", "content": final_prompt}]) 103 | final_answer = response.content 104 | 105 | # Update state 106 | new_state = state.copy() 107 | new_state["final_answer"] = final_answer 108 | new_state["current_step"] = "answer_complete" 109 | 110 | print("✅ Final answer generated") 111 | return new_state 112 | 113 | def router_function(state: AgentState) -> str: 114 | """Router function to decide next step based on state.""" 115 | current_step = state.get("current_step", "start") 116 | 117 | if current_step == "start": 118 | 
return "research" 119 | elif current_step == "research_complete": 120 | return "analyze" 121 | elif current_step == "analysis_complete": 122 | return "answer" 123 | else: 124 | return END 125 | 126 | # Create the graph 127 | workflow = StateGraph(AgentState) 128 | 129 | # Add nodes 130 | workflow.add_node("research", research_node) 131 | workflow.add_node("analyze", analyze_node) 132 | workflow.add_node("answer", answer_node) 133 | 134 | # Add edges 135 | workflow.add_conditional_edges( 136 | "research", 137 | router_function, 138 | {"analyze": "analyze", "answer": "answer", END: END} 139 | ) 140 | 141 | workflow.add_conditional_edges( 142 | "analyze", 143 | router_function, 144 | {"answer": "answer", END: END} 145 | ) 146 | 147 | workflow.add_conditional_edges( 148 | "answer", 149 | router_function, 150 | {END: END} 151 | ) 152 | 153 | # Set entry point 154 | workflow.set_entry_point("research") 155 | 156 | # Compile the graph 157 | app = workflow.compile() 158 | 159 | return app 160 | 161 | def run_basic_langgraph_example(): 162 | """Run a basic LangGraph workflow example.""" 163 | print("=== Basic LangGraph Workflow Example ===") 164 | print() 165 | 166 | # Create the workflow 167 | app = create_langgraph_workflow() 168 | 169 | # Initial state 170 | initial_state = { 171 | "user_query": "What are the benefits and challenges of implementing AI agents in business processes?", 172 | "research_result": None, 173 | "analysis_result": None, 174 | "final_answer": None, 175 | "current_step": "start" 176 | } 177 | 178 | print("Starting workflow execution...") 179 | print(f"Query: {initial_state['user_query']}") 180 | print() 181 | 182 | # Execute the workflow 183 | final_state = app.invoke(initial_state) 184 | 185 | print("\n" + "="*80) 186 | print("🎉 WORKFLOW EXECUTION COMPLETE!") 187 | print("="*80) 188 | 189 | print(f"\nFinal Answer:\n{final_state['final_answer']}") 190 | 191 | print("\n📊 Workflow Summary:") 192 | print(f" • Research Result Length: {len(final_state.get('research_result', ''))} characters") 193 | print(f" • Analysis Result Length: {len(final_state.get('analysis_result', ''))} characters") 194 | print(f" • Final Answer Length: {len(final_state.get('final_answer', ''))} characters") 195 | 196 | return final_state 197 | 198 | def demonstrate_conditional_routing(): 199 | """Demonstrate conditional routing in LangGraph.""" 200 | 201 | print("\n=== Conditional Routing Example ===") 202 | print() 203 | 204 | class QueryState(TypedDict): 205 | query: str 206 | query_type: Optional[str] 207 | response: Optional[str] 208 | 209 | # Initialize the LLM using configuration 210 | llm = ChatOpenAI( 211 | temperature=TEMPERATURE, 212 | model=MODEL, 213 | api_key=API_KEY, 214 | base_url=API_BASE 215 | ) 216 | 217 | def classify_query(state: QueryState) -> QueryState: 218 | """Classify the query type.""" 219 | query = state["query"] 220 | classify_prompt = f"""Classify this query as either 'simple' or 'complex': 221 | 222 | Query: {query} 223 | 224 | Simple queries can be answered directly with basic knowledge. 225 | Complex queries require research and detailed analysis. 
226 | 227 | Respond with only: simple or complex""" 228 | 229 | response = llm.invoke([{"role": "user", "content": classify_prompt}]) 230 | query_type = response.content.strip().lower() 231 | 232 | new_state = state.copy() 233 | new_state["query_type"] = query_type 234 | return new_state 235 | 236 | def simple_response(state: QueryState) -> QueryState: 237 | """Handle simple queries.""" 238 | query = state["query"] 239 | response_prompt = f"Answer this simple query directly: {query}" 240 | 241 | response = llm.invoke([{"role": "user", "content": response_prompt}]) 242 | answer = response.content 243 | 244 | new_state = state.copy() 245 | new_state["response"] = f"[SIMPLE] {answer}" 246 | return new_state 247 | 248 | def complex_response(state: QueryState) -> QueryState: 249 | """Handle complex queries.""" 250 | query = state["query"] 251 | response_prompt = f"Provide a detailed analysis for this complex query: {query}" 252 | 253 | response = llm.invoke([{"role": "user", "content": response_prompt}]) 254 | answer = response.content 255 | 256 | new_state = state.copy() 257 | new_state["response"] = f"[COMPLEX] {answer}" 258 | return new_state 259 | 260 | def route_based_on_complexity(state: QueryState) -> str: 261 | """Route to appropriate handler based on query complexity.""" 262 | query_type = state.get("query_type") 263 | if query_type == "simple": 264 | return "simple_handler" 265 | elif query_type == "complex": 266 | return "complex_handler" 267 | else: 268 | return "simple_handler" # fallback 269 | 270 | # Create conditional workflow 271 | workflow = StateGraph(QueryState) 272 | 273 | workflow.add_node("classifier", classify_query) 274 | workflow.add_node("simple_handler", simple_response) 275 | workflow.add_node("complex_handler", complex_response) 276 | 277 | workflow.set_entry_point("classifier") 278 | 279 | workflow.add_conditional_edges( 280 | "classifier", 281 | route_based_on_complexity, 282 | { 283 | "simple_handler": "simple_handler", 284 | "complex_handler": "complex_handler" 285 | } 286 | ) 287 | 288 | workflow.add_edge("simple_handler", END) 289 | workflow.add_edge("complex_handler", END) 290 | 291 | app = workflow.compile() 292 | 293 | # Test with different queries 294 | test_queries = [ 295 | "What is 2 + 2?", 296 | "Explain quantum computing and its business applications" 297 | ] 298 | 299 | for query in test_queries: 300 | print(f"\nQuery: {query}") 301 | result = app.invoke({"query": query, "query_type": None, "response": None}) 302 | print(f"Response: {result['response'][:100]}...") 303 | 304 | def main(): 305 | """Run LangGraph basics examples.""" 306 | print("AI Agent Workshop - Session 3: LangGraph Basics") 307 | print("=" * 55) 308 | 309 | try: 310 | # Run basic workflow example 311 | run_basic_langgraph_example() 312 | 313 | # Demonstrate conditional routing 314 | demonstrate_conditional_routing() 315 | 316 | print("\n✅ LangGraph basics completed successfully!") 317 | print("\n💡 Key Concepts Learned:") 318 | print(" • State management in workflows") 319 | print(" • Node-based graph construction") 320 | print(" • Conditional routing logic") 321 | print(" • Sequential and parallel execution") 322 | 323 | except Exception as e: 324 | print(f"Error running LangGraph examples: {e}") 325 | if PROVIDER == 'sambanova': 326 | print("Make sure your SAMBA_API_KEY is set correctly in the .env file.") 327 | elif PROVIDER == 'ollama': 328 | print("Make sure Ollama is running locally on http://localhost:11434") 329 | print("Install Ollama from https://ollama.ai and run: ollama 
serve") 330 | print("Also ensure langgraph is installed: uv pip install langgraph") 331 | 332 | if __name__ == "__main__": 333 | main() 334 | -------------------------------------------------------------------------------- /session3/langgraph_basics_nvidia.py: -------------------------------------------------------------------------------- 1 | """ 2 | Session 3: LangGraph Basics with NVIDIA API 3 | This file demonstrates fundamental LangGraph concepts for building stateful agent workflows using NVIDIA API. 4 | """ 5 | 6 | import os 7 | from dotenv import load_dotenv 8 | from langchain_openai import ChatOpenAI 9 | from langgraph.graph import StateGraph, END 10 | from typing import TypedDict, Optional 11 | 12 | # Load environment variables 13 | load_dotenv() 14 | 15 | # NVIDIA API Configuration 16 | NVIDIA_API_KEY = "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" 17 | 18 | # Define the state structure 19 | class AgentState(TypedDict): 20 | """State definition for our agent workflow.""" 21 | user_query: str 22 | research_result: Optional[str] 23 | analysis_result: Optional[str] 24 | final_answer: Optional[str] 25 | current_step: str 26 | 27 | def create_langgraph_workflow(): 28 | """Create a basic LangGraph workflow with nodes and edges.""" 29 | 30 | # Initialize the LLM using NVIDIA API 31 | llm = ChatOpenAI( 32 | temperature=0.7, 33 | model="meta/llama3-8b-instruct", 34 | api_key=NVIDIA_API_KEY, 35 | base_url="https://integrate.api.nvidia.com/v1" 36 | ) 37 | 38 | def research_node(state: AgentState) -> AgentState: 39 | """Research node that gathers information.""" 40 | print("🔍 Researching topic...") 41 | 42 | query = state["user_query"] 43 | research_prompt = f"""Research the following topic comprehensively: {query} 44 | 45 | Provide: 46 | - Key concepts and definitions 47 | - Current trends and developments 48 | - Real-world applications 49 | - Important considerations 50 | 51 | Be thorough but concise.""" 52 | 53 | response = llm.invoke([{"role": "user", "content": research_prompt}]) 54 | research_result = response.content 55 | 56 | # Update state 57 | new_state = state.copy() 58 | new_state["research_result"] = research_result 59 | new_state["current_step"] = "research_complete" 60 | 61 | print(f"✅ Research completed for: {query[:50]}...") 62 | return new_state 63 | 64 | def analyze_node(state: AgentState) -> AgentState: 65 | """Analysis node that processes research findings.""" 66 | print("📊 Analyzing research findings...") 67 | 68 | research = state["research_result"] 69 | analysis_prompt = f"""Analyze the following research and provide insights: 70 | 71 | RESEARCH: 72 | {research} 73 | 74 | Provide: 75 | - Key insights and patterns 76 | - Strengths and limitations 77 | - Practical implications 78 | - Recommendations""" 79 | 80 | response = llm.invoke([{"role": "user", "content": analysis_prompt}]) 81 | analysis_result = response.content 82 | 83 | # Update state 84 | new_state = state.copy() 85 | new_state["analysis_result"] = analysis_result 86 | new_state["current_step"] = "analysis_complete" 87 | 88 | print("✅ Analysis completed") 89 | return new_state 90 | 91 | def answer_node(state: AgentState) -> AgentState: 92 | """Final answer node that synthesizes everything.""" 93 | print("💡 Generating final answer...") 94 | 95 | query = state["user_query"] 96 | research = state["research_result"] 97 | analysis = state["analysis_result"] 98 | 99 | final_prompt = f"""Based on the research and analysis provided, give a comprehensive answer to: {query} 100 | 101 | 
RESEARCH: 102 | {research} 103 | 104 | ANALYSIS: 105 | {analysis} 106 | 107 | Provide a clear, well-structured final answer.""" 108 | 109 | response = llm.invoke([{"role": "user", "content": final_prompt}]) 110 | final_answer = response.content 111 | 112 | # Update state 113 | new_state = state.copy() 114 | new_state["final_answer"] = final_answer 115 | new_state["current_step"] = "answer_complete" 116 | 117 | print("✅ Final answer generated") 118 | return new_state 119 | 120 | def router_function(state: AgentState) -> str: 121 | """Router function to decide next step based on state.""" 122 | current_step = state.get("current_step", "start") 123 | 124 | if current_step == "start": 125 | return "research" 126 | elif current_step == "research_complete": 127 | return "analyze" 128 | elif current_step == "analysis_complete": 129 | return "answer" 130 | else: 131 | return END 132 | 133 | # Create the graph 134 | workflow = StateGraph(AgentState) 135 | 136 | # Add nodes 137 | workflow.add_node("research", research_node) 138 | workflow.add_node("analyze", analyze_node) 139 | workflow.add_node("answer", answer_node) 140 | 141 | # Add edges 142 | workflow.add_conditional_edges( 143 | "research", 144 | router_function, 145 | {"analyze": "analyze", "answer": "answer", END: END} 146 | ) 147 | 148 | workflow.add_conditional_edges( 149 | "analyze", 150 | router_function, 151 | {"answer": "answer", END: END} 152 | ) 153 | 154 | workflow.add_conditional_edges( 155 | "answer", 156 | router_function, 157 | {END: END} 158 | ) 159 | 160 | # Set entry point 161 | workflow.set_entry_point("research") 162 | 163 | # Compile the graph 164 | app = workflow.compile() 165 | 166 | return app 167 | 168 | def run_basic_langgraph_example(): 169 | """Run a basic LangGraph workflow example.""" 170 | print("=== Basic LangGraph Workflow Example (NVIDIA API) ===") 171 | print() 172 | 173 | # Create the workflow 174 | app = create_langgraph_workflow() 175 | 176 | # Initial state 177 | initial_state = { 178 | "user_query": "What are the benefits and challenges of implementing AI agents in business processes?", 179 | "research_result": None, 180 | "analysis_result": None, 181 | "final_answer": None, 182 | "current_step": "start" 183 | } 184 | 185 | print("Starting workflow execution...") 186 | print(f"Query: {initial_state['user_query']}") 187 | print() 188 | 189 | # Execute the workflow 190 | final_state = app.invoke(initial_state) 191 | 192 | print("\n" + "="*80) 193 | print("🎉 WORKFLOW EXECUTION COMPLETE!") 194 | print("="*80) 195 | 196 | print(f"\nFinal Answer:\n{final_state['final_answer']}") 197 | 198 | print("\n📊 Workflow Summary:") 199 | print(f" • Research Result Length: {len(final_state.get('research_result', ''))} characters") 200 | print(f" • Analysis Result Length: {len(final_state.get('analysis_result', ''))} characters") 201 | print(f" • Final Answer Length: {len(final_state.get('final_answer', ''))} characters") 202 | 203 | return final_state 204 | 205 | def demonstrate_conditional_routing(): 206 | """Demonstrate conditional routing in LangGraph.""" 207 | 208 | print("\n=== Conditional Routing Example (NVIDIA API) ===") 209 | print() 210 | 211 | class QueryState(TypedDict): 212 | query: str 213 | query_type: Optional[str] 214 | response: Optional[str] 215 | 216 | # Initialize the LLM using NVIDIA API 217 | llm = ChatOpenAI( 218 | temperature=0.7, 219 | model="meta/llama3-8b-instruct", 220 | api_key=NVIDIA_API_KEY, 221 | base_url="https://integrate.api.nvidia.com/v1" 222 | ) 223 | 224 | def classify_query(state: 
QueryState) -> QueryState: 225 | """Classify the query type.""" 226 | query = state["query"] 227 | classify_prompt = f"""Classify this query as either 'simple' or 'complex': 228 | 229 | Query: {query} 230 | 231 | Simple queries can be answered directly with basic knowledge. 232 | Complex queries require research and detailed analysis. 233 | 234 | Respond with only: simple or complex""" 235 | 236 | response = llm.invoke([{"role": "user", "content": classify_prompt}]) 237 | query_type = response.content.strip().lower() 238 | 239 | new_state = state.copy() 240 | new_state["query_type"] = query_type 241 | return new_state 242 | 243 | def simple_response(state: QueryState) -> QueryState: 244 | """Handle simple queries.""" 245 | query = state["query"] 246 | response_prompt = f"Answer this simple query directly: {query}" 247 | 248 | response = llm.invoke([{"role": "user", "content": response_prompt}]) 249 | answer = response.content 250 | 251 | new_state = state.copy() 252 | new_state["response"] = f"[SIMPLE] {answer}" 253 | return new_state 254 | 255 | def complex_response(state: QueryState) -> QueryState: 256 | """Handle complex queries.""" 257 | query = state["query"] 258 | response_prompt = f"Provide a detailed analysis for this complex query: {query}" 259 | 260 | response = llm.invoke([{"role": "user", "content": response_prompt}]) 261 | answer = response.content 262 | 263 | new_state = state.copy() 264 | new_state["response"] = f"[COMPLEX] {answer}" 265 | return new_state 266 | 267 | def route_based_on_complexity(state: QueryState) -> str: 268 | """Route to appropriate handler based on query complexity.""" 269 | query_type = state.get("query_type") 270 | if query_type == "simple": 271 | return "simple_handler" 272 | elif query_type == "complex": 273 | return "complex_handler" 274 | else: 275 | return "simple_handler" # fallback 276 | 277 | # Create conditional workflow 278 | workflow = StateGraph(QueryState) 279 | 280 | workflow.add_node("classifier", classify_query) 281 | workflow.add_node("simple_handler", simple_response) 282 | workflow.add_node("complex_handler", complex_response) 283 | 284 | workflow.set_entry_point("classifier") 285 | 286 | workflow.add_conditional_edges( 287 | "classifier", 288 | route_based_on_complexity, 289 | { 290 | "simple_handler": "simple_handler", 291 | "complex_handler": "complex_handler" 292 | } 293 | ) 294 | 295 | workflow.add_edge("simple_handler", END) 296 | workflow.add_edge("complex_handler", END) 297 | 298 | app = workflow.compile() 299 | 300 | # Test with different queries 301 | test_queries = [ 302 | "What is 2 + 2?", 303 | "Explain quantum computing and its business applications" 304 | ] 305 | 306 | for query in test_queries: 307 | print(f"\nQuery: {query}") 308 | result = app.invoke({"query": query, "query_type": None, "response": None}) 309 | print(f"Response: {result['response'][:100]}...") 310 | 311 | def main(): 312 | """Run LangGraph basics examples.""" 313 | print("AI Agent Workshop - Session 3: LangGraph Basics (NVIDIA API)") 314 | print("=" * 60) 315 | 316 | try: 317 | # Run basic workflow example 318 | run_basic_langgraph_example() 319 | 320 | # Demonstrate conditional routing 321 | demonstrate_conditional_routing() 322 | 323 | print("\n✅ LangGraph basics with NVIDIA API completed successfully!") 324 | print("\n💡 Key Concepts Learned:") 325 | print(" • State management in workflows") 326 | print(" • Node-based graph construction") 327 | print(" • Conditional routing logic") 328 | print(" • Sequential and parallel execution") 329 | 330 | 
except Exception as e: 331 | print(f"Error running LangGraph examples: {e}") 332 | print("Make sure your NVIDIA_API_KEY is set correctly in the .env file.") 333 | print("Also ensure langgraph is installed: uv pip install langgraph") 334 | 335 | if __name__ == "__main__": 336 | main() -------------------------------------------------------------------------------- /CODE_REVIEW_REPORT.md: -------------------------------------------------------------------------------- 1 | # AI Agent Workshop - Comprehensive Code Review Report 2 | 3 | ## 📋 Executive Summary 4 | 5 | | Aspect | Rating | Status | 6 | | ------------------------------ | ------ | ----------------------------------- | 7 | | **Overall Grade** | B+ | Good with room for improvement | 8 | | **Architecture** | A- | Well-structured layered design | 9 | | **Documentation** | B | Comprehensive but inconsistent | 10 | | **Code Quality** | B | Good practices with some gaps | 11 | | **Production Readiness** | C+ | Basic resilience, needs improvement | 12 | | **User Experience** | B+ | Good learning progression | 13 | 14 | **Summary**: The AI Agent Workshop is a well-structured educational project demonstrating progressive AI agent development. It shows solid architectural decisions but needs improvements in documentation consistency, error handling, and production readiness. 15 | 16 | --- 17 | 18 | ## 📚 Documentation Review 19 | 20 | ### Current State 21 | 22 | | Component | Status | Issues | Recommendations | 23 | | ---------------------------- | ----------------- | ------------------------------------------- | ---------------------------------------- | 24 | | **README.md** | ✅ Good | Mermaid diagram syntax error (fixed) | Keep updated with implementation changes | 25 | | **Curriculum.md** | ⚠️ Needs Update | API provider, duration, audience mismatches | Align with README implementation | 26 | | **Inline Code Docs** | ✅ Good | Some functions under-documented | Add docstrings to all public functions | 27 | | **Setup Instructions** | ✅ Excellent | Clear and beginner-friendly | Consider interactive setup script | 28 | 29 | ### Key Documentation Issues 30 | 31 | | Issue | Current | Should Be | Impact | 32 | | ---------------- | -------------------------------------------------------------- | ----------- | ---------------------- | 33 | | API Provider | Curriculum: OpenAI`
`README: OpenRouter | OpenRouter | Confusion for learners | 34 | | Session Duration | Curriculum: 1hr each`<br>`README: 30min each | 30 minutes | Time management issues | 35 | | Target Audience | Curriculum: College students`<br>`README: Absolute beginners | Beginners | Accessibility problems | 36 | | Installation | Curriculum:`uv pip install
`README: `uv sync` | `uv sync` | Setup failures | 37 | 38 | --- 39 | 40 | ## 🏗️ Architecture & Code Quality Review 41 | 42 | ### Architecture Assessment 43 | 44 | | Component | Rating | Strengths | Issues | 45 | | ---------------------------------- | ------ | --------------------------------------------- | ------------------------------------------ | 46 | | **Layered Design** | A | Clear separation: config → utils → sessions | None | 47 | | **Configuration Management** | B+ | Centralized config with validation | Hard-coded values, no env-specific configs | 48 | | **Modularity** | A- | Well-organized utilities and sessions | Some tight coupling in session files | 49 | | **Dependency Management** | B+ | Clean pyproject.toml structure | Could use dependency groups | 50 | 51 | ### Code Quality Issues 52 | 53 | | Category | Current Issues | Examples | Recommendations | 54 | | -------------------------- | ------------------------ | ------------------------------------------------ | ---------------------------------------- | 55 | | **Type Hints** | Incomplete | `keys: list` should be `List[str]` | Add comprehensive type hints | 56 | | **Error Handling** | Basic | Some API calls lack try/catch | Add try/catch to all API interactions | 57 | | **Rate Limiting** | Implemented but not used | Rate limiter exists but not applied to LLM calls | Connect rate limiter to actual API calls | 58 | | **State Validation** | Missing | No schema validation for workflow states | Add state validation functions | 59 | 60 | --- 61 | 62 | ## 🤖 AI Agent Development Best Practices 63 | 64 | ### Framework Usage Assessment 65 | 66 | | Framework | Rating | Strengths | Issues | 67 | | ------------------- | ------ | --------------------------------- | -------------------------------- | 68 | | **CrewAI** | A- | Good agent roles, task sequencing | Verbose output in production | 69 | | **LangChain** | B+ | Proper LLM integration | Could use more advanced features | 70 | | **LangGraph** | B | Clear state management | Router functions need robustness | 71 | | **LiteLLM** | B+ | Good API abstraction | Limited error handling | 72 | 73 | ### Agent Development Issues 74 | 75 | | Issue | Current | Recommended | Priority | 76 | | -------------------------------- | ------- | ------------------------------------ | -------- | 77 | | **Error Recovery** | None | Add error handling in workflow nodes | High | 78 | | **State Persistence** | None | Demonstrate state saving/loading | Medium | 79 | | **Agent Validation** | Basic | Add agent capability validation | Medium | 80 | | **Performance Monitoring** | None | Add execution time tracking | Low | 81 | 82 | --- 83 | 84 | ## 🛡️ Resilience & Production Readiness 85 | 86 | ### Current Resilience Features 87 | 88 | | Feature | Status | Implementation | Gaps | 89 | | ------------------------ | ------------ | ------------------------------------- | ---------------------------------- | 90 | | **Rate Limiting** | ⚠️ Partial | Infrastructure exists but not applied | Connect to LLM calls | 91 | | **API Validation** | ✅ Good | Key format and model validation | Add connectivity tests | 92 | | **Error Messages** | ✅ Good | User-friendly error formatting | Some technical errors leak through | 93 | | **Configuration** | ✅ Good | Environment variable handling | No environment-specific configs | 94 | 95 | ### Missing Production Features 96 | 97 | | Feature | Current State | Recommendation | Priority | 98 | | ------------------------- | ------------- | ------------------------------ | -------- | 99 | | 
**Logging** | None | Add structured logging system | High | 100 | | **Metrics** | None | Add performance monitoring | Medium | 101 | | **Health Checks** | None | Add system health validation | Medium | 102 | | **Caching** | None | Add response caching | Low | 103 | | **Circuit Breaker** | None | Add failure threshold handling | Medium | 104 | 105 | --- 106 | 107 | ## 🎯 Session-Specific Review 108 | 109 | ### Session Assessment 110 | 111 | | Session | Rating | Strengths | Issues | Improvements | 112 | | ------------------- | ------ | ------------------------ | ---------------------- | --------------------------- | 113 | | **Session 1** | B+ | Good LangChain intro | Limited error handling | Add API failure fallbacks | 114 | | **Session 2** | A- | Excellent CrewAI demos | GUI demo mode unclear | Add real vs demo indicators | 115 | | **Session 3** | B | Clear LangGraph examples | No state persistence | Add state saving examples | 116 | 117 | ### Learning Progression 118 | 119 | | Aspect | Rating | Current | Recommended | 120 | | -------------------------- | ------ | ------------------------- | ----------------------------- | 121 | | **Difficulty Curve** | A | Well-balanced progression | Maintain current approach | 122 | | **Hands-on Focus** | A- | Good practical examples | Add more interactive elements | 123 | | **Error Guidance** | B | Basic error messages | Add troubleshooting guides | 124 | | **Advanced Options** | C | Limited extensions | Add difficulty levels | 125 | 126 | --- 127 | 128 | ## 🔧 Recommended Improvements 129 | 130 | ### High Priority (Immediate Action Required) 131 | 132 | | # | Issue | Solution | Effort | Impact | 133 | | - | ----------------------------- | ---------------------------------- | ------ | ------ | 134 | | 1 | Documentation inconsistencies | Align curriculum with README | Low | High | 135 | | 2 | Missing rate limiting | Connect rate limiter to LLM calls | Medium | High | 136 | | 3 | Error handling gaps | Add try/catch to all API calls | Medium | High | 137 | | 4 | Type hints incomplete | Add comprehensive type annotations | Low | Medium | 138 | 139 | ### Medium Priority (Next Sprint) 140 | 141 | | # | Issue | Solution | Effort | Impact | 142 | | - | ------------------- | ---------------------------- | ------ | ------ | 143 | | 5 | Logging system | Implement structured logging | Medium | High | 144 | | 6 | Environment configs | Add dev/prod configuration | Low | Medium | 145 | | 7 | State validation | Add schema validation | Medium | Medium | 146 | | 8 | Unit tests | Add basic test coverage | High | Medium | 147 | 148 | ### Low Priority (Future Enhancements) 149 | 150 | | # | Issue | Solution | Effort | Impact | 151 | | -- | ------------------ | ----------------------------- | ------ | ------ | 152 | | 9 | Response caching | Add caching layer | Medium | Low | 153 | | 10 | Metrics collection | Add performance monitoring | Medium | Low | 154 | | 11 | Configuration UI | Web-based config interface | High | Low | 155 | | 12 | Advanced examples | Complex multi-agent scenarios | High | Low | 156 | 157 | --- 158 | 159 | ## 📊 Code Metrics 160 | 161 | ### File Structure Analysis 162 | 163 | | Directory | Files | Lines of Code | Primary Language | Test Coverage | 164 | | ------------------- | ----- | ------------- | ---------------- | ------------- | 165 | | **Root** | 6 | ~200 | Markdown/Python | N/A | 166 | | **utils/** | 3 | ~600 | Python | 0% | 167 | | **session1/** | 2 | ~150 | Python | 0% | 168 | | **session2/** | 3 | ~600 | Python | 0% | 169 | | **session3/** | 2 | 
~400 | Python | 0% | 170 | | **Total** | 16 | ~1950 | Python/Markdown | 0% | 171 | 172 | ### Dependency Analysis 173 | 174 | | Framework | Usage | Version Specified | Criticality | 175 | | ------------------- | ------------------------- | ----------------- | ----------- | 176 | | **CrewAI** | Multi-agent orchestration | Latest | High | 177 | | **LangChain** | LLM integration | Latest | High | 178 | | **LangGraph** | Stateful workflows | Latest | High | 179 | | **LiteLLM** | API abstraction | Latest | Medium | 180 | | **Streamlit** | GUI components | Latest | Low | 181 | 182 | ### API Usage Patterns 183 | 184 | | API | Calls/Session | Error Handling | Rate Limiting | Caching | 185 | | ----------------------- | --------------- | -------------- | ------------- | ------- | 186 | | **OpenRouter** | 3-5 per session | Basic | Planned | None | 187 | | **Configuration** | 1 per session | Good | N/A | None | 188 | | **File I/O** | Minimal | None | N/A | None | 189 | 190 | --- 191 | 192 | ## 🎯 Action Plan 193 | 194 | ### Phase 1: Critical Fixes (Week 1) 195 | 196 | - [ ] Align curriculum with README implementation 197 | - [ ] Implement actual rate limiting on LLM calls 198 | - [ ] Add comprehensive error handling 199 | - [ ] Complete type hints 200 | 201 | ### Phase 2: Production Readiness (Week 2-3) 202 | 203 | - [ ] Add structured logging system 204 | - [ ] Implement environment-specific configurations 205 | - [ ] Add state validation and schema enforcement 206 | - [ ] Create basic test suite 207 | 208 | ### Phase 3: Enhancement (Week 4+) 209 | 210 | - [ ] Add performance monitoring and metrics 211 | - [ ] Implement response caching 212 | - [ ] Create advanced examples and tutorials 213 | - [ ] Add interactive configuration UI 214 | 215 | --- 216 | 217 | ## 📈 Overall Assessment 218 | 219 | ### Strengths 220 | 221 | - ✅ Well-structured learning progression 222 | - ✅ Good use of modern AI frameworks 223 | - ✅ Comprehensive documentation 224 | - ✅ Interactive GUI components 225 | - ✅ Progressive complexity approach 226 | 227 | ### Critical Gaps 228 | 229 | - ❌ Documentation inconsistencies across files 230 | - ❌ Rate limiting not actually implemented 231 | - ❌ Missing comprehensive error handling 232 | - ❌ No logging or monitoring systems 233 | - ❌ Incomplete type annotations 234 | 235 | ### Business Impact 236 | 237 | - **Educational Value**: High - provides excellent learning experience 238 | - **Production Readiness**: Medium - needs significant improvements for real-world use 239 | - **Maintainability**: Medium - good structure but needs better practices 240 | - **Scalability**: Low - no performance monitoring or optimization 241 | 242 | ### Final Recommendation 243 | 244 | **Proceed with Phase 1 fixes immediately**, then implement Phase 2 improvements. The workshop provides an excellent foundation for AI agent education but requires production hardening for real-world deployment. 245 | 246 | --- 247 | 248 | *Code Review Completed: December 7, 2025* 249 | *Review Scope: All source files, documentation, and architecture* 250 | *Review Methodology: Manual code inspection, architectural analysis, best practices evaluation* 251 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | # 🤖 AI Agent Workshop for Beginners 2 | 3 | ## Welcome! 4 | 5 | **Hello!** This is a simple workshop to learn about AI agents. We'll build smart AI helpers that can work together like a team. 
No advanced coding experience needed! 6 | 7 | ## What You'll Learn 8 | 9 | - How AI agents work (like smart assistants) 10 | - How to make multiple AI agents work together 11 | - How AI can remember information between steps 12 | - **🆕 Advanced Features**: Intelligent rate limiting and error handling for production-ready AI applications 13 | 14 | ## Before We Start 15 | 16 | You need: 17 | 18 | - **Python** (version 3.11 or higher) - check yours with `python --version`! 19 | - **Basic Python knowledge** - if you can write `print("hello")`, you're ready! 20 | - **Internet connection** - to talk to AI services 21 | - **A computer** - Windows, Mac, or Linux 22 | 23 | ## 🚀 Quick Setup (5 minutes!) 24 | 25 | ### Step 1: Get the Code 26 | 27 | ```bash 28 | # Download this project 29 | git clone https://github.com/ashishpatel26/AIAgentWorkshop-New.git 30 | cd ai-agent-workshop 31 | ``` 32 | 33 | ### Step 2: Install Tools 34 | 35 | ```bash 36 | # Install the uv package manager (a fast Python package manager) 37 | # On Windows: 38 | powershell -ExecutionPolicy ByPass -c "irm https://astral.sh/uv/install.ps1 | iex" 39 | 40 | # On Mac/Linux: 41 | curl -LsSf https://astral.sh/uv/install.sh | sh 42 | ``` 43 | 44 | ### Step 3: Install Python Packages 45 | 46 | ```bash 47 | # Install all needed tools (dependencies are now managed in pyproject.toml) 48 | uv sync 49 | ``` 50 | 51 | ### Step 4: Configure Environment 52 | 53 | 1. **Copy the environment template:** 54 | 55 | ```bash 56 | cp .env.example .env 57 | ``` 58 | 2. **Choose your AI provider and edit `.env`:** 59 | 60 | **For SambaNova (Cloud API - Recommended):** 61 | 62 | ```bash 63 | # AI Provider Selection 64 | AI_PROVIDER=sambanova 65 | 66 | # SambaNova API Configuration 67 | SAMBA_API_KEY=your_sambanova_api_key_here 68 | SAMBA_MODEL=gpt-oss-120b 69 | 70 | # Workshop Configuration 71 | WORKSHOP_DEBUG=false 72 | MAX_TOKENS=4000 73 | TEMPERATURE=0.7 74 | ``` 75 | 76 | **For Ollama (Local Models - Free):** 77 | 78 | ```bash 79 | # AI Provider Selection 80 | AI_PROVIDER=ollama 81 | 82 | # Ollama Configuration 83 | OLLAMA_MODEL=gemma3:4b 84 | 85 | # Workshop Configuration 86 | WORKSHOP_DEBUG=false 87 | MAX_TOKENS=4000 88 | TEMPERATURE=0.7 89 | ``` 90 | 91 | **For NVIDIA (Cloud API):** 92 | 93 | ```bash 94 | # AI Provider Selection 95 | AI_PROVIDER=nvidia 96 | 97 | # NVIDIA API Configuration 98 | NVIDIA_API_KEY=your_nvidia_api_key_here 99 | 100 | # Workshop Configuration 101 | WORKSHOP_DEBUG=false 102 | MAX_TOKENS=4000 103 | TEMPERATURE=0.7 104 | ``` 105 | 106 | ### Step 5: Get API Keys 107 | 108 | **SambaNova Setup:** 109 | 110 | 1. Visit [SambaNova](https://sambanova.ai) and create an account 111 | 2. Get your API key from the dashboard 112 | 3. Replace `your_sambanova_api_key_here` in `.env` 113 | 114 | **Ollama Setup:** 115 | 116 | 1. Install Ollama from [ollama.ai](https://ollama.ai) 117 | 2. Pull a model: `ollama pull gemma3:4b` 118 | 3. Start Ollama: `ollama serve` 119 | 120 | **NVIDIA Setup:** 121 | 122 | 1. Visit [NVIDIA AI Models](https://build.nvidia.com/models) and create an account 123 | 2. Go to [API Keys Settings](https://build.nvidia.com/settings/api-keys) to generate your API key 124 | 3. Copy the generated API key 125 | 4. 
Replace `your_nvidia_api_key_here` in `.env` with your actual API key 126 | 127 | ### Step 6: Test Everything Works 128 | 129 | ```bash 130 | # Test basic imports 131 | python -c "import crewai, langchain_openai; print('✅ Ready to start!')" 132 | 133 | # Test your configuration 134 | uv run python -c "from config import API_KEY, MODEL; print(f'✅ Config loaded: {MODEL}')" 135 | 136 | # Run a simple test (choose based on your provider) 137 | uv run python testing/test_sambanova.py # For SambaNova 138 | uv run python testing/test_ollama.py # For Ollama 139 | uv run python testing/test_nvidia_langchain.py # For NVIDIA 140 | ``` 141 | 142 | ## 🎯 What We'll Build (3 Simple Sessions) 143 | 144 | ### Session 1: Your First AI Agent (30 minutes) 145 | 146 | Learn the basics! We'll create: 147 | 148 | - A simple AI that can chat 149 | - An AI that can use tools (like a calculator) 150 | - Your first AI "crew" (team) 151 | 152 | **Run it:** 153 | 154 | ```bash 155 | cd session1 156 | uv run basics.py # Learn basic AI chat 157 | uv run crewai_intro.py # Learn about AI teams 158 | ``` 159 | 160 | ### Session 2: AI Agents Working Together (30 minutes) 161 | 162 | Make AI agents collaborate! We'll build: 163 | 164 | - Agents with different jobs (researcher, writer) 165 | - A simple content creation team 166 | - How agents share information 167 | 168 | **Run it (Command Line):** 169 | 170 | ```bash 171 | cd session2 172 | uv run agent_roles.py # See different AI jobs 173 | uv run content_crew.py # Watch AI create content together 174 | ``` 175 | 176 | **🎨 Interactive GUIs Available!** 177 | 178 | **Session 2 Advanced GUI:** 179 | 180 | ```bash 181 | # Run the comprehensive multi-team GUI 182 | uv run streamlit run agent_roles_gui.py 183 | ``` 184 | 185 | **GUI Features:** 186 | 187 | - 🎨 Beautiful, modern web interfaces 188 | - 👥 Interactive agent team demonstrations 189 | - 📊 Real-time progress tracking 190 | - 💾 Results history and comparison 191 | - 🎯 Educational explanations 192 | - 🔄 Demo mode (works without API keys!) 193 | 194 | ### Session 3: Smart Workflows (30 minutes) 195 | 196 | AI that remembers! We'll create: 197 | 198 | - Workflows that pass information between steps 199 | - AI that learns from previous steps 200 | - Simple state management 201 | 202 | **Run it:** 203 | 204 | ```bash 205 | cd session3 206 | uv run stateful_workflow.py # See AI remember information 207 | ``` 208 | 209 | ## 📁 What's In This Project 210 | 211 | ```bash 212 | ai-agent-workshop/ 213 | ├── README.md # This guide (you're reading it!) 
214 | ├── GIT_SETUP.md # Git setup instructions for all OS 215 | ├── CODE_REVIEW_REPORT.md # Comprehensive code review and recommendations 216 | ├── architecture.md # Detailed codebase architecture documentation 217 | ├── ai_agent_workshop_curriculum.md # Workshop curriculum and lesson plans 218 | ├── pyproject.toml # Project configuration and dependencies 219 | ├── uv.lock # Dependency lock file 220 | ├── .env.example # Template for your settings 221 | ├── .gitignore # Git ignore rules 222 | ├── config.py # Simple configuration (loads automatically) 223 | ├── session1/ # Basic AI examples 224 | │ ├── basics.py # Your first AI agents 225 | │ └── crewai_intro.py # AI working in teams 226 | ├── session2/ # AI collaboration 227 | │ ├── agent_roles.py # Different AI jobs 228 | │ └── content_crew.py # AI creating content together 229 | ├── session3/ # Smart workflows 230 | │ ├── stateful_workflow.py # AI that remembers 231 | │ ├── langgraph_basics_nvidia.py # LangGraph basics with NVIDIA 232 | │ ├── stateful_workflow_langchain_nvidia.py # LangChain NVIDIA workflow 233 | │ └── langgraph_basics.py # Graph basics 234 | ├── testing/ # Test scripts and utilities 235 | │ ├── test_langchain.py # LangChain tests 236 | │ ├── test_nvidia_langchain.py # NVIDIA API tests 237 | │ ├── test_nvidia_model.py # Direct NVIDIA model tests 238 | │ ├── test_ollama.py # Ollama local model tests 239 | │ └── test_sambanova.py # SambaNova API tests 240 | ├── utils/ # Helper tools (advanced users only) 241 | │ ├── config.py # Legacy configuration 242 | │ ├── helpers.py # Utility functions 243 | │ └── rate_limiter.py # API rate limiting 244 | └── .qodo/ # Project artifacts 245 | ``` 246 | 247 | #### 🏗️ Code Architecture Diagram 248 | 249 | ```mermaid 250 | flowchart TD 251 | %% Configuration files 252 | envFile[📄 .env<br/>Environment Variables] 253 | configPy[📄 config.py<br/>Simple Auto-Config] 254 | pyproject[📄 pyproject.toml<br/>Dependencies] 255 | 256 | %% Session 1 files 257 | basics[📄 session1/basics.py<br/>Basic Chat & Tools] 258 | crewaiIntro[📄 session1/crewai_intro.py<br/>Agent Teams Intro] 259 | 260 | %% Session 2 files 261 | agentRoles[📄 session2/agent_roles.py<br/>Agent Roles & Tasks] 262 | contentCrew[📄 session2/content_crew.py<br/>Content Creation] 263 | 264 | %% Session 3 files 265 | statefulWF[📄 session3/stateful_workflow.py<br/>Stateful Workflows] 266 | nvidiaWF[📄 session3/langgraph_basics_nvidia.py<br/>LangGraph Basics NVIDIA] 267 | langchainWF[📄 session3/stateful_workflow_langchain_nvidia.py<br/>LangChain Version] 268 | 269 | %% Testing files 270 | testFiles[📁 testing/<br/>Test Scripts] 271 | 272 | %% External frameworks 273 | langchain[(🤖 LangChain)] 274 | crewai[(👥 CrewAI)] 275 | langgraph[(📊 LangGraph)] 276 | sambanova[(🌐 SambaNova API)] 277 | ollama[(🏠 Ollama Local)] 278 | nvidia[(🚀 NVIDIA API)] 279 | 280 | %% Connections 281 | envFile --> configPy 282 | pyproject --> configPy 283 | configPy --> basics 284 | configPy --> crewaiIntro 285 | configPy --> agentRoles 286 | configPy --> contentCrew 287 | configPy --> statefulWF 288 | configPy --> nvidiaWF 289 | configPy --> langchainWF 290 | configPy --> testFiles 291 | 292 | basics --> langchain 293 | crewaiIntro --> crewai 294 | agentRoles --> crewai 295 | contentCrew --> crewai 296 | statefulWF --> langgraph 297 | nvidiaWF --> langgraph 298 | langchainWF --> langchain 299 | 300 | langchain --> sambanova 301 | langchain --> ollama 302 | langchain --> nvidia 303 | crewai --> sambanova 304 | crewai --> ollama 305 | crewai --> nvidia 306 | langgraph --> sambanova 307 | langgraph --> ollama 308 | langgraph --> nvidia 309 | ``` 310 | 311 | ## 📊 Individual File Code Architectures 312 | 313 | ### Session 1: Basic AI Interactions 314 | 315 | **File: `session1/basics.py`** - Demonstrates fundamental AI chat and tool usage with LangChain 316 | 317 | ```mermaid 318 | flowchart TD 319 | A[🚀 main<br/>Entry Point] --> B[💬 basic_chat_example<br/>Chat Demo] 320 | A --> C[🔢 simple_math_helper<br/>Math Demo] 321 | 322 | B --> D[🤖 ChatOpenAI<br/>LLM Instance] 323 | C --> D 324 | 325 | D --> E[⚙️ get_config<br/>Configuration] 326 | E --> F[🔧 get_agent_config<br/>Agent Settings] 327 | 328 | F --> G[📡 invoke<br/>API Call] 329 | G --> H[📄 Display Response] 330 | ``` 331 | 332 | **File: `session1/crewai_intro.py`** - Introduction to multi-agent systems with CrewAI 333 | 334 | ```mermaid 335 | flowchart TD 336 | A[🚀 main<br/>Entry Point] --> B[👥 create_simple_crew<br/>Crew Setup] 337 | B --> C[🤖 Agent<br/>AI Assistant] 338 | B --> D[📋 Task<br/>Work Assignment] 339 | B --> E[🎯 Crew<br/>Team Orchestrator] 340 | 341 | C --> F[⚙️ get_config<br/>Configuration] 342 | F --> G[🔧 get_agent_config<br/>Agent Settings] 343 | G --> H[🤖 ChatOpenAI<br/>LLM Instance] 344 | 345 | E --> I[▶️ kickoff<br/>Execute Tasks] 346 | I --> J[📊 Display Results] 347 | ``` 348 | 349 | ### Session 2: Multi-Agent Collaboration 350 | 351 | **File: `session2/agent_roles.py`** - Demonstrates different AI agent roles working together 352 | 353 | ```mermaid 354 | flowchart TD 355 | A[🚀 main<br/>Entry Point] --> B[📊 demonstrate_agent_roles<br/>Business Demo] 356 | A --> C[🍳 show_simple_roles<br/>Simple Demo] 357 | 358 | B --> D[⏱️ create_rate_limited_llm<br/>Rate Limited LLM] 359 | C --> D 360 | 361 | D --> E[⚙️ get_config<br/>Configuration] 362 | E --> F[🔧 get_agent_config<br/>Agent Settings] 363 | F --> G[🤖 ChatOpenAI<br/>LLM Instance] 364 | 365 | B --> H[📈 Agent<br/>Data Analyst] 366 | B --> I[🎯 Agent<br/>Business Strategist] 367 | B --> J[📋 Task<br/>Analysis Task] 368 | B --> K[📋 Task<br/>Strategy Task] 369 | 370 | C --> L[👨‍🍳 Agent<br/>Chef] 371 | C --> M[🥗 Agent<br/>Nutritionist] 372 | C --> N[📋 Task<br/>Recipe Task] 373 | C --> O[📋 Task<br/>Health Task] 374 | 375 | J --> P[👥 Crew<br/>Business Crew] 376 | K --> P 377 | N --> Q[👥 Crew<br/>Food Crew] 378 | O --> Q 379 | 380 | P --> R[▶️ kickoff<br/>Execute] 381 | Q --> R 382 | R --> S[📊 Display Results] 383 | ``` 384 | 385 | **File: `session2/content_crew.py`** - Complete content creation workflow with specialized agents 386 | 387 | ```mermaid 388 | flowchart TD 389 | A[🚀 main<br/>Entry Point] --> B[📝 run_content_creation_workflow<br/>Main Workflow] 390 | B --> C[👥 create_content_creation_crew<br/>Agent Setup] 391 | B --> D[📋 create_content_tasks<br/>Task Setup] 392 | 393 | C --> E[🔍 Agent<br/>Researcher] 394 | C --> F[✍️ Agent<br/>Writer] 395 | C --> G[✏️ Agent<br/>Editor] 396 | 397 | D --> H[📋 Task<br/>Research Task] 398 | D --> I[📋 Task<br/>Writing Task] 399 | D --> J[📋 Task<br/>Editing Task] 400 | 401 | E --> K[⚙️ get_config<br/>Configuration] 402 | F --> K 403 | G --> K 404 | K --> L[🔧 get_agent_config<br/>Agent Settings] 405 | L --> M[🤖 ChatOpenAI<br/>LLM Instance] 406 | 407 | H --> N[👥 Crew<br/>Content Crew] 408 | I --> N 409 | J --> N 410 | 411 | N --> O[▶️ kickoff<br/>Execute Workflow] 412 | O --> P[📄 Display Final Result] 413 | ``` 414 | 415 | ### Session 3: Stateful Workflows 416 | 417 | **File: `session3/stateful_workflow.py`** - Demonstrates AI workflows that remember information between steps 418 | 419 | ```mermaid 420 | flowchart TD 421 | A[🚀 main<br/>Entry Point] --> B[🔄 run_simple_workflow<br/>Main Demo] 422 | B --> C[⚙️ create_simple_workflow<br/>Workflow Setup] 423 | 424 | C --> D[📊 StateGraph<br/>Workflow Graph] 425 | C --> E[🧠 WorkflowState<br/>State Definition] 426 | 427 | D --> F[🔍 research_step<br/>Research Node] 428 | D --> G[📝 draft_answer_step<br/>Draft Node] 429 | D --> H[✅ final_answer_step<br/>Final Node] 430 | 431 | F --> I[🔀 decide_next_step<br/>Router Function] 432 | G --> I 433 | H --> I 434 | 435 | I --> J[🏁 END<br/>Workflow Complete] 436 | I --> F 437 | I --> G 438 | I --> H 439 | 440 | F --> K[🔍 Agent<br/>Researcher] 441 | G --> L[✍️ Agent<br/>Writer] 442 | H --> M[✏️ Agent<br/>Editor] 443 | 444 | K --> N[⏱️ create_rate_limited_llm<br/>Rate Limited LLM] 445 | L --> N 446 | M --> N 447 | 448 | N --> O[⚙️ get_config<br/>Configuration] 449 | O --> P[🔧 get_agent_config<br/>Agent Settings] 450 | P --> Q[🤖 ChatOpenAI<br/>LLM Instance] 451 | 452 | B --> R[▶️ app.invoke<br/>Execute Workflow] 453 | R --> S[📊 Display Results] 454 | ``` 455 | 456 | **File: `session3/langgraph_basics.py`** - Fundamental LangGraph concepts and conditional routing 457 | 458 | ```mermaid 459 | flowchart TD 460 | A[🚀 main<br/>Entry Point] --> B[🧠 run_basic_langgraph_example<br/>Basic Example] 461 | A --> C[🔀 demonstrate_conditional_routing<br/>Routing Demo] 462 | 463 | B --> D[⚙️ create_langgraph_workflow<br/>Workflow Creation] 464 | D --> E[📊 StateGraph<br/>Graph Builder] 465 | D --> F[🧠 AgentState<br/>State Definition] 466 | 467 | E --> G[🔍 research_node<br/>Research Node] 468 | E --> H[📊 analyze_node<br/>Analysis Node] 469 | E --> I[💡 answer_node<br/>Answer Node] 470 | E --> J[🎛️ router_function<br/>Decision Logic] 471 | 472 | G --> K[📡 LLM.invoke<br/>API Call] 473 | H --> K 474 | I --> K 475 | 476 | J --> L[🏁 END<br/>Complete] 477 | J --> G 478 | J --> H 479 | J --> I 480 | 481 | C --> M[🧠 QueryState<br/>State Definition] 482 | C --> N[🏷️ classify_query<br/>Classification] 483 | C --> O[📝 simple_response<br/>Simple Handler] 484 | C --> P[📋 complex_response<br/>Complex Handler] 485 | C --> Q[🎯 route_based_on_complexity<br/>Smart Router] 486 | 487 | N --> R[📡 LLM.invoke<br/>Classify Query] 488 | O --> S[📡 LLM.invoke<br/>Simple Answer] 489 | P --> T[📡 LLM.invoke<br/>Complex Answer] 490 | 491 | Q --> U[🏁 END<br/>Complete] 492 | Q --> O 493 | Q --> P 494 | 495 | B --> V[▶️ app.invoke<br/>Execute] 496 | C --> W[▶️ app.invoke<br/>Execute] 497 | V --> X[📊 Display Results] 498 | W --> X 499 | ``` 500 | 501 | ### Utils: Helper Modules 502 | 503 | **File: `utils/config.py`** - Central configuration management and validation system 504 | 505 | ```mermaid 506 | flowchart TD 507 | A[🔑 get_config<br/>Global Instance] --> B[⚙️ WorkshopConfig<br/>Main Class] 508 | B --> C[📥 _load_config<br/>Load Settings] 509 | B --> D[🔄 _convert_types<br/>Type Conversion] 510 | B --> E[✅ validate<br/>Configuration Check] 511 | 512 | C --> F[📂 load_environment_variables<br/>From helpers.py] 513 | F --> G[📄 load_dotenv<br/>Load .env file] 514 | F --> H[🔐 validate_api_key<br/>Key Validation] 515 | 516 | B --> I[🤖 get_agent_config<br/>Agent Settings] 517 | B --> J[🔄 get_workflow_config<br/>Workflow Settings] 518 | B --> K[💾 save_to_env_file<br/>Persist Config] 519 | 520 | I --> L[🏷️ openrouter/model<br/>Model Prefix] 521 | J --> M[⏱️ timeout/debug<br/>Workflow Params] 522 | 523 | E --> N[🔑 API Key Check] 524 | E --> O[🤖 Model Validation] 525 | E --> P[🌡️ Temperature Range] 526 | E --> Q[🔢 Token Limits] 527 | ``` 528 | 529 | **File: `utils/helpers.py`** - Utility functions for environment handling and data processing 530 | 531 | ```mermaid 532 | flowchart TD 533 | A[📂 load_environment_variables<br/>Env Loading] --> B[📄 load_dotenv<br/>Load .env] 534 | A --> C[🔐 validate_api_key<br/>Key Validation] 535 | A --> D[📊 Return Dict<br/>Env Variables] 536 | 537 | E[📝 format_agent_response<br/>Response Formatting] --> F[🔤 String Check] 538 | E --> G[🔄 Object Conversion] 539 | E --> H[🧹 Clean Output] 540 | 541 | I[📊 create_progress_indicator<br/>Progress Bar] --> J[🔢 Calculate Percentage] 542 | I --> K[▬ Create Bar String] 543 | I --> L[📄 Return Formatted String] 544 | 545 | M[🛡️ safe_get_nested_value<br/>Safe Dict Access] --> N[🔍 Try Key Access] 546 | M --> O[⚠️ Exception Handling] 547 | M --> P[🔙 Return Default] 548 | 549 | Q[✂️ truncate_text<br/>Text Truncation] --> R[📏 Length Check] 550 | Q --> S[➕ Add Suffix] 551 | Q --> T[📄 Return Truncated] 552 | 553 | U[📋 format_workflow_summary<br/>Summary Creation] --> V[📊 Extract State Data] 554 | U --> W[📝 Format Lines] 555 | U --> X[📄 Return Summary] 556 | 557 | Y[🤖 get_available_models<br/>Model List] --> Z[📋 Return Model Array] 558 | AA[💰 estimate_cost<br/>Cost Calculation] --> BB[🔢 Token Estimation] 559 | AA --> CC[🔍 Cost Lookup] 560 | AA --> DD[💵 Return Cost] 561 | ``` 562 | 563 | **File: `utils/rate_limiter.py`** - Intelligent API rate limiting and retry logic 564 | 565 | ```mermaid 566 | flowchart TD 567 | A[🛡️ RateLimiter<br/>Main Class] --> B[🚀 __init__<br/>Initialize] 568 | A --> C[⏱️ _calculate_delay<br/>Delay Calculation] 569 | A --> D[🔍 _extract_retry_after<br/>Header Parsing] 570 | A --> E[🚨 _is_rate_limit_error<br/>Error Detection] 571 | A --> F[🔄 call_with_retry<br/>Retry Logic] 572 | 573 | B --> G[🔢 max_retries<br/>Retry Count] 574 | B --> H[⏱️ base_delay<br/>Base Delay] 575 | B --> I[⏱️ max_delay<br/>Max Delay] 576 | 577 | C --> J[📈 Exponential Backoff<br/>2^attempt] 578 | C --> K[🎲 Add Jitter<br/>±25%] 579 | C --> L[🛑 Cap at Max<br/>Delay Limit] 580 | 581 | D --> M[🔍 Regex Search<br/>X-RateLimit-Reset] 582 | D --> N[📅 Timestamp Parse] 583 | D --> O[🧮 Calculate Delay] 584 | 585 | E --> P[🔤 Error String Check] 586 | E --> Q[🎯 Keyword Match<br/>rate limit, 429, etc.] 587 | 588 | F --> R[🔁 Retry Loop<br/>max_retries + 1] 589 | F --> S[⏱️ Rate Limiting<br/>Min Interval] 590 | F --> T[⚠️ Exception Handling] 591 | F --> U[⏱️ Delay Calculation] 592 | F --> V[✅ Success Return] 593 | 594 | W[🏭 create_rate_limited_llm<br/>Factory Function] --> X[🛡️ RateLimiter<br/>Instance] 595 | W --> Y[🤖 ChatOpenAI<br/>LLM Creation] 596 | W --> Z[🤖 Return LLM<br/>With Retry Logic] 597 | ``` 598 | 599 | ## 🆘 Having Problems? 600 | 601 | ### "API Key Not Working" 602 | 603 | **For SambaNova:** 604 | 605 | - Check your `.env` file has the correct key from SambaNova dashboard 606 | - Make sure `AI_PROVIDER=sambanova` is set 607 | - Verify your SambaNova account has credits 608 | 609 | **For Ollama:** 610 | 611 | - Make sure Ollama is running: `ollama serve` 612 | - Check that your model is pulled: `ollama list` 613 | - Verify `AI_PROVIDER=ollama` and correct model name in `.env` 614 | 615 | **For NVIDIA:** 616 | 617 | - Check your `.env` file has the correct key from NVIDIA API dashboard 618 | - Make sure `AI_PROVIDER=nvidia` is set 619 | - Verify your NVIDIA account has credits and API access 620 | 621 | ### "Model Not Found" or "404 Error" 622 | 623 | - For SambaNova: Check available models at [SambaNova Models](https://sambanova.ai) 624 | - For Ollama: Pull the model first: `ollama pull gemma3:4b` 625 | - For NVIDIA: Check available models at [NVIDIA API Models](https://build.nvidia.com) 626 | - Update your `.env` file with the correct model name 627 | 628 | ### "Connection Failed" 629 | 630 | **SambaNova:** 631 | 632 | - Check internet connection 633 | - Verify API key is active 634 | - Try a different model 635 | 636 | **Ollama:** 637 | 638 | - Ensure Ollama is running on http://localhost:11434 639 | - Check: `curl http://localhost:11434/api/tags` 640 | - Restart Ollama if needed 641 | 642 | **NVIDIA:** 643 | 644 | - Check internet connection 645 | - Verify API key is active and has sufficient credits 646 | - Try a different model if available 647 | 648 | ### "Package Installation Failed" 649 | 650 | ```bash 651 | # Try reinstalling dependencies 652 | uv sync --reinstall 653 | ``` 654 | 655 | ### "Python Not Found" 656 | 657 | - Download Python from python.org (version 3.11 or higher, to match this project) 658 | - Make sure `python` command works in terminal 659 | 660 | ### Still Stuck? 661 | 662 | - Check that all files are in the right folders 663 | - Try running: `python -c "print('Python works!')"` 664 | - Ask for help - you're learning something new! 🚀 665 | 666 | ## 🎉 You're Done! 667 | 668 | **Congratulations!** You've learned about AI agents. What you built: 669 | 670 | - 🤖 AI that can chat and use tools 671 | - 👥 AI agents working as a team 672 | - 🧠 AI that remembers information between steps 673 | - ⚡ **Production-ready features**: Intelligent rate limiting, error handling, and API resilience 674 | 675 | ## Next Steps 676 | 677 | Ready for more? Try: 678 | 679 | - Change the questions in the examples 680 | - Add your own AI agents (see the starter sketches below) 681 | - Build something fun with what you learned! 
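Here is a concrete starting point for adding your own agent, written in the same style as the session 2 scripts. It is a minimal sketch, not part of the repository: it assumes your `.env` is configured as above, reuses the `LLM` setup that `session2/agent_roles_gui.py` builds from `config.py`, and the "Fact Checker" role, its task, and the file name `my_first_agent.py` are all invented for illustration.

```python
# my_first_agent.py - a hypothetical new script, not one of the workshop files
from crewai import Agent, Task, Crew, LLM

# Same config values session2/agent_roles_gui.py imports (cloud providers shown;
# for Ollama, mirror its get_llm() helper and drop the api_key argument)
from config import API_KEY, MODEL, API_BASE, PROVIDER

# LiteLLM-style "provider/model" id, as in the GUI's fallback path
llm = LLM(model=f"{PROVIDER}/{MODEL}", api_key=API_KEY, base_url=API_BASE)

# Invent any role you like - this fact checker is just an example
fact_checker = Agent(
    role="Fact Checker",
    goal="Verify claims and flag anything that looks wrong",
    backstory="I double-check statements carefully before they get published.",
    llm=llm,
    verbose=False,
)

check_task = Task(
    description="Review this claim and say whether it is accurate: 'The Moon is larger than Earth.'",
    expected_output="A short verdict with a one-sentence explanation.",
    agent=fact_checker,
)

crew = Crew(agents=[fact_checker], tasks=[check_task], verbose=False)
print(crew.kickoff())
```

Run it with `uv run my_first_agent.py` from the project root so the `config` import resolves.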
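The "production-ready features" mentioned above live in `utils/rate_limiter.py`. Its core pattern, shown in the rate limiter diagram: retry a call up to `max_retries` times, waiting `2^attempt` times a base delay between tries, with ±25% jitter and a cap on the delay. This stripped-down sketch illustrates the idea only; the real module does more (for example, it parses rate-limit headers and only retries on rate-limit errors):

```python
# Simplified backoff sketch - see utils/rate_limiter.py for the real implementation
import random
import time

def call_with_retry(fn, max_retries=3, base_delay=1.0, max_delay=30.0):
    """Call fn(), retrying on failure with capped, jittered exponential backoff."""
    for attempt in range(max_retries + 1):
        try:
            return fn()
        except Exception:
            if attempt == max_retries:
                raise  # out of retries: let the caller see the error
            delay = min(base_delay * (2 ** attempt), max_delay)  # exponential backoff, capped
            delay *= random.uniform(0.75, 1.25)  # ±25% jitter
            time.sleep(delay)
```

The jitter matters more than it looks: it spreads retries from many clients over time so they do not all hit the API at the same instant.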
682 | 683 | --- 684 | 685 | **Happy AI Building!** 🚀🤖 686 | -------------------------------------------------------------------------------- /session2/agent_roles_gui.py: -------------------------------------------------------------------------------- 1 | """ 2 | Agent Roles GUI - Interactive AI Agent Workshop 3 | A beautiful Streamlit interface for exploring agent roles and collaboration from session2/agent_roles.py 4 | """ 5 | 6 | import streamlit as st 7 | import time 8 | from typing import Dict, List 9 | from crewai import Agent, Task, Crew, LLM 10 | from config import API_KEY, MODEL, API_BASE, TEMPERATURE, MAX_TOKENS, MAX_RETRIES, RETRY_DELAY, PROVIDER 11 | 12 | # Step 3: Set up environment for LiteLLM 13 | import os 14 | if PROVIDER == 'sambanova': 15 | os.environ["SAMBANOVA_API_KEY"] = API_KEY 16 | elif PROVIDER == 'ollama': 17 | # Ollama doesn't need environment variables 18 | pass 19 | 20 | def get_llm(): 21 | """Get the appropriate LLM configuration based on provider.""" 22 | if PROVIDER == 'ollama': 23 | return LLM( 24 | model=f"ollama/{MODEL}", 25 | base_url="http://localhost:11434" 26 | ) 27 | elif PROVIDER == 'sambanova': 28 | return LLM( 29 | model=f"sambanova/{MODEL}", 30 | api_key=API_KEY, 31 | base_url=API_BASE 32 | ) 33 | else: 34 | # Default fallback 35 | return f"{PROVIDER}/{MODEL}" 36 | 37 | # Custom CSS for beautiful design 38 | def load_css(): 39 | st.markdown(""" 40 | 158 | """, unsafe_allow_html=True) 159 | 160 | # Agent creation functions 161 | def create_business_team_agents(): 162 | """Create business analysis team agents.""" 163 | llm = get_llm() 164 | analyst = Agent( 165 | role="Data Analyst", 166 | goal="Look at data and find useful patterns", 167 | backstory="I love working with numbers and finding hidden insights in data.", 168 | llm=llm, 169 | verbose=False 170 | ) 171 | strategist = Agent( 172 | role="Business Strategist", 173 | goal="Create plans based on data insights", 174 | backstory="I am good at making business plans and giving advice for growth.", 175 | llm=llm, 176 | verbose=False 177 | ) 178 | return analyst, strategist 179 | 180 | def create_food_team_agents(language="English"): 181 | """Create food preparation team agents.""" 182 | llm = get_llm() 183 | 184 | if "Gujarati" in language: 185 | chef_backstory = "I am a master Gujarati chef specializing in traditional Gujarati cuisine, farsan, and festive dishes. I know all about Gujarati flavors, spices, and cooking techniques." 186 | nutritionist_backstory = "I am a nutrition expert familiar with Gujarati dietary traditions, Ayurvedic principles, and the nutritional value of traditional Gujarati ingredients." 187 | else: 188 | chef_backstory = "I am a creative chef who loves making delicious food." 189 | nutritionist_backstory = "I am a health expert who makes sure food is good for you." 190 | 191 | chef = Agent( 192 | role="Chef", 193 | goal="Create and describe recipes", 194 | backstory=chef_backstory, 195 | llm=llm, 196 | verbose=False 197 | ) 198 | nutritionist = Agent( 199 | role="Nutritionist", 200 | goal="Check if food is healthy", 201 | backstory=nutritionist_backstory, 202 | llm=llm, 203 | verbose=False 204 | ) 205 | return chef, nutritionist 206 | 207 | # Analysis functions 208 | def run_business_team_analysis(sales_data): 209 | """Run business team analysis with fallback for demo.""" 210 | try: 211 | analyst, strategist = create_business_team_agents() 212 | 213 | analysis_task = Task( 214 | description=f"Look at this simple sales data: {sales_data}. 
Find trends.", 215 | expected_output="Tell me if sales are going up or down, and by how much.", 216 | agent=analyst 217 | ) 218 | 219 | strategy_task = Task( 220 | description="Based on the sales analysis, suggest 2 ways to increase sales next quarter.", 221 | expected_output="Two simple suggestions for growing the business.", 222 | agent=strategist, 223 | context=[analysis_task] 224 | ) 225 | 226 | crew = Crew( 227 | agents=[analyst, strategist], 228 | tasks=[analysis_task, strategy_task], 229 | verbose=False, 230 | memory=True, 231 | cache=True, 232 | max_rpm=1 233 | ) 234 | result = crew.kickoff() 235 | return f"Business Team Analysis Complete!\n\nSales Data: {sales_data}\n\nResult:\n{str(result)}" 236 | except Exception as e: 237 | # Fallback demo response 238 | return f"""Business Team Analysis Complete! 239 | 240 | Sales Data: {sales_data} 241 | 242 | Result: 243 | 📊 Data Analyst Findings: 244 | - Sales show an upward trend over the quarters 245 | - Growth rate: Approximately 25-30% quarter over quarter 246 | - Strong performance in Q3 and Q4 247 | 248 | 🎯 Business Strategist Recommendations: 249 | 1. Continue marketing campaigns that drove Q3-Q4 growth 250 | 2. Expand successful product lines identified in the analysis 251 | 3. Consider seasonal promotions to maintain momentum 252 | 253 | *Note: This is a demo response. Set up your SAMBA_API_KEY for real AI analysis.*""" 254 | 255 | def run_food_team_analysis(recipe_request, language="English"): 256 | """Run food team analysis with fallback for demo.""" 257 | try: 258 | chef, nutritionist = create_food_team_agents(language) 259 | 260 | # Adjust prompts based on language 261 | if "Gujarati" in language: 262 | recipe_description = f"Create a simple Gujarati recipe for {recipe_request}. Include traditional Gujarati ingredients and cooking methods. Provide the recipe in Gujarati language with English translations." 263 | recipe_output = "List ingredients in Gujarati with English translations and provide cooking steps in both languages." 264 | health_description = "Check if this Gujarati recipe is healthy according to traditional Gujarati dietary principles and suggest improvements." 265 | else: 266 | recipe_description = f"Create a simple recipe for {recipe_request}." 267 | recipe_output = "List ingredients and basic steps." 268 | health_description = "Check if this recipe is healthy and suggest improvements." 
269 | 270 | recipe_task = Task( 271 | description=recipe_description, 272 | expected_output=recipe_output, 273 | agent=chef 274 | ) 275 | 276 | health_task = Task( 277 | description=health_description, 278 | expected_output="Say if it's healthy and give one healthy tip.", 279 | agent=nutritionist, 280 | context=[recipe_task] 281 | ) 282 | 283 | crew = Crew( 284 | agents=[chef, nutritionist], 285 | tasks=[recipe_task, health_task], 286 | verbose=False, 287 | memory=True, 288 | cache=True, 289 | max_rpm=1 290 | ) 291 | result = crew.kickoff() 292 | return f"Food Team Analysis Complete!\n\nRecipe Request: {recipe_request}\nLanguage: {language}\n\nResult:\n{str(result)}" 293 | except Exception as e: 294 | # Fallback demo response - intelligent analysis of request 295 | recipe_lower = recipe_request.lower() 296 | 297 | if "Gujarati" in language: 298 | # Gujarati cuisine - analyze request context and provide thoughtful response 299 | 300 | # First, understand what the user is asking for 301 | request_analysis = "" 302 | 303 | # Check for specific traditional Gujarati dish names 304 | if any(word in recipe_lower for word in ['dhokla', 'dhoklaa', 'ઢોકળા']): 305 | request_analysis = "User is requesting Dhokla - a traditional Gujarati steamed snack" 306 | return f"""ગુજરાતી કુલિનરી વિશ્લેષણ પૂર્ણ! (Gujarati Culinary Analysis Complete!) 307 | 308 | Recipe Request: {recipe_request} 309 | Language: {language} 310 | 311 | 👨‍🍳 રસોઇયાની રેસીપી - ઢોકળા (Chef's Recipe - Dhokla) 312 | 313 | સામગ્રી (Ingredients - Serves 4): 314 | - 1 કપ ચણાનો લોટ (1 cup chana flour/besan) 315 | - 1/2 કપ દહીં (1/2 cup yogurt) 316 | - 1 ટીસ્પૂન લીંબુનો રસ (1 tsp lemon juice) 317 | - 1/2 ટીસ્પૂન હિંગ (1/2 tsp hing/asafoetida) 318 | - 1 ટીસ્પૂન રાઇ (1 tsp mustard seeds) 319 | - 2-3 લીલા મરચા (2-3 green chilies) 320 | - ખાંડ અને મીઠું સ્વાદ મુજબ (Sugar and salt to taste) 321 | - તલ અને ધાણા પાઉડર માટે (For garnish: sesame seeds and coriander) 322 | 323 | સૂચનાઓ (Instructions): 324 | 1. ચણાનો લોટ, દહીં, ખાંડ, મીઠું અને પાણી મિક્સ કરો 325 | 2. લીંબુનો રસ નાખીને ફેફસો જેટલું પાતળું બેટર બનાવો 326 | 3. ગ્રીઝ કરેલી થાલીમાં નાખીને સ્ટીમ કરો 15-20 મિનિટ 327 | 4. ઠંડુ થાય પછી કટિંગ કરો 328 | 5. રાઇ, હિંગ અને લીલા મરચા ઘીમાં તડકો 329 | 6. ઢોકળા પર નાખો અને ધાણા-તલથી ગાર્નિશ કરો 330 | 331 | 🥗 પોષણ વિશેષજ્ઞનું વિશ્લેષણ (Nutritionist's Analysis): 332 | ઢોકળા સ્ટીમ કરેલી વાનગી છે જે સ્વાસ્થ્યપ્રદ છે. પોષણ મૂલ્ય: 333 | - ચણાનો લોટ પ્રોટીન અને આયર્નથી ભરપૂર 334 | - દહીં કેલ્શિયમ અને પ્રોબાયોટિક્સ આપે છે 335 | - ઓઈલ-ફ્રી સ્ટીમિંગ હાર્ટ-હેલ્ધી છે 336 | - લીંબુ વિટામિન C નો સારો સ્ત્રોત 337 | 338 | *Note: This is a demo response. Set up your SAMBA_API_KEY for real AI analysis.*""" 339 | 340 | elif any(word in recipe_lower for word in ['thepla', 'thhepla', 'થેપલા']): 341 | request_analysis = "User is requesting Thepla - traditional Gujarati methi flatbread" 342 | return f"""ગુજરાતી કુલિનરી વિશ્લેષણ પૂર્ણ! (Gujarati Culinary Analysis Complete!) 
343 | 344 | Recipe Request: {recipe_request} 345 | Language: {language} 346 | 347 | 👨‍🍳 રસોઇયાની રેસીપી - થેપલા (Chef's Recipe - Thepla) 348 | 349 | સામગ્રી (Ingredients - Makes 8-10 theplas): 350 | - 1 કપ ગોળ મેદો (1 cup wheat flour) 351 | - 1/2 કપ મેથીના પાન (1/2 cup fenugreek leaves) 352 | - 2 ટેબલસ્પૂન બેસન (2 tbsp besan/chickpea flour) 353 | - 1 ટીસ્પૂન લાલ મરચું પાઉડર (1 tsp red chili powder) 354 | - 1/2 ટીસ્પૂન હળદર (1/2 tsp turmeric) 355 | - 1 ટીસ્પૂન રાઇ પાઉડર (1 tsp mustard powder) 356 | - મીઠું અને તલ સ્વાદ મુજબ (Salt and sesame seeds to taste) 357 | - તેલ રોટલી બનાવવા માટે (Oil for making rotis) 358 | 359 | સૂચનાઓ (Instructions): 360 | 1. મેથીના પાન ધોઈને સૂકા કરો અને બારીક કાપો 361 | 2. બધી સામગ્રી મિક્સ કરીને મટીર જેવું લોટ બનાવો 362 | 3. ૧૫-૨૦ મિનિટ રહેવા દો 363 | 4. નાની રોટલી બનાવીને તેલમાં શેકો 364 | 5. બંને બાજુ સોનેરી થાય ત્યાં સુધી શેકો 365 | 6. ગરમ ગરમ સર્વ કરો 366 | 367 | 🥗 પોષણ વિશેષજ્ઞનું વિશ્લેષણ (Nutritionist's Analysis): 368 | થેપલા ગુજરાતી ટ્રેડિશનલ સ્નેક છે જે ખૂબ આરોગ્યપ્રદ છે. આહાર મૂલ્ય: 369 | - મેથીના પાન ફાઇબર અને આયર્નથી ભરપૂર 370 | - ગોળ મેદો કોમ્પ્લેક્સ કાર્બોહાઇડ્રેટ્સ આપે છે 371 | - મસાલા પાચન સુધારે છે 372 | - લાંબા સમય સુધી ભૂખ ન મરે તેવું રાખે છે 373 | 374 | *Note: This is a demo response. Set up your SAMBA_API_KEY for real AI analysis.*""" 375 | 376 | elif any(word in recipe_lower for word in ['khandoi', 'khandvi', 'ખંડવી']): 377 | request_analysis = "User is requesting Khandoi - traditional Gujarati steamed sweet" 378 | return f"""ગુજરાતી કુલિનરી વિશ્લેષણ પૂર્ણ! (Gujarati Culinary Analysis Complete!) 379 | 380 | Recipe Request: {recipe_request} 381 | Language: {language} 382 | 383 | 👨‍🍳 રસોઇયાની રેસીપી - ખંડવી (Chef's Recipe - Khandoi) 384 | 385 | સામગ્રી (Ingredients - Serves 4): 386 | - 1 કપ ચણાનો લોટ (1 cup chana flour/besan) 387 | - 1/2 કપ દહીં (1/2 cup yogurt) 388 | - 1/2 કપ ખાંડ (1/2 cup sugar) 389 | - 1/4 કપ ઘી (1/4 cup ghee) 390 | - 1/4 ટીસ્પૂન હળદર (1/4 tsp turmeric) 391 | - 1/4 ટીસ્પૂન એલચી પાઉડર (1/4 tsp cardamom powder) 392 | - ચારોળી અને કાજુ માટે (For garnish: pistachios and cashews) 393 | 394 | સૂચનાઓ (Instructions): 395 | 1. ચણાનો લોટ, દહીં, હળદર અને પાણી મિક્સ કરીને બેટર બનાવો 396 | 2. ૨ કલાક રહેવા દો (Let batter rest for 2 hours) 397 | 3. ઘીમાં ખાંડ ગોલ્ડન થાય ત્યાં સુધી ગરમ કરો 398 | 4. બેટર નાખીને હલાવતા રહો જ્યાં સુધી ઘટ્ટ ન થાય 399 | 5. થાલીમાં પાથરીને ઠંડુ કરો 400 | 6. એલચી પાઉડર અને ચારોળીથી ગાર્નિશ કરો 401 | 402 | 🥗 પોષણ વિશેષજ્ઞનું વિશ્લેષણ (Nutritionist's Analysis): 403 | ખંડવી ગુજરાતી મીઠાઈ છે જે પ્રોટીન અને કાર્બોહાઇડ્રેટ્સથી ભરપૂર છે. આરોગ્યપ્રદ લાભ: 404 | - ચણાનો લોટ પ્રોટીનનો સારો સ્ત્રોત છે 405 | - દહીં પાચન સુધારે છે 406 | - મર્યાદિત ખાંડ રાખો ડાયાબિટીસ માટે સલામત 407 | - એલચી પાચન સહાય કરે છે 408 | 409 | *Note: This is a demo response. 
Set up your SAMBA_API_KEY for real AI analysis.*""" 410 | 411 | # Chocolate chip cookies request 412 | elif any(word in recipe_lower for word in ['chocolate', 'chip', 'cookie']): 413 | request_analysis = "User is requesting chocolate chip cookies - a Western sweet treat" 414 | return f"""ગુજરાતી કુલિનરી વિશ્લેષણ - વિનંતીની સમજ (Gujarati Culinary Analysis - Understanding Request) 415 | 416 | Recipe Request: {recipe_request} 417 | Language: {language} 418 | 419 | 🔍 વિનંતી વિશ્લેષણ (Request Analysis): 420 | {request_analysis} 421 | 422 | 💭 વિચાર પ્રક્રિયા (Thought Process): 423 | - ચોકલેટ ચીપ કુકીઝ એ પાશ્ચાત્ય મીઠાઈ છે (Chocolate chip cookies are a Western sweet) 424 | - ગુજરાતી સંસ્કૃતિમાં આનું સમાણ શોધીએ (Let's find an equivalent in Gujarati culture) 425 | - ખંડવી અથવા લાડુ જેવી મીઠાઈ વધુ યોગ્ય રહેશે (Khandoi or Laddu would be more appropriate) 426 | - પરંતુ વિનંતી મુજબ ચોકલેટ સ્વાદ આપવો જોઈએ (But we should provide chocolate flavor as requested) 427 | 428 | 👨‍🍳 રસોઇયાની રેસીપી - ચોકલેટ ખંડવી (Chef's Recipe - Chocolate Khandoi) 429 | 430 | સામગ્રી (Ingredients - Serves 4): 431 | - 1 કપ ચણાનો લોટ (1 cup chana flour/besan) 432 | - 1/2 કપ દહીં (1/2 cup yogurt) 433 | - 1/2 કપ ખાંડ (1/2 cup sugar) 434 | - 1/4 કપ ચોકલેટ ચીપ્સ અથવા કોકો પાઉડર (1/4 cup chocolate chips or cocoa powder) 435 | - 1/4 કપ ઘી (1/4 cup ghee) 436 | - 1/4 ટીસ્પૂન હળદર (1/4 tsp turmeric) 437 | - 1/4 ટીસ્પૂન એલચી પાઉડર (1/4 tsp cardamom powder) 438 | 439 | સૂચનાઓ (Instructions): 440 | 1. ચણાનો લોટ, દહીં, હળદર અને પાણી મિક્સ કરો 441 | 2. ચોકલેટ ચીપ્સ ગલાવીને નાખો 442 | 3. ૨ કલાક રહેવા દો 443 | 4. ઘીમાં ખાંડ ગોલ્ડન કરો 444 | 5. બેટર નાખીને ઘટ્ટ કરો 445 | 6. થાલીમાં પાથરીને ચોકલેટ ચીપ્સથી ગાર્નિશ કરો 446 | 447 | 🥗 પોષણ વિશેષજ્ઞનું વિશ્લેષણ (Nutritionist's Analysis): 448 | આ ગુજરાતી-શૈલીની ચોકલેટ મીઠાઈ છે જે પરંપરાગત સ્વાદ સાથે મળે છે. લાભ: 449 | - ચણાનો લોટ પ્રોટીન આપે છે 450 | - દહીં પાચન સુધારે છે 451 | - ચોકલેટનું મર્યાદિત પ્રમાણ આરોગ્યપ્રદ રહે છે 452 | 453 | *Note: This is a demo response. Set up your API key for real AI analysis.*""" 454 | 455 | # Pizza or bread request 456 | elif any(word in recipe_lower for word in ['pizza', 'bread', 'pasta', 'pasta', 'noodle']): 457 | request_analysis = "User is requesting pizza/bread/pasta - Western comfort food" 458 | return f"""ગુજરાતી કુલિનરી વિશ્લેષણ - વિનંતીની સમજ (Gujarati Culinary Analysis - Understanding Request) 459 | 460 | Recipe Request: {recipe_request} 461 | Language: {language} 462 | 463 | 🔍 વિનંતી વિશ્લેષણ (Request Analysis): 464 | {request_analysis} 465 | 466 | 💭 વિચાર પ્રક્રિયા (Thought Process): 467 | - પિઝા એ ઈટાલિયન વાનગી છે જે રોટલી જેવી લાગે છે (Pizza is Italian dish that resembles roti) 468 | - ગુજરાતીમાં રોટલી અને શાક એ સમાન છે (In Gujarat, roti and shaak are similar) 469 | - મેથી થેપલા અથવા ભાખરી વધુ યોગ્ય રહેશે (Methi thepla or bhakhri would be more appropriate) 470 | - પરંતુ વિનંતી મુજબ ટોપિંગ્સ સાથે રોટલી બનાવી શકાય (But we can make roti with toppings as requested) 471 | 472 | 👨‍🍳 રસોઇયાની રેસીપી - ગુજરાતી પિઝા રોટલી (Chef's Recipe - Gujarati Pizza Roti) 473 | 474 | સામગ્રી (Ingredients - Serves 2): 475 | - 1 કપ ગોળ મેદો (1 cup wheat flour) 476 | - 1/2 કપ દહીં (1/2 cup yogurt) 477 | - 1 ટીસ્પૂન રાઇ પાઉડર (1 tsp mustard powder) 478 | - 1/2 ટીસ્પૂન હળદર (1/2 tsp turmeric) 479 | - મીઠું સ્વાદ મુજબ (Salt to taste) 480 | - ટોપિંગ માટે: શાક, પનીર, મસાલા (For topping: vegetables, paneer, spices) 481 | 482 | સૂચનાઓ (Instructions): 483 | 1. મેદો, દહીં, મસાલા મિક્સ કરીને રોટલી બનાવો 484 | 2. રોટલી શેકીને સોનેરી કરો 485 | 3. ઉપર શાક અને પનીર નાખો 486 | 4. 
ઘીમાં તડકો અને ધાણા-લીંબુથી ગાર્નિશ કરો 487 | 488 | 🥗 પોષણ વિશેષજ્ઞનું વિશ્લેષણ (Nutritionist's Analysis): 489 | આ ગુજરાતી-શૈલીની પિઝા છે જે પરંપરાગત રોટલી જેવી છે. લાભ: 490 | - ગોળ મેદો કોમ્પ્લેક્સ કાર્બોહાઇડ્રેટ્સ આપે છે 491 | - શાક વિટામિન્સ અને ફાઇબર આપે છે 492 | - દહીં પ્રોટીન અને કેલ્શિયમ આપે છે 493 | 494 | *Note: This is a demo response. Set up your SAMBA_API_KEY for real AI analysis.*""" 495 | 496 | # General sweet/dessert 497 | elif any(word in recipe_lower for word in ['sweet', 'mithai', 'dessert', 'cake']): 498 | request_analysis = "User is requesting a sweet/dessert dish" 499 | return f"""ગુજરાતી કુલિનરી વિશ્લેષણ - વિનંતીની સમજ (Gujarati Culinary Analysis - Understanding Request) 500 | 501 | Recipe Request: {recipe_request} 502 | Language: {language} 503 | 504 | 🔍 વિનંતી વિશ્લેષણ (Request Analysis): 505 | {request_analysis} 506 | 507 | 💭 વિચાર પ્રક્રિયા (Thought Process): 508 | - મીઠાઈ માટે ગુજરાતી ખંડવી યોગ્ય છે (Khandoi is perfect for sweets in Gujarat) 509 | - તે પરંપરાગત ગુજરાતી મીઠાઈ છે (It's a traditional Gujarati sweet) 510 | - ચણાનો લોટ અને દહીંથી બને છે (Made from chana flour and yogurt) 511 | - આરોગ્યપ્રદ અને સ્વાદિષ્ટ છે (Healthy and delicious) 512 | 513 | 👨‍🍳 રસોઇયાની રેસીપી - ખંડવી (Chef's Recipe - Khandoi) 514 | 515 | સામગ્રી (Ingredients - Serves 4): 516 | - 1 કપ ચણાનો લોટ (1 cup chana flour/besan) 517 | - 1/2 કપ દહીં (1/2 cup yogurt) 518 | - 1/2 કપ ખાંડ (1/2 cup sugar) 519 | - 1/4 કપ ઘી (1/4 cup ghee) 520 | - 1/4 ટીસ્પૂન હળદર (1/4 tsp turmeric) 521 | - 1/4 ટીસ્પૂન એલચી પાઉડર (1/4 tsp cardamom powder) 522 | 523 | સૂચનાઓ (Instructions): 524 | 1. ચણાનો લોટ, દહીં, હળદર મિક્સ કરો 525 | 2. ૨ કલાક રહેવા દો 526 | 3. ઘીમાં ખાંડ ગોલ્ડન કરો 527 | 4. બેટર નાખીને ઘટ્ટ કરો 528 | 5. થાલીમાં પાથરીને એલચીથી ગાર્નિશ કરો 529 | 530 | 🥗 પોષણ વિશેષજ્ઞનું વિશ્લેષણ (Nutritionist's Analysis): 531 | ખંડવી ગુજરાતી મીઠાઈ છે જે પ્રોટીન અને કાર્બોહાઇડ્રેટ્સથી ભરપૂર છે. 532 | 533 | *Note: This is a demo response. Set up your SAMBA_API_KEY for real AI analysis.*""" 534 | 535 | # Default Gujarati response 536 | else: 537 | request_analysis = f"User is requesting: {recipe_request} - analyzing for Gujarati adaptation" 538 | return f"""ગુજરાતી કુલિનરી વિશ્લેષણ - વિનંતીની સમજ (Gujarati Culinary Analysis - Understanding Request) 539 | 540 | Recipe Request: {recipe_request} 541 | Language: {language} 542 | 543 | 🔍 વિનંતી વિશ્લેષણ (Request Analysis): 544 | {request_analysis} 545 | 546 | 💭 વિચાર પ્રક્રિયા (Thought Process): 547 | - વિનંતીને સમજીને ગુજરાતી સંસ્કૃતિમાં ફિટ કરવાનો પ્રયાસ કરું છું 548 | - જો વાનગી સ્નેક છે તો ઢોકળા અથવા ખંડવી સૂચવી શકું છું 549 | - જો મુખ્ય ભોજન છે તો શાક-ભાખરી સૂચવી શકું છું 550 | - હંમેશા પરંપરાગત ગુજરાતી સ્વાદ અને આરોગ્ય ધ્યાનમાં રાખું છું 551 | 552 | 👨‍🍳 રસોઇયાની સૂચન (Chef's Recommendation): 553 | આ વિનંતી માટે ગુજરાતી રીતે ઢોકળા અથવા થેપલા જેવી વાનગી વધુ યોગ્ય રહેશે. 554 | 555 | *Note: This is a demo response. Set up your SAMBA_API_KEY for real AI analysis.*""" 556 | else: 557 | # English fallback - analyze request type 558 | if any(word in recipe_lower for word in ['sweet', 'dessert', 'cookie', 'cake', 'pie']): 559 | return f"""Culinary Analysis Complete! 560 | 561 | Recipe Request: {recipe_request} 562 | Language: {language} 563 | 564 | 👨‍🍳 Chef's Recipe - Classic Chocolate Chip Cookies: 565 | 566 | Ingredients: 567 | - 2 cups all-purpose flour 568 | - 1 cup butter, softened 569 | - 3/4 cup granulated sugar 570 | - 1 cup chocolate chips 571 | - 1 tsp vanilla extract 572 | - 1/2 tsp baking soda 573 | - 1/4 tsp salt 574 | 575 | Instructions: 576 | 1. 
Preheat oven to 375°F (190°C) 577 | 2. Cream together butter and sugars 578 | 3. Beat in eggs and vanilla 579 | 4. Combine flour, baking soda, and salt 580 | 5. Stir in chocolate chips 581 | 6. Drop spoonfuls onto baking sheet 582 | 7. Bake for 9-11 minutes 583 | 584 | 🥗 Nutritionist's Analysis: 585 | These cookies are a sweet treat but high in sugar and fats. Suggestions: 586 | - Use whole wheat flour instead of all-purpose 587 | - Reduce sugar by 1/4 cup and add applesauce 588 | - Include nuts for healthy fats and protein 589 | - Portion control: 1-2 cookies per serving 590 | 591 | *Note: This is a demo response. Set up your SAMBA_API_KEY for real AI analysis.*""" 592 | else: 593 | return f"""Culinary Analysis Complete! 594 | 595 | Recipe Request: {recipe_request} 596 | Language: {language} 597 | 598 | 👨‍🍳 Chef's Recipe: 599 | Ingredients: 600 | - 2 cups all-purpose flour 601 | - 1 cup butter, softened 602 | - 3/4 cup granulated sugar 603 | - 1 cup chocolate chips 604 | - 1 tsp vanilla extract 605 | - 1/2 tsp baking soda 606 | - 1/4 tsp salt 607 | 608 | Instructions: 609 | 1. Preheat oven to 375°F (190°C) 610 | 2. Cream together butter and sugars 611 | 3. Beat in eggs and vanilla 612 | 4. Combine flour, baking soda, and salt 613 | 5. Stir in chocolate chips 614 | 6. Drop spoonfuls onto baking sheet 615 | 7. Bake for 9-11 minutes 616 | 617 | 🥗 Nutritionist's Analysis: 618 | These cookies are a treat but high in sugar and fats. Suggestions: 619 | - Use whole wheat flour instead of all-purpose 620 | - Reduce sugar by 1/4 cup and add applesauce 621 | - Include nuts for healthy fats and protein 622 | - Portion control: 1-2 cookies per serving 623 | 624 | *Note: This is a demo response. Set up your SAMBA_API_KEY for real AI analysis.*""" 625 | 626 | def main(): 627 | """Main Streamlit application.""" 628 | st.set_page_config( 629 | page_title="Agent Roles Workshop", 630 | page_icon="👥", 631 | layout="wide", 632 | initial_sidebar_state="expanded" 633 | ) 634 | 635 | load_css() 636 | 637 | # Initialize session state 638 | if 'results_history' not in st.session_state: 639 | st.session_state.results_history = [] 640 | 641 | # Sidebar 642 | with st.sidebar: 643 | st.image("https://img.icons8.com/fluency/96/user-group-man-woman.png", width=80) 644 | st.title("Agent Roles Workshop") 645 | st.markdown("---") 646 | 647 | # Stats 648 | col1, col2 = st.columns(2) 649 | with col1: 650 | st.metric("Teams", "2") 651 | with col2: 652 | st.metric("Runs", len(st.session_state.results_history)) 653 | 654 | st.markdown("---") 655 | st.markdown("### About") 656 | st.write("Explore how AI agents with different roles collaborate on tasks!") 657 | 658 | # Clear history button 659 | if st.button("🗑️ Clear History", use_container_width=True): 660 | st.session_state.results_history = [] 661 | st.success("History cleared!") 662 | 663 | # Main header 664 | st.markdown(""" 665 |
666 |

👥 Agent Roles Workshop

667 |

AI Agents Working Together

668 |

See how different AI agents collaborate like a real team!

669 |
670 | """, unsafe_allow_html=True) 671 | 672 | # Team selection 673 | st.markdown("## 🎯 Choose Your AI Agent Team") 674 | 675 | team_choice = st.radio( 676 | "Select a team to explore:", 677 | ["📊 Business Analysis Team", "🍳 Food Preparation Team"], 678 | horizontal=True, 679 | label_visibility="collapsed" 680 | ) 681 | 682 | # Business Team Section 683 | if team_choice == "📊 Business Analysis Team": 684 | st.markdown("### 📊 Business Intelligence Team") 685 | st.write("**Data Analyst + Business Strategist** working together to analyze business data and create growth strategies.") 686 | 687 | col1, col2 = st.columns(2) 688 | with col1: 689 | st.markdown(""" 690 |
691 |                 <h4>📈 Data Analyst</h4>
692 |                 <p>Analyzes sales data and finds patterns</p>
693 |             </div>
694 | """, unsafe_allow_html=True) 695 | 696 | with col2: 697 | st.markdown(""" 698 |
699 |                 <h4>🎯 Business Strategist</h4>
700 |                 <p>Creates growth strategies from insights</p>
701 |             </div>
702 | """, unsafe_allow_html=True) 703 | 704 | st.markdown("#### 💼 Enter Your Sales Data") 705 | user_input = st.text_area( 706 | "Sales data to analyze:", 707 | placeholder="Example: Q1 sales were $10,000, Q2 were $12,000, Q3 were $15,000", 708 | height=100, 709 | key="business_input", 710 | help="Enter sales data and the AI team will analyze trends and suggest strategies!" 711 | ) 712 | 713 | if st.button("🚀 Analyze Business Data", type="primary", use_container_width=True): 714 | if not user_input.strip(): 715 | st.warning("Please enter some sales data to analyze!") 716 | else: 717 | with st.spinner("🤖 AI agents are analyzing your business data..."): 718 | progress_bar = st.progress(0) 719 | status_text = st.empty() 720 | sub_status_text = st.empty() 721 | 722 | for i in range(100): 723 | progress_bar.progress(i + 1) 724 | if i < 20: 725 | status_text.text("📊 Data Analyst: Initializing analysis...") 726 | sub_status_text.text("Loading sales data and preparing datasets...") 727 | elif i < 40: 728 | status_text.text("📊 Data Analyst: Analyzing quarterly trends...") 729 | sub_status_text.text("Calculating growth rates and identifying patterns...") 730 | elif i < 60: 731 | status_text.text("📊 Data Analyst: Computing key metrics...") 732 | sub_status_text.text("Analyzing sales velocity and market indicators...") 733 | elif i < 80: 734 | status_text.text("🎯 Business Strategist: Reviewing analysis...") 735 | sub_status_text.text("Evaluating data insights and market conditions...") 736 | elif i < 90: 737 | status_text.text("🎯 Business Strategist: Developing strategies...") 738 | sub_status_text.text("Creating actionable recommendations and growth plans...") 739 | else: 740 | status_text.text("✅ Finalizing comprehensive business report...") 741 | sub_status_text.text("Compiling analysis results and strategic recommendations...") 742 | time.sleep(0.03) 743 | 744 | result = run_business_team_analysis(user_input) 745 | st.session_state.results_history.append({ 746 | "team": "Business Analysis", 747 | "input": user_input, 748 | "result": result, 749 | "timestamp": time.time() 750 | }) 751 | 752 | st.success("✅ Business Analysis Complete!") 753 | st.markdown("### 📄 Analysis Results") 754 | st.markdown(result) 755 | 756 | # Food Team Section 757 | else: 758 | st.markdown("### 🍳 Culinary Innovation Team") 759 | st.write("**Chef + Nutritionist** collaborating to create healthy, delicious recipes.") 760 | 761 | col1, col2 = st.columns(2) 762 | with col1: 763 | st.markdown(""" 764 |
765 |

👨‍🍳 Master Chef

766 |

Creates delicious recipes

767 |
768 | """, unsafe_allow_html=True) 769 | 770 | with col2: 771 | st.markdown(""" 772 |
773 |                 <h4>🥗 Nutrition Expert</h4>
774 |                 <p>Ensures recipes are healthy</p>
775 |             </div>
776 | """, unsafe_allow_html=True) 777 | 778 | st.markdown("#### 🍽️ Enter Your Recipe Request") 779 | col1, col2 = st.columns([3, 1]) 780 | with col1: 781 | user_input = st.text_area( 782 | "What would you like to cook?", 783 | placeholder="Example: chocolate chip cookies", 784 | height=100, 785 | key="food_input", 786 | help="Enter a recipe request and the AI team will create and analyze it!" 787 | ) 788 | with col2: 789 | language = st.selectbox( 790 | "Recipe Language", 791 | ["English", "Gujarati (ગુજરાતી)"], 792 | key="language_select", 793 | help="Choose the language for your recipe" 794 | ) 795 | 796 | if st.button("🍳 Create & Analyze Recipe", type="primary", use_container_width=True): 797 | if not user_input.strip(): 798 | st.warning("Please enter a recipe request!") 799 | else: 800 | with st.spinner("🤖 Chef and nutritionist are collaborating..."): 801 | progress_bar = st.progress(0) 802 | status_text = st.empty() 803 | sub_status_text = st.empty() 804 | 805 | for i in range(100): 806 | progress_bar.progress(i + 1) 807 | if i < 25: 808 | status_text.text("👨‍🍳 Chef: Researching recipe foundations...") 809 | sub_status_text.text("Analyzing ingredients and cooking techniques...") 810 | elif i < 45: 811 | status_text.text("👨‍🍳 Chef: Crafting recipe structure...") 812 | sub_status_text.text("Developing cooking methods and flavor profiles...") 813 | elif i < 65: 814 | status_text.text("👨‍🍳 Chef: Refining ingredient balance...") 815 | sub_status_text.text("Optimizing measurements and cooking times...") 816 | elif i < 80: 817 | status_text.text("🥗 Nutritionist: Analyzing nutritional content...") 818 | sub_status_text.text("Evaluating calorie content and macronutrients...") 819 | elif i < 90: 820 | status_text.text("🥗 Nutritionist: Assessing health impact...") 821 | sub_status_text.text("Checking vitamins, minerals, and dietary balance...") 822 | else: 823 | status_text.text("✅ Finalizing healthy recipe with improvements...") 824 | sub_status_text.text("Compiling final recipe with nutritional recommendations...") 825 | time.sleep(0.03) 826 | 827 | result = run_food_team_analysis(user_input, language) 828 | st.session_state.results_history.append({ 829 | "team": "Food Preparation", 830 | "input": user_input, 831 | "language": language, 832 | "result": result, 833 | "timestamp": time.time() 834 | }) 835 | 836 | st.success("✅ Recipe Complete!") 837 | st.markdown("### 📄 Recipe & Analysis") 838 | st.markdown(result) 839 | 840 | # Results History 841 | if st.session_state.results_history: 842 | st.markdown("---") 843 | st.markdown("## 📈 Recent Results") 844 | 845 | for i, result in enumerate(reversed(st.session_state.results_history[-3:])): # Show last 3 846 | language_info = f" - {result.get('language', 'English')}" if 'language' in result else "" 847 | with st.expander(f"{result['team']}{language_info} - {result['input'][:40]}..."): 848 | st.write(f"**Team:** {result['team']}") 849 | st.write(f"**Input:** {result['input']}") 850 | if 'language' in result: 851 | st.write(f"**Language:** {result['language']}") 852 | st.write(f"**Time:** {time.strftime('%H:%M:%S', time.localtime(result['timestamp']))}") 853 | st.code(result['result'], language=None) 854 | 855 | # Footer 856 | st.markdown("---") 857 | st.markdown(""" 858 |
859 |

Session 2: Learning about AI agent roles and team collaboration

860 |

Each agent has specialized skills, just like people in a real team!

861 |
862 | """, unsafe_allow_html=True) 863 | 864 | if __name__ == "__main__": 865 | main() --------------------------------------------------------------------------------