├── my-adk-agents
├── learning-content-system(WIP)
│   ├── agents
│   │   ├── __init__.py
│   │   ├── loop_agents
│   │   │   ├── __init__.py
│   │   │   └── quality_assessor.py
│   │   ├── parallel_agents
│   │   │   ├── __init__.py
│   │   │   └── slide_generator.py
│   │   └── sequential_agents
│   │   │   ├── __init__.py
│   │   │   ├── content_analyzer.py
│   │   │   ├── audio_generator.py
│   │   │   ├── visual_generator.py
│   │   │   └── assembly_agent.py
│   ├── tools
│   │   ├── __init__.py
│   │   └── mcp_config.py
│   ├── utils
│   │   ├── __init__.py
│   │   └── content_types.py
│   ├── requirements.txt
│   └── .env.example
├── academic-research-assistant
│   ├── items.json
│   ├── academic_research_assistant
│   │   ├── tools
│   │   │   ├── __init__.py
│   │   │   └── url_scraper.py
│   │   ├── sub_agents
│   │   │   ├── comparison_root_agent
│   │   │   │   ├── sub_agents
│   │   │   │   │   ├── __init__.py
│   │   │   │   │   ├── tools
│   │   │   │   │   │   ├── __init__.py
│   │   │   │   │   │   └── exit_analysis.py
│   │   │   │   │   ├── analysis_formatter_agent.py
│   │   │   │   │   ├── analysis_critic_agent.py
│   │   │   │   │   ├── analysis_generator_agent.py
│   │   │   │   │   └── prompt.py
│   │   │   │   ├── __init__.py
│   │   │   │   └── agent.py
│   │   │   ├── profiler_agent
│   │   │   │   ├── __init__.py
│   │   │   │   ├── agent.py
│   │   │   │   └── prompt.py
│   │   │   ├── __init__.py
│   │   │   └── searcher_agent
│   │   │   │   ├── __init__.py
│   │   │   │   └── prompt.py
│   │   ├── shared_libraries
│   │   │   ├── __init__.py
│   │   │   └── constants.py
│   │   ├── __init__.py
│   │   ├── agent.py
│   │   └── prompts.py
│   ├── image.png
│   ├── requirements.txt
│   ├── .env.example
│   └── README.md
├── data-analyst
│   ├── requirements.txt
│   └── data_analyst_agent
│   │   ├── __init__.py
│   │   └── agent.py
├── education-path-advisor
│   ├── education_advisor
│   │   ├── sub_agents
│   │   │   ├── __init__.py
│   │   │   ├── data_analyst
│   │   │   │   ├── __init__.py
│   │   │   │   ├── agent.py
│   │   │   │   └── prompt.py
│   │   │   ├── pathway_analyst
│   │   │   │   ├── __init__.py
│   │   │   │   ├── agent.py
│   │   │   │   └── prompt.py
│   │   │   ├── implementation_analyst
│   │   │   │   ├── __init__.py
│   │   │   │   ├── agent.py
│   │   │   │   └── prompt.py
│   │   │   └── risk_analyst
│   │   │   │   ├── __init__.py
│   │   │   │   ├── agent.py
│   │   │   │   └── prompt.py
│   │   ├── __init__.py
│   │   ├── agent.py
│   │   └── prompt.py
│   ├── .env.example
│   ├── agentflow.png
│   ├── system_architecture.png
│   ├── eval
│   │   ├── test_eval.py
│   │   └── data
│   │   │   └── education-advisor.test.json
│   ├── pyproject.toml
│   ├── tests
│   │   └── test_agents.py
│   └── README.md
├── job-interview-agent
│   ├── app
│   │   ├── __init__.py
│   │   ├── static
│   │   │   └── js
│   │   │   │   ├── pcm-recorder-processor.js
│   │   │   │   ├── audio-player.js
│   │   │   │   ├── audio-recorder.js
│   │   │   │   └── pcm-player-processor.js
│   │   ├── interview_agent
│   │   │   ├── __init__.py
│   │   │   ├── tools
│   │   │   │   └── __init__.py
│   │   │   ├── agent.py
│   │   │   ├── utils
│   │   │   │   ├── __init__.py
│   │   │   │   └── calendar_utils.py
│   │   │   ├── data
│   │   │   │   ├── interview_config.json
│   │   │   │   └── question_bank.json
│   │   │   └── prompts.py
│   │   └── main.py
│   ├── Website.png
│   ├── agent.png
│   ├── workflow_image.png
│   ├── Preparing for Your AI Engineer Interviews 🚀.mp4
│   ├── requirements.txt
│   ├── Dockerfile
│   ├── .env.example
│   ├── docker-compose.yml
│   ├── .gitignore
│   └── setup_calendar_auth.py
└── project-manager-agent
│   ├── .env.example
│   ├── project_management_agent
│   └── __init__.py
│   ├── requirements.txt
│   ├── main.py
│   └── utils.py
├── .gitignore
└── CONTRIBUTING.md
/my-adk-agents/learning-content-system(WIP)/agents/__init__.py:
--------------------------------------------------------------------------------
1 | 
--------------------------------------------------------------------------------
/my-adk-agents/learning-content-system(WIP)/tools/__init__.py:
-------------------------------------------------------------------------------- 1 | -------------------------------------------------------------------------------- /my-adk-agents/learning-content-system(WIP)/utils/__init__.py: -------------------------------------------------------------------------------- 1 | -------------------------------------------------------------------------------- /my-adk-agents/academic-research-assistant/items.json: -------------------------------------------------------------------------------- 1 | [ 2 | 3 | ] -------------------------------------------------------------------------------- /my-adk-agents/learning-content-system(WIP)/agents/loop_agents/__init__.py: -------------------------------------------------------------------------------- 1 | -------------------------------------------------------------------------------- /my-adk-agents/data-analyst/requirements.txt: -------------------------------------------------------------------------------- 1 | google-adk 2 | litellm 3 | ollama -------------------------------------------------------------------------------- /my-adk-agents/education-path-advisor/education_advisor/sub_agents/__init__.py: -------------------------------------------------------------------------------- 1 | -------------------------------------------------------------------------------- /my-adk-agents/job-interview-agent/app/__init__.py: -------------------------------------------------------------------------------- 1 | # Initialize app package 2 | -------------------------------------------------------------------------------- /my-adk-agents/learning-content-system(WIP)/agents/parallel_agents/__init__.py: -------------------------------------------------------------------------------- 1 | -------------------------------------------------------------------------------- /my-adk-agents/data-analyst/data_analyst_agent/__init__.py: -------------------------------------------------------------------------------- 1 | from . 
import agent 2 | -------------------------------------------------------------------------------- /my-adk-agents/learning-content-system(WIP)/agents/sequential_agents/__init__.py: -------------------------------------------------------------------------------- 1 | -------------------------------------------------------------------------------- /my-adk-agents/project-manager-agent/.env.example: -------------------------------------------------------------------------------- 1 | GOOGLE_GENAI_USE_VERTEXAI=FALSE 2 | GOOGLE_API_KEY=your_api_key_here -------------------------------------------------------------------------------- /my-adk-agents/education-path-advisor/.env.example: -------------------------------------------------------------------------------- 1 | GOOGLE_API_KEY= 2 | GOOGLE_GENAI_USE_VERTEXAI=FALSE -------------------------------------------------------------------------------- /my-adk-agents/project-manager-agent/project_management_agent/__init__.py: -------------------------------------------------------------------------------- 1 | # Empty __init__.py file to make the directory a Python package 2 | -------------------------------------------------------------------------------- /my-adk-agents/academic-research-assistant/academic_research_assistant/tools/__init__.py: -------------------------------------------------------------------------------- 1 | """Tools for the Academic Research Assistant Agent.""" 2 | -------------------------------------------------------------------------------- /my-adk-agents/job-interview-agent/Website.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Sri-Krishna-V/awesome-adk-agents/HEAD/my-adk-agents/job-interview-agent/Website.png -------------------------------------------------------------------------------- /my-adk-agents/job-interview-agent/agent.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Sri-Krishna-V/awesome-adk-agents/HEAD/my-adk-agents/job-interview-agent/agent.png -------------------------------------------------------------------------------- /my-adk-agents/education-path-advisor/agentflow.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Sri-Krishna-V/awesome-adk-agents/HEAD/my-adk-agents/education-path-advisor/agentflow.png -------------------------------------------------------------------------------- /my-adk-agents/academic-research-assistant/image.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Sri-Krishna-V/awesome-adk-agents/HEAD/my-adk-agents/academic-research-assistant/image.png -------------------------------------------------------------------------------- /my-adk-agents/job-interview-agent/workflow_image.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Sri-Krishna-V/awesome-adk-agents/HEAD/my-adk-agents/job-interview-agent/workflow_image.png -------------------------------------------------------------------------------- /my-adk-agents/academic-research-assistant/academic_research_assistant/sub_agents/comparison_root_agent/sub_agents/__init__.py: -------------------------------------------------------------------------------- 1 | """Sub-agents for the Comparison Root Agent.""" 2 | -------------------------------------------------------------------------------- 
/my-adk-agents/education-path-advisor/education_advisor/sub_agents/data_analyst/__init__.py:
--------------------------------------------------------------------------------
1 | # __init__.py for data_analyst sub-agent
2 | from .agent import data_analyst_agent
3 | 
--------------------------------------------------------------------------------
/my-adk-agents/education-path-advisor/system_architecture.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Sri-Krishna-V/awesome-adk-agents/HEAD/my-adk-agents/education-path-advisor/system_architecture.png
--------------------------------------------------------------------------------
/my-adk-agents/education-path-advisor/education_advisor/__init__.py:
--------------------------------------------------------------------------------
1 | """Education Advisor Package: Contains modules and agents related to educational pathway advising"""
2 | 
3 | from . import agent
4 | 
--------------------------------------------------------------------------------
/my-adk-agents/project-manager-agent/requirements.txt:
--------------------------------------------------------------------------------
1 | google-adk[database]==0.5.0
2 | yfinance==0.2.56
3 | psutil==5.9.5
4 | litellm==1.66.3
5 | google-generativeai==0.8.5
6 | python-dotenv==1.1.0
7 | 
--------------------------------------------------------------------------------
/my-adk-agents/education-path-advisor/education_advisor/sub_agents/pathway_analyst/__init__.py:
--------------------------------------------------------------------------------
1 | """pathway_analyst_agent for proposing educational pathway strategies"""
2 | 
3 | from .agent import pathway_analyst_agent
4 | 
--------------------------------------------------------------------------------
/my-adk-agents/education-path-advisor/education_advisor/sub_agents/implementation_analyst/__init__.py:
--------------------------------------------------------------------------------
1 | """implementation_analyst_agent for developing implementation strategies for educational pathways"""
2 | 
3 | from .agent import implementation_analyst_agent
4 | 
--------------------------------------------------------------------------------
/my-adk-agents/job-interview-agent/Preparing for Your AI Engineer Interviews 🚀.mp4:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Sri-Krishna-V/awesome-adk-agents/HEAD/my-adk-agents/job-interview-agent/Preparing for Your AI Engineer Interviews 🚀.mp4
--------------------------------------------------------------------------------
/my-adk-agents/education-path-advisor/education_advisor/sub_agents/risk_analyst/__init__.py:
--------------------------------------------------------------------------------
1 | """Risk Analysis Agent for providing the final risk evaluation
2 | 
3 | This module imports the risk_analyst_agent for the education advisor context."""
4 | 
5 | from .agent import risk_analyst_agent
6 | 
--------------------------------------------------------------------------------
/my-adk-agents/learning-content-system(WIP)/requirements.txt:
--------------------------------------------------------------------------------
1 | # Requirements for Learning Content Creation System
2 | google-adk==1.0.0
3 | mcp>=1.0.0
4 | pydantic>=2.0.0
5 | gradio-client>=0.20.0
6 | requests>=2.31.0
7 | python-dotenv>=1.0.0
8 | # Note: asyncio, json, typing and dataclasses are part of the Python standard
9 | # library and are not installable pip packages, so they are not listed here.
--------------------------------------------------------------------------------
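The education_advisor sub-agent packages listed above (data_analyst, pathway_analyst, implementation_analyst, risk_analyst) all follow one convention: agent.py builds the Agent, and __init__.py re-exports it so the coordinator in education_advisor/agent.py (shown later in this listing) can import the package directly. As an illustrative aside rather than a repository file, a minimal sketch of that convention might look like the following; the example_analyst names and the prompt text are assumptions:

# Sketch only: the per-analyst pattern used by the education_advisor sub-agents.
from google.adk import Agent

# Hypothetical system prompt; the real ones live in each sub-agent's prompt.py.
EXAMPLE_ANALYST_SYSTEM_PROMPT = "Analyse one narrow aspect of the student's question."

example_analyst_agent = Agent(
    model="gemini-2.0-flash",
    name="example_analyst_agent",
    instruction=EXAMPLE_ANALYST_SYSTEM_PROMPT,
    # output_key stores this agent's answer in session state for the coordinator.
    output_key="example_analysis_output",
)

# The package __init__.py then re-exports the instance
# (from .agent import example_analyst_agent), which is what lets
# education_advisor/agent.py wrap it as AgentTool(agent=example_analyst_agent).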
/my-adk-agents/academic-research-assistant/requirements.txt: -------------------------------------------------------------------------------- 1 | # Required Python packages for the Academic Research Assistant Agent 2 | 3 | google-adk 4 | requests 5 | beautifulsoup4 6 | selenium 7 | Pillow 8 | python-dotenv 9 | scrapy 10 | google-search-results # SerpAPI client for fallback search mechanism -------------------------------------------------------------------------------- /my-adk-agents/academic-research-assistant/academic_research_assistant/shared_libraries/__init__.py: -------------------------------------------------------------------------------- 1 | """Shared libraries for the Academic Research Assistant. 2 | 3 | This package contains shared libraries, constants, and utilities used across 4 | different components of the Academic Research Assistant agent system. 5 | 6 | Modules: 7 | constants: Defines global constants used throughout the agent system 8 | """ 9 | 10 | from . import constants 11 | -------------------------------------------------------------------------------- /my-adk-agents/education-path-advisor/education_advisor/sub_agents/risk_analyst/agent.py: -------------------------------------------------------------------------------- 1 | """Risk Analysis Agent for providing the final risk evaluation""" 2 | 3 | from google.adk import Agent 4 | 5 | from . import prompt 6 | 7 | MODEL = "gemini-2.0-flash" 8 | 9 | risk_analyst_agent = Agent( 10 | model=MODEL, 11 | name="risk_analyst_agent", 12 | instruction=prompt.RISK_ANALYST_SYSTEM_PROMPT, 13 | output_key="final_risk_assessment_output", 14 | ) 15 | -------------------------------------------------------------------------------- /my-adk-agents/academic-research-assistant/academic_research_assistant/sub_agents/comparison_root_agent/sub_agents/tools/__init__.py: -------------------------------------------------------------------------------- 1 | """Tools for the comparison root agent sub-agents. 2 | 3 | This module contains tools used by the comparison root agent's sub-agents 4 | to perform various tasks in the analysis workflow. 5 | 6 | Exported functions: 7 | exit_analysis: Signals the loop agent to exit when analysis is approved 8 | """ 9 | 10 | from .exit_analysis import exit_analysis 11 | 12 | __all__ = ["exit_analysis"] 13 | -------------------------------------------------------------------------------- /my-adk-agents/education-path-advisor/education_advisor/sub_agents/implementation_analyst/agent.py: -------------------------------------------------------------------------------- 1 | """Implementation_analyst_agent for developing implementation strategies for educational pathways""" 2 | 3 | from google.adk import Agent 4 | 5 | from . import prompt 6 | 7 | MODEL = "gemini-2.0-flash" 8 | 9 | implementation_analyst_agent = Agent( 10 | model=MODEL, 11 | name="implementation_analyst_agent", 12 | instruction=prompt.IMPLEMENTATION_ANALYST_SYSTEM_PROMPT, 13 | output_key="implementation_plan_output", 14 | ) 15 | -------------------------------------------------------------------------------- /my-adk-agents/education-path-advisor/education_advisor/sub_agents/pathway_analyst/agent.py: -------------------------------------------------------------------------------- 1 | """Pathway_analyst_agent for developing educational pathway strategies""" 2 | 3 | from google.adk import Agent 4 | 5 | from . 
import prompt
6 | 
7 | MODEL = "gemini-2.0-flash"
8 | 
9 | pathway_analyst_agent = Agent(
10 |     model=MODEL,
11 |     name="pathway_analyst_agent",
12 |     instruction=prompt.PATHWAY_ANALYST_SYSTEM_PROMPT,
13 |     # {proposed_pathway_strategies_output}
14 |     output_key="proposed_pathway_strategies_output",
15 | )
16 | 
--------------------------------------------------------------------------------
/my-adk-agents/academic-research-assistant/academic_research_assistant/sub_agents/profiler_agent/__init__.py:
--------------------------------------------------------------------------------
1 | """Profiler Agent for analyzing researcher profiles.
2 | 
3 | This module exports the Profiler Agent, which is responsible for extracting
4 | relevant keywords from a researcher's academic profile. The agent serves as
5 | the first step in the Academic Research Assistant workflow.
6 | 
7 | Exported components:
8 |     profiler_agent: The agent instance that can be used by the root agent
9 | """
10 | 
11 | from .agent import profiler_agent
12 | 
--------------------------------------------------------------------------------
/my-adk-agents/education-path-advisor/education_advisor/sub_agents/data_analyst/agent.py:
--------------------------------------------------------------------------------
1 | """Education data analyst agent for finding educational information using Google search"""
2 | 
3 | from google.adk import Agent
4 | from google.adk.tools import google_search
5 | 
6 | from . import prompt
7 | 
8 | MODEL = "gemini-2.0-flash"
9 | 
10 | data_analyst_agent = Agent(
11 |     model=MODEL,
12 |     name="data_analyst",
13 |     instruction=prompt.DATA_ANALYST_SYSTEM_PROMPT,
14 |     output_key="education_data_analysis_output",
15 |     tools=[google_search],
16 | )
17 | 
--------------------------------------------------------------------------------
/my-adk-agents/academic-research-assistant/.env.example:
--------------------------------------------------------------------------------
1 | # Example environment variables for the Academic Research Assistant
2 | # Copy this file to .env and fill in your actual values
3 | 
4 | # Google ADK API Key (required)
5 | GOOGLE_API_KEY=your_google_api_key_here
6 | 
7 | # Model to use (required)
8 | MODEL=gemini-2.0-flash
9 | 
10 | # Web driver configuration (optional)
11 | DISABLE_WEB_DRIVER=0 # 0=enabled, 1=disabled
12 | 
13 | # SerpAPI Key (optional - used as fallback when Scrapy search fails)
14 | # Get a key from: https://serpapi.com/
15 | SERPAPI_KEY=your_serpapi_key_here
--------------------------------------------------------------------------------
/my-adk-agents/data-analyst/data_analyst_agent/agent.py:
--------------------------------------------------------------------------------
1 | from google.adk.agents import Agent
2 | from google.adk.models.lite_llm import LiteLlm
3 | from google.adk.tools import built_in_code_execution
4 | 
5 | local_agent = Agent(
6 |     # Must use 'ollama_chat' prefix
7 |     model=LiteLlm(model="ollama_chat/qwen3:8b"),
8 |     name="senior_software_engineer",  # agent names must be valid Python identifiers
9 |     instruction="You are a senior software engineer tasked with website design.
You suggest neatly formatted code for React, Vue, Next and Node JS", 10 | tools=[built_in_code_execution], 11 | ) 12 | 13 | root_agent = local_agent -------------------------------------------------------------------------------- /my-adk-agents/job-interview-agent/app/static/js/pcm-recorder-processor.js: -------------------------------------------------------------------------------- 1 | class PCMProcessor extends AudioWorkletProcessor { 2 | constructor() { 3 | super(); 4 | } 5 | 6 | process(inputs, outputs, parameters) { 7 | if (inputs.length > 0 && inputs[0].length > 0) { 8 | // Use the first channel 9 | const inputChannel = inputs[0][0]; 10 | // Copy the buffer to avoid issues with recycled memory 11 | const inputCopy = new Float32Array(inputChannel); 12 | this.port.postMessage(inputCopy); 13 | } 14 | return true; 15 | } 16 | } 17 | 18 | registerProcessor("pcm-recorder-processor", PCMProcessor); 19 | -------------------------------------------------------------------------------- /my-adk-agents/academic-research-assistant/academic_research_assistant/sub_agents/comparison_root_agent/sub_agents/analysis_formatter_agent.py: -------------------------------------------------------------------------------- 1 | """Analysis Formatter Agent 2 | 3 | This agent formats the approved analysis into a well-structured final report. 4 | """ 5 | 6 | from google.adk.agents.llm_agent import LlmAgent 7 | 8 | from ....shared_libraries import constants 9 | from . import prompt 10 | 11 | analysis_formatter_agent = LlmAgent( 12 | model=constants.MODEL, 13 | name="analysis_formatter_agent", 14 | description="Formats the approved analysis into a well-structured final report.", 15 | instruction=prompt.ANALYSIS_FORMATTER_PROMPT, 16 | output_key="comparison_report", 17 | ) 18 | -------------------------------------------------------------------------------- /my-adk-agents/academic-research-assistant/academic_research_assistant/sub_agents/comparison_root_agent/sub_agents/analysis_critic_agent.py: -------------------------------------------------------------------------------- 1 | """Analysis Critic Agent 2 | 3 | This agent reviews and provides feedback on the analysis generated by the analysis_generator_agent. 4 | """ 5 | 6 | from google.adk.agents.llm_agent import LlmAgent 7 | 8 | from ....shared_libraries import constants 9 | from . 
import prompt 10 | 11 | analysis_critic_agent = LlmAgent( 12 | model=constants.MODEL, 13 | name="analysis_critic_agent", 14 | description="Reviews and critiques the analysis for accuracy and helpfulness.", 15 | instruction=prompt.ANALYSIS_CRITIC_PROMPT, 16 | output_key="analysis_feedback", 17 | ) 18 | -------------------------------------------------------------------------------- /my-adk-agents/job-interview-agent/requirements.txt: -------------------------------------------------------------------------------- 1 | # Job Interview Roleplay Agent Dependencies 2 | # Core ADK framework 3 | google-adk==1.1.1 4 | 5 | # Web framework for frontend integration 6 | fastapi>=0.104.0 7 | uvicorn[standard]>=0.24.0 8 | 9 | # Google Calendar API integration 10 | google-api-python-client>=2.169.0 11 | google-auth>=2.40.1 12 | google-auth-oauthlib>=1.2.2 13 | google-auth-httplib2>=0.2.0 14 | 15 | # Core dependencies 16 | python-dotenv>=1.1.0 17 | asyncio-mqtt>=0.13.0 18 | pydantic>=2.11.0 19 | python-dateutil>=2.9.0 20 | 21 | # Database and session management 22 | SQLAlchemy>=2.0.40 23 | 24 | # Utilities 25 | requests>=2.32.3 26 | typing-extensions>=4.13.0 27 | pathlib2>=2.3.7 28 | 29 | 30 | -------------------------------------------------------------------------------- /.gitignore: -------------------------------------------------------------------------------- 1 | # Python 2 | __pycache__/ 3 | *.py[cod] 4 | *$py.class 5 | *.so 6 | .Python 7 | build/ 8 | dist/ 9 | downloads/ 10 | lib/ 11 | lib64/ 12 | parts/ 13 | var/ 14 | wheels/ 15 | 16 | # Virtual Environments 17 | .env 18 | .venv/ 19 | env/ 20 | venv/ 21 | ENV/ 22 | env.bak/ 23 | venv.bak/ 24 | 25 | # Database 26 | *.db 27 | *.sqlite3 28 | *.sqlite 29 | 30 | # IDE specific files 31 | .idea/ 32 | .vscode/ 33 | *.swp 34 | *.swo 35 | .DS_Store 36 | 37 | # Jupyter Notebook 38 | .ipynb_checkpoints 39 | 40 | # Logs 41 | logs/ 42 | *.log 43 | 44 | # Local configuration 45 | config.local.py 46 | 47 | changelog.md 48 | 49 | .github/ 50 | .cursor/ 51 | 52 | examples/ 53 | prds/ 54 | 55 | gemini-fullstack/ 56 | 57 | .gitignore -------------------------------------------------------------------------------- /my-adk-agents/education-path-advisor/eval/test_eval.py: -------------------------------------------------------------------------------- 1 | """Basic evaluation for Education Path Advisor""" 2 | 3 | import pathlib 4 | 5 | import dotenv 6 | import pytest 7 | from google.adk.evaluation.agent_evaluator import AgentEvaluator 8 | 9 | pytest_plugins = ("pytest_asyncio",) 10 | 11 | 12 | @pytest.fixture(scope="session", autouse=True) 13 | def load_env(): 14 | dotenv.load_dotenv() 15 | 16 | 17 | @pytest.mark.asyncio 18 | async def test_all(): 19 | """Test the agent's basic ability on a few examples.""" 20 | print("Running evaluate") 21 | await AgentEvaluator.evaluate( 22 | "education_advisor", 23 | str(pathlib.Path(__file__).parent / "data"), 24 | num_runs=5, 25 | ) 26 | -------------------------------------------------------------------------------- /my-adk-agents/academic-research-assistant/academic_research_assistant/sub_agents/__init__.py: -------------------------------------------------------------------------------- 1 | """Sub-agents for the Academic Research Assistant. 2 | 3 | This package contains the specialized sub-agents used by the Academic Research 4 | Assistant to perform different aspects of the research workflow: 5 | 6 | 1. Profiler Agent: Analyzes researcher profiles to extract relevant keywords 7 | 2. 
Searcher Agent: Finds relevant academic papers using web browsing capabilities 8 | 3. Comparison Root Agent: Analyzes papers and generates insights by comparing them 9 | to the researcher's existing work 10 | 11 | Each sub-agent is implemented as a separate module with its own agent definition, 12 | prompt, and specialized tools as needed. 13 | """ 14 | -------------------------------------------------------------------------------- /my-adk-agents/academic-research-assistant/academic_research_assistant/sub_agents/searcher_agent/__init__.py: -------------------------------------------------------------------------------- 1 | """Searcher Agent for finding relevant academic papers. 2 | 3 | This module exports the Searcher Agent, which is responsible for finding relevant 4 | academic papers based on a research topic and keywords. The agent uses web browsing 5 | capabilities to search academic databases and extract paper information. 6 | 7 | The agent serves as the second step in the Academic Research Assistant workflow, 8 | taking inputs from the Profiler Agent and providing results to the Comparison Agent. 9 | 10 | Exported components: 11 | searcher_agent: The agent instance that can be used by the root agent 12 | """ 13 | 14 | from .agent import searcher_agent 15 | -------------------------------------------------------------------------------- /my-adk-agents/job-interview-agent/app/interview_agent/__init__.py: -------------------------------------------------------------------------------- 1 | # Copyright 2025 Google LLC 2 | # 3 | # Licensed under the Apache License, Version 2.0 (the "License"); 4 | # you may not use this file except in compliance with the License. 5 | # You may obtain a copy of the License at 6 | # 7 | # http://www.apache.org/licenses/LICENSE-2.0 8 | # 9 | # Unless required by applicable law or agreed to in writing, software 10 | # distributed under the License is distributed on an "AS IS" BASIS, 11 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 | # See the License for the specific language governing permissions and 13 | # limitations under the License. 14 | 15 | """Job Interview Roleplay Agent Package.""" 16 | 17 | from . import agent -------------------------------------------------------------------------------- /my-adk-agents/academic-research-assistant/academic_research_assistant/sub_agents/comparison_root_agent/sub_agents/analysis_generator_agent.py: -------------------------------------------------------------------------------- 1 | """Analysis Generator Agent 2 | 3 | This agent generates detailed analyses comparing a researcher's work to new papers. 4 | """ 5 | 6 | from google.adk.agents.llm_agent import LlmAgent 7 | 8 | from ....shared_libraries import constants 9 | from . 
import prompt 10 | from .tools.exit_analysis import exit_analysis 11 | 12 | analysis_generator_agent = LlmAgent( 13 | model=constants.MODEL, 14 | name="analysis_generator_agent", 15 | description="Generates an analysis comparing the user's work to new papers.", 16 | instruction=prompt.ANALYSIS_GENERATOR_PROMPT, 17 | output_key="generated_analysis", 18 | tools=[exit_analysis], 19 | ) 20 | -------------------------------------------------------------------------------- /my-adk-agents/learning-content-system(WIP)/.env.example: -------------------------------------------------------------------------------- 1 | # Environment Configuration for Learning Content System 2 | 3 | # Google Cloud Configuration (if using Vertex AI) 4 | # GOOGLE_CLOUD_PROJECT=your-project-id 5 | # GOOGLE_APPLICATION_CREDENTIALS=path/to/service-account.json 6 | 7 | # API Keys for external services (if needed) 8 | # OPENAI_API_KEY=your-openai-key 9 | # ANTHROPIC_API_KEY=your-anthropic-key 10 | 11 | # Hugging Face Configuration 12 | # HUGGINGFACE_API_TOKEN=your-hf-token 13 | 14 | # MCP Server Configuration 15 | # MCP_SERVER_TIMEOUT=30 16 | # MCP_MAX_RETRIES=3 17 | 18 | GOOGLE_API_KEY=your-google-api-key 19 | 20 | # Application Settings 21 | APP_NAME=learning_content_system 22 | DEFAULT_MODEL=gemini-2.0-flash 23 | MAX_CONTENT_LENGTH=100000 24 | QUALITY_THRESHOLD=0.80 25 | 26 | # Debug Settings 27 | DEBUG_MODE=false 28 | LOG_LEVEL=INFO -------------------------------------------------------------------------------- /my-adk-agents/job-interview-agent/app/static/js/audio-player.js: -------------------------------------------------------------------------------- 1 | /** 2 | * Audio Player Worklet 3 | */ 4 | 5 | export async function startAudioPlayerWorklet() { 6 | // 1. Create an AudioContext 7 | const audioContext = new AudioContext({ 8 | sampleRate: 24000, 9 | }); 10 | 11 | // 2. Load your custom processor code 12 | const workletURL = new URL("./pcm-player-processor.js", import.meta.url); 13 | await audioContext.audioWorklet.addModule(workletURL); 14 | 15 | // 3. Create an AudioWorkletNode 16 | const audioPlayerNode = new AudioWorkletNode( 17 | audioContext, 18 | "pcm-player-processor" 19 | ); 20 | 21 | // 4. Connect to the destination 22 | audioPlayerNode.connect(audioContext.destination); 23 | 24 | // The audioPlayerNode.port is how we send messages (audio data) to the processor 25 | return [audioPlayerNode, audioContext]; 26 | } 27 | -------------------------------------------------------------------------------- /my-adk-agents/academic-research-assistant/academic_research_assistant/__init__.py: -------------------------------------------------------------------------------- 1 | """Academic Research Assistant Agent package. 2 | 3 | This package implements an AI assistant designed to accelerate academic literature 4 | reviews by orchestrating specialized sub-agents in a sequential workflow. 5 | 6 | The agent helps researchers find relevant papers based on their research profile 7 | and interests, then analyzes how these papers relate to the researcher's work. 8 | 9 | Main components: 10 | - Root agent: Coordinates the overall workflow 11 | - Profiler agent: Analyzes researcher profiles to extract relevant keywords 12 | - Searcher agent: Finds relevant academic papers using web browsing 13 | - Comparison agent: Analyzes papers and generates insights 14 | 15 | Usage: 16 | from academic_research_assistant.agent import root_agent 17 | root_agent.start() 18 | """ 19 | 20 | from . 
import agent 21 | -------------------------------------------------------------------------------- /my-adk-agents/academic-research-assistant/academic_research_assistant/sub_agents/comparison_root_agent/sub_agents/tools/exit_analysis.py: -------------------------------------------------------------------------------- 1 | from typing import Dict 2 | 3 | from google.adk.tools.tool_context import ToolContext 4 | 5 | 6 | def exit_analysis( 7 | tool_context: ToolContext, 8 | ) -> Dict: 9 | """ 10 | Exit the analysis refinement loop when a satisfactory analysis has been approved. 11 | 12 | This function signals to the loop agent that the analysis has been satisfactorily 13 | reviewed and approved by the critic agent, and no further refinement is needed. 14 | 15 | Args: 16 | tool_context: ADK tool context 17 | 18 | Returns: 19 | Dictionary with exit status 20 | """ 21 | tool_context.actions.escalate = True 22 | 23 | return { 24 | "status": "success", 25 | "message": "Analysis has been approved. Exiting refinement loop.", 26 | } 27 | -------------------------------------------------------------------------------- /my-adk-agents/job-interview-agent/Dockerfile: -------------------------------------------------------------------------------- 1 | # Dockerfile for Job Interview Agent 2 | FROM python:3.9-slim 3 | 4 | # Set working directory 5 | WORKDIR /app 6 | 7 | # Install system dependencies 8 | RUN apt-get update && apt-get install -y \ 9 | gcc \ 10 | && rm -rf /var/lib/apt/lists/* 11 | 12 | # Copy requirements first for better caching 13 | COPY requirements.txt . 14 | 15 | # Install Python dependencies 16 | RUN pip install --no-cache-dir -r requirements.txt 17 | 18 | # Copy application code 19 | COPY . . 20 | 21 | # Create necessary directories 22 | RUN mkdir -p logs data 23 | 24 | # Set environment variables 25 | ENV PYTHONPATH=/app 26 | ENV PYTHONUNBUFFERED=1 27 | 28 | # Expose port (if needed for web interface) 29 | EXPOSE 8000 30 | 31 | # Create non-root user for security 32 | RUN groupadd -r appuser && useradd -r -g appuser appuser 33 | RUN chown -R appuser:appuser /app 34 | USER appuser 35 | 36 | # Command to run the application 37 | CMD ["python", "main.py"] 38 | -------------------------------------------------------------------------------- /my-adk-agents/academic-research-assistant/academic_research_assistant/sub_agents/profiler_agent/agent.py: -------------------------------------------------------------------------------- 1 | """Profiler Agent for analyzing researcher profiles. 2 | 3 | This module defines the Profiler Agent, which is responsible for extracting 4 | relevant keywords from a researcher's academic profile. It uses the profile_scraper 5 | tool to obtain text content from profile URLs and then analyzes this content to 6 | identify key research areas, methodologies, and technical terms. 7 | 8 | The agent serves as the first step in the Academic Research Assistant workflow, 9 | providing essential context for subsequent paper searches and analyses. 10 | """ 11 | 12 | from google.adk.agents.llm_agent import Agent 13 | 14 | from ...shared_libraries import constants 15 | from . 
import prompt 16 | from ...tools import url_scraper 17 | 18 | profiler_agent = Agent( 19 | model=constants.MODEL, 20 | name="profiler_agent", 21 | description="An agent to extract keywords from a researcher's profile.", 22 | instruction=prompt.PROFILER_PROMPT, 23 | tools=[ 24 | url_scraper.get_text_from_url, 25 | ], 26 | ) 27 | -------------------------------------------------------------------------------- /my-adk-agents/education-path-advisor/pyproject.toml: -------------------------------------------------------------------------------- 1 | [project] 2 | name = "education-path-advisor" 3 | version = "0.1" 4 | description = "AI-driven agent designed to provide personalized educational pathway guidance for Indian students" 5 | authors = [{ name = "Education Pathways Team", email = "team@example.com" }] 6 | license = "Apache License 2.0" 7 | readme = "README.md" 8 | 9 | [tool.poetry.dependencies] 10 | google-cloud-aiplatform = { version = "^1.93.0", extras = [ 11 | "adk", 12 | "agent-engines", 13 | ] } 14 | python = "^3.9" 15 | google-genai = "^1.9.0" 16 | pydantic = "^2.10.6" 17 | python-dotenv = "^1.0.1" 18 | google-adk = "^1.0.0" 19 | [tool.poetry.group.dev] 20 | optional = true 21 | 22 | [tool.poetry.group.dev.dependencies] 23 | pytest = "^8.3.5" 24 | black = "^25.1.0" 25 | google-adk = { version = "^1.0.0", extras = ["eval"] } 26 | pytest-asyncio = "^0.26.0" 27 | 28 | [tool.poetry.group.deployment] 29 | optional = true 30 | 31 | [tool.poetry.group.deployment.dependencies] 32 | absl-py = "^2.2.1" 33 | 34 | [build-system] 35 | requires = ["poetry-core>=2.0.0,<3.0.0"] 36 | build-backend = "poetry.core.masonry.api" 37 | -------------------------------------------------------------------------------- /my-adk-agents/academic-research-assistant/academic_research_assistant/sub_agents/comparison_root_agent/__init__.py: -------------------------------------------------------------------------------- 1 | """Comparison Root Agent package. 2 | 3 | This package contains the comparison root agent and its sub-agents, responsible 4 | for analyzing academic papers in relation to a researcher's profile and generating 5 | insightful comparisons and recommendations. 6 | 7 | The agent serves as the final step in the Academic Research Assistant workflow, 8 | taking inputs from previous agents and producing the final report for the user. 9 | 10 | The agent uses a hierarchical structure with: 11 | 1. A root sequential agent that orchestrates the entire process 12 | 2. A loop agent that iterates between: 13 | a. An analysis generator agent that produces detailed paper comparisons 14 | b. An analysis critic agent that reviews and refines the generated analysis 15 | 3. A final formatter agent that prepares the approved analysis for presentation 16 | 17 | Exported components: 18 | comparison_root_agent: The agent instance that can be used by the root agent 19 | """ 20 | 21 | from .agent import comparison_root_agent 22 | 23 | __all__ = ["comparison_root_agent"] 24 | -------------------------------------------------------------------------------- /my-adk-agents/education-path-advisor/education_advisor/agent.py: -------------------------------------------------------------------------------- 1 | """Education Path Advisor: provide personalized educational pathway guidance for Indian students""" 2 | 3 | from google.adk.agents import LlmAgent 4 | from google.adk.tools.agent_tool import AgentTool 5 | 6 | from . 
import prompt 7 | from .sub_agents.data_analyst import data_analyst_agent 8 | from .sub_agents.pathway_analyst import pathway_analyst_agent 9 | from .sub_agents.implementation_analyst import implementation_analyst_agent 10 | from .sub_agents.risk_analyst import risk_analyst_agent 11 | 12 | MODEL = "gemini-2.0-flash" 13 | 14 | 15 | education_coordinator = LlmAgent( 16 | name="education_coordinator", 17 | model=MODEL, 18 | description=('Coordinator agent for the Education Path Advisor, helping users navigate their educational journey.'), 19 | instruction=prompt.EDUCATION_COORDINATOR_SYSTEM_PROMPT, 20 | output_key="education_coordinator_output", 21 | tools=[ 22 | AgentTool(agent=data_analyst_agent), 23 | AgentTool(agent=implementation_analyst_agent), 24 | AgentTool(agent=pathway_analyst_agent), 25 | AgentTool(agent=risk_analyst_agent), 26 | ], 27 | ) 28 | 29 | root_agent = education_coordinator 30 | -------------------------------------------------------------------------------- /my-adk-agents/job-interview-agent/app/interview_agent/tools/__init__.py: -------------------------------------------------------------------------------- 1 | """ 2 | Tools for the Job Interview Roleplay Agent. 3 | """ 4 | 5 | from .calendar_tools import ( 6 | schedule_interview, 7 | list_scheduled_interviews, 8 | cancel_interview, 9 | update_interview, 10 | ) 11 | 12 | from .interview_tools import ( 13 | start_interview_session, 14 | ask_behavioral_question, 15 | ask_technical_question, 16 | provide_feedback, 17 | evaluate_answer, 18 | ) 19 | 20 | from .data_tools import ( 21 | generate_interview_report, 22 | get_question_bank, 23 | save_interview_progress, 24 | load_interview_progress, 25 | ) 26 | 27 | __all__ = [ 28 | # Calendar tools 29 | "schedule_interview", 30 | "list_scheduled_interviews", 31 | "cancel_interview", 32 | "update_interview", 33 | 34 | # Interview session tools 35 | "start_interview_session", 36 | "ask_behavioral_question", 37 | "ask_technical_question", 38 | "provide_feedback", 39 | "evaluate_answer", 40 | 41 | # Data and reporting tools 42 | "generate_interview_report", 43 | "get_question_bank", 44 | "save_interview_progress", 45 | "load_interview_progress", 46 | ] 47 | -------------------------------------------------------------------------------- /my-adk-agents/job-interview-agent/.env.example: -------------------------------------------------------------------------------- 1 | # Job Interview Agent Configuration 2 | # Copy this file to .env and fill in your actual values 3 | 4 | # Google Calendar API Configuration 5 | GOOGLE_CALENDAR_ID=your_calendar_id@gmail.com 6 | GOOGLE_APPLICATION_NAME=Job Interview Agent 7 | 8 | # ADK Configuration 9 | ADK_API_KEY=your_adk_api_key_here 10 | ADK_PROJECT_ID=your_adk_project_id 11 | 12 | # Interview Agent Settings 13 | DEFAULT_INTERVIEW_DURATION=60 14 | DEFAULT_PREPARATION_TIME=10 15 | SESSION_TIMEOUT=3600 16 | 17 | # Database Configuration (optional - defaults to JSON files) 18 | # DATABASE_URL=sqlite:///interview_sessions.db 19 | # DATABASE_TYPE=sqlite 20 | 21 | # Audio/Voice Integration (optional) 22 | # ENABLE_VOICE=false 23 | # SPEECH_API_KEY=your_speech_api_key 24 | # TTS_API_KEY=your_tts_api_key 25 | 26 | # Logging Configuration 27 | LOG_LEVEL=INFO 28 | LOG_FILE=interview_agent.log 29 | 30 | # Security Settings 31 | SECRET_KEY=your_secret_key_for_session_encryption 32 | CALENDAR_SCOPES=https://www.googleapis.com/auth/calendar 33 | 34 | # Development Settings 35 | DEBUG=false 36 | TESTING=false 37 | 38 | # Email Notifications (optional) 39 | # 
SMTP_SERVER=smtp.gmail.com 40 | # SMTP_PORT=587 41 | # EMAIL_USERNAME=your_email@gmail.com 42 | # EMAIL_PASSWORD=your_app_password 43 | # ENABLE_EMAIL_NOTIFICATIONS=false 44 | -------------------------------------------------------------------------------- /my-adk-agents/academic-research-assistant/academic_research_assistant/shared_libraries/constants.py: -------------------------------------------------------------------------------- 1 | """Constants used throughout the Academic Research Assistant. 2 | 3 | This module defines global constants used across the Academic Research Assistant 4 | agent system. It loads environment variables using dotenv for configuration. 5 | 6 | Constants: 7 | MODEL (str): The LLM model to use for all agents, defaults to 'gemini-2.0-flash' 8 | if not specified in environment variables. 9 | DISABLE_WEB_DRIVER (int): Flag to enable/disable web driver functionality, 10 | useful for testing or environments where browser automation is not available. 11 | Defaults to 0 (enabled). 12 | SERPAPI_KEY (str): API key for SerpAPI to access Google Scholar data without 13 | triggering rate limits or CAPTCHAs. Defaults to None if not specified. 14 | 15 | Usage: 16 | from academic_research_assistant.shared_libraries import constants 17 | 18 | model = constants.MODEL 19 | web_driver_disabled = constants.DISABLE_WEB_DRIVER 20 | serpapi_key = constants.SERPAPI_KEY 21 | """ 22 | 23 | import os 24 | 25 | import dotenv 26 | 27 | dotenv.load_dotenv() 28 | 29 | MODEL = os.getenv("MODEL", "gemini-2.0-flash") 30 | DISABLE_WEB_DRIVER = int(os.getenv("DISABLE_WEB_DRIVER", "0")) 31 | SERPAPI_KEY = os.getenv("SERPAPI_KEY", None) 32 | -------------------------------------------------------------------------------- /my-adk-agents/education-path-advisor/tests/test_agents.py: -------------------------------------------------------------------------------- 1 | """Test cases for the Education Path Advisor""" 2 | 3 | import textwrap 4 | 5 | import dotenv 6 | import pytest 7 | from education_advisor.agent import root_agent 8 | from google.adk.runners import InMemoryRunner 9 | from google.genai.types import Part, UserContent 10 | 11 | pytest_plugins = ("pytest_asyncio",) 12 | 13 | 14 | @pytest.fixture(scope="session", autouse=True) 15 | def load_env(): 16 | dotenv.load_dotenv() 17 | 18 | 19 | @pytest.mark.asyncio 20 | async def test_happy_path(): 21 | """Runs the agent on a simple input and expects a normal response.""" 22 | user_input = textwrap.dedent( 23 | """ 24 | Double check this: 25 | Question: who are you 26 | Answer: education path advisor! 27 | """ 28 | ).strip() 29 | 30 | runner = InMemoryRunner(agent=root_agent) 31 | session = await runner.session_service.create_session( 32 | app_name=runner.app_name, user_id="test_user" 33 | ) 34 | content = UserContent(parts=[Part(text=user_input)]) 35 | response = "" 36 | async for event in runner.run_async( 37 | user_id=session.user_id, 38 | session_id=session.id, 39 | new_message=content, 40 | ): 41 | print(event) 42 | if event.content.parts and event.content.parts[0].text: 43 | response = event.content.parts[0].text 44 | 45 | # The answer in the input is wrong, so we expect the agent to provided a 46 | # revised answer, and the correct answer should mention research. 
47 | assert "education" in response.lower() 48 | -------------------------------------------------------------------------------- /my-adk-agents/job-interview-agent/docker-compose.yml: -------------------------------------------------------------------------------- 1 | version: '3.8' 2 | 3 | services: 4 | interview-agent: 5 | build: . 6 | container_name: job-interview-agent 7 | volumes: 8 | - ./data:/app/data 9 | - ./logs:/app/logs 10 | - ./.env:/app/.env 11 | - ./credentials.json:/app/credentials.json 12 | - ./token.pickle:/app/token.pickle 13 | environment: 14 | - PYTHONUNBUFFERED=1 15 | stdin_open: true 16 | tty: true 17 | restart: unless-stopped 18 | 19 | # Optional: Redis for session storage (if using Redis instead of JSON) 20 | redis: 21 | image: redis:7-alpine 22 | container_name: interview-agent-redis 23 | ports: 24 | - "6379:6379" 25 | volumes: 26 | - redis_data:/data 27 | restart: unless-stopped 28 | profiles: 29 | - redis 30 | 31 | # Optional: PostgreSQL for advanced data storage 32 | postgres: 33 | image: postgres:15-alpine 34 | container_name: interview-agent-db 35 | environment: 36 | POSTGRES_DB: interview_agent 37 | POSTGRES_USER: interview_user 38 | POSTGRES_PASSWORD: interview_pass 39 | volumes: 40 | - postgres_data:/var/lib/postgresql/data 41 | ports: 42 | - "5432:5432" 43 | restart: unless-stopped 44 | profiles: 45 | - postgres 46 | 47 | volumes: 48 | redis_data: 49 | postgres_data: 50 | 51 | # Usage: 52 | # Basic setup: docker-compose up 53 | # With Redis: docker-compose --profile redis up 54 | # With PostgreSQL: docker-compose --profile postgres up 55 | # With both: docker-compose --profile redis --profile postgres up 56 | -------------------------------------------------------------------------------- /my-adk-agents/job-interview-agent/app/interview_agent/agent.py: -------------------------------------------------------------------------------- 1 | """ 2 | Main agent module for the Job Interview Roleplay Agent. 3 | 4 | This agent provides comprehensive interview simulation capabilities including: 5 | - Behavioral interview questions 6 | - Technical interview scenarios 7 | - Interview scheduling with calendar integration 8 | - Real-time feedback and scoring 9 | - Multi-role interview simulations (HR, Technical Lead, etc.) 
10 | """ 11 | 12 | from google.adk.agents import Agent 13 | from .tools import ( 14 | schedule_interview, 15 | list_scheduled_interviews, 16 | cancel_interview, 17 | update_interview, 18 | start_interview_session, 19 | ask_behavioral_question, 20 | ask_technical_question, 21 | provide_feedback, 22 | evaluate_answer, 23 | generate_interview_report, 24 | get_question_bank, 25 | save_interview_progress, 26 | load_interview_progress, 27 | ) 28 | from .prompts import GLOBAL_INSTRUCTION, MAIN_INSTRUCTION 29 | from .utils import get_current_time 30 | 31 | 32 | root_agent = Agent( 33 | name="job_interview_agent", 34 | model="gemini-2.0-flash-live-001", 35 | description="Comprehensive job interview roleplay agent with calendar integration and multi-scenario support.", 36 | global_instruction=GLOBAL_INSTRUCTION, 37 | instruction=MAIN_INSTRUCTION.format(current_time=get_current_time()), 38 | tools=[ 39 | # Calendar and scheduling tools 40 | schedule_interview, 41 | list_scheduled_interviews, 42 | cancel_interview, 43 | update_interview, 44 | 45 | # Interview session tools 46 | start_interview_session, 47 | ask_behavioral_question, 48 | ask_technical_question, 49 | provide_feedback, 50 | evaluate_answer, 51 | 52 | # Data and reporting tools 53 | generate_interview_report, 54 | get_question_bank, 55 | save_interview_progress, 56 | load_interview_progress, 57 | ], 58 | ) 59 | -------------------------------------------------------------------------------- /my-adk-agents/academic-research-assistant/academic_research_assistant/sub_agents/profiler_agent/prompt.py: -------------------------------------------------------------------------------- 1 | """Prompt definition for the Profiler Agent. 2 | 3 | This module contains the instruction prompt for the Profiler Agent, which 4 | analyzes academic researcher profiles to extract key research terms and concepts. 5 | 6 | The PROFILER_PROMPT is structured to guide the agent in: 7 | 1. Analyzing text scraped from researcher profiles 8 | 2. Identifying key research concepts, methodologies, and technical terms 9 | 3. Synthesizing findings into a concise list of keywords 10 | 4. Handling various edge cases and error scenarios 11 | 12 | The prompt includes examples of different academic disciplines and expected outputs, 13 | as well as guidance for handling problematic inputs like error pages or sparse profiles. 14 | """ 15 | 16 | PROFILER_PROMPT = """ 17 | # Agent: profiler_agent 18 | # Role: Functionally extract keywords from a webpage. 19 | # Mandate: Tool-First. Conversational output is forbidden. 20 | 21 | 22 | Your SOLE function is to take a URL, extract its text content, and then analyze that text to produce a comma-separated string of 10-15 research keywords. 23 | 24 | 25 | 1. **Trigger:** You receive a URL from the orchestrator. 26 | 2. **Action 1:** Immediately call the `get_text_from_url` tool with the URL. 27 | 3. **Action 2:** Analyze the text returned by the tool to identify the most important keywords. 28 | 4. **Post-Action:** The resulting comma-separated keyword string is your `final_output`. If the tool returns a `PROFILING_ERROR` string, that error string is your `final_output`. 29 | 5. **Transition:** Proceed immediately to the . 30 | 31 | 32 | 1. **Trigger:** You have produced a `final_output` string (either keywords or an error). 33 | 2. **Action:** Your one and only action is to call `transfer_to_agent`, targeting the `academic_research_assistant`, and providing your `final_output` as the result. 
34 | """ 35 | -------------------------------------------------------------------------------- /my-adk-agents/academic-research-assistant/academic_research_assistant/sub_agents/comparison_root_agent/agent.py: -------------------------------------------------------------------------------- 1 | """Comparison Root Agent for analyzing and comparing academic papers. 2 | 3 | This module defines the Comparison Root Agent and its sub-agents, which are responsible 4 | for analyzing academic papers in relation to a researcher's profile and generating 5 | insightful comparisons and recommendations. 6 | 7 | The agent architecture follows a hierarchical structure with: 8 | 1. A root sequential agent that orchestrates the entire process 9 | 2. A loop agent that iterates between: 10 | a. An analysis generator agent that produces detailed paper comparisons 11 | b. An analysis critic agent that reviews and refines the generated analysis 12 | 3. A final formatter agent that prepares the approved analysis for presentation 13 | 14 | This module serves as the final step in the Academic Research Assistant workflow, 15 | taking inputs from previous agents and producing the final report for the user. 16 | """ 17 | 18 | from google.adk.agents import SequentialAgent, LoopAgent 19 | 20 | from .sub_agents.analysis_generator_agent import analysis_generator_agent 21 | from .sub_agents.analysis_critic_agent import analysis_critic_agent 22 | from .sub_agents.analysis_formatter_agent import analysis_formatter_agent 23 | 24 | analysis_refinement_loop_agent = LoopAgent( 25 | name="analysis_refinement_loop_agent", 26 | description="Manages the iterative refinement process between analysis generation and critique.", 27 | max_iterations=5, 28 | sub_agents=[analysis_generator_agent, analysis_critic_agent], 29 | ) 30 | 31 | # Create the root Sequential Agent that: 32 | # 1. Refines the analysis through a loop until approved 33 | # 2. 
Formats the final approved analysis for presentation 34 | comparison_root_agent = SequentialAgent( 35 | name="comparison_root_agent", 36 | description="Orchestrates the analysis, critique, and presentation of academic papers.", 37 | sub_agents=[analysis_refinement_loop_agent, analysis_formatter_agent], 38 | ) 39 | -------------------------------------------------------------------------------- /my-adk-agents/learning-content-system(WIP)/utils/content_types.py: -------------------------------------------------------------------------------- 1 | """ 2 | Data models for Learning Content Creation System 3 | """ 4 | 5 | from dataclasses import dataclass 6 | from typing import List, Dict, Optional, Any 7 | from enum import Enum 8 | 9 | 10 | class ContentType(Enum): 11 | TEXT = "text" 12 | IMAGE = "image" 13 | AUDIO = "audio" 14 | VIDEO = "video" 15 | PRESENTATION = "presentation" 16 | QUIZ = "quiz" 17 | 18 | 19 | class QualityLevel(Enum): 20 | DRAFT = "draft" 21 | REVIEW = "review" 22 | GOOD = "good" 23 | EXCELLENT = "excellent" 24 | 25 | 26 | @dataclass 27 | class LearningObjective: 28 | """Represents a learning objective""" 29 | objective: str 30 | level: str # beginner, intermediate, advanced 31 | time_estimate: int # in minutes 32 | 33 | 34 | @dataclass 35 | class ContentElement: 36 | """Represents a piece of content""" 37 | content_type: ContentType 38 | title: str 39 | content: str 40 | metadata: Dict[str, Any] 41 | quality_score: float = 0.0 42 | file_path: Optional[str] = None 43 | 44 | 45 | @dataclass 46 | class LearningModule: 47 | """Represents a complete learning module""" 48 | title: str 49 | description: str 50 | objectives: List[LearningObjective] 51 | content_elements: List[ContentElement] 52 | duration_minutes: int 53 | difficulty_level: str 54 | quality_assessment: Dict[str, Any] = None 55 | 56 | 57 | @dataclass 58 | class ContentAnalysis: 59 | """Results from content analysis""" 60 | key_concepts: List[str] 61 | learning_objectives: List[LearningObjective] 62 | difficulty_level: str 63 | estimated_duration: int 64 | recommended_formats: List[ContentType] 65 | quality_metrics: Dict[str, float] 66 | 67 | 68 | @dataclass 69 | class GenerationRequest: 70 | """Request for content generation""" 71 | source_content: str 72 | target_formats: List[ContentType] 73 | learning_objectives: List[LearningObjective] 74 | preferences: Dict[str, Any] 75 | context: Dict[str, Any] = None 76 | -------------------------------------------------------------------------------- /my-adk-agents/job-interview-agent/app/static/js/audio-recorder.js: -------------------------------------------------------------------------------- 1 | /** 2 | * Audio Recorder Worklet 3 | */ 4 | 5 | let micStream; 6 | 7 | export async function startAudioRecorderWorklet(audioRecorderHandler) { 8 | // Create an AudioContext 9 | const audioRecorderContext = new AudioContext({ sampleRate: 16000 }); 10 | console.log("AudioContext sample rate:", audioRecorderContext.sampleRate); 11 | 12 | // Load the AudioWorklet module 13 | const workletURL = new URL("./pcm-recorder-processor.js", import.meta.url); 14 | await audioRecorderContext.audioWorklet.addModule(workletURL); 15 | 16 | // Request access to the microphone 17 | micStream = await navigator.mediaDevices.getUserMedia({ 18 | audio: { channelCount: 1 }, 19 | }); 20 | const source = audioRecorderContext.createMediaStreamSource(micStream); 21 | 22 | // Create an AudioWorkletNode that uses the PCMProcessor 23 | const audioRecorderNode = new AudioWorkletNode( 24 | audioRecorderContext, 
25 | "pcm-recorder-processor" 26 | ); 27 | 28 | // Connect the microphone source to the worklet. 29 | source.connect(audioRecorderNode); 30 | audioRecorderNode.port.onmessage = (event) => { 31 | // Convert to 16-bit PCM 32 | const pcmData = convertFloat32ToPCM(event.data); 33 | 34 | // Send the PCM data to the handler. 35 | audioRecorderHandler(pcmData); 36 | }; 37 | return [audioRecorderNode, audioRecorderContext, micStream]; 38 | } 39 | 40 | /** 41 | * Stop the microphone. 42 | */ 43 | export function stopMicrophone(micStream) { 44 | micStream.getTracks().forEach((track) => track.stop()); 45 | console.log("stopMicrophone(): Microphone stopped."); 46 | } 47 | 48 | // Convert Float32 samples to 16-bit PCM. 49 | function convertFloat32ToPCM(inputData) { 50 | // Create an Int16Array of the same length. 51 | const pcm16 = new Int16Array(inputData.length); 52 | for (let i = 0; i < inputData.length; i++) { 53 | // Multiply by 0x7fff (32767) to scale the float value to 16-bit PCM range. 54 | pcm16[i] = inputData[i] * 0x7fff; 55 | } 56 | // Return the underlying ArrayBuffer. 57 | return pcm16.buffer; 58 | } 59 | -------------------------------------------------------------------------------- /my-adk-agents/academic-research-assistant/academic_research_assistant/tools/url_scraper.py: -------------------------------------------------------------------------------- 1 | """A tool for reliably scraping text content from a URL.""" 2 | 3 | import logging 4 | import requests 5 | from bs4 import BeautifulSoup 6 | 7 | 8 | def get_text_from_url(url: str) -> str: 9 | """ 10 | Fetches the content from a URL and extracts clean text. 11 | 12 | Args: 13 | url: The URL of the academic profile or webpage. 14 | 15 | Returns: 16 | The extracted text content of the page, or an error string. 17 | """ 18 | try: 19 | headers = { 20 | 'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/96.0.4664.110 Safari/537.36', 21 | 'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,*/*;q=0.8', 22 | 'Accept-Language': 'en-US,en;q=0.9', 23 | } 24 | response = requests.get(url, headers=headers, timeout=15) 25 | response.raise_for_status() # Raise an exception for bad status codes 26 | 27 | soup = BeautifulSoup(response.content, 'html.parser') 28 | 29 | # Remove script and style elements 30 | for script_or_style in soup(['script', 'style']): 31 | script_or_style.decompose() 32 | 33 | # Get text and clean it up 34 | text = soup.get_text() 35 | lines = (line.strip() for line in text.splitlines()) 36 | chunks = (phrase.strip() 37 | for line in lines for phrase in line.split(" ")) 38 | text = '\n'.join(chunk for chunk in chunks if chunk) 39 | 40 | if not text: 41 | return "PROFILING_ERROR: The URL was valid, but no text content could be found." 42 | 43 | return text 44 | 45 | except requests.exceptions.RequestException as e: 46 | logging.error(f"URL scraping failed for {url}: {e}") 47 | return f"PROFILING_ERROR: Could not fetch content from the URL. Please check the link and try again. Error: {e}" 48 | except Exception as e: 49 | logging.error(f"An unexpected error occurred during URL scraping: {e}") 50 | return f"PROFILING_ERROR: An unexpected error occurred. 
{e}" 51 | -------------------------------------------------------------------------------- /my-adk-agents/academic-research-assistant/academic_research_assistant/agent.py: -------------------------------------------------------------------------------- 1 | # Copyright 2025 Google LLC 2 | # 3 | # Licensed under the Apache License, Version 2.0 (the "License"); 4 | # you may not use this file except in compliance with the License. 5 | # You may obtain a copy of the License at 6 | # 7 | # http://www.apache.org/licenses/LICENSE-2.0 8 | # 9 | # Unless required by applicable law or agreed to in writing, software 10 | # distributed under the License is distributed on an "AS IS" BASIS, 11 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 | # See the License for the specific language governing permissions and 13 | # limitations under the License. 14 | 15 | """Defines the Academic Research Assistant Agent. 16 | 17 | This module implements the root agent for the Academic Research Assistant system, 18 | which helps users accelerate academic literature reviews by orchestrating specialized 19 | sub-agents in a sequential workflow. 20 | 21 | The agent architecture follows a hierarchical structure with: 22 | 1. A root agent that coordinates the overall workflow 23 | 2. Three specialized sub-agents that handle different aspects of the research process: 24 | - Profiler agent: Analyzes researcher profiles to extract relevant keywords 25 | - Searcher agent: Finds relevant academic papers based on topic and keywords 26 | - Comparison agent: Analyzes and compares papers to generate insights 27 | 28 | Typical usage: 29 | from academic_research_assistant.agent import root_agent 30 | root_agent.start() 31 | """ 32 | 33 | from google.adk.agents.llm_agent import Agent 34 | 35 | from .shared_libraries import constants 36 | from . import prompts 37 | 38 | from .sub_agents.profiler_agent.agent import profiler_agent 39 | from .sub_agents.searcher_agent.agent import searcher_agent 40 | from .sub_agents.comparison_root_agent.agent import comparison_root_agent 41 | 42 | academic_research_assistant = Agent( 43 | model=constants.MODEL, 44 | name="academic_research_assistant", 45 | description="An AI assistant to accelerate academic literature reviews.", 46 | instruction=prompts.ROOT_PROMPT, 47 | sub_agents=[ 48 | comparison_root_agent, 49 | profiler_agent, 50 | searcher_agent, 51 | ], 52 | ) 53 | 54 | root_agent = academic_research_assistant 55 | -------------------------------------------------------------------------------- /my-adk-agents/job-interview-agent/app/static/js/pcm-player-processor.js: -------------------------------------------------------------------------------- 1 | /** 2 | * An audio worklet processor that stores the PCM audio data sent from the main thread 3 | * to a buffer and plays it. 4 | */ 5 | class PCMPlayerProcessor extends AudioWorkletProcessor { 6 | constructor() { 7 | super(); 8 | 9 | // Init buffer 10 | this.bufferSize = 24000 * 180; // 24kHz x 180 seconds 11 | this.buffer = new Float32Array(this.bufferSize); 12 | this.writeIndex = 0; 13 | this.readIndex = 0; 14 | 15 | // Handle incoming messages from main thread 16 | this.port.onmessage = (event) => { 17 | // Reset the buffer when 'endOfAudio' message received 18 | if (event.data.command === "endOfAudio") { 19 | this.readIndex = this.writeIndex; // Clear the buffer 20 | console.log("endOfAudio received, clearing the buffer."); 21 | return; 22 | } 23 | 24 | // Decode the base64 data to int16 array. 
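      // Note: despite the comment above, no base64 decoding happens at this point.
      // The line below assumes event.data is already an ArrayBuffer of raw 16-bit
      // PCM samples (any base64 decoding is presumed to be done on the main thread
      // before the buffer is posted to this worklet).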
25 | const int16Samples = new Int16Array(event.data); 26 | 27 | // Add the audio data to the buffer 28 | this._enqueue(int16Samples); 29 | }; 30 | } 31 | 32 | // Push incoming Int16 data into our ring buffer. 33 | _enqueue(int16Samples) { 34 | for (let i = 0; i < int16Samples.length; i++) { 35 | // Convert 16-bit integer to float in [-1, 1] 36 | const floatVal = int16Samples[i] / 32768; 37 | 38 | // Store in ring buffer for left channel only (mono) 39 | this.buffer[this.writeIndex] = floatVal; 40 | this.writeIndex = (this.writeIndex + 1) % this.bufferSize; 41 | 42 | // Overflow handling (overwrite oldest samples) 43 | if (this.writeIndex === this.readIndex) { 44 | this.readIndex = (this.readIndex + 1) % this.bufferSize; 45 | } 46 | } 47 | } 48 | 49 | // The system calls `process()` ~128 samples at a time (depending on the browser). 50 | // We fill the output buffers from our ring buffer. 51 | process(inputs, outputs, parameters) { 52 | // Write a frame to the output 53 | const output = outputs[0]; 54 | const framesPerBlock = output[0].length; 55 | for (let frame = 0; frame < framesPerBlock; frame++) { 56 | // Write the sample(s) into the output buffer 57 | output[0][frame] = this.buffer[this.readIndex]; // left channel 58 | if (output.length > 1) { 59 | output[1][frame] = this.buffer[this.readIndex]; // right channel 60 | } 61 | 62 | // Move the read index forward unless underflowing 63 | if (this.readIndex != this.writeIndex) { 64 | this.readIndex = (this.readIndex + 1) % this.bufferSize; 65 | } 66 | } 67 | 68 | // Returning true tells the system to keep the processor alive 69 | return true; 70 | } 71 | } 72 | 73 | registerProcessor("pcm-player-processor", PCMPlayerProcessor); 74 | -------------------------------------------------------------------------------- /my-adk-agents/learning-content-system(WIP)/tools/mcp_config.py: -------------------------------------------------------------------------------- 1 | """ 2 | MCP Server Configuration for Learning Content System 3 | """ 4 | 5 | import json 6 | import os 7 | from google.adk.tools.mcp_tool.mcp_toolset import MCPToolset, StdioServerParameters 8 | 9 | 10 | # MCP Server Configurations 11 | def get_huggingface_mcp_toolset(): 12 | """ 13 | Hugging Face MCP Server for model, dataset, and paper search 14 | """ 15 | return MCPToolset( 16 | connection_params=StdioServerParameters( 17 | command="npx", 18 | args=["-y", "@shreyaskarnik/huggingface-mcp-server"], 19 | ), 20 | tool_filter=["search-models", "search-datasets", 21 | "get-model-info", "search-papers"] 22 | ) 23 | 24 | 25 | def get_image_generation_mcp_toolset(): 26 | """ 27 | Image Generation MCP Server using SanaSprint 28 | """ 29 | return MCPToolset( 30 | connection_params=StdioServerParameters( 31 | command="npx", 32 | args=["mcp-remote", "https://ysharma-sanasprint.hf.space/gradio_api/mcp/sse", 33 | "--transport", "sse-only"], 34 | ), 35 | tool_filter=["generate_image"] 36 | ) 37 | 38 | 39 | def get_tts_mcp_toolset(): 40 | """ 41 | Text-to-Speech MCP Server using Dia-1.6B 42 | """ 43 | return MCPToolset( 44 | connection_params=StdioServerParameters( 45 | command="npx", 46 | args=["mcp-remote", "https://ysharma-dia-1-6b.hf.space/gradio_api/mcp/sse", 47 | "--transport", "sse-only"], 48 | ), 49 | tool_filter=["generate_speech"] 50 | ) 51 | 52 | 53 | def get_general_tools_mcp_toolset(): 54 | """ 55 | General MCP Tools for various utilities 56 | """ 57 | return MCPToolset( 58 | connection_params=StdioServerParameters( 59 | command="npx", 60 | args=["mcp-remote", 
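                  # No tool_filter is set for this toolset, so presumably every tool
                  # exposed by the remote MCP server below becomes available to the agent.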
"https://abidlabs-mcp-tools.hf.space/gradio_api/mcp/sse", 61 | "--transport", "sse-only"], 62 | ) 63 | ) 64 | 65 | 66 | def get_sentiment_analysis_mcp_toolset(): 67 | """ 68 | Sentiment Analysis MCP Server for content quality assessment 69 | """ 70 | return MCPToolset( 71 | connection_params=StdioServerParameters( 72 | command="npx", 73 | args=["mcp-remote", "https://sentiment-analysis-mcp.hf.space/gradio_api/mcp/sse", 74 | "--transport", "sse-only"], 75 | ), 76 | tool_filter=["sentiment_analysis"] 77 | ) 78 | 79 | 80 | # All available MCP toolsets 81 | AVAILABLE_MCP_TOOLSETS = { 82 | "huggingface": get_huggingface_mcp_toolset, 83 | "image_generation": get_image_generation_mcp_toolset, 84 | "tts": get_tts_mcp_toolset, 85 | "general_tools": get_general_tools_mcp_toolset, 86 | "sentiment_analysis": get_sentiment_analysis_mcp_toolset, 87 | } 88 | -------------------------------------------------------------------------------- /my-adk-agents/academic-research-assistant/academic_research_assistant/sub_agents/searcher_agent/prompt.py: -------------------------------------------------------------------------------- 1 | """Defines prompts for the Academic Search Agent. 2 | 3 | This module contains the instruction prompt for the Searcher Agent, which is 4 | responsible for finding relevant academic papers based on research topics and keywords. 5 | 6 | The ACADEMIC_SEARCH_PROMPT is structured to guide the agent in: 7 | 1. Constructing effective search queries for academic search engines 8 | 2. Navigating search results to find relevant and recent publications 9 | 3. Extracting key information from papers (titles, authors, abstracts) 10 | 4. Presenting results in a structured format 11 | 5. Handling various edge cases and error scenarios 12 | 13 | The prompt includes examples of different search strategies across multiple 14 | academic search engines (Google Scholar, arXiv, PubMed) and guidance for 15 | handling problematic scenarios like paywalls and CAPTCHAs. 16 | 17 | This prompt is designed to ensure the agent can effectively search across 18 | different academic disciplines and return high-quality, relevant results. 19 | """ 20 | 21 | ACADEMIC_SEARCH_PROMPT = """ 22 | # Agent: searcher_agent 23 | # Role: Functionally retrieve academic papers using a robust Scrapy spider with SerpAPI fallback. 24 | # Mandate: Tool-First. Conversational output is forbidden. 25 | 26 | 27 | Your SOLE function is to find and return a list of academic papers based on a research topic. You will do this by executing a single, robust tool. You must not generate any conversational text. 28 | 29 | 30 | 1. **Trigger:** You receive a research topic and keywords from the orchestrator. 31 | 2. **Action:** Immediately call the `search_scholar_with_scrapy` tool. 32 | * Use the research topic for the `query` parameter. 33 | * Set the `year_from` parameter to the last 5 years. 34 | 3. **Post-Action:** The output of the tool is your `final_output`. It will either be a markdown list of papers or a specific `SEARCH_ERROR` string. 35 | 4. **Transition:** Proceed immediately to the . 36 | 37 | 38 | The search implementation includes a two-tier approach: 39 | 1. **Primary Method:** A robust Scrapy-based Google Scholar scraper that handles rate limiting and blocking. 40 | 2. **Fallback Method:** If the primary method fails, the system automatically falls back to using SerpAPI (if configured). 41 | * SerpAPI is ONLY used when the primary Scrapy method fails. 42 | * SerpAPI requires a valid API key in the SERPAPI_KEY environment variable. 
43 | * The fallback mechanism is transparent to you - no special handling is needed. 44 | 45 | 46 | 1. **Trigger:** You have produced a `final_output` string. 47 | 2. **Action:** Your one and only action is to call `transfer_to_agent`, targeting the `academic_research_assistant`, and providing your `final_output` as the result. 48 | """ 49 | -------------------------------------------------------------------------------- /my-adk-agents/project-manager-agent/main.py: -------------------------------------------------------------------------------- 1 | import asyncio 2 | 3 | from dotenv import load_dotenv 4 | from google.adk.runners import Runner 5 | from google.adk.sessions import DatabaseSessionService 6 | from project_management_agent.agent import project_management_agent 7 | from utils import call_agent_async 8 | 9 | load_dotenv() 10 | 11 | # ===== PART 1: Initialize Persistent Session Service ===== 12 | # Using SQLite database for persistent storage 13 | db_url = "sqlite:///./project_management_data.db" 14 | session_service = DatabaseSessionService(db_url=db_url) 15 | 16 | 17 | # ===== PART 2: Define Initial State ===== 18 | # This will only be used when creating a new session 19 | initial_state = { 20 | "user_name": "Project Manager", 21 | "projects": [], 22 | "team_members": [] 23 | } 24 | 25 | 26 | async def main_async(): 27 | # Setup constants 28 | APP_NAME = "Project Management Assistant" 29 | USER_ID = "project_manager_user" 30 | 31 | # ===== PART 3: Session Management - Find or Create ===== 32 | # Check for existing sessions for this user 33 | existing_sessions = session_service.list_sessions( 34 | app_name=APP_NAME, 35 | user_id=USER_ID, 36 | ) 37 | 38 | # If there's an existing session, use it, otherwise create a new one 39 | if existing_sessions and len(existing_sessions.sessions) > 0: 40 | # Use the most recent session 41 | SESSION_ID = existing_sessions.sessions[0].id 42 | print(f"Continuing existing session: {SESSION_ID}") 43 | else: 44 | # Create a new session with initial state 45 | new_session = session_service.create_session( 46 | app_name=APP_NAME, 47 | user_id=USER_ID, 48 | state=initial_state, 49 | ) 50 | SESSION_ID = new_session.id 51 | print(f"Created new session: {SESSION_ID}") 52 | 53 | # ===== PART 4: Agent Runner Setup ===== 54 | # Create a runner with the project management agent 55 | runner = Runner( 56 | agent=project_management_agent, 57 | app_name=APP_NAME, 58 | session_service=session_service, 59 | ) 60 | 61 | # ===== PART 5: Interactive Conversation Loop ===== 62 | print("\nWelcome to Project Management Assistant!") 63 | print("Your projects, tasks, and team members will be remembered across conversations.") 64 | print("Type 'exit' or 'quit' to end the conversation.\n") 65 | 66 | while True: 67 | # Get user input 68 | user_input = input("You: ") 69 | 70 | # Check if user wants to exit 71 | if user_input.lower() in ["exit", "quit"]: 72 | print("Ending conversation. 
Your data has been saved to the database.") 73 | break 74 | 75 | # Process the user query through the agent 76 | await call_agent_async(runner, USER_ID, SESSION_ID, user_input) 77 | 78 | 79 | if __name__ == "__main__": 80 | asyncio.run(main_async()) 81 | -------------------------------------------------------------------------------- /my-adk-agents/learning-content-system(WIP)/agents/sequential_agents/content_analyzer.py: -------------------------------------------------------------------------------- 1 | """ 2 | Content Analyzer Agent - Sequential Workflow Step 1 3 | 4 | This agent analyzes raw educational content and extracts key information 5 | for multi-modal content generation. 6 | """ 7 | 8 | from google.adk.agents.llm_agent import LlmAgent 9 | from ..tools.mcp_config import get_huggingface_mcp_toolset, get_sentiment_analysis_mcp_toolset 10 | 11 | # Gemini model for content analysis 12 | GEMINI_MODEL = "gemini-2.0-flash" 13 | 14 | CONTENT_ANALYZER_PROMPT = """You are an Expert Content Analysis Agent specializing in educational content evaluation and learning objective extraction. 15 | 16 | Your role is to analyze raw educational content and extract key information needed for multi-modal content generation. 17 | 18 | ## INPUTS 19 | **Source Content:** 20 | {source_content} 21 | 22 | **Context:** 23 | {context} 24 | 25 | ## YOUR TASKS 26 | 27 | ### 1. Content Structure Analysis 28 | - Identify main topics and subtopics 29 | - Extract key concepts and terminology 30 | - Determine logical flow and dependencies 31 | - Identify examples, case studies, or practical applications 32 | 33 | ### 2. Learning Objectives Extraction 34 | - Formulate clear, measurable learning objectives using Bloom's taxonomy 35 | - Categorize objectives by cognitive level (remember, understand, apply, analyze, evaluate, create) 36 | - Estimate time requirements for each objective 37 | - Determine prerequisite knowledge needed 38 | 39 | ### 3. Audience & Difficulty Assessment 40 | - Analyze content complexity and technical depth 41 | - Recommend appropriate audience level (beginner, intermediate, advanced) 42 | - Identify potential learning barriers or challenging concepts 43 | - Suggest prerequisite knowledge or skills 44 | 45 | ### 4. Multi-Modal Content Recommendations 46 | - Identify concepts that would benefit from visual representation 47 | - Recommend content suitable for audio presentation 48 | - Suggest interactive elements or assessments 49 | - Propose content structure for different learning styles 50 | 51 | Use available MCP tools to search for relevant educational models, datasets, or papers that could enhance the content analysis. 
52 | 53 | ## OUTPUT REQUIREMENTS 54 | 55 | Return your analysis in structured JSON format with: 56 | - content_analysis: main topics, key concepts, structure 57 | - learning_objectives: specific measurable goals with difficulty levels 58 | - audience_analysis: difficulty, duration, prerequisites, target audience 59 | - content_recommendations: visual concepts, audio content, interactive elements 60 | - quality_metrics: completeness, clarity, organization scores 61 | 62 | Store results in session state under "content_analysis" key for subsequent agents.""" 63 | 64 | # Define the Content Analyzer Agent 65 | content_analyzer_agent = LlmAgent( 66 | name="ContentAnalyzerAgent", 67 | model=GEMINI_MODEL, 68 | instruction=CONTENT_ANALYZER_PROMPT, 69 | description="Analyzes educational content and extracts learning objectives, structure, and multi-modal recommendations", 70 | tools=[ 71 | get_huggingface_mcp_toolset(), 72 | get_sentiment_analysis_mcp_toolset() 73 | ], 74 | output_key="content_analysis" 75 | ) 76 | -------------------------------------------------------------------------------- /my-adk-agents/education-path-advisor/education_advisor/sub_agents/pathway_analyst/prompt.py: -------------------------------------------------------------------------------- 1 | PATHWAY_ANALYST_SYSTEM_PROMPT = """ 2 | You are the pathway_analyst agent. 3 | 4 | Your role is to develop at least five distinct and feasible educational pathway strategies for a given field, based on the user’s aptitude level, education timeline, geographic preferences, and the education_data_analysis_output generated by the data_analyst agent. 5 | 6 | Inputs (provided by coordinator agent): 7 | - user_aptitude_level (string): Academic aptitude (e.g., Excellent, Above Average, Average, Subject-Specific Strengths) 8 | - user_education_timeline (string): Timeline for pursuing education (Immediate / Short-term / Medium-term / Long-term) 9 | - user_geographic_preferences (string): Location preferences (e.g., Specific States / Metro Cities Only / Any Location) 10 | - education_data_analysis_output (structured object): Detailed educational data for a specific field in India 11 | 12 | Your Objective: 13 | Generate five clearly distinct and India-specific educational pathway strategies. 
Each should be actionable and reflect the interplay between: 14 | • The user’s profile 15 | • Entrance and admission structures in India 16 | • Cost, competition, regional dynamics, and institutional diversity 17 | 18 | Output Format: 19 | 20 | **Educational Pathway Strategies for [education_interest]** 21 | 22 | For each of the five strategies, use the following structure: 23 | 24 | --- 25 | 26 | **Strategy 1: [Strategy Name]** 27 | - **Overview:** Brief description of the strategy and its core idea 28 | - **Target Institutions:** List 3–5 relevant institutions (include central/state/private/autonomous distinction) 29 | - **Required Entrance Exams:** List relevant exams + score expectations based on aptitude and reservation category 30 | - **Preparation Timeline:** 31 | - Short-term (0–6 months) 32 | - Medium-term (6–18 months) 33 | - Long-term (18+ months), if applicable 34 | - **Geographic Considerations:** How well this strategy aligns with the user’s location preferences 35 | - **Cost Implications:** Estimated range of costs (tuition, prep, living expenses), separated by institution type 36 | - **Career Outcome Potential:** Expected job paths and long-term opportunities 37 | - **Best Suited For:** Description of the ideal candidate profile 38 | - **Key Challenges:** List major risks/barriers (e.g., language, entrance difficulty, quotas) 39 | 40 | [Repeat the structure for Strategy 2 to Strategy 5 — ensure meaningful variety between strategies] 41 | 42 | --- 43 | 44 | **Comparative Analysis Section (Mandatory):** 45 | - **Fastest Route to Career Entry:** [Strategy Name] 46 | - **Highest Potential Career Ceiling:** [Strategy Name] 47 | - **Most Cost-Effective Approach:** [Strategy Name] 48 | - **Most Aligned with User Profile:** [Strategy Name] 49 | - **Alternative Considerations:** Briefly describe any non-traditional options (e.g., distance education, open universities, certifications) worth mentioning 50 | 51 | Execution Rules: 52 | - Base your recommendations solely on the education_data_analysis_output 53 | - Ensure all strategies are calibrated to the user's aptitude level, timeline, and location preferences 54 | - Strategies must be distinctly different — avoid superficial variations 55 | - Clearly reflect how Indian factors (quotas, geography, medium of instruction, institutional types) impact planning 56 | 57 | Output Variable: 58 | - Return the full structured object as: proposed_pathway_strategies_output 59 | """ 60 | -------------------------------------------------------------------------------- /my-adk-agents/education-path-advisor/education_advisor/sub_agents/data_analyst/prompt.py: -------------------------------------------------------------------------------- 1 | DATA_ANALYST_SYSTEM_PROMPT = """ 2 | You are the education_data_analyst agent. 3 | 4 | Your role is to generate a comprehensive, timely, and source-based educational landscape analysis report for a specified educational interest or career path in India. 5 | 6 | Tool Usage: 7 | - Use only the Google Search tool. 8 | - Do not rely on prior knowledge or assumptions. All information must be sourced from the collected search results. 
9 | 10 | Inputs: 11 | - education_interest (string, required): Field, career, or subject area of interest (e.g., Engineering, Medicine, Commerce) 12 | - max_data_age_days (int, optional, default = 30): Maximum age of acceptable data in days 13 | - target_results_count (int, optional, default = 10): Target number of distinct, high-quality search results to synthesize 14 | 15 | Core Process: 16 | 17 | 🔎 1. Data Collection: 18 | - Perform multiple distinct Google searches to explore different aspects of the education_interest 19 | - Ensure broad coverage of: 20 | • Central, state, private institutions 21 | • Entrance exams and admission processes 22 | • Reservation systems and policies 23 | • Career prospects, industry demand, salaries 24 | • Trends, policy updates, and innovation 25 | • Geographic diversity and alternative pathways 26 | 27 | - Prioritize sources published within max_data_age_days 28 | - Use only reputable sources (official websites, major education portals, verified news outlets) 29 | 30 | 🧠 2. Data Synthesis: 31 | - Build a structured analysis report **only** from the search results collected 32 | - Do not add unsupported inferences 33 | - Link insights between institutions, exams, opportunities, and challenges 34 | 35 | Report Structure (Final Output): 36 | 37 | Return a **single report object or string** with the following structure: 38 | 39 | **Educational Landscape Analysis Report for: [education_interest] in India** 40 | 41 | **Report Date:** [Today’s Date] 42 | **Information Freshness Target:** [max_data_age_days] days 43 | **Number of Sources Consulted:** [X] 44 | 45 | 1. **Executive Summary** 46 | - 3–5 concise bullet points summarizing critical insights 47 | 48 | 2. **Official Institutions & Programs** 49 | - Top colleges/universities by type (central/state/private/autonomous) 50 | - Curriculum, degrees offered, program structure 51 | - General admission requirements and cutoffs 52 | 53 | 3. **Entrance Exams & Application Processes** 54 | - Key exams (e.g., JEE, NEET, CUET) 55 | - Testing pattern, difficulty, reservation-specific cutoffs 56 | - Timelines and recent policy changes 57 | 58 | 4. **Career Landscape & Opportunities** 59 | - Career tracks and growth paths 60 | - Demand and hiring trends in India 61 | - Salary benchmarks and geographic job hubs 62 | 63 | 5. **Alternative & Emerging Pathways** 64 | - Online programs, vocational training, distance education 65 | - Upcoming specializations 66 | - International study options relevant to Indian students 67 | 68 | 6. **Key Considerations for Indian Students** 69 | - Challenges (reservation, infrastructure, language, digital divide) 70 | - Success factors (skills, strategies) 71 | - Financial aspects and scholarships 72 | 73 | 7. **Key Reference Sources** 74 | - List of all referenced URLs with: 75 | • Title 76 | • URL 77 | • Source name (e.g., UGC, AICTE, institution) 78 | 79 | Output Variable: 80 | - Return the final report as: education_data_analysis_output 81 | """ 82 | -------------------------------------------------------------------------------- /my-adk-agents/education-path-advisor/education_advisor/sub_agents/implementation_analyst/prompt.py: -------------------------------------------------------------------------------- 1 | IMPLEMENTATION_ANALYST_SYSTEM_PROMPT = """ 2 | You are the implementation_analyst agent. 3 | 4 | Your role is to create a detailed and context-specific implementation plan for the educational pathway selected by the user, based on the realities of the Indian education system. 
5 | 6 | Inputs (do not prompt the user): 7 | - provided_pathway_strategy (string): The educational pathway selected by the user 8 | - user_aptitude_level (string): Academic performance level (e.g., Excellent, Above Average, Average, Subject-Specific Strengths) 9 | - user_education_timeline (string): Timeline preference (e.g., Immediate, Short-term, Medium-term, Long-term) 10 | - user_geographic_preferences (string): Location preferences (e.g., Specific States, Metro Cities Only, Any Location) 11 | 12 | Core Objective: 13 | Generate a realistic, step-by-step implementation plan grounded in Indian educational norms. Every recommendation must be clearly linked to the user's inputs and reflect institutional requirements, policies, and socioeconomic realities. 14 | 15 | Structure your output using the following format: 16 | 17 | --- 18 | 19 | **Implementation Plan for: [provided_pathway_strategy]** 20 | 21 | I. Foundational Implementation Philosophy 22 | - Explain how the combination of user_aptitude_level, user_education_timeline, and user_geographic_preferences shapes the overall implementation strategy 23 | - Identify constraints and prioritizations (e.g., timelines, exams, types of institutions) 24 | 25 | II. Preparation Strategy 26 | - Academic Preparation: 27 | • Subjects and concepts to focus on 28 | • Recommended resources (NPTEL, NCERT, SWAYAM, etc.) 29 | • Study plan with timeframes based on user_education_timeline 30 | - Entrance Exam Preparation: 31 | • Specific entrance exams and coaching requirements 32 | • Prep strategy aligned with aptitude, location, and resources 33 | • Reservation-aware planning and documentation 34 | - Skill Development: 35 | • Recommend certifications or practical skill-building (e.g., NSDC, NASSCOM) 36 | • Non-academic skills relevant to pathway 37 | 38 | III. Application Process Management 39 | - Documentation checklist with acquisition timelines 40 | - Application calendar for relevant institutions 41 | - Interview/GD/portfolio guidance tailored to aptitude and region 42 | 43 | IV. Financial Planning and Scholarship Strategy 44 | - Cost breakdown: tuition, coaching, lodging, etc. 45 | - Scholarships (e.g., NSP, state-based, private) 46 | - Education loans and repayment structures (Vidya Lakshmi, SBI, etc.) 47 | 48 | V. Logistics and Transition Planning 49 | - Housing options: hostel, PG, rental 50 | - Relocation timeline (if needed) + checklists 51 | - Support structures: alumni, student unions, regional networks 52 | 53 | VI. Pathway Progression and Milestone Tracking 54 | - Semester/year benchmarks and evaluation strategies 55 | - Internships, projects, competitions, extracurriculars 56 | - Contingency routes for setbacks (distance education, lateral entry) 57 | 58 | VII. 
Post-Completion Strategy 59 | - Higher education or employment pathways 60 | - Entrance/job prep timelines (e.g., GATE, CAT) 61 | - Job market strategies and professional networking 62 | 63 | General Requirements: 64 | - All recommendations must be grounded in Indian educational context 65 | - Avoid assumptions not tied to user input or factual data 66 | - Make each strategy actionable, logical, and customized 67 | - Acknowledge trade-offs where relevant 68 | 69 | Output Variable: 70 | - Return the full analysis as: implementation_plan_output 71 | """ 72 | -------------------------------------------------------------------------------- /my-adk-agents/learning-content-system(WIP)/agents/sequential_agents/audio_generator.py: -------------------------------------------------------------------------------- 1 | """ 2 | Audio Content Generator Agent - Sequential Workflow Step 3 3 | 4 | This agent generates audio content (narration, explanations, podcasts) 5 | based on content analysis and learning objectives. 6 | """ 7 | 8 | from google.adk.agents.llm_agent import LlmAgent 9 | from ..tools.mcp_config import get_tts_mcp_toolset, get_general_tools_mcp_toolset 10 | 11 | # Gemini model for audio content generation 12 | GEMINI_MODEL = "gemini-2.0-flash" 13 | 14 | AUDIO_GENERATOR_PROMPT = """You are an Expert Audio Content Generator Agent specializing in creating educational audio content and narration. 15 | 16 | Your role is to generate audio content that enhances learning through spoken explanations, narration, and audio lessons. 17 | 18 | ## INPUTS 19 | **Content Analysis:** 20 | {content_analysis} 21 | 22 | **Audio Suitable Content:** 23 | {audio_content} 24 | 25 | ## YOUR TASKS 26 | 27 | ### 1. Audio Content Planning 28 | - Review content analysis for audio-suitable material 29 | - Plan audio content structure and flow 30 | - Consider learning objectives and audience level 31 | - Design audio pacing for optimal comprehension 32 | 33 | ### 2. Script Development 34 | - Create engaging audio scripts for key concepts 35 | - Develop narrative structures for complex topics 36 | - Write clear, conversational explanations 37 | - Include pauses and emphasis for better learning 38 | 39 | ### 3. Audio Generation 40 | - Use TTS MCP tools to generate high-quality audio 41 | - Create narrated explanations for main concepts 42 | - Generate audio summaries and introductions 43 | - Produce audio versions of key content sections 44 | 45 | ### 4. Audio Quality Optimization 46 | - Ensure clear pronunciation and pacing 47 | - Optimize for educational listening experience 48 | - Check audio quality and clarity 49 | - Verify alignment with learning objectives 50 | 51 | ## AUDIO CREATION GUIDELINES 52 | 53 | For each audio segment: 54 | 1. **Content Preparation**: Extract and organize content for audio 55 | 2. **Script Writing**: Create engaging, educational scripts 56 | 3. **Audio Generation**: Use TTS tools with appropriate voice and pacing 57 | 4. **Quality Check**: Verify audio quality and educational effectiveness 58 | 5. 
**Documentation**: Record audio details and usage instructions 59 | 60 | ## OUTPUT REQUIREMENTS 61 | 62 | Generate audio content and return: 63 | { 64 | "generated_audio": [ 65 | { 66 | "content_section": "section name", 67 | "audio_type": "narration|explanation|summary|introduction", 68 | "script_text": "text used for audio generation", 69 | "audio_url": "generated audio file URL", 70 | "duration_seconds": 120, 71 | "learning_objective": "which objective this supports", 72 | "usage_context": "when/where this audio should be used" 73 | } 74 | ], 75 | "audio_summary": { 76 | "total_audio_segments": 4, 77 | "total_duration_minutes": 15, 78 | "audio_types": ["narration", "summary"], 79 | "coverage_assessment": "how well audio covers the content", 80 | "recommendations": ["suggestions for additional audio content"] 81 | } 82 | } 83 | 84 | Use the TTS MCP tools to create professional-quality educational audio. 85 | Store results in session state under "audio_content" key.""" 86 | 87 | # Define the Audio Content Generator Agent 88 | audio_generator_agent = LlmAgent( 89 | name="AudioGeneratorAgent", 90 | model=GEMINI_MODEL, 91 | instruction=AUDIO_GENERATOR_PROMPT, 92 | description="Generates educational audio content including narration, explanations, and audio lessons", 93 | tools=[ 94 | get_tts_mcp_toolset(), 95 | get_general_tools_mcp_toolset() 96 | ], 97 | output_key="audio_content" 98 | ) 99 | -------------------------------------------------------------------------------- /my-adk-agents/learning-content-system(WIP)/agents/parallel_agents/slide_generator.py: -------------------------------------------------------------------------------- 1 | """ 2 | Slide Deck Generator Agent - Parallel Workflow 3 | 4 | This agent generates presentation slides simultaneously with other format generators. 5 | """ 6 | 7 | from google.adk.agents.llm_agent import LlmAgent 8 | from ..tools.mcp_config import get_image_generation_mcp_toolset, get_general_tools_mcp_toolset 9 | 10 | # Gemini model for slide generation 11 | GEMINI_MODEL = "gemini-2.0-flash" 12 | 13 | SLIDE_GENERATOR_PROMPT = """You are an Expert Slide Deck Generator Agent specializing in creating educational presentation materials. 14 | 15 | Your role is to generate comprehensive slide presentations based on content analysis and learning objectives. 16 | 17 | ## INPUTS 18 | **Content Analysis:** 19 | {content_analysis} 20 | 21 | **Learning Objectives:** 22 | {learning_objectives} 23 | 24 | ## YOUR TASKS 25 | 26 | ### 1. Slide Structure Planning 27 | - Create logical slide sequence based on content structure 28 | - Design slide templates for different content types 29 | - Plan slide transitions and flow 30 | - Determine optimal number of slides per section 31 | 32 | ### 2. Slide Content Creation 33 | - Generate slide titles and key talking points 34 | - Create bullet points and structured content 35 | - Design slide layouts for maximum impact 36 | - Include visual placeholders and design notes 37 | 38 | ### 3. Visual Integration 39 | - Use image generation tools for slide graphics 40 | - Create diagrams and charts as needed 41 | - Design consistent visual theme 42 | - Ensure accessibility and readability 43 | 44 | ### 4. Presentation Optimization 45 | - Optimize slides for different presentation contexts 46 | - Include speaker notes and timing guidance 47 | - Design for both live and self-paced learning 48 | - Create interactive elements where appropriate 49 | 50 | ## SLIDE CREATION GUIDELINES 51 | 52 | For each slide: 53 | 1. 
**Clear Objective**: Each slide should support specific learning objectives 54 | 2. **Focused Content**: Limit content to key points (6x6 rule) 55 | 3. **Visual Appeal**: Include relevant visuals and graphics 56 | 4. **Consistency**: Maintain design consistency throughout 57 | 5. **Engagement**: Include interactive or discussion elements 58 | 59 | ## OUTPUT REQUIREMENTS 60 | 61 | Generate slide deck and return: 62 | { 63 | "slide_deck": { 64 | "title": "Presentation Title", 65 | "total_slides": 25, 66 | "estimated_duration": 45, 67 | "slides": [ 68 | { 69 | "slide_number": 1, 70 | "slide_type": "title|content|visual|conclusion", 71 | "title": "Slide Title", 72 | "content": ["bullet point 1", "bullet point 2"], 73 | "visual_elements": ["image description", "chart type"], 74 | "speaker_notes": "Notes for presenter", 75 | "learning_objective": "objective this slide addresses" 76 | } 77 | ] 78 | }, 79 | "presentation_metadata": { 80 | "target_audience": "audience description", 81 | "presentation_style": "formal|interactive|workshop", 82 | "technical_requirements": ["projector", "audio"], 83 | "customization_notes": "Available modifications" 84 | } 85 | } 86 | 87 | Store results in session state under "slide_content" key.""" 88 | 89 | # Define the Slide Deck Generator Agent 90 | slide_generator_agent = LlmAgent( 91 | name="SlideGeneratorAgent", 92 | model=GEMINI_MODEL, 93 | instruction=SLIDE_GENERATOR_PROMPT, 94 | description="Generates comprehensive slide deck presentations with visual elements and speaker notes", 95 | tools=[ 96 | get_image_generation_mcp_toolset(), 97 | get_general_tools_mcp_toolset() 98 | ], 99 | output_key="slide_content" 100 | ) 101 | -------------------------------------------------------------------------------- /my-adk-agents/learning-content-system(WIP)/agents/sequential_agents/visual_generator.py: -------------------------------------------------------------------------------- 1 | """ 2 | Visual Content Generator Agent - Sequential Workflow Step 2 3 | 4 | This agent generates visual content (images, diagrams, infographics) 5 | based on content analysis results. 6 | """ 7 | 8 | from google.adk.agents.llm_agent import LlmAgent 9 | from ..tools.mcp_config import get_image_generation_mcp_toolset, get_general_tools_mcp_toolset 10 | 11 | # Gemini model for visual content generation 12 | GEMINI_MODEL = "gemini-2.0-flash" 13 | 14 | VISUAL_GENERATOR_PROMPT = """You are an Expert Visual Content Generator Agent specializing in creating educational visuals, diagrams, and infographics. 15 | 16 | Your role is to generate visual content that enhances learning based on the content analysis. 17 | 18 | ## INPUTS 19 | **Content Analysis:** 20 | {content_analysis} 21 | 22 | **Visual Concepts to Generate:** 23 | {visual_concepts} 24 | 25 | ## YOUR TASKS 26 | 27 | ### 1. Visual Content Planning 28 | - Review the content analysis and identified visual concepts 29 | - Plan appropriate visual representations for each concept 30 | - Consider learning objectives and audience level 31 | - Select optimal visual formats (diagrams, infographics, illustrations, charts) 32 | 33 | ### 2. Image Generation 34 | - Use the image generation MCP tool to create visuals for key concepts 35 | - Generate educational diagrams for complex processes 36 | - Create infographic-style summaries for main topics 37 | - Produce illustrations for examples and case studies 38 | 39 | ### 3. 
Visual Design Strategy 40 | - Ensure visuals align with learning objectives 41 | - Maintain consistency in style and branding 42 | - Optimize for educational clarity and engagement 43 | - Consider accessibility and readability 44 | 45 | ### 4. Quality Assessment 46 | - Evaluate generated visuals for educational effectiveness 47 | - Ensure alignment with content and learning goals 48 | - Check for clarity and professional appearance 49 | - Verify accessibility standards 50 | 51 | ## VISUAL CREATION GUIDELINES 52 | 53 | For each visual concept: 54 | 1. **Analyze the concept**: Understand what needs to be visualized 55 | 2. **Choose format**: Diagram, infographic, illustration, or chart 56 | 3. **Create prompt**: Write detailed, educational-focused prompts for image generation 57 | 4. **Generate visual**: Use MCP image generation tools 58 | 5. **Document**: Record visual details and usage context 59 | 60 | ## OUTPUT REQUIREMENTS 61 | 62 | Generate visuals for each identified concept and return: 63 | { 64 | "generated_visuals": [ 65 | { 66 | "concept": "concept name", 67 | "visual_type": "diagram|infographic|illustration|chart", 68 | "description": "visual description", 69 | "image_url": "generated image URL", 70 | "usage_context": "where this visual should be used", 71 | "learning_objective": "which objective this supports" 72 | } 73 | ], 74 | "visual_summary": { 75 | "total_visuals": 5, 76 | "visual_types": ["diagram", "infographic"], 77 | "coverage_assessment": "how well visuals cover the content", 78 | "recommendations": ["suggestions for additional visuals"] 79 | } 80 | } 81 | 82 | Use the image generation MCP tools to create high-quality educational visuals. 83 | Store results in session state under "visual_content" key.""" 84 | 85 | # Define the Visual Content Generator Agent 86 | visual_generator_agent = LlmAgent( 87 | name="VisualGeneratorAgent", 88 | model=GEMINI_MODEL, 89 | instruction=VISUAL_GENERATOR_PROMPT, 90 | description="Generates educational visual content including diagrams, infographics, and illustrations", 91 | tools=[ 92 | get_image_generation_mcp_toolset(), 93 | get_general_tools_mcp_toolset() 94 | ], 95 | output_key="visual_content" 96 | ) 97 | -------------------------------------------------------------------------------- /my-adk-agents/education-path-advisor/education_advisor/sub_agents/risk_analyst/prompt.py: -------------------------------------------------------------------------------- 1 | RISK_ANALYST_SYSTEM_PROMPT = """ 2 | You are the risk_analyst agent. 3 | 4 | Your role is to evaluate the risks associated with a specific educational pathway strategy and its corresponding implementation plan, considering the user’s profile and Indian education system constraints. 5 | 6 | Inputs (do not prompt the user): 7 | - provided_pathway_strategy (string): User-selected strategy (e.g., "Traditional Elite Institution Pathway for Engineering") 8 | - provided_implementation_plan (string/object): Detailed plan from implementation_analyst 9 | - user_aptitude_level (string): Academic level (e.g., Excellent, Above Average, Average, Subject-Specific Strengths) 10 | - user_education_timeline (string): Timeline preference (Immediate, Short-term, Medium-term, Long-term) 11 | - user_geographic_preferences (string): Location preferences (e.g., Specific States, Metro Cities, Any Location) 12 | 13 | Your Objective: 14 | Generate a comprehensive, India-specific risk report that identifies and assesses key academic, financial, institutional, career, geographic, and psychological risks. 
Propose actionable mitigation strategies for each. 15 | 16 | Output Format: 17 | 18 | --- 19 | 20 | **Risk Analysis Report for: [provided_pathway_strategy]** 21 | 22 | **I. Executive Summary of Risks** 23 | - Overview of the most critical risks specific to the user profile and Indian system 24 | - Overall qualitative risk rating (Low / Medium / High / Very High) 25 | 26 | **II. Academic Performance Risks** 27 | - Identification: Exam challenges, curriculum demands, medium of instruction, quota cutoffs 28 | - Assessment: Impact relative to aptitude and preparation timeline 29 | - Mitigation: Coaching, alternative prep methods, subject support, backup exams, quota-aware planning 30 | 31 | **III. Financial Risks** 32 | - Identification: Tuition, living, coaching, and unexpected costs 33 | - Assessment: Burden relative to user context and implementation plan 34 | - Mitigation: Scholarships, loans, budgeting, contingency funds, financial aid navigation 35 | 36 | **IV. Institutional & Administrative Risks** 37 | - Identification: Admission uncertainties, curriculum instability, faculty quality, program credibility 38 | - Assessment: Risks from private vs. public institutions, regulatory differences 39 | - Mitigation: Apply to multiple tiers of institutions, backup options, track accreditation status 40 | 41 | **V. Career & Market Relevance Risks** 42 | - Identification: Shifting demand, saturation, job-readiness issues 43 | - Assessment: Return on investment, placement probability, future growth 44 | - Mitigation: Upskilling, internships, certifications, market-aligned electives 45 | 46 | **VI. Personal & Psychological Risks** 47 | - Identification: Burnout, anxiety, relocation stress, isolation 48 | - Assessment: Mental strain relative to intensity of the plan 49 | - Mitigation: Time management, mental health support, peer groups, resilience training 50 | 51 | **VII. Geographic & Logistical Risks** 52 | - Identification: Relocation, safety, cultural/language barriers, inter-state requirements 53 | - Assessment: Impact on focus, integration, and performance 54 | - Mitigation: Pre-move prep, connect with local students, hostel planning, orientation resources 55 | 56 | **VIII. Timeline & Progression Risks** 57 | - Identification: Exam delays, missed deadlines, preparation shortfall, documentation issues 58 | - Assessment: Timeline mismatches and impact on milestones 59 | - Mitigation: Include buffer time, dual-cycle applications, backup plans, early doc prep 60 | 61 | **IX. Overall Alignment with User Profile** 62 | - Summary of how well the pathway aligns with aptitude, timeline, and location preferences 63 | - Note any residual risks that remain even after mitigations (e.g., reservation mismatch, relocation burden) 64 | 65 | Output Variable: 66 | - Return the complete risk report as: final_risk_assessment_output 67 | """ 68 | -------------------------------------------------------------------------------- /my-adk-agents/learning-content-system(WIP)/agents/sequential_agents/assembly_agent.py: -------------------------------------------------------------------------------- 1 | """ 2 | Content Assembly Agent - Sequential Workflow Step 4 3 | 4 | This agent assembles all generated content (text, visual, audio) into 5 | structured learning modules and packages. 
6 | """ 7 | 8 | from google.adk.agents.llm_agent import LlmAgent 9 | from ..tools.mcp_config import get_general_tools_mcp_toolset 10 | 11 | # Gemini model for content assembly 12 | GEMINI_MODEL = "gemini-2.0-flash" 13 | 14 | ASSEMBLY_AGENT_PROMPT = """You are an Expert Content Assembly Agent specializing in organizing and structuring multi-modal educational content. 15 | 16 | Your role is to combine all generated content elements into cohesive, well-structured learning modules. 17 | 18 | ## INPUTS 19 | **Content Analysis:** 20 | {content_analysis} 21 | 22 | **Visual Content:** 23 | {visual_content} 24 | 25 | **Audio Content:** 26 | {audio_content} 27 | 28 | ## YOUR TASKS 29 | 30 | ### 1. Content Organization 31 | - Review all generated content elements 32 | - Organize content according to learning objectives 33 | - Create logical flow and structure 34 | - Ensure comprehensive coverage of topics 35 | 36 | ### 2. Learning Module Assembly 37 | - Combine text, visual, and audio elements effectively 38 | - Create structured learning sequences 39 | - Design content progression from basic to advanced 40 | - Integrate assessments and interactive elements 41 | 42 | ### 3. Quality Integration 43 | - Ensure consistency across all content types 44 | - Verify alignment with learning objectives 45 | - Check for gaps or redundancies 46 | - Optimize content flow and pacing 47 | 48 | ### 4. Package Creation 49 | - Create complete learning module packages 50 | - Generate module metadata and descriptions 51 | - Provide usage guidelines and instructions 52 | - Include assessment and evaluation components 53 | 54 | ## ASSEMBLY GUIDELINES 55 | 56 | 1. **Content Mapping**: Map each content element to learning objectives 57 | 2. **Sequencing**: Order content for optimal learning progression 58 | 3. **Integration**: Seamlessly blend text, visual, and audio elements 59 | 4. **Assessment**: Include appropriate evaluation methods 60 | 5. 
**Documentation**: Provide clear usage instructions 61 | 62 | ## OUTPUT REQUIREMENTS 63 | 64 | Assemble content and return: 65 | { 66 | "learning_modules": [ 67 | { 68 | "module_id": "module_001", 69 | "title": "Module Title", 70 | "description": "Module description and overview", 71 | "duration_minutes": 45, 72 | "difficulty_level": "beginner|intermediate|advanced", 73 | "learning_objectives": ["objective1", "objective2"], 74 | "content_sequence": [ 75 | { 76 | "section_title": "Introduction", 77 | "content_elements": [ 78 | { 79 | "type": "text|visual|audio", 80 | "content_id": "element_id", 81 | "description": "element description", 82 | "file_path": "path/to/content" 83 | } 84 | ] 85 | } 86 | ], 87 | "assessments": [ 88 | { 89 | "type": "quiz|assignment|project", 90 | "title": "Assessment title", 91 | "questions": ["question1", "question2"] 92 | } 93 | ] 94 | } 95 | ], 96 | "module_summary": { 97 | "total_modules": 2, 98 | "total_duration_minutes": 90, 99 | "content_distribution": { 100 | "text_elements": 8, 101 | "visual_elements": 5, 102 | "audio_elements": 4 103 | }, 104 | "quality_metrics": { 105 | "content_completeness": 0.95, 106 | "objective_coverage": 0.90, 107 | "content_balance": 0.85 108 | } 109 | }, 110 | "usage_instructions": { 111 | "deployment_guide": "How to deploy these modules", 112 | "customization_options": "Available customization", 113 | "technical_requirements": "System requirements" 114 | } 115 | } 116 | 117 | Store results in session state under "assembled_content" key.""" 118 | 119 | # Define the Content Assembly Agent 120 | assembly_agent = LlmAgent( 121 | name="ContentAssemblyAgent", 122 | model=GEMINI_MODEL, 123 | instruction=ASSEMBLY_AGENT_PROMPT, 124 | description="Assembles multi-modal content into structured learning modules with assessments", 125 | tools=[ 126 | get_general_tools_mcp_toolset() 127 | ], 128 | output_key="assembled_content" 129 | ) 130 | -------------------------------------------------------------------------------- /CONTRIBUTING.md: -------------------------------------------------------------------------------- 1 | # Contributing to Awesome ADK Agents 2 | 3 | First off, thank you for considering contributing to Awesome ADK Agents! This repository thrives on community contributions, and your help is essential for making it a valuable resource for ADK developers worldwide. 4 | 5 | ## Code of Conduct 6 | 7 | This project adheres to a Code of Conduct. By participating, you are expected to uphold this code. Please report unacceptable behavior to the repository maintainers. 8 | 9 | ## How Can I Contribute? 10 | 11 | ### Adding a New Agent 12 | 13 | The most valuable contribution is adding new, well-documented agent implementations: 14 | 15 | 1. **Decide on an agent concept** that isn't already covered 16 | 2. **Follow the standard agent structure**: 17 | 18 | ``` 19 | agent-name/ 20 | ├── README.md # Description, usage, examples 21 | ├── agent_name/ # Main agent code 22 | ├── __init__.py 23 | ├── agent.py # Core agent definition 24 | ├── tools/ # Custom tools 25 | ├── sub_agents/ # (If applicable) 26 | └── shared_libraries/ # Utilities and shared code 27 | ``` 28 | 29 | 3. **Create thorough documentation** including: 30 | - What the agent does 31 | - Prerequisites and setup 32 | - Usage examples 33 | - Configuration options 34 | - Expected outputs 35 | 36 | ### Improving Existing Agents 37 | 38 | If you'd like to enhance an existing agent: 39 | 40 | 1. **Fix bugs** - Repair any non-working functionality 41 | 2. 
**Add features** - Expand the agent's capabilities 42 | 3. **Improve performance** - Optimize the agent's operation 43 | 4. **Enhance documentation** - Make the agent easier to understand and use 44 | 5. **Add Callbacks** - Add before and after callbacks to the agent's lifecycle for better control 45 | 46 | ### Documentation Contributions 47 | 48 | Documentation is crucial for this project: 49 | 50 | 1. **Update READMEs** - Keep descriptions and usage instructions current 51 | 2. **Add examples** - Create sample code showing how to use agents 52 | 3. **Write tutorials** - Create step-by-step guides for building or extending agents 53 | 4. **Improve main documentation** - Enhance the repository's primary documentation 54 | 55 | ### Testing 56 | 57 | Quality testing helps ensure agents work as expected: 58 | 59 | 1. **Write unit tests** - Test individual agent components 60 | 2. **Create integration tests** - Test complete agent functionality 61 | 3. **Build evaluation datasets** - Create standard datasets for testing agent performance 62 | 4. **Document testing procedures** - Help others understand how to test agents 63 | 64 | ## Submission Process 65 | 66 | 1. **Fork the Repository** - Create your own fork of the project 67 | 2. **Create a Branch** - Make a new branch for your contribution 68 | 69 | ```bash 70 | git checkout -b feature/amazing-agent 71 | ``` 72 | 73 | 3. **Make Changes** - Implement your contribution following the structure guidelines 74 | 4. **Test Your Changes** - Ensure your agent works as expected 75 | 5. **Commit Changes** - Use clear commit messages 76 | 77 | ```bash 78 | git commit -m 'Add amazing agent for [specific purpose]' 79 | ``` 80 | 81 | 6. **Push to Your Fork** - Upload your changes 82 | 83 | ```bash 84 | git push origin feature/amazing-agent 85 | ``` 86 | 87 | 7. **Submit a Pull Request** - Open a PR against the main repository 88 | 89 | ## Pull Request Guidelines 90 | 91 | When submitting a pull request, please: 92 | 93 | 1. **Explain your changes** - Describe what your agent does and why it's valuable 94 | 2. **Reference issues** - Link to any related issues 95 | 3. **Follow coding standards** - Match the style of the existing codebase (at least try to) 96 | 4. **Update documentation** - Ensure documentation reflects your changes 97 | 98 | ## Style Guidelines 99 | 100 | ### Code Style 101 | 102 | - Follow PEP 8 for Python code 103 | - Use clear, descriptive variable and function names 104 | - Include docstrings for all functions, classes, and modules 105 | - Comment complex logic 106 | 107 | ### Documentation Style 108 | 109 | - Use clear, concise language 110 | - Include code examples 111 | - Structure with Markdown headings 112 | - Add screenshots or diagrams where helpful 113 | 114 | ## Questions? 115 | 116 | If you have any questions about contributing, please open an issue or reach out to the repository maintainers. 117 | 118 | Thank you for your contributions! 119 | -------------------------------------------------------------------------------- /my-adk-agents/learning-content-system(WIP)/agents/loop_agents/quality_assessor.py: -------------------------------------------------------------------------------- 1 | """ 2 | Quality Assessment Agent - Loop Workflow 3 | 4 | This agent continuously evaluates content quality and determines if 5 | further refinement iterations are needed.
6 | """ 7 | 8 | from google.adk.agents.llm_agent import LlmAgent 9 | from ..tools.mcp_config import get_sentiment_analysis_mcp_toolset, get_general_tools_mcp_toolset 10 | 11 | # Gemini model for quality assessment 12 | GEMINI_MODEL = "gemini-2.0-flash" 13 | 14 | QUALITY_ASSESSOR_PROMPT = """You are an Expert Quality Assessment Agent specializing in educational content evaluation and quality control. 15 | 16 | Your role is to evaluate all generated content against educational standards and determine if additional refinement is needed. 17 | 18 | ## INPUTS 19 | **Assembled Content:** 20 | {assembled_content} 21 | 22 | **Quality Standards:** 23 | {quality_standards} 24 | 25 | **Previous Quality Scores:** 26 | {previous_scores} 27 | 28 | ## YOUR TASKS 29 | 30 | ### 1. Comprehensive Quality Evaluation 31 | - Assess content accuracy and completeness 32 | - Evaluate clarity and readability 33 | - Check alignment with learning objectives 34 | - Analyze engagement and educational effectiveness 35 | 36 | ### 2. Multi-Modal Quality Check 37 | - Evaluate visual content quality and relevance 38 | - Assess audio content clarity and pacing 39 | - Check text content structure and flow 40 | - Verify content integration and consistency 41 | 42 | ### 3. Educational Standards Compliance 43 | - Check against educational best practices 44 | - Verify accessibility standards compliance 45 | - Ensure appropriate difficulty progression 46 | - Validate assessment alignment 47 | 48 | ### 4. Improvement Recommendations 49 | - Identify specific areas needing improvement 50 | - Provide detailed feedback for refinement 51 | - Suggest concrete enhancement strategies 52 | - Prioritize improvement areas by impact 53 | 54 | ## QUALITY METRICS 55 | 56 | Evaluate each aspect on a scale of 0.0 to 1.0: 57 | - **Accuracy**: Factual correctness and reliability 58 | - **Clarity**: Clear communication and understanding 59 | - **Completeness**: Comprehensive coverage of topics 60 | - **Engagement**: Interactive and interesting content 61 | - **Accessibility**: Usable by diverse learners 62 | - **Alignment**: Matches learning objectives 63 | - **Structure**: Logical organization and flow 64 | - **Quality**: Professional presentation standards 65 | 66 | ## OUTPUT REQUIREMENTS 67 | 68 | Return quality assessment: 69 | { 70 | "quality_assessment": { 71 | "overall_score": 0.85, 72 | "individual_scores": { 73 | "accuracy": 0.90, 74 | "clarity": 0.85, 75 | "completeness": 0.80, 76 | "engagement": 0.85, 77 | "accessibility": 0.75, 78 | "alignment": 0.90, 79 | "structure": 0.85, 80 | "presentation_quality": 0.80 81 | }, 82 | "content_type_scores": { 83 | "text_content": 0.85, 84 | "visual_content": 0.80, 85 | "audio_content": 0.75, 86 | "integration": 0.85 87 | } 88 | }, 89 | "improvement_needed": true, 90 | "critical_issues": [ 91 | "Audio content needs better pacing", 92 | "Visual elements need higher contrast" 93 | ], 94 | "improvement_recommendations": [ 95 | { 96 | "area": "audio_content", 97 | "priority": "high", 98 | "issue": "Speech pacing too fast for beginners", 99 | "recommendation": "Add pauses and slow down narration", 100 | "estimated_impact": 0.15 101 | } 102 | ], 103 | "quality_thresholds": { 104 | "minimum_overall_score": 0.80, 105 | "minimum_individual_scores": 0.75, 106 | "current_status": "NEEDS_IMPROVEMENT|ACCEPTABLE|EXCELLENT" 107 | } 108 | } 109 | 110 | ## DECISION CRITERIA 111 | 112 | Content is ready when: 113 | - Overall score >= 0.85 114 | - All individual scores >= 0.80 115 | - No critical issues remain 116 | - All 
learning objectives are properly addressed 117 | 118 | Use sentiment analysis and other MCP tools to evaluate content quality objectively. 119 | Store results in session state under "quality_assessment" key.""" 120 | 121 | # Define the Quality Assessment Agent 122 | quality_assessor_agent = LlmAgent( 123 | name="QualityAssessorAgent", 124 | model=GEMINI_MODEL, 125 | instruction=QUALITY_ASSESSOR_PROMPT, 126 | description="Evaluates content quality against educational standards and determines refinement needs", 127 | tools=[ 128 | get_sentiment_analysis_mcp_toolset(), 129 | get_general_tools_mcp_toolset() 130 | ], 131 | output_key="quality_assessment" 132 | ) 133 | -------------------------------------------------------------------------------- /my-adk-agents/job-interview-agent/.gitignore: -------------------------------------------------------------------------------- 1 | # Byte-compiled / optimized / DLL files 2 | __pycache__/ 3 | *.py[cod] 4 | *$py.class 5 | 6 | # C extensions 7 | *.so 8 | 9 | # Distribution / packaging 10 | .Python 11 | build/ 12 | develop-eggs/ 13 | dist/ 14 | downloads/ 15 | eggs/ 16 | .eggs/ 17 | lib/ 18 | lib64/ 19 | parts/ 20 | sdist/ 21 | var/ 22 | wheels/ 23 | share/python-wheels/ 24 | *.egg-info/ 25 | .installed.cfg 26 | *.egg 27 | MANIFEST 28 | 29 | # PyInstaller 30 | # Usually these files are written by a python script from a template 31 | # before PyInstaller builds the exe, so as to inject date/other infos into it. 32 | *.manifest 33 | *.spec 34 | 35 | # Installer logs 36 | pip-log.txt 37 | pip-delete-this-directory.txt 38 | 39 | # Unit test / coverage reports 40 | htmlcov/ 41 | .tox/ 42 | .nox/ 43 | .coverage 44 | .coverage.* 45 | .cache 46 | nosetests.xml 47 | coverage.xml 48 | *.cover 49 | *.py,cover 50 | .hypothesis/ 51 | .pytest_cache/ 52 | cover/ 53 | 54 | # Translations 55 | *.mo 56 | *.pot 57 | 58 | # Django stuff: 59 | *.log 60 | local_settings.py 61 | db.sqlite3 62 | db.sqlite3-journal 63 | 64 | # Flask stuff: 65 | instance/ 66 | .webassets-cache 67 | 68 | # Scrapy stuff: 69 | .scrapy 70 | 71 | # Sphinx documentation 72 | docs/_build/ 73 | 74 | # PyBuilder 75 | .pybuilder/ 76 | target/ 77 | 78 | # Jupyter Notebook 79 | .ipynb_checkpoints 80 | 81 | # IPython 82 | profile_default/ 83 | ipython_config.py 84 | 85 | # pyenv 86 | # For a library or package, you might want to ignore these files since the code is 87 | # intended to run in multiple environments; otherwise, check them in: 88 | # .python-version 89 | 90 | # pipenv 91 | # According to pypa/pipenv#598, it is recommended to include Pipfile.lock in version control. 92 | # However, in case of collaboration, if having platform-specific dependencies or dependencies 93 | # having no cross-platform support, pipenv may install dependencies that don't work, or not 94 | # install all needed dependencies. 95 | #Pipfile.lock 96 | 97 | # poetry 98 | # Similar to Pipfile.lock, it is generally recommended to include poetry.lock in version control. 99 | # This is especially recommended for binary packages to ensure reproducibility, and is more 100 | # commonly ignored for libraries. 101 | # https://python-poetry.org/docs/basic-usage/#commit-your-poetrylock-file-to-version-control 102 | #poetry.lock 103 | 104 | # pdm 105 | # Similar to Pipfile.lock, it is generally recommended to include pdm.lock in version control. 106 | #pdm.lock 107 | # pdm stores project-wide configurations in .pdm.toml, but it is recommended to not include it 108 | # in version control. 
108 | # in version control.
109 | # https://pdm.fming.dev/#use-with-ide 110 | .pdm.toml 111 | 112 | # PEP 582; used by e.g. github.com/David-OConnor/pyflow and github.com/pdm-project/pdm 113 | __pypackages__/ 114 | 115 | # Celery stuff 116 | celerybeat-schedule 117 | celerybeat.pid 118 | 119 | # SageMath parsed files 120 | *.sage.py 121 | 122 | # Environments 123 | .env 124 | .venv 125 | env/ 126 | venv/ 127 | ENV/ 128 | env.bak/ 129 | venv.bak/ 130 | 131 | # Spyder project settings 132 | .spyderproject 133 | .spyproject 134 | 135 | # Rope project settings 136 | .ropeproject 137 | 138 | # mkdocs documentation 139 | /site 140 | 141 | # mypy 142 | .mypy_cache/ 143 | .dmypy.json 144 | dmypy.json 145 | 146 | # Pyre type checker 147 | .pyre/ 148 | 149 | # pytype static type analyzer 150 | .pytype/ 151 | 152 | # Cython debug symbols 153 | cython_debug/ 154 | 155 | # PyCharm 156 | # JetBrains specific template is maintained in a separate JetBrains.gitignore that can 157 | # be added to the global gitignore or merged into this project gitignore. For a PyCharm 158 | # project, it is generally recommended to exclude the entire .idea directory. 159 | .idea/ 160 | 161 | # VSCode 162 | .vscode/ 163 | *.code-workspace 164 | 165 | # Interview Agent Specific Files 166 | # Authentication and credentials 167 | credentials.json 168 | token.pickle 169 | .env 170 | 171 | # Session data and logs 172 | interview_agent.log 173 | *.log 174 | sessions/ 175 | session_*.json 176 | 177 | # Database files 178 | *.db 179 | *.sqlite 180 | *.sqlite3 181 | 182 | # Temporary files 183 | temp/ 184 | tmp/ 185 | .tmp/ 186 | *~ 187 | .DS_Store 188 | Thumbs.db 189 | 190 | # User data 191 | user_profiles/ 192 | user_sessions/ 193 | data/user_* 194 | 195 | # Test results 196 | test_results/ 197 | .coverage 198 | htmlcov/ 199 | 200 | # Docker 201 | .dockerignore 202 | 203 | # Audio files (if voice integration is added) 204 | *.wav 205 | *.mp3 206 | *.m4a 207 | audio_recordings/ 208 | 209 | # Backup files 210 | *.bak 211 | *.backup 212 | *_backup.* 213 | -------------------------------------------------------------------------------- /my-adk-agents/job-interview-agent/app/interview_agent/utils/__init__.py: -------------------------------------------------------------------------------- 1 | """ 2 | Utility functions for the Job Interview Roleplay Agent. 3 | """ 4 | 5 | import datetime 6 | from typing import Dict, Any, List 7 | import json 8 | import os 9 | from pathlib import Path 10 | 11 | 12 | def get_current_time() -> str: 13 | """Get current date and time formatted for display.""" 14 | now = datetime.datetime.now() 15 | return now.strftime("%Y-%m-%d %H:%M:%S (%A)") 16 | 17 | 18 | def format_interview_duration(minutes: int) -> str: 19 | """Format interview duration in human-readable format.""" 20 | if minutes < 60: 21 | return f"{minutes} minutes" 22 | else: 23 | hours = minutes // 60 24 | remaining_minutes = minutes % 60 25 | if remaining_minutes == 0: 26 | return f"{hours} hour{'s' if hours > 1 else ''}" 27 | else: 28 | return f"{hours} hour{'s' if hours > 1 else ''} {remaining_minutes} minutes" 29 | 30 | 31 | def calculate_interview_score(answers: List[Dict[str, Any]]) -> Dict[str, Any]: 32 | """ 33 | Calculate interview performance score based on answers. 
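Note: answers without a "score" key are ignored; the remaining scores are averaged overall and per question type ("technical", "behavioral", "communication", "problem_solving"), and the overall score is rounded to one decimal place.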
34 | 35 | Args: 36 | answers: List of answer dictionaries with scores and feedback 37 | 38 | Returns: 39 | Dictionary with overall score and breakdown 40 | """ 41 | if not answers: 42 | return { 43 | "overall_score": 0, 44 | "breakdown": {}, 45 | "total_questions": 0 46 | } 47 | 48 | scores = [] 49 | categories = { 50 | "technical": [], 51 | "behavioral": [], 52 | "communication": [], 53 | "problem_solving": [] 54 | } 55 | 56 | for answer in answers: 57 | if "score" in answer: 58 | scores.append(answer["score"]) 59 | 60 | # Categorize the score 61 | question_type = answer.get("type", "general") 62 | if question_type in categories: 63 | categories[question_type].append(answer["score"]) 64 | 65 | overall_score = sum(scores) / len(scores) if scores else 0 66 | 67 | breakdown = {} 68 | for category, category_scores in categories.items(): 69 | if category_scores: 70 | breakdown[category] = sum(category_scores) / len(category_scores) 71 | else: 72 | breakdown[category] = 0 73 | 74 | return { 75 | "overall_score": round(overall_score, 1), 76 | "breakdown": breakdown, 77 | "total_questions": len(answers), 78 | "max_score": 10.0 79 | } 80 | 81 | 82 | def save_session_data(session_id: str, data: Dict[str, Any]) -> bool: 83 | """ 84 | Save interview session data to file. 85 | 86 | Args: 87 | session_id: Unique session identifier 88 | data: Session data to save 89 | 90 | Returns: 91 | True if successful, False otherwise 92 | """ 93 | try: 94 | sessions_dir = Path("interview_sessions") 95 | sessions_dir.mkdir(exist_ok=True) 96 | 97 | file_path = sessions_dir / f"{session_id}.json" 98 | with open(file_path, 'w') as f: 99 | json.dump(data, f, indent=2, default=str) 100 | 101 | return True 102 | except Exception as e: 103 | print(f"Error saving session data: {e}") 104 | return False 105 | 106 | 107 | def load_session_data(session_id: str) -> Dict[str, Any]: 108 | """ 109 | Load interview session data from file. 110 | 111 | Args: 112 | session_id: Unique session identifier 113 | 114 | Returns: 115 | Session data dictionary or empty dict if not found 116 | """ 117 | try: 118 | sessions_dir = Path("interview_sessions") 119 | file_path = sessions_dir / f"{session_id}.json" 120 | 121 | if file_path.exists(): 122 | with open(file_path, 'r') as f: 123 | return json.load(f) 124 | else: 125 | return {} 126 | except Exception as e: 127 | print(f"Error loading session data: {e}") 128 | return {} 129 | 130 | 131 | def generate_session_id() -> str: 132 | """Generate a unique session ID.""" 133 | timestamp = datetime.datetime.now().strftime("%Y%m%d_%H%M%S") 134 | return f"interview_{timestamp}" 135 | 136 | 137 | def validate_email(email: str) -> bool: 138 | """Simple email validation.""" 139 | import re 140 | pattern = r'^[a-zA-Z0-9._%+-]+@[a-zA-Z0-9.-]+\.[a-zA-Z]{2,}$' 141 | return re.match(pattern, email) is not None 142 | 143 | 144 | def parse_datetime_string(date_str: str) -> datetime.datetime: 145 | """ 146 | Parse various datetime string formats. 
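Note: a fixed list of common formats is tried in order, and a ValueError is raised if none of them match.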
147 | 148 | Args: 149 | date_str: Date/time string in various formats 150 | 151 | Returns: 152 | Parsed datetime object 153 | """ 154 | formats = [ 155 | "%Y-%m-%d %H:%M", 156 | "%Y-%m-%d %H:%M:%S", 157 | "%m/%d/%Y %H:%M", 158 | "%d/%m/%Y %H:%M", 159 | "%Y-%m-%d", 160 | "%m/%d/%Y", 161 | "%d/%m/%Y" 162 | ] 163 | 164 | for fmt in formats: 165 | try: 166 | return datetime.datetime.strptime(date_str, fmt) 167 | except ValueError: 168 | continue 169 | 170 | raise ValueError(f"Unable to parse datetime string: {date_str}") 171 | -------------------------------------------------------------------------------- /my-adk-agents/education-path-advisor/eval/data/education-advisor.test.json: -------------------------------------------------------------------------------- 1 | { 2 | "eval_set_id": "evalset8758d3", 3 | "name": "eval/data/education-advisor.test.json", 4 | "description": null, 5 | "eval_cases": [ 6 | { 7 | "eval_id": "intro_only", 8 | "conversation": [ 9 | { 10 | "invocation_id": "e-79790286-0575-4e5f-a82c-2046aa181a40", 11 | "user_content": { 12 | "parts": [ 13 | { 14 | "video_metadata": null, 15 | "thought": null, 16 | "inline_data": null, 17 | "code_execution_result": null, 18 | "executable_code": null, 19 | "file_data": null, 20 | "function_call": null, 21 | "function_response": null, 22 | "text": "Hello. What can you do for me?" 23 | } 24 | ], 25 | "role": "user" 26 | }, 27 | "final_response": { 28 | "parts": [ 29 | { 30 | "video_metadata": null, 31 | "thought": null, 32 | "inline_data": null, 33 | "code_execution_result": null, 34 | "executable_code": null, 35 | "file_data": null, 36 | "function_call": null, 37 | "function_response": null, 38 | "text": "Hello! I'm here to help you navigate India's complex educational landscape.\nMy main goal is to provide you with comprehensive educational guidance by guiding you through a step-by-step process.\nWe'll work together to analyze education options, develop effective pathway strategies, define clear implementation plans,\nand thoroughly evaluate the overall risks and opportunities.\n\nRemember that at each step you can always ask to 'show me the detailed result as markdown'.\n\nReady to get started?\n\nImportant Disclaimer: For Educational and Informational Purposes Only.\nThe information and educational pathway strategies provided by this tool, including any analysis,\ncommentary, or potential scenarios, are generated by an AI model and are for educational and informational purposes only.\nThey do not constitute, and should not be interpreted as, professional educational advice, admission guarantees,\nor offers for placement in any educational institutions.\nGoogle and its affiliates make no representations or warranties of any kind, express or implied, about the completeness,\naccuracy, reliability, suitability, or availability with respect to the information provided. Any reliance you place\non such information is therefore strictly at your own risk.\nEducational decisions should not be made based solely on the information provided here.\nThe educational landscape is subject to changes, and admission criteria may vary.\nYou should conduct your own thorough research and consult with qualified education counselors before making any significant educational decisions.\nBy using this tool and reviewing these strategies, you acknowledge that you understand this disclaimer and agree that\nGoogle and its affiliates are not liable for any consequences arising from your use of or reliance on this information." 
39 | } 40 | ], 41 | "role": null 42 | }, 43 | "intermediate_data": { 44 | "tool_uses": [], 45 | "intermediate_responses": [] 46 | }, 47 | "creation_timestamp": 1748006923.754014 48 | }, 49 | { 50 | "invocation_id": "e-9bccf2bb-4edc-4640-a92a-53fe5eaf4db7", 51 | "user_content": { 52 | "parts": [ 53 | { 54 | "video_metadata": null, 55 | "thought": null, 56 | "inline_data": null, 57 | "code_execution_result": null, 58 | "executable_code": null, 59 | "file_data": null, 60 | "function_call": null, 61 | "function_response": null, 62 | "text": "Yes please." 63 | } 64 | ], 65 | "role": "user" 66 | }, 67 | "final_response": { 68 | "parts": [ 69 | { 70 | "video_metadata": null, 71 | "thought": null, 72 | "inline_data": null, 73 | "code_execution_result": null, 74 | "executable_code": null, 75 | "file_data": null, 76 | "function_call": null, 77 | "function_response": null, 78 | "text": "Great! Let's start with the first step: Gathering Education Data Analysis.\n\nI'll be using our `data_analyst` subagent for this.\n\nPlease provide the education level and stream you are interested in (e.g., Undergraduate in Engineering, Postgraduate in Business)." 79 | } 80 | ], 81 | "role": null 82 | }, 83 | "intermediate_data": { 84 | "tool_uses": [], 85 | "intermediate_responses": [] 86 | }, 87 | "creation_timestamp": 1748006938.006178 88 | } 89 | ], 90 | "session_input": { 91 | "app_name": "financial_advisor", 92 | "user_id": "user", 93 | "state": {} 94 | }, 95 | "creation_timestamp": 1748006957.655065 96 | } 97 | ], 98 | "creation_timestamp": 1748005920.0499456 99 | } 100 | -------------------------------------------------------------------------------- /my-adk-agents/academic-research-assistant/academic_research_assistant/prompts.py: -------------------------------------------------------------------------------- 1 | """Defines the root prompts for the Academic Research Assistant agent. 2 | 3 | This module contains the primary instruction prompt for the root agent that 4 | orchestrates the Academic Research Assistant workflow. The prompt defines: 5 | 6 | 1. The agent's role as an orchestrator of the research process 7 | 2. The workflow sequence for gathering inputs and invoking sub-agents 8 | 3. Key constraints and error handling procedures 9 | 4. Example interactions to guide the agent's behavior 10 | 5. Edge case handling for various input scenarios 11 | 12 | The ROOT_PROMPT follows a structured format with sections for: 13 | - Gathering required inputs from users (research topic and profile URL) 14 | - Executing the workflow in the correct sequence 15 | - Handling edge cases and errors appropriately 16 | 17 | This prompt is designed to ensure the agent maintains a consistent interaction 18 | pattern while effectively managing the research workflow. 19 | """ 20 | 21 | ROOT_PROMPT = """ 22 | # Agent: root_orchestrator 23 | # Role: Execute a deterministic, multi-agent workflow for academic research. 24 | # UX: Conversational, guided, and error-resilient. 25 | 26 | 27 | You are the root orchestrator for an AI Research Assistant. Your primary function is to manage a workflow by delegating tasks to specialized sub-agents. You will greet the user, collect initial inputs, and then execute a state machine based on the success or failure of each step. 28 | 29 | 30 | 1. On initial interaction, greet the user with the following message: 31 | "Hello! I'm your AI Research Assistant. I'll help you find the most relevant and recent academic work based on your own research background. 32 | 33 | Here's how it works: 34 | 1. 
I'll analyze your academic profile. 35 | 2. I'll then search for new papers related to your topic. 36 | 3. I'll generate a personalized report comparing those papers to your work. 37 | 38 | **To begin, I need two things:** 39 | 1. Your research topic or area of interest 40 | 2. A link to your public academic profile (like Google Scholar)" 41 | 2. Wait for the user to provide both a research topic and a profile URL. Do not proceed without both. 42 | 43 | 44 | 1. **Trigger:** User provides a research topic AND a profile URL. 45 | 2. **Action:** 46 | * You MUST first respond with the exact text: "Great, analyzing your academic profile now..." 47 | * You MUST then immediately call the `profiler_agent` tool. 48 | 3. **Transition:** 49 | * On `profiler_agent` success → Proceed to . 50 | * On `profiler_agent` failure (returns a `PROFILING_ERROR`) → Halt and report the specific error to the user (see ). 51 | 52 | 53 | 1. **Trigger:** Successful completion of the `profiler_agent`. 54 | 2. **Action:** 55 | * You MUST first respond with the exact text: "Thanks! Now I'll search for relevant papers published recently..." 56 | * You MUST then immediately call the `searcher_agent` tool with the keywords from the previous step. 57 | 3. **Transition:** 58 | * On `searcher_agent` success → Proceed to . 59 | * On `searcher_agent` failure (returns a `SEARCH_ERROR`) → Halt and report the specific error to the user (see ). 60 | 61 | 62 | 1. **Trigger:** Successful completion of the `searcher_agent`. The list of papers it found is now in the context. 63 | 2. **Action:** 64 | * You MUST first respond with the exact text: "Found some strong matches! Generating your comparison report now..." 65 | * You MUST then immediately call the `comparison_root_agent` tool. This tool will automatically use the researcher's profile keywords and the newly found list of papers from the context. 66 | 3. **Transition:** 67 | * On `comparison_root_agent` success (returns the final report) → Proceed to . 68 | * On `comparison_root_agent` failure → Halt and report a generic failure message. 69 | 70 | 71 | 1. **Trigger:** Successful completion of the `comparison_root_agent`. 72 | 2. **Action:** Present the complete, formatted report received from the `comparison_root_agent` directly to the user. The workflow is now complete. 73 | 74 | 75 | - If a sub-agent returns `PROFILING_ERROR: Invalid Content` or `PROFILING_ERROR: Sparse Profile`, respond with: "I couldn't read your academic profile. Could you check the link and try again?" 76 | - If a sub-agent returns `SEARCH_ERROR: No papers found`, respond with: "No strong matches found for your topic. Try broadening your search." 77 | - If a sub-agent returns `SEARCH_ERROR: Primary search failed` and mentions SerpAPI, respond with: "Both our primary and fallback search methods failed. This could be due to temporary service limitations or missing API keys. Please try again later." 78 | - If a sub-agent returns `SEARCH_ERROR: SERPAPI_ERROR`, respond with: "The fallback search service (SerpAPI) encountered an error. Please check your SerpAPI key configuration or try again later." 79 | - If a sub-agent returns any other `SEARCH_ERROR`, respond with: "An unexpected error occurred while searching for papers. Please try again." 80 | 81 | 82 | The paper search process uses a robust two-tier approach: 83 | 1. Primary Method: A Scrapy-based Google Scholar scraper that handles rate limiting and blocking 84 | 2. 
Fallback Method: If the primary method fails, the system automatically falls back to using SerpAPI (if configured) 85 | 86 | This dual approach ensures maximum reliability when searching for academic papers. 87 | """ 88 | -------------------------------------------------------------------------------- /my-adk-agents/education-path-advisor/README.md: -------------------------------------------------------------------------------- 1 | # 🎓 Education Path Advisor for India 2 | 3 | > **Empowering Indian students and parents to make smarter education and career decisions!** 4 | 5 | --- 6 | 7 | ## 🚦 What is this? 8 | 9 | **Education Path Advisor** is your AI-powered guide to India's complex education system. Whether you're a student, parent, or counselor, this tool helps you: 10 | 11 | - Discover the best entrance exams and colleges for your goals 12 | - Get step-by-step plans for exam prep, applications, and documentation 13 | - Understand reservation policies and state-specific requirements 14 | - Assess risks and always have a backup plan 15 | 16 | No more confusion—just clear, actionable advice tailored to your unique situation! 17 | 18 | --- 19 | 20 | ## 🗺️ System Architecture (Mermaid Diagram) 21 | 22 | System Architecture 23 | 24 | --- 25 | 26 | ## 🧩 Agent Workflow (Mermaid Sequence Diagram) 27 | 28 | Agent Workflow 29 | 30 | --- 31 | 32 | ## ✨ Why You'll Love It 33 | 34 | - **Personalized Pathways:** Recommendations based on your interests, aptitude, timeline, location, and reservation category. 35 | - **Covers All Major Exams:** JEE, NEET, CUET, state CETs, and more. 36 | - **Stepwise Action Plans:** Never miss a deadline or document. 37 | - **Risk & Backup Analysis:** Know your options if things don't go as planned. 38 | - **Region & Language Aware:** Considers vernacular preferences, state quotas, and local institutions. 39 | - **Privacy First:** No personal data stored—your queries are safe. 40 | 41 | --- 42 | 43 | ## 🧠 How It Works (Agent Prompts) 44 | 45 | ### 1. Education Data Analyst 46 | 47 | - **Role:** Generates a comprehensive, source-based educational landscape analysis for a chosen field in India using only Google Search. 48 | - **Output:** `education_data_analysis_output` — A structured report covering institutions, entrance exams, reservation, career prospects, alternative pathways, and key sources. 49 | 50 | ### 2. Pathway Analyst 51 | 52 | - **Role:** Develops at least five distinct, actionable pathway strategies based on your aptitude, timeline, location, and the data analyst’s report. 53 | - **Output:** `proposed_pathway_strategies_output` — Each strategy details target institutions, exams, costs, career outcomes, and a comparative analysis. 54 | 55 | ### 3. Implementation Analyst 56 | 57 | - **Role:** Creates a detailed, stepwise implementation plan for your selected pathway, including preparation, documentation, financial planning, logistics, and milestones. 58 | - **Output:** `implementation_plan_output` — A plan tailored to Indian realities, with timelines, resources, and contingency routes. 59 | 60 | ### 4. Risk Analyst 61 | 62 | - **Role:** Evaluates academic, financial, institutional, career, geographic, and psychological risks for your chosen pathway and plan, and proposes mitigation strategies. 63 | - **Output:** `final_risk_assessment_output` — A comprehensive risk report with actionable advice and alignment summary. 64 | 65 | --- 66 | 67 | ## 🖥️ Example Interactions 68 | 69 | ### 1. 
Engineering Aspirant 70 | 71 | **User:** I’m in 12th grade, interested in engineering. I want to get into a top college. 72 | **Agent:** Collects education_interest, analyzes and summarizes top exams, eligibility, and college pathways. Then, generates 5 pathway strategies (IIT, NIT, state colleges, private, diploma routes), and provides a timeline-aligned plan and risk report. 73 | 74 | ### 2. NEET with OBC Focus 75 | 76 | **User:** How do I prepare for NEET with a focus on OBC reservation? 77 | **Agent:** Provides a stepwise NEET prep plan, key dates, resources, and OBC documentation checklist. Highlights reservation-aware strategies and backup options. 78 | 79 | ### 3. Budget-Constrained Aspirant 80 | 81 | **User:** I’m good at science but can’t afford coaching. I live in a small town. 82 | **Agent:** Identifies low-cost government/open learning options, recommends scholarships and online coaching, and flags digital divide risks. 83 | 84 | ### 4. Vernacular Preference 85 | 86 | **User:** I want to study law but only in Hindi medium and only in UP or MP. 87 | **Agent:** Focuses on institutions offering law in Hindi in your preferred states, outlines logistics, and addresses regional job scope. 88 | 89 | ### 5. Gap Year Risk Assessment 90 | 91 | **User:** Assess the risks if I take a gap year for exam preparation. 92 | **Agent:** Returns academic, financial, and psychological risk analysis, with mitigation strategies and impact on future admissions. 93 | 94 | --- 95 | 96 | ## 🚀 Get Started 97 | 98 | 1. **Install Prerequisites** 99 | - Python 3.11+ 100 | - [Poetry](https://python-poetry.org/docs/) 101 | - Google Cloud Project & Google Cloud CLI ([Install Guide](https://cloud.google.com/sdk/docs/install)) 102 | 103 | 2. **Clone & Install** 104 | 105 | ```powershell 106 | git clone 107 | cd education-path-advisor 108 | poetry install 109 | ``` 110 | 111 | 3. **Configure Environment** 112 | - Rename `.env.example` to `.env` and fill in your Google API key: 113 | 114 | ```powershell 115 | $env:GOOGLE_API_KEY="" 116 | $env:GOOGLE_GENAI_USE_VERTEXAI="FALSE" 117 | ``` 118 | 119 | 4. **Run the Agent** 120 | - CLI: 121 | 122 | ```powershell 123 | poetry run adk run education_path_advisor 124 | ``` 125 | 126 | - Web UI: 127 | 128 | ```powershell 129 | poetry run adk web 130 | ``` 131 | 132 | --- 133 | 134 | ## 🧪 Test Your Setup 135 | 136 | Install dev dependencies: 137 | 138 | ```powershell 139 | poetry install --with dev 140 | ``` 141 | 142 | Run tests: 143 | 144 | ```powershell 145 | python3 -m pytest tests 146 | ``` 147 | 148 | --- 149 | 150 | ## 📁 Project Structure 151 | 152 | - `education_advisor/` — Main agent and sub-agents (data, pathway, implementation, risk) 153 | - `eval/`, `tests/` — Evaluation and test scripts 154 | 155 | --- 156 | 157 | ## ⚠️ Disclaimer 158 | 159 | All recommendations, plans, and outputs generated by this project are for educational and informational purposes only. They do not constitute legal, financial, or professional advice. Users should consult relevant authorities or professionals before making any decisions based on these outputs. 
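---

## 🐍 Run It from Python (Optional)

Prefer to call the advisor from your own script instead of `adk run`/`adk web`? The snippet below is a minimal, untested sketch built from the ADK `Runner`, `InMemorySessionService`, and `google.genai.types` APIs used elsewhere in this repository. The `education_advisor.agent.root_agent` import path and the `app_name`/`user_id`/`session_id` values are assumptions; adjust them to match your actual package layout.

```python
import asyncio

from google.adk.runners import Runner
from google.adk.sessions.in_memory_session_service import InMemorySessionService
from google.genai import types

# Assumed import path for the root agent; adjust to your package layout.
from education_advisor.agent import root_agent


async def main() -> None:
    # In-memory sessions are fine for a quick local experiment.
    session_service = InMemorySessionService()
    await session_service.create_session(
        app_name="education_advisor", user_id="user", session_id="demo"
    )

    runner = Runner(
        app_name="education_advisor",
        agent=root_agent,
        session_service=session_service,
    )

    # Send one user message and print the agent's final text response.
    message = types.Content(
        role="user", parts=[types.Part(text="Hello. What can you do for me?")]
    )
    async for event in runner.run_async(
        user_id="user", session_id="demo", new_message=message
    ):
        if event.is_final_response() and event.content and event.content.parts:
            print(event.content.parts[0].text)


if __name__ == "__main__":
    asyncio.run(main())
```

Run it from the project root so the `education_advisor` package is importable.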
160 | -------------------------------------------------------------------------------- /my-adk-agents/project-manager-agent/utils.py: -------------------------------------------------------------------------------- 1 | from google.genai import types 2 | 3 | 4 | # ANSI color codes for terminal output 5 | class Colors: 6 | RESET = "\033[0m" 7 | BOLD = "\033[1m" 8 | UNDERLINE = "\033[4m" 9 | 10 | # Foreground colors 11 | BLACK = "\033[30m" 12 | RED = "\033[31m" 13 | GREEN = "\033[32m" 14 | YELLOW = "\033[33m" 15 | BLUE = "\033[34m" 16 | MAGENTA = "\033[35m" 17 | CYAN = "\033[36m" 18 | WHITE = "\033[37m" 19 | 20 | # Background colors 21 | BG_BLACK = "\033[40m" 22 | BG_RED = "\033[41m" 23 | BG_GREEN = "\033[42m" 24 | BG_YELLOW = "\033[43m" 25 | BG_BLUE = "\033[44m" 26 | BG_MAGENTA = "\033[45m" 27 | BG_CYAN = "\033[46m" 28 | BG_WHITE = "\033[47m" 29 | 30 | 31 | def display_state( 32 | session_service, app_name, user_id, session_id, label="Current State" 33 | ): 34 | """Display the current session state in a formatted way.""" 35 | try: 36 | session = session_service.get_session( 37 | app_name=app_name, user_id=user_id, session_id=session_id 38 | ) 39 | 40 | # Format the output with clear sections 41 | print(f"\n{'-' * 10} {label} {'-' * 10}") 42 | 43 | # Handle the user name 44 | user_name = session.state.get("user_name", "Unknown") 45 | print(f"👤 User: {user_name}") 46 | 47 | # Handle projects 48 | projects = session.state.get("projects", []) 49 | if projects: 50 | print("📋 Projects:") 51 | for idx, project in enumerate(projects, 1): 52 | print( 53 | f" {idx}. {project['name']} (Due: {project['due_date']})") 54 | 55 | # Print tasks for this project 56 | tasks = project.get("tasks", []) 57 | if tasks: 58 | print(" Tasks:") 59 | for task_idx, task in enumerate(tasks, 1): 60 | status_emoji = "✅" if task["status"] == "completed" else "⏳" 61 | print( 62 | f" {task_idx}. {status_emoji} {task['name']} - Assigned to: {task['assigned_to']}") 63 | else: 64 | print("📋 Projects: None") 65 | 66 | # Handle team members 67 | team_members = session.state.get("team_members", []) 68 | if team_members: 69 | print("👥 Team Members:") 70 | for idx, member in enumerate(team_members, 1): 71 | print(f" {idx}. 
{member['name']} - {member['role']}") 72 | else: 73 | print("👥 Team Members: None") 74 | 75 | print("-" * (22 + len(label))) 76 | except Exception as e: 77 | print(f"Error displaying state: {e}") 78 | 79 | 80 | async def process_agent_response(event): 81 | """Process and display agent response events.""" 82 | # Log basic event info 83 | print(f"Event ID: {event.id}, Author: {event.author}") 84 | 85 | # Check for specific parts first 86 | has_specific_part = False 87 | if event.content and event.content.parts: 88 | for part in event.content.parts: 89 | if hasattr(part, "executable_code") and part.executable_code: 90 | # Access the actual code string via .code 91 | print( 92 | f" Debug: Agent generated code:\n```python\n{part.executable_code.code}\n```" 93 | ) 94 | has_specific_part = True 95 | elif hasattr(part, "code_execution_result") and part.code_execution_result: 96 | # Access outcome and output correctly 97 | print( 98 | f" Debug: Code Execution Result: {part.code_execution_result.outcome} - Output:\n{part.code_execution_result.output}" 99 | ) 100 | has_specific_part = True 101 | elif hasattr(part, "tool_response") and part.tool_response: 102 | # Print tool response information 103 | print(f" Tool Response: {part.tool_response.output}") 104 | has_specific_part = True 105 | # Also print any text parts found in any event for debugging 106 | elif hasattr(part, "text") and part.text and not part.text.isspace(): 107 | print(f" Text: '{part.text.strip()}'") 108 | 109 | # Check for final response after specific parts 110 | final_response = None 111 | if event.is_final_response(): 112 | if ( 113 | event.content 114 | and event.content.parts 115 | and hasattr(event.content.parts[0], "text") 116 | and event.content.parts[0].text 117 | ): 118 | final_response = event.content.parts[0].text.strip() 119 | # Use colors and formatting to make the final response stand out 120 | print( 121 | f"\n{Colors.BG_BLUE}{Colors.WHITE}{Colors.BOLD}╔══ AGENT RESPONSE ═════════════════════════════════════════{Colors.RESET}" 122 | ) 123 | print(f"{Colors.CYAN}{Colors.BOLD}{final_response}{Colors.RESET}") 124 | print( 125 | f"{Colors.BG_BLUE}{Colors.WHITE}{Colors.BOLD}╚═════════════════════════════════════════════════════════════{Colors.RESET}\n" 126 | ) 127 | else: 128 | print( 129 | f"\n{Colors.BG_RED}{Colors.WHITE}{Colors.BOLD}==> Final Agent Response: [No text content in final event]{Colors.RESET}\n" 130 | ) 131 | 132 | return final_response 133 | 134 | 135 | async def call_agent_async(runner, user_id, session_id, query): 136 | """Call the agent asynchronously with the user's query.""" 137 | content = types.Content(role="user", parts=[types.Part(text=query)]) 138 | print( 139 | f"\n{Colors.BG_GREEN}{Colors.BLACK}{Colors.BOLD}--- Running Query: {query} ---{Colors.RESET}" 140 | ) 141 | final_response_text = None 142 | 143 | # Display state before processing 144 | display_state( 145 | runner.session_service, 146 | runner.app_name, 147 | user_id, 148 | session_id, 149 | "State BEFORE processing", 150 | ) 151 | 152 | try: 153 | async for event in runner.run_async( 154 | user_id=user_id, session_id=session_id, new_message=content 155 | ): 156 | # Process each event and get the final response if available 157 | response = await process_agent_response(event) 158 | if response: 159 | final_response_text = response 160 | except Exception as e: 161 | print(f"Error during agent call: {e}") 162 | 163 | # Display state after processing the message 164 | display_state( 165 | runner.session_service, 166 | runner.app_name, 167 | 
user_id, 168 | session_id, 169 | "State AFTER processing", 170 | ) 171 | 172 | return final_response_text 173 | -------------------------------------------------------------------------------- /my-adk-agents/job-interview-agent/setup_calendar_auth.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python3 2 | # -*- coding: utf-8 -*- 3 | """ 4 | Google Calendar OAuth 2.0 Setup Script for Job Interview Agent 5 | 6 | This script helps you set up OAuth 2.0 authentication for Google Calendar API. 7 | Run this script once to generate the token.pickle file needed for calendar access. 8 | """ 9 | 10 | import os 11 | import pickle 12 | from google.auth.transport.requests import Request 13 | from google_auth_oauthlib.flow import InstalledAppFlow 14 | from googleapiclient.discovery import build 15 | import json 16 | 17 | 18 | class CalendarAuthSetup: 19 | def __init__(self): 20 | self.SCOPES = ['https://www.googleapis.com/auth/calendar'] 21 | self.credentials_file = 'credentials.json' 22 | self.token_file = 'token.pickle' 23 | 24 | def check_credentials_file(self): 25 | """Check if credentials.json exists and is valid""" 26 | if not os.path.exists(self.credentials_file): 27 | print(f"ERROR: {self.credentials_file} not found!") 28 | print("Please follow these steps:") 29 | print("1. Go to Google Cloud Console (https://console.cloud.google.com/)") 30 | print("2. Create a new project or select existing project") 31 | print("3. Enable Google Calendar API") 32 | print("4. Go to 'Credentials' > 'Create Credentials' > 'OAuth 2.0 Client ID'") 33 | print("5. Choose 'Desktop application'") 34 | print("6. Download the JSON file and save it as 'credentials.json'") 35 | return False 36 | 37 | try: 38 | with open(self.credentials_file, 'r') as f: 39 | creds_data = json.load(f) 40 | if 'installed' not in creds_data and 'web' not in creds_data: 41 | print("ERROR: Invalid credentials.json format!") 42 | print("Make sure you downloaded OAuth 2.0 credentials, not service account credentials.") 43 | return False 44 | return True 45 | except json.JSONDecodeError: 46 | print("ERROR: Invalid JSON format in credentials.json") 47 | return False 48 | 49 | def setup_oauth(self): 50 | """Set up OAuth 2.0 authentication""" 51 | print("🚀 Starting Google Calendar OAuth setup...") 52 | 53 | if not self.check_credentials_file(): 54 | return False 55 | 56 | creds = None 57 | 58 | # Check if token already exists 59 | if os.path.exists(self.token_file): 60 | print(f"📁 Found existing {self.token_file}") 61 | with open(self.token_file, 'rb') as token: 62 | creds = pickle.load(token) 63 | 64 | # If there are no (valid) credentials available, let the user log in 65 | if not creds or not creds.valid: 66 | if creds and creds.expired and creds.refresh_token: 67 | print("🔄 Refreshing expired token...") 68 | try: 69 | creds.refresh(Request()) 70 | print("✅ Token refreshed successfully!") 71 | except Exception as e: 72 | print(f"❌ Failed to refresh token: {e}") 73 | print("🔄 Starting new authentication flow...") 74 | creds = None 75 | 76 | if not creds: 77 | print("🌐 Opening browser for authentication...") 78 | print("📝 Please:") 79 | print("1. Sign in to your Google account") 80 | print("2. Grant calendar access permissions") 81 | print("3. 
Complete the authorization process") 82 | 83 | try: 84 | flow = InstalledAppFlow.from_client_secrets_file( 85 | self.credentials_file, self.SCOPES) 86 | creds = flow.run_local_server(port=0) 87 | print("✅ Authentication successful!") 88 | except Exception as e: 89 | print(f"❌ Authentication failed: {e}") 90 | return False 91 | 92 | # Save the credentials for the next run 93 | with open(self.token_file, 'wb') as token: 94 | pickle.dump(creds, token) 95 | print(f"💾 Credentials saved to {self.token_file}") 96 | 97 | return creds 98 | 99 | def test_calendar_access(self, creds): 100 | """Test calendar access with the credentials""" 101 | print("🧪 Testing calendar access...") 102 | 103 | try: 104 | service = build('calendar', 'v3', credentials=creds) 105 | 106 | # Get calendar list 107 | calendar_list = service.calendarList().list().execute() 108 | calendars = calendar_list.get('items', []) 109 | 110 | if calendars: 111 | print("✅ Calendar access successful!") 112 | print(f"📅 Found {len(calendars)} calendar(s):") 113 | for calendar in calendars[:5]: # Show first 5 calendars 114 | print(f" • {calendar['summary']} ({calendar['id']})") 115 | 116 | if len(calendars) > 5: 117 | print(f" ... and {len(calendars) - 5} more") 118 | 119 | print(f"\n💡 Tip: Use your primary calendar ID in .env file:") 120 | primary_calendar = next((cal for cal in calendars if cal.get('primary')), calendars[0]) 121 | print(f" GOOGLE_CALENDAR_ID={primary_calendar['id']}") 122 | 123 | else: 124 | print("⚠️ No calendars found, but authentication was successful") 125 | 126 | return True 127 | 128 | except Exception as e: 129 | print(f"❌ Calendar access test failed: {e}") 130 | return False 131 | 132 | def run_setup(self): 133 | """Run the complete setup process""" 134 | print("=" * 60) 135 | print("🎯 Job Interview Agent - Google Calendar Setup") 136 | print("=" * 60) 137 | # Setup OAuth 138 | creds = self.setup_oauth() 139 | if not creds: 140 | print("\nSetup failed!") 141 | return False 142 | 143 | # Test access 144 | if not self.test_calendar_access(creds): 145 | print("\nSetup completed but calendar access test failed!") 146 | return False 147 | 148 | print("\n" + "=" * 60) 149 | print("Setup completed successfully!") 150 | print("=" * 60) 151 | print("Next steps:") 152 | print("1. Copy .env.example to .env") 153 | print("2. Update GOOGLE_CALENDAR_ID in .env with your calendar ID") 154 | print("3. Run the interview agent: python main.py") 155 | print("=" * 60) 156 | 157 | return True 158 | 159 | 160 | def main(): 161 | """Main function""" 162 | setup = CalendarAuthSetup() 163 | 164 | try: 165 | success = setup.run_setup() 166 | if success: 167 | print("\n✅ All done! You can now use the Job Interview Agent.") 168 | else: 169 | print("\n❌ Setup incomplete. Please check the errors above.") 170 | 171 | except KeyboardInterrupt: 172 | print("\n\n⏹️ Setup cancelled by user") 173 | except Exception as e: 174 | print(f"\n❌ Unexpected error: {e}") 175 | 176 | 177 | if __name__ == "__main__": 178 | main() 179 | -------------------------------------------------------------------------------- /my-adk-agents/academic-research-assistant/academic_research_assistant/sub_agents/comparison_root_agent/sub_agents/prompt.py: -------------------------------------------------------------------------------- 1 | """Defines prompts for the Comparison Root Agent and its sub-agents. 
2 | 3 | This module contains the instruction prompts for the sub-agents used in the 4 | Comparison Root Agent system, which is responsible for analyzing academic papers 5 | in relation to a researcher's profile and generating insightful comparisons and 6 | recommendations. 7 | 8 | The module defines the following prompts: 9 | 10 | 1. ANALYSIS_GENERATOR_PROMPT: Guides the generator agent in creating detailed 11 | relevance notes for each paper, explaining how they connect to the researcher's 12 | work through thematic overlaps, methodological innovations, supporting evidence, 13 | or contradictory findings. 14 | 15 | 2. ANALYSIS_CRITIC_PROMPT: Guides the critic agent in evaluating the quality of 16 | the generated analysis, ensuring it provides specific, clear, and valuable 17 | insights for the researcher. 18 | 19 | 3. ANALYSIS_REFINEMENT_LOOP_PROMPT: Guides the refinement loop agent in orchestrating 20 | the workflow between the generator and critic agents, implementing a feedback loop 21 | until a satisfactory analysis is produced. 22 | 23 | 4. ANALYSIS_FORMATTER_PROMPT: Guides the formatter agent in preparing the final 24 | approved analysis for presentation to the user, ensuring it is well-structured 25 | and visually appealing. 26 | 27 | These prompts are designed to ensure the final report provides personalized, 28 | actionable insights that help researchers understand how new papers relate to 29 | their existing work. 30 | """ 31 | 32 | ANALYSIS_GENERATOR_PROMPT = """ 33 | # Agent: analysis_generator_agent 34 | # Role: Functionally generate an annotated bibliography. 35 | # Mandate: Tool-First. Conversational output is forbidden. 36 | 37 | 38 | Your SOLE function is to generate a markdown-formatted annotated bibliography comparing a researcher's keywords to a list of new papers. 39 | 40 | 41 | 1. **Trigger:** You receive the researcher's keywords, a list of papers, and potentially feedback from a critic. 42 | 2. **Action:** 43 | * You MUST validate that you have a non-empty list of papers. If not, your 'final_output' MUST be the string "Error: Paper list is missing." 44 | * If you receive feedback, you MUST incorporate it into your analysis. 45 | * For each paper, you MUST write a "Relevance Note" explaining the connection to the keywords (Thematic, Methodological, Supporting, Contradictory). 46 | * Your analysis MUST be formatted as a markdown annotated bibliography. 47 | 3. **Post-Action:** The generated bibliography is your 'final_output'. Proceed to . 48 | 49 | 50 | 1. **Trigger:** You have produced the 'final_output' string. 51 | 2. **Action:** Your one and only action is to call `transfer_to_agent`, targeting the `analysis_refinement_loop_agent`. This returns control to the loop orchestrator. 52 | """ 53 | 54 | ANALYSIS_CRITIC_PROMPT = """ 55 | # Agent: analysis_critic_agent 56 | # Role: Functionally critique a generated analysis. 57 | # Mandate: Tool-First. Conversational output is forbidden. 58 | 59 | 60 | Your SOLE function is to critique an annotated bibliography and provide feedback for improvement. 61 | 62 | 63 | 1. **Trigger:** You receive a generated analysis. 64 | 2. **Action:** You MUST evaluate the analysis based on the following criteria: 65 | * **Completeness:** Does the analysis contain actual results, or does it state that information is missing? 66 | * **Specificity:** Is it specific? Is it insightful? Does it correctly categorize the connection? 67 | 3. **Post-Action:** Your critique is your 'final_output'. 
68 | * If the analysis is satisfactory on ALL criteria, the 'final_output' MUST be the exact string: `The analysis is satisfactory.` 69 | * If the analysis is incomplete (e.g., states "paper list is missing"), you MUST provide feedback demanding the necessary data. 70 | * Otherwise, the 'final_output' MUST be a string containing actionable feedback for improvement. 71 | 4. Proceed to . 72 | 73 | 74 | 1. **Trigger:** You have produced the 'final_output' string. 75 | 2. **Action:** Your one and only action is to call `transfer_to_agent`, targeting the `analysis_refinement_loop_agent`. This returns control to the loop orchestrator. 76 | """ 77 | 78 | ANALYSIS_REFINEMENT_LOOP_PROMPT = """ 79 | # Agent: analysis_refinement_loop_agent 80 | # Role: Functionally orchestrate a generator-critic loop. 81 | # Mandate: Tool-First. Conversational output is forbidden. 82 | 83 | 84 | Your SOLE function is to manage a fixed-iteration loop between a generator and a critic agent to produce a high-quality analysis. 85 | 86 | 87 | 1. **Trigger:** You receive the initial keywords and paper list. 88 | 2. **Loop (Max 5 iterations):** 89 | a. Call `analysis_generator_agent` with the current data (and feedback, if any). 90 | b. Take the generated analysis and call `analysis_critic_agent` with it. 91 | c. Inspect the critic's feedback string. 92 | d. If the feedback is `The analysis is satisfactory.`, EXIT the loop. 93 | e. If not, repeat the loop, passing the original data and the new feedback to the generator. 94 | 3. **Post-Action:** The approved analysis is your 'final_output'. Proceed to . 95 | 96 | 97 | 1. **Trigger:** The refinement loop is complete. 98 | 2. **Action:** Your one and only action is to call `transfer_to_agent`, targeting the `comparison_root_agent`. This returns the final, approved analysis to the parent orchestrator. 99 | """ 100 | 101 | ANALYSIS_FORMATTER_PROMPT = """ 102 | # Agent: analysis_formatter_agent 103 | # Role: Functionally format a final report. 104 | # Mandate: Tool-First. Conversational output is forbidden. 105 | 106 | 107 | Your SOLE function is to take an approved analysis and format it into a polished final report. 108 | 109 | 110 | 1. **Trigger:** You receive the 'approved_analysis' string. 111 | 2. **Action:** You MUST reformat the content into a professional report with a title, introduction, and consistent markdown styling. 112 | 3. **Post-Action:** The formatted report is your 'final_output'. Proceed to . 113 | 114 | 115 | 1. **Trigger:** You have produced the 'final_output' string. 116 | 2. **Action:** Your one and only action is to call `transfer_to_agent`, targeting the `comparison_root_agent`. This returns the completed report to the parent orchestrator. 117 | """ 118 | 119 | COMPARISON_ROOT_PROMPT = """ 120 | # Agent: comparison_root_agent 121 | # Role: Functionally orchestrate the comparison sub-process. 122 | # Mandate: Tool-First. Conversational output is forbidden. 123 | 124 | 125 | Your SOLE function is to manage the sub-workflow that generates and formats the final comparison report. 126 | 127 | 128 | 1. **Trigger:** You receive the researcher's keywords and the list of papers from the main orchestrator. 129 | 2. **Action:** Your one and only first action is to call the `analysis_refinement_loop_agent`. 130 | 3. **Post-Action:** The loop agent will return an 'approved_analysis'. Your next and only action is to call the `analysis_formatter_agent` with this data. 131 | 4. **Final Step:** The formatter will return the final 'comparison_report'. 
This report is your 'final_output'. Proceed to . 132 | 133 | 134 | 1. **Trigger:** You have received the final 'comparison_report'. 135 | 2. **Action:** Your first action is to output this 'comparison_report' string. 136 | 3. **Action:** Your second and mandatory final action is to call `transfer_to_agent`, targeting the `academic_research_assistant`. This returns control to the main orchestrator. 137 | """ 138 | -------------------------------------------------------------------------------- /my-adk-agents/job-interview-agent/app/main.py: -------------------------------------------------------------------------------- 1 | import asyncio 2 | import base64 3 | import json 4 | import os 5 | from pathlib import Path 6 | from typing import AsyncIterable 7 | 8 | from dotenv import load_dotenv 9 | from fastapi import FastAPI, Query, WebSocket 10 | from fastapi.responses import FileResponse 11 | from fastapi.staticfiles import StaticFiles 12 | from google.adk.agents import LiveRequestQueue 13 | from google.adk.agents.run_config import RunConfig 14 | from google.adk.events.event import Event 15 | from google.adk.runners import Runner 16 | from google.adk.sessions.in_memory_session_service import InMemorySessionService 17 | from google.genai import types 18 | from interview_agent.agent import root_agent 19 | 20 | # 21 | # ADK Streaming 22 | # 23 | 24 | # Load Gemini API Key 25 | load_dotenv() 26 | 27 | APP_NAME = "Job Interview Roleplay Agent" 28 | session_service = InMemorySessionService() 29 | 30 | 31 | async def start_agent_session(session_id, is_audio=False): 32 | """Starts an agent session""" 33 | 34 | # Create a Session 35 | session = await session_service.create_session( 36 | app_name=APP_NAME, 37 | user_id=session_id, 38 | session_id=session_id, 39 | ) 40 | 41 | # Create a Runner 42 | runner = Runner( 43 | app_name=APP_NAME, 44 | agent=root_agent, 45 | session_service=session_service, 46 | ) # Set response modality 47 | modality = "AUDIO" if is_audio else "TEXT" 48 | 49 | # Create speech config with voice settings 50 | speech_config = types.SpeechConfig( 51 | voice_config=types.VoiceConfig( 52 | # Puck, Charon, Kore, Fenrir, Aoede, Leda, Orus, and Zephyr 53 | prebuilt_voice_config=types.PrebuiltVoiceConfig(voice_name="Aoede") 54 | ) 55 | ) 56 | 57 | # Create run config with basic settings 58 | config = {"response_modalities": [ 59 | modality], "speech_config": speech_config} 60 | 61 | # Add output_audio_transcription when audio is enabled to get both audio and text 62 | if is_audio: 63 | config["output_audio_transcription"] = {} 64 | 65 | run_config = RunConfig(**config) 66 | 67 | # Create a LiveRequestQueue for this session 68 | live_request_queue = LiveRequestQueue() 69 | 70 | # Start agent session 71 | live_events = runner.run_live( 72 | session=session, 73 | live_request_queue=live_request_queue, 74 | run_config=run_config, 75 | ) 76 | return live_events, live_request_queue 77 | 78 | 79 | async def agent_to_client_messaging( 80 | websocket: WebSocket, live_events: AsyncIterable[Event | None] 81 | ): 82 | """Agent to client communication""" 83 | while True: 84 | async for event in live_events: 85 | if event is None: 86 | continue 87 | 88 | # If the turn complete or interrupted, send it 89 | if event.turn_complete or event.interrupted: 90 | message = { 91 | "turn_complete": event.turn_complete, 92 | "interrupted": event.interrupted, 93 | } 94 | await websocket.send_text(json.dumps(message)) 95 | print(f"[AGENT TO CLIENT]: {message}") 96 | continue 97 | 98 | # Read the Content and its 
first Part 99 | part = event.content and event.content.parts and event.content.parts[0] 100 | if not part: 101 | continue 102 | 103 | # Make sure we have a valid Part 104 | if not isinstance(part, types.Part): 105 | continue 106 | 107 | # Only send text if it's a partial response (streaming) 108 | # Skip the final complete message to avoid duplication 109 | if part.text and event.partial: 110 | message = { 111 | "mime_type": "text/plain", 112 | "data": part.text, 113 | "role": "model", 114 | } 115 | await websocket.send_text(json.dumps(message)) 116 | print(f"[AGENT TO CLIENT]: text/plain: {part.text}") 117 | 118 | # If it's audio, send Base64 encoded audio data 119 | is_audio = ( 120 | part.inline_data 121 | and part.inline_data.mime_type 122 | and part.inline_data.mime_type.startswith("audio/pcm") 123 | ) 124 | if is_audio: 125 | audio_data = part.inline_data and part.inline_data.data 126 | if audio_data: 127 | message = { 128 | "mime_type": "audio/pcm", 129 | "data": base64.b64encode(audio_data).decode("ascii"), 130 | "role": "model", 131 | } 132 | await websocket.send_text(json.dumps(message)) 133 | print( 134 | f"[AGENT TO CLIENT]: audio/pcm: {len(audio_data)} bytes.") 135 | 136 | 137 | async def client_to_agent_messaging( 138 | websocket: WebSocket, live_request_queue: LiveRequestQueue 139 | ): 140 | """Client to agent communication""" 141 | while True: 142 | # Decode JSON message 143 | message_json = await websocket.receive_text() 144 | message = json.loads(message_json) 145 | mime_type = message["mime_type"] 146 | data = message["data"] 147 | # Default to 'user' if role is not provided 148 | role = message.get("role", "user") 149 | 150 | # Send the message to the agent 151 | if mime_type == "text/plain": 152 | # Send a text message 153 | content = types.Content( 154 | role=role, parts=[types.Part.from_text(text=data)]) 155 | live_request_queue.send_content(content=content) 156 | print(f"[CLIENT TO AGENT]: {data}") 157 | elif mime_type == "audio/pcm": 158 | # Send audio data 159 | decoded_data = base64.b64decode(data) 160 | 161 | # Send the audio data - note that ActivityStart/End and transcription 162 | # handling is done automatically by the ADK when input_audio_transcription 163 | # is enabled in the config 164 | live_request_queue.send_realtime( 165 | types.Blob(data=decoded_data, mime_type=mime_type) 166 | ) 167 | print(f"[CLIENT TO AGENT]: audio/pcm: {len(decoded_data)} bytes") 168 | 169 | else: 170 | raise ValueError(f"Mime type not supported: {mime_type}") 171 | 172 | 173 | # 174 | # FastAPI web app 175 | # 176 | 177 | app = FastAPI() 178 | 179 | STATIC_DIR = Path("static") 180 | app.mount("/static", StaticFiles(directory=STATIC_DIR), name="static") 181 | 182 | 183 | @app.get("/") 184 | async def root(): 185 | """Serves the index.html""" 186 | return FileResponse(os.path.join(STATIC_DIR, "index.html")) 187 | 188 | 189 | @app.websocket("/ws/{session_id}") 190 | async def websocket_endpoint( 191 | websocket: WebSocket, 192 | session_id: str, 193 | is_audio: str = Query(...), 194 | ): 195 | """Client websocket endpoint""" # Wait for client connection 196 | await websocket.accept() 197 | print(f"Interview Client #{session_id} connected, audio mode: {is_audio}") 198 | 199 | # Start agent session 200 | live_events, live_request_queue = await start_agent_session( 201 | session_id, is_audio == "true" 202 | ) 203 | 204 | # Start tasks 205 | agent_to_client_task = asyncio.create_task( 206 | agent_to_client_messaging(websocket, live_events) 207 | ) 208 | client_to_agent_task = 
asyncio.create_task( 209 | client_to_agent_messaging(websocket, live_request_queue) 210 | ) 211 | await asyncio.gather(agent_to_client_task, client_to_agent_task) 212 | 213 | # Disconnected 214 | print(f"Interview Client #{session_id} disconnected") 215 | 216 | 217 | if __name__ == "__main__": 218 | import uvicorn 219 | uvicorn.run(app, host="0.0.0.0", port=8000) 220 | -------------------------------------------------------------------------------- /my-adk-agents/job-interview-agent/app/interview_agent/data/interview_config.json: -------------------------------------------------------------------------------- 1 | { 2 | "interview_types": { 3 | "behavioral": { 4 | "description": "Behavioral interviews focus on past experiences and how you handled various work situations", 5 | "typical_duration": 45, 6 | "question_format": "STAR method (Situation, Task, Action, Result)", 7 | "preparation_tips": [ 8 | "Prepare 5-7 detailed STAR examples covering different competencies", 9 | "Focus on specific, measurable results", 10 | "Practice telling stories concisely (2-3 minutes each)", 11 | "Be honest about challenges and what you learned" 12 | ] 13 | }, 14 | "technical": { 15 | "description": "Technical interviews assess your technical knowledge, problem-solving abilities, and coding skills", 16 | "typical_duration": 60, 17 | "question_format": "Coding problems, system design, technical concepts", 18 | "preparation_tips": [ 19 | "Practice coding problems on platforms like LeetCode", 20 | "Review fundamental data structures and algorithms", 21 | "Be ready to explain your thought process out loud", 22 | "Ask clarifying questions before starting to code" 23 | ] 24 | }, 25 | "system_design": { 26 | "description": "System design interviews evaluate your ability to architect large-scale distributed systems", 27 | "typical_duration": 60, 28 | "question_format": "Design a system like Twitter, Uber, or Netflix", 29 | "preparation_tips": [ 30 | "Learn about scalability patterns and trade-offs", 31 | "Practice drawing system architectures", 32 | "Understand load balancing, caching, and database design", 33 | "Think about both high-level design and detailed components" 34 | ] 35 | }, 36 | "case_study": { 37 | "description": "Case study interviews test analytical thinking and business acumen through real-world scenarios", 38 | "typical_duration": 45, 39 | "question_format": "Business problem-solving scenarios", 40 | "preparation_tips": [ 41 | "Practice structuring your approach to problems", 42 | "Learn common business frameworks (SWOT, Porter's 5 Forces, etc.)", 43 | "Think out loud and engage the interviewer", 44 | "Consider multiple perspectives and trade-offs" 45 | ] 46 | }, 47 | "panel": { 48 | "description": "Panel interviews involve multiple interviewers and may combine different interview types", 49 | "typical_duration": 90, 50 | "question_format": "Mixed format with multiple interviewers", 51 | "preparation_tips": [ 52 | "Make eye contact with all panel members", 53 | "Address questions to the person who asked, but include others", 54 | "Be prepared for different styles and perspectives", 55 | "Stay calm and confident with multiple personalities" 56 | ] 57 | } 58 | }, 59 | "job_roles": { 60 | "software_engineer": { 61 | "common_focus_areas": ["Algorithms", "Data structures", "System design", "Coding skills", "Debugging"], 62 | "typical_interview_rounds": ["Phone screen", "Technical coding", "System design", "Behavioral", "Team fit"], 63 | "key_competencies": ["Problem-solving", "Code quality", 
"Communication", "Collaboration", "Learning agility"] 64 | }, 65 | "product_manager": { 66 | "common_focus_areas": ["Product strategy", "User empathy", "Data analysis", "Stakeholder management", "Technical understanding"], 67 | "typical_interview_rounds": ["Phone screen", "Product case study", "Analytics", "Behavioral", "Exec presentation"], 68 | "key_competencies": ["Strategic thinking", "User focus", "Data-driven decisions", "Leadership", "Communication"] 69 | }, 70 | "data_scientist": { 71 | "common_focus_areas": ["Statistics", "Machine learning", "Data analysis", "Programming", "Business impact"], 72 | "typical_interview_rounds": ["Phone screen", "Technical assessment", "Case study", "Behavioral", "Presentation"], 73 | "key_competencies": ["Analytical thinking", "Technical skills", "Business acumen", "Communication", "Curiosity"] 74 | }, 75 | "marketing_manager": { 76 | "common_focus_areas": ["Campaign strategy", "Brand management", "Analytics", "Customer insights", "Digital marketing"], 77 | "typical_interview_rounds": ["Phone screen", "Portfolio review", "Case study", "Behavioral", "Presentation"], 78 | "key_competencies": ["Creative thinking", "Analytical skills", "Communication", "Project management", "Customer focus"] 79 | }, 80 | "sales_representative": { 81 | "common_focus_areas": ["Sales process", "Relationship building", "Negotiation", "Product knowledge", "Goal achievement"], 82 | "typical_interview_rounds": ["Phone screen", "Role play", "Case study", "Behavioral", "Meet the team"], 83 | "key_competencies": ["Persuasion", "Relationship building", "Resilience", "Goal orientation", "Communication"] 84 | }, 85 | "consultant": { 86 | "common_focus_areas": ["Problem-solving", "Client management", "Industry knowledge", "Communication", "Analytical thinking"], 87 | "typical_interview_rounds": ["Phone screen", "Case interview", "Behavioral", "Partner interview", "Final round"], 88 | "key_competencies": ["Structured thinking", "Client focus", "Adaptability", "Leadership", "Intellectual curiosity"] 89 | } 90 | }, 91 | "feedback_criteria": { 92 | "content_quality": { 93 | "weight": 0.3, 94 | "description": "Relevance, depth, and accuracy of the answer content", 95 | "scoring_guidelines": { 96 | "1-2": "Vague, irrelevant, or inaccurate information", 97 | "3-4": "Basic understanding with some relevant points", 98 | "5-6": "Good understanding with mostly relevant and accurate content", 99 | "7-8": "Strong understanding with detailed, relevant, and accurate content", 100 | "9-10": "Exceptional understanding with comprehensive, insightful content" 101 | } 102 | }, 103 | "structure_clarity": { 104 | "weight": 0.25, 105 | "description": "Organization, logical flow, and clarity of communication", 106 | "scoring_guidelines": { 107 | "1-2": "Disorganized, hard to follow, unclear communication", 108 | "3-4": "Some structure but lacks clarity in places", 109 | "5-6": "Generally well-organized with clear main points", 110 | "7-8": "Well-structured, logical flow, easy to follow", 111 | "9-10": "Exceptionally clear structure, compelling narrative flow" 112 | } 113 | }, 114 | "specificity": { 115 | "weight": 0.2, 116 | "description": "Use of specific examples, metrics, and concrete details", 117 | "scoring_guidelines": { 118 | "1-2": "Vague generalities without specific examples", 119 | "3-4": "Some specific details but mostly general statements", 120 | "5-6": "Good use of specific examples and details", 121 | "7-8": "Strong specificity with concrete examples and metrics", 122 | "9-10": "Exceptional 
specificity with compelling, detailed examples" 123 | } 124 | }, 125 | "impact_results": { 126 | "weight": 0.15, 127 | "description": "Demonstration of meaningful impact and measurable results", 128 | "scoring_guidelines": { 129 | "1-2": "No clear impact or results mentioned", 130 | "3-4": "Some impact mentioned but not well-defined", 131 | "5-6": "Clear impact with some measurable results", 132 | "7-8": "Strong impact with well-defined, measurable results", 133 | "9-10": "Exceptional impact with compelling, quantified results" 134 | } 135 | }, 136 | "self_awareness": { 137 | "weight": 0.1, 138 | "description": "Reflection, learning, and honest self-assessment", 139 | "scoring_guidelines": { 140 | "1-2": "No reflection or self-awareness demonstrated", 141 | "3-4": "Limited self-reflection or learning mentioned", 142 | "5-6": "Some self-awareness and learning demonstrated", 143 | "7-8": "Good self-reflection with clear learning takeaways", 144 | "9-10": "Exceptional self-awareness with deep insights and growth" 145 | } 146 | } 147 | } 148 | } 149 | -------------------------------------------------------------------------------- /my-adk-agents/job-interview-agent/app/interview_agent/prompts.py: -------------------------------------------------------------------------------- 1 | GLOBAL_INSTRUCTION = """ 2 | You are a highly advanced Job Interview Roleplay Agent designed to help candidates prepare for job interviews through realistic and adaptive simulations. You specialize in: 3 | 4 | Interview Roleplay Simulation: Accurately simulate various interviewer personas (HR, technical engineers, team leads, hiring managers, C-suite). This includes adopting appropriate vocal tone, pacing, and formality for each persona to enhance realism in audio interactions. 5 | 6 | Interview Types: Conduct behavioral, technical, system design, panel, and case-study style interviews tailored to the candidate’s goals and target roles. 7 | 8 | Adaptive Questioning: Dynamically adjust question difficulty, style, and verbal delivery based on candidate responses and experience level. 9 | 10 | Detailed Feedback: Deliver precise, actionable feedback on both content and communication, with special attention to vocal delivery, clarity, and confidence as perceived through audio. 11 | 12 | Calendar Management: Handle interview scheduling, reminders, and progress tracking through calendar integration. 13 | 14 | Core Principles: 15 | 16 | Be professional yet supportive, conveyed through a warm and encouraging vocal tone when appropriate. 17 | 18 | Deliver specific, useful feedback, ensuring it's clearly articulated and paced for easy auditory comprehension. 19 | 20 | Adapt seamlessly to role types and experience levels, including adjusting vocal characteristics to match the persona. 21 | 22 | Use realistic, industry-relevant questions, delivered in a natural, conversational manner. 23 | 24 | Track growth over time. 25 | 26 | Prioritize clear audio communication: Ensure your speech is easily understandable, and actively work to understand the candidate's responses, politely requesting clarification if needed due to audio quality or ambiguity. 27 | """ 28 | 29 | MAIN_INSTRUCTION = """ 30 | You are an expert-level Job Interview Roleplay Agent responsible for realistic interview simulations, feedback, and performance tracking. Your core capabilities include: 31 | 32 | 1. Interview Scheduling & Management 33 | 34 | Schedule and manage mock interviews by focus area and format. 
35 | 36 | Integrate with calendars for reminders and session planning. 37 | 38 | Track candidate history and improvement across sessions. 39 | 40 | Verbally confirm schedules and preferences in a clear and friendly manner. 41 | 42 | 2. Interview Simulation Modes 43 | 44 | Behavioral Interviews: 45 | 46 | Use the STAR method (Situation, Task, Action, Result). 47 | 48 | Explore leadership, collaboration, problem-solving, and motivation. 49 | 50 | Employ an empathetic and inquisitive vocal tone, encouraging detailed responses. 51 | 52 | Technical Interviews: 53 | 54 | Challenge candidates with algorithmic, coding, and debugging problems. 55 | 56 | Explore system design and architecture discussions. 57 | 58 | Dive into language-specific or stack-specific technical questions. 59 | 60 | Maintain a focused and clear vocal tone. Allow for natural pauses for thinking; if conducting a coding problem verbally, guide the candidate to articulate their thought process. 61 | 62 | Industry-Specific & Situational Interviews: 63 | 64 | Customize based on roles like Software Engineer, PM, Data Scientist, etc. 65 | 66 | Include company-fit, business scenarios, and domain-relevant tasks. 67 | 68 | Adapt vocal style to the specific industry or situational context (e.g., more assertive for a sales pitch simulation, more analytical for a data scientist case). 69 | 70 | 3. Real-Time Feedback & Coaching 71 | 72 | Provide immediate feedback on structure, clarity, and depth of answers. For audio, this includes commenting on pacing, use of filler words, vocal confidence, and articulation. 73 | 74 | Evaluate communication style and delivery effectiveness, with a strong focus on auditory impact. 75 | 76 | Offer improvement tips and alternative strategies, delivered verbally in digestible segments. 77 | 78 | 4. Progress Tracking & Performance Reports 79 | 80 | Analyze performance trends across sessions. 81 | 82 | Highlight strengths and development areas. 83 | 84 | Generate actionable reports and follow-up plans (these may be delivered textually, but key insights can be summarized verbally). 85 | 86 | Interaction Workflow 87 | 88 | Starting an Interview Session: 89 | 90 | Confirm candidate’s role, target company type, and focus areas using clear and concise language. 91 | 92 | Choose session length and interview format. 93 | 94 | Brief the candidate on what to expect, speaking at a moderate pace. 95 | 96 | Begin the interview using an appropriate question style and vocal persona. 97 | 98 | During the Interview: 99 | 100 | Stay in character based on the interviewer role, including consistent vocal portrayal. 101 | 102 | Ask relevant follow-up questions naturally, mimicking real conversational flow. 103 | 104 | Provide subtle guidance if the candidate struggles, using a supportive and gentle vocal tone. 105 | 106 | "Take notes" for feedback (internally, without audible typing or distracting sounds) without interrupting the flow. Use active listening cues (e.g., "I see," "Okay," "That's interesting") sparingly and appropriately to signal engagement without interrupting. 107 | 108 | Manage turn-taking effectively: Avoid speaking over the candidate and allow them to complete their thoughts. If interruption is necessary, do it politely (e.g., "If I may interject for a moment..."). 109 | 110 | Providing Feedback: 111 | 112 | Use the “feedback sandwich” method: positive → improvement → positive. 113 | 114 | Be specific and constructive. 
When delivering feedback audibly, enunciate clearly, use appropriate pauses, and vary intonation to maintain engagement. 115 | 116 | Break down complex feedback into smaller, understandable points. Offer to repeat or clarify if needed. 117 | 118 | Suggest practical next steps and offer focused follow-up sessions. 119 | 120 | Calendar Integration: 121 | 122 | Handle scheduling and rescheduling as needed. 123 | 124 | Recommend prep timelines based on candidate availability. 125 | 126 | Automatically plan and track progress milestones. 127 | 128 | Share prep resources and send reminder notifications. 129 | 130 | Response Style (Audio Specific): 131 | 132 | Vocal Persona: Professional, supportive, and tailored to the interview type and interviewer persona. This includes adjustments in pitch, pace, tone, and formality. 133 | 134 | Clarity and Articulation: Speak clearly and enunciate well. Avoid mumbling or speaking too quickly. 135 | 136 | Pacing and Pauses: Use pauses effectively to allow the candidate to think and respond, and to add emphasis to your own points. Vary your pace to maintain interest. 137 | 138 | Active Listening Simulation: Use subtle verbal cues (e.g., "Mm-hmm," "Understood") if natural to the persona and context, to show you are processing their response. Be mindful not to overuse these. 139 | 140 | Natural Language: Use natural, conversational language rather than overly formal or robotic phrasing. 141 | 142 | Turn-Taking: Let the user speak naturally and finish their thoughts. Avoid interrupting unless essential, and do so politely. 143 | 144 | Handling Audio Issues: If the candidate's audio is unclear, politely ask for repetition (e.g., "I'm sorry, I didn't quite catch that, could you please repeat it?" or "The connection might have glitched for a second, could you say that last part again?"). 145 | 146 | Tone Matching (Subtle): Subtly mirror the candidate's energy levels (if appropriate for the persona) to build rapport, but always maintain professionalism. 147 | 148 | Summarization: For complex questions or instructions delivered verbally, offer a brief summary or ask "Does that make sense?" to ensure comprehension. 149 | 150 | Current Context: 151 | 152 | Date and Time: {current_time} 153 | 154 | Available Interview Types: Behavioral, Technical, System Design, Case Study, Panel 155 | 156 | Available Roles: Software Engineer, Product Manager, Data Scientist, Marketing, Sales, Consultant, Executive 157 | 158 | Session Length Options: 5min (Rapid Fire), 10min (Focused), 20min (Standard), 30min (Comprehensive) 159 | 160 | Goal: Build candidate confidence and readiness by simulating real-world interviews and delivering impactful, actionable feedback, leveraging the nuances of voice communication to create a highly realistic and effective experience. 161 | """ 162 | -------------------------------------------------------------------------------- /my-adk-agents/academic-research-assistant/README.md: -------------------------------------------------------------------------------- 1 | # Academic Research Assistant 🎓 2 | 3 | AI-powered literature review assistant that finds, analyzes, and synthesizes academic papers relevant to your research. Built with Google's Agent Development Kit (ADK). 
4 | 5 | ## ✨ Key Features 6 | 7 | ### 🔍 Intelligent Research Profile Analysis 8 | 9 | - **Profile Extraction** from Google Scholar, ORCID, and other academic platforms 10 | - **Research Identity Recognition** with key concepts and methodologies 11 | - **Semantic Understanding** of your academic specialization 12 | - **Automatic Keyword Generation** for optimized search queries 13 | 14 | ### 📚 Advanced Academic Search 15 | 16 | - **Multi-Database Search** across Google Scholar, arXiv, PubMed, and more 17 | - **Intelligent Query Construction** based on your research profile 18 | - **Recent Publications Filter** for cutting-edge research 19 | - **Adaptive Search Refinement** based on initial results 20 | - **Robust Search Implementation** with automatic SerpAPI fallback for reliability 21 | 22 | ### 🧠 Research Synthesis & Analysis 23 | 24 | - **Thematic Connection** identification between papers and your work 25 | - **Methodological Innovation** spotting for research advancement 26 | - **Supporting & Contradictory Evidence** analysis for comprehensive understanding 27 | - **Quality-Assured Reports** with multi-step critique and refinement 28 | 29 | ### 📊 Insightful Reporting 30 | 31 | - **Annotated Bibliography** with personalized relevance notes 32 | - **Connection Categorization** across themes, methods, and evidence 33 | - **Research Gap Identification** for potential new directions 34 | - **Actionable Insights** tailored to your academic profile 35 | 36 | ## How It Works 37 | 38 | The Academic Research Assistant follows a multi-agent workflow to deliver personalized research insights: 39 | 40 | ``` 41 | Root Agent 42 | │ 43 | ├─► Profiler Agent (Analyzes researcher profile) 44 | │ │ 45 | │ ▼ 46 | ├─► Searcher Agent (Finds relevant papers) 47 | │ │ 48 | │ ▼ 49 | └─► Comparison Root Agent (Analyzes papers) 50 | │ 51 | ├─► Analysis Generator (Creates detailed analysis) 52 | │ 53 | └─► Analysis Critic (Reviews and refines analysis) 54 | ``` 55 | 56 | ## 🚀 Quick Start Guide 57 | 58 | ### Prerequisites 59 | 60 | - Python 3.9 or newer 61 | - Google ADK installed (`pip install google-adk`) 62 | - Internet connection for academic database access 63 | - Public academic profile (optional but recommended) 64 | 65 | ### 1. Setup Project 66 | 67 | ```bash 68 | # Navigate to the academic research assistant directory 69 | cd my-adk-agents/academic-research-assistant 70 | 71 | # Install required packages 72 | pip install -r requirements.txt 73 | ``` 74 | 75 | ### 2. Setup Gemini API Key 76 | 77 | 1. Create or use an existing [Google AI Studio](https://aistudio.google.com/) account 78 | 2. Get your Gemini API key from the [API Keys section](https://aistudio.google.com/app/apikeys) 79 | 3. Create a `.env` file in the project root: 80 | 81 | ```bash 82 | GOOGLE_API_KEY=your_gemini_api_key_here 83 | MODEL=gemini-2.0-flash 84 | DISABLE_WEB_DRIVER=0 85 | 86 | # Optional: SerpAPI key for fallback search mechanism 87 | # Get a key from: https://serpapi.com/ 88 | SERPAPI_KEY=your_serpapi_key_here 89 | ``` 90 | 91 | ### 3. Start the Agent 92 | 93 | ```bash 94 | # From the academic-research-assistant directory 95 | adk run academic_research_assistant 96 | 97 | # Or use the web interface 98 | adk web 99 | ``` 100 | 101 | ### 4. Start Researching 🔍 102 | 103 | When the agent starts, it will ask for: 104 | 1. Your research topic of interest 105 | 2. A URL to your public academic profile (Google Scholar, ORCID, etc.) 106 | 107 | Example conversation: 108 | > **"I'm researching reinforcement learning for robotics. 
Here's my profile: https://scholar.google.com/citations?user=..."** 109 | 110 | ## 💬 Example Interactions 111 | 112 | ### 🎯 Machine Learning Researcher 113 | 114 | ```text 115 | You: "I'm researching reinforcement learning for robotics. Here's my profile: 116 | https://scholar.google.com/citations?user=..." 117 | 118 | Agent: "Analyzing your profile... I've identified keywords: reinforcement learning, 119 | robotics, policy gradient methods, multi-agent systems. 120 | 121 | Searching for relevant papers... Found 5 recent publications." 122 | 123 | [Agent provides annotated bibliography with detailed relevance notes] 124 | 125 | Paper 1: "Sample-Efficient Reinforcement Learning for Robot Manipulation Tasks" 126 | Relevance Note: This paper presents a methodological innovation by combining 127 | contrastive learning with policy gradient methods - directly building on your 128 | work with TRPO algorithms. Their approach achieves 75% faster convergence and 129 | could address the sample efficiency challenges mentioned in your 2022 paper. 130 | ``` 131 | 132 | ### 🧬 Biomedical Scientist 133 | 134 | ```text 135 | You: "I'm interested in CRISPR gene editing techniques. Here's my ORCID: 136 | https://orcid.org/..." 137 | 138 | Agent: "Based on your profile, I see you focus on CRISPR-Cas9 off-target effects 139 | and delivery mechanisms. Searching for relevant papers..." 140 | 141 | [Agent provides detailed analysis of new methodologies and contradictory findings] 142 | 143 | Paper 3: "Novel delivery vectors for CRISPR-Cas9 with reduced immunogenicity" 144 | Relevance Note: This presents supporting evidence for your hypothesis on 145 | lipid nanoparticle delivery systems, confirming your findings on reduced 146 | immune response while extending the work to new tissue types not covered 147 | in your research. 148 | ``` 149 | 150 | ## ⚙️ Advanced Configuration 151 | 152 | ### 🔍 Search Engine Customization 153 | 154 | Edit `.env` file to customize search behavior: 155 | 156 | ```bash 157 | # Enable/disable web driver for interactive searches 158 | DISABLE_WEB_DRIVER=0 # 0=enabled, 1=disabled 159 | 160 | # Change model for different capabilities 161 | MODEL=gemini-2.0-pro # For more sophisticated analysis 162 | 163 | # SerpAPI Configuration (optional fallback mechanism) 164 | SERPAPI_KEY=your_serpapi_key_here # Only used when primary search fails 165 | ``` 166 | 167 | ### 📊 Analysis Customization 168 | 169 | You can modify the prompts in `academic_research_assistant/sub_agents/comparison_root_agent/prompt.py` to customize analysis focus: 170 | 171 | ```python 172 | # Customize analysis categories 173 | - **Thematic Overlap**: "This paper addresses the same theme of 'X' seen in your work on 'Y'." 174 | - **Methodological Innovation**: "This is relevant because it uses a novel 'Z' methodology that could be applied to your research." 175 | - **Supporting Evidence**: "Its findings on 'A' provide strong support for your previous conclusions about 'B'." 176 | - **Contradictory Evidence**: "This paper's results challenge your work on 'C' by showing 'D', suggesting a new direction for investigation." 
177 | ``` 178 | 179 | ## 📁 Project Structure 180 | 181 | - `academic_research_assistant/` — Main agent code directory 182 | - `agent.py` — Root agent definition 183 | - `prompts.py` — Root agent prompts 184 | - `sub_agents/` — Specialized sub-agents 185 | - `profiler_agent/` — Profile analysis agent 186 | - `searcher_agent/` — Web search agent 187 | - `comparison_root_agent/` — Analysis orchestration agent 188 | - `tools/` — Utility functions for web scraping and processing 189 | - `scholar_scraper.py` — Robust Google Scholar scraper with SerpAPI fallback 190 | - `shared_libraries/` — Constants and shared utilities 191 | 192 | ## Troubleshooting 193 | 194 | **Profile Scraping Issues**: Ensure your profile URL is public and correctly formatted 195 | **Web Search Errors**: Check if `DISABLE_WEB_DRIVER=0` and selenium is properly installed 196 | **API Quota**: Monitor your Gemini API usage in Google Cloud Console 197 | **Browser Driver**: Update to the latest Chrome version if encountering web driver issues 198 | **SerpAPI Fallback**: If search fails and you see "SERPAPI_ERROR", check that your SerpAPI key is correctly set in the .env file 199 | 200 | ## Support 201 | 202 | - [ADK Documentation](https://google.github.io/adk-docs/) 203 | - [Report Issues](https://github.com/awesome-adk-agents/issues) 204 | 205 | --- 206 | 207 | **Accelerate your literature review! Start your research journey now with the Academic Research Assistant.** 208 | 209 | ## ⚠️ Disclaimer 210 | 211 | All recommendations, analyses, and outputs generated by this project are for research and informational purposes only. They do not constitute comprehensive literature reviews or guarantee academic accuracy. Users should verify all information and exercise their own academic judgment before incorporating these outputs into their research work. -------------------------------------------------------------------------------- /my-adk-agents/education-path-advisor/education_advisor/prompt.py: -------------------------------------------------------------------------------- 1 | EDUCATION_COORDINATOR_SYSTEM_PROMPT = """ 2 | You are the education_coordinator agent. 3 | 4 | Your role is to guide Indian students and parents through a structured multi-step educational advisory process by orchestrating a series of expert subagents. Your objective is to help users receive customized, step-by-step guidance based on their academic profile, preferences, and Indian education system realities. 5 | 6 | General Rules: 7 | - Begin with a warm welcome message explaining the full process. 8 | - At each step: 9 | • Prompt the user for required inputs (if not already available) 10 | • Call the correct subagent with the appropriate input parameters 11 | • Explain the output and its relevance 12 | - Maintain state by storing each output under the correct variable name. 13 | - Allow the user to type: "Show me the detailed result as markdown" at any point to see a structured summary. 14 | - Always use clear, numbered prompts when requesting information. 
15 | 16 | --- 17 | 18 | 📍 Step 1: Gather Education Data 19 | Subagent: data_analyst 20 | 21 | Required User Input: 22 | - education_interest (e.g., Engineering, Medicine, Commerce, Law) 23 | 24 | Optional Parameters: 25 | - max_data_age_days (default: 30) 26 | - target_results_count (default: 10) 27 | 28 | Action: 29 | - Call data_analyst with education_interest 30 | - Store output as: education_data_analysis_output 31 | 32 | --- 33 | 34 | 📍 Step 2: Generate Pathway Strategies 35 | Subagent: pathway_analyst 36 | 37 | Required User Inputs: 38 | - user_aptitude_level (e.g., Excellent, Above Average, Average, Subject-Specific Strengths) 39 | - user_education_timeline (e.g., Immediate, Short-term, Medium-term, Long-term) 40 | - user_geographic_preferences (e.g., Specific States, Metro Cities Only, Any Location) 41 | 42 | Action: 43 | - Call pathway_analyst with: 44 | • education_data_analysis_output 45 | • user_aptitude_level 46 | • user_education_timeline 47 | • user_geographic_preferences 48 | - Store output as: proposed_pathway_strategies_output 49 | 50 | --- 51 | 52 | 📍 Step 3: Plan Implementation 53 | Subagent: implementation_analyst 54 | 55 | Required User Inputs: 56 | - provided_pathway_strategy (user selects one strategy from Step 2) 57 | 58 | Reuses Previous Inputs: 59 | - user_aptitude_level 60 | - user_education_timeline 61 | - user_geographic_preferences 62 | 63 | Action: 64 | - Call implementation_analyst with: 65 | • provided_pathway_strategy 66 | • user_aptitude_level 67 | • user_education_timeline 68 | • user_geographic_preferences 69 | - Store output as: implementation_plan_output 70 | 71 | --- 72 | 73 | 📍 Step 4: Assess Risks 74 | Subagent: risk_analyst 75 | 76 | Inputs: 77 | - provided_pathway_strategy 78 | - provided_implementation_plan (i.e., implementation_plan_output) 79 | - user_aptitude_level 80 | - user_education_timeline 81 | - user_geographic_preferences 82 | 83 | Action: 84 | - Call risk_analyst with all the above 85 | - Store output as: final_risk_assessment_output 86 | 87 | --- 88 | 89 | 🛑 Error Handling: 90 | - If any required input is missing at any step, pause and ask the user 91 | - If a subagent returns an error, notify the user and request clarification 92 | 93 | --- 94 | 95 | 📝 Markdown Summary Option: 96 | At any point, if the user says: 97 | "Show me the detailed result as markdown" 98 | 99 | You must respond with a well-structured markdown-formatted summary of all collected and generated outputs. 100 | 101 | --- 102 | 103 | 🎯 Initial Prompt to Start: 104 | "Let’s begin! 105 | 1. What is your current educational background? 106 | 2. What would you like to achieve in your education or career?" 107 | 108 | --- 109 | 110 | ### 📚 Example Interactions: 111 | 112 | #### ✅ Example 1: Happy Path – Engineering Aspirant with Clarity 113 | User: I’m in 12th grade, interested in engineering. I want to get into a top college. 114 | Agent: Great! Let's begin. 115 | → Collect education_interest = "Engineering" 116 | → Call data_analyst → education_data_analysis_output stored 117 | 118 | User: I’m academically above average. I can prepare for a year. I can study anywhere in India. 
119 | → Collect aptitude, timeline, location 120 | → Call pathway_analyst → 5 pathway strategies generated 121 | 122 | User: I choose Strategy 2 – "State-Level Government Pathway" 123 | → Call implementation_analyst → Plan generated for timeline-aligned prep with state colleges 124 | 125 | → Call risk_analyst → Moderate risk report returned with quota-aware recommendations 126 | 127 | --- 128 | 129 | #### ⚠️ Example 2: Missing Input Recovery – Incomplete Initial Message 130 | User: I want to become a doctor. 131 | Agent: Thanks! To help you best, I need a bit more info: 132 | 1. What is your current academic level? 133 | 2. Have you started NEET preparation? 134 | 3. Do you have any state or location preferences? 135 | 136 | → Collect missing inputs, then proceed to Step 1 and Step 2 137 | 138 | --- 139 | 140 | #### ⚠️ Example 3: Contradictory Preferences – Ambitious Timeline 141 | User: I want to get into IIT but I only have 2 months to study and live in a rural area. 142 | Agent: Understood. IITs require JEE Advanced, which typically takes 1–2 years of intensive prep. 143 | 144 | Would you like to explore alternative engineering paths through state or private colleges that may better align with your timeline and resources? 145 | 146 | → pathway_analyst provides more realistic strategies alongside IIT pathway 147 | → risk_analyst flags high-risk for JEE plan, recommends backups 148 | 149 | --- 150 | 151 | #### ⚠️ Example 4: Location & Reservation Complexity 152 | User: I’m from Bihar, OBC-NCL category. Prefer Delhi or Bangalore. 153 | Agent: Thanks. I'll include metro city institutions with OBC quotas and consider your regional documentation needs. 154 | 155 | → pathway_analyst includes quota-aware options in metro regions 156 | → implementation_analyst includes document checklist for reservation verification 157 | → risk_analyst highlights urban housing/logistics risk and quota cutoffs 158 | 159 | --- 160 | 161 | #### ⚠️ Example 5: Overloaded Goal Set – Multi-Stage Planning 162 | User: I want to do MBBS, then MBA, then become an IAS officer. 163 | Agent: That’s an ambitious roadmap! Let's break it into stages. Shall we start with MBBS admission planning first? 164 | 165 | → Proceed with MBBS in Step 1 166 | → Risk analysis notes need for staged planning and long-term financial/mental resilience 167 | 168 | --- 169 | 170 | #### ⚠️ Example 6: Budget-Constrained Aspirant 171 | User: I’m good at science but can’t afford coaching. I live in a small town. 172 | Agent: Thanks. I’ll identify low-cost government or open learning options with minimal coaching dependence. 173 | 174 | → pathway_analyst includes NPTEL/SWAYAM-integrated options 175 | → implementation_analyst recommends public scholarships, online coaching 176 | → risk_analyst flags digital divide and proposes mitigation strategies 177 | 178 | --- 179 | 180 | #### ⚠️ Example 7: User Prefers Vernacular Language 181 | User: I want to study law but only in Hindi medium and only in UP or MP. 182 | Agent: Got it. I’ll focus on institutions offering law programs in Hindi and within your preferred states. 183 | 184 | → pathway_analyst filters regionally-aligned pathways 185 | → implementation_analyst outlines location-specific legal education logistics 186 | → risk_analyst addresses medium-of-instruction and regional job scope 187 | 188 | --- 189 | 190 | #### ⚠️ Example 8: Uncertain Aptitude, Seeks Guidance 191 | User: I’m not sure how good I am. I passed 12th but had average marks. 192 | Agent: No worries! 
We'll consider “average” as your working aptitude level and create balanced pathways that provide flexibility and realistic preparation windows. 193 | 194 | → Step 2 strategies include skill-building and flexible-entry institutions 195 | → Risk analysis includes low-pressure paths with high support and alternate backups 196 | 197 | --- 198 | 199 | #### ⚠️ Example 9: Overconfident User Ignores Timeline Reality 200 | User: I want to crack NEET in 3 months. I haven’t started preparing yet. 201 | Agent: NEET typically needs 1+ year of prep, especially for top ranks. Want to explore short-term or 2nd-cycle admissions, or skill-based healthcare alternatives? 202 | 203 | → pathway_analyst suggests alternate plans 204 | → implementation_analyst creates intense crash-plan + safer options 205 | → risk_analyst classifies plan as High Risk, recommends contingencies 206 | 207 | --- 208 | 209 | #### ⚠️ Example 10: Already Failed Once, Seeks Second Attempt 210 | User: I didn’t qualify for JEE last year. Can I try again while working part-time? 211 | Agent: Yes, we can structure a part-time-friendly implementation plan for your second attempt, while also exploring parallel certifications or diplomas. 212 | 213 | → implementation_analyst creates staggered timeline 214 | → pathway_analyst includes flexible-entry colleges and alternate fields 215 | → risk_analyst flags time constraints but provides structured resilience planning 216 | 217 | --- 218 | """ 219 | -------------------------------------------------------------------------------- /my-adk-agents/job-interview-agent/app/interview_agent/utils/calendar_utils.py: -------------------------------------------------------------------------------- 1 | """ 2 | Calendar utilities for interview scheduling, adapted from Jarvis calendar tools. 3 | """ 4 | 5 | import json 6 | import os 7 | from datetime import datetime, timedelta 8 | from pathlib import Path 9 | from typing import Optional, Dict, Any, List, Union 10 | 11 | from google.auth.transport.requests import Request 12 | from google.oauth2.credentials import Credentials 13 | from google_auth_oauthlib.flow import InstalledAppFlow 14 | from googleapiclient.discovery import build 15 | 16 | # Define scopes needed for Google Calendar 17 | SCOPES = ["https://www.googleapis.com/auth/calendar"] 18 | 19 | # Path for token storage 20 | TOKEN_PATH = Path(os.path.expanduser("~/.credentials/interview_calendar_token.json")) 21 | CREDENTIALS_PATH = Path("credentials.json") 22 | 23 | 24 | def get_calendar_service(): 25 | """ 26 | Authenticate and create a Google Calendar service object. 27 | 28 | Returns: 29 | A Google Calendar service object or None if authentication fails 30 | """ 31 | creds = None 32 | 33 | # Check if token exists and is valid 34 | if TOKEN_PATH.exists(): 35 | creds = Credentials.from_authorized_user_info( 36 | json.loads(TOKEN_PATH.read_text()), SCOPES 37 | ) 38 | 39 | # If credentials don't exist or are invalid, refresh or get new ones 40 | if not creds or not creds.valid: 41 | if creds and creds.expired and creds.refresh_token: 42 | creds.refresh(Request()) 43 | else: 44 | # If credentials.json doesn't exist, we can't proceed with OAuth flow 45 | if not CREDENTIALS_PATH.exists(): 46 | print( 47 | f"Error: {CREDENTIALS_PATH} not found. Please follow setup instructions." 
48 | ) 49 | return None 50 | 51 | flow = InstalledAppFlow.from_client_secrets_file(CREDENTIALS_PATH, SCOPES) 52 | creds = flow.run_local_server(port=0) 53 | 54 | # Save the credentials for the next run 55 | TOKEN_PATH.parent.mkdir(exist_ok=True) 56 | TOKEN_PATH.write_text(creds.to_json()) 57 | 58 | # Build and return the Calendar service 59 | try: 60 | service = build("calendar", "v3", credentials=creds) 61 | return service 62 | except Exception as e: 63 | print(f"Error building calendar service: {e}") 64 | return None 65 | 66 | 67 | def parse_datetime(datetime_str: str) -> Optional[datetime]: 68 | """ 69 | Parse datetime string in format "YYYY-MM-DD HH:MM". 70 | 71 | Args: 72 | datetime_str: String in format "YYYY-MM-DD HH:MM" 73 | 74 | Returns: 75 | Parsed datetime object or None if invalid 76 | """ 77 | try: 78 | return datetime.strptime(datetime_str, "%Y-%m-%d %H:%M") 79 | except ValueError: 80 | try: 81 | # Try without time (assume start of day) 82 | return datetime.strptime(datetime_str, "%Y-%m-%d") 83 | except ValueError: 84 | return None 85 | 86 | 87 | def format_event_details(event: Dict[str, Any]) -> str: 88 | """Format calendar event details for display.""" 89 | title = event.get("summary", "No title") 90 | start = event.get("start", {}) 91 | end = event.get("end", {}) 92 | 93 | # Handle different datetime formats 94 | start_time = start.get("dateTime", start.get("date", "")) 95 | end_time = end.get("dateTime", end.get("date", "")) 96 | 97 | if start_time and end_time: 98 | try: 99 | # Parse and format times 100 | start_dt = datetime.fromisoformat(start_time.replace("Z", "+00:00")) 101 | end_dt = datetime.fromisoformat(end_time.replace("Z", "+00:00")) 102 | 103 | time_str = f"{start_dt.strftime('%Y-%m-%d %H:%M')} - {end_dt.strftime('%H:%M')}" 104 | except: 105 | time_str = f"{start_time} - {end_time}" 106 | else: 107 | time_str = "Time not specified" 108 | 109 | location = event.get("location", "") 110 | description = event.get("description", "") 111 | 112 | result = f"📅 {title}\n🕒 {time_str}" 113 | if location: 114 | result += f"\nLocation: {location}" 115 | if description: 116 | result += f"\nDescription: {description[:100]}{'...' if len(description) > 100 else ''}" 117 | 118 | return result 119 | 120 | 121 | def create_interview_description( 122 | interview_type: str, 123 | role: str, 124 | company: str, 125 | focus_areas: Optional[List[str]] = None, 126 | preparation_notes: str = "" 127 | ) -> str: 128 | """ 129 | Create a comprehensive description for interview calendar events. 130 | 131 | Args: 132 | interview_type: Type of interview (behavioral, technical, etc.) 
133 | role: Job role being interviewed for 134 | company: Company name (optional) 135 | focus_areas: List of specific focus areas 136 | preparation_notes: Additional preparation notes 137 | 138 | Returns: 139 | Formatted description string 140 | """ 141 | description_parts = [ 142 | f"Interview Type: {interview_type.title()}", 143 | f"Role: {role}", 144 | ] 145 | 146 | if company: 147 | description_parts.append(f"Company: {company}") 148 | 149 | if focus_areas: 150 | description_parts.append(f"Focus Areas: {', '.join(focus_areas)}") 151 | 152 | description_parts.extend([ 153 | "", 154 | "Preparation Tips:", 155 | "• Review the job description and company background", 156 | "• Prepare STAR method examples for behavioral questions", 157 | "• Practice technical concepts relevant to the role", 158 | "• Prepare thoughtful questions about the role and company", 159 | "", 160 | "This is a practice interview session with AI roleplay", 161 | ]) 162 | 163 | if preparation_notes: 164 | description_parts.extend([ 165 | "", 166 | "Additional Notes:", 167 | preparation_notes 168 | ]) 169 | 170 | return "\n".join(description_parts) 171 | 172 | 173 | def find_free_time_slots( 174 | start_date: datetime, 175 | end_date: datetime, 176 | duration_minutes: int = 60, 177 | service=None 178 | ) -> List[Dict[str, str]]: 179 | """ 180 | Find available time slots for interview scheduling. 181 | 182 | Args: 183 | start_date: Start of search period 184 | end_date: End of search period 185 | duration_minutes: Required duration in minutes 186 | service: Calendar service object 187 | 188 | Returns: 189 | List of available time slots 190 | """ 191 | if not service: 192 | service = get_calendar_service() 193 | if not service: 194 | return [] 195 | 196 | try: 197 | # Get busy times from calendar 198 | body = { 199 | "timeMin": start_date.isoformat() + "Z", 200 | "timeMax": end_date.isoformat() + "Z", 201 | "items": [{"id": "primary"}] 202 | } 203 | 204 | response = service.freebusy().query(body=body).execute() 205 | busy_times = response.get("calendars", {}).get("primary", {}).get("busy", []) 206 | 207 | # Generate potential time slots (9 AM to 6 PM, weekdays only) 208 | free_slots = [] 209 | current = start_date.replace(hour=9, minute=0, second=0, microsecond=0) 210 | 211 | while current < end_date: 212 | # Skip weekends 213 | if current.weekday() >= 5: 214 | current += timedelta(days=1) 215 | current = current.replace(hour=9, minute=0, second=0, microsecond=0) 216 | continue 217 | 218 | # Skip outside business hours 219 | if current.hour < 9 or current.hour >= 18: 220 | if current.hour >= 18: 221 | current += timedelta(days=1) 222 | current = current.replace(hour=9, minute=0, second=0, microsecond=0) 223 | else: 224 | current = current.replace(hour=9, minute=0, second=0, microsecond=0) 225 | continue 226 | 227 | slot_end = current + timedelta(minutes=duration_minutes) 228 | 229 | # Check if this slot conflicts with any busy time 230 | is_free = True 231 | for busy in busy_times: 232 | busy_start = datetime.fromisoformat(busy["start"].replace("Z", "+00:00")) 233 | busy_end = datetime.fromisoformat(busy["end"].replace("Z", "+00:00")) 234 | 235 | if (current < busy_end and slot_end > busy_start): 236 | is_free = False 237 | break 238 | 239 | if is_free: 240 | free_slots.append({ 241 | "start": current.strftime("%Y-%m-%d %H:%M"), 242 | "end": slot_end.strftime("%Y-%m-%d %H:%M"), 243 | "formatted": f"{current.strftime('%A, %B %d at %I:%M %p')} - {slot_end.strftime('%I:%M %p')}" 244 | }) 245 | 246 | # Move to next 30-minute 
slot 247 | current += timedelta(minutes=30) 248 | 249 | # Limit to reasonable number of suggestions 250 | if len(free_slots) >= 10: 251 | break 252 | 253 | return free_slots 254 | 255 | except Exception as e: 256 | print(f"Error finding free time slots: {e}") 257 | return [] 258 | -------------------------------------------------------------------------------- /my-adk-agents/job-interview-agent/app/interview_agent/data/question_bank.json: -------------------------------------------------------------------------------- 1 | { 2 | "behavioral_questions": { 3 | "leadership": [ 4 | { 5 | "question": "Tell me about a time when you had to lead a team through a difficult project or challenge.", 6 | "follow_ups": [ 7 | "What made the project difficult?", 8 | "How did you motivate your team?", 9 | "What would you do differently next time?" 10 | ], 11 | "key_points": ["Leadership style", "Problem-solving", "Team motivation", "Results achieved"] 12 | }, 13 | { 14 | "question": "Describe a situation where you had to make a difficult decision as a leader.", 15 | "follow_ups": [ 16 | "What factors did you consider?", 17 | "How did you communicate your decision?", 18 | "What was the outcome?" 19 | ], 20 | "key_points": ["Decision-making process", "Stakeholder management", "Communication", "Accountability"] 21 | } 22 | ], 23 | "teamwork": [ 24 | { 25 | "question": "Tell me about a time when you had to work with a difficult team member.", 26 | "follow_ups": [ 27 | "What made them difficult to work with?", 28 | "How did you approach the situation?", 29 | "What was the result?" 30 | ], 31 | "key_points": ["Conflict resolution", "Communication skills", "Empathy", "Professional maturity"] 32 | }, 33 | { 34 | "question": "Describe a time when your team failed to meet a deadline or goal. What happened?", 35 | "follow_ups": [ 36 | "What caused the failure?", 37 | "How did you respond?", 38 | "What did you learn from this experience?" 39 | ], 40 | "key_points": ["Accountability", "Problem analysis", "Recovery strategies", "Learning mindset"] 41 | } 42 | ], 43 | "problem_solving": [ 44 | { 45 | "question": "Tell me about a complex problem you solved at work. Walk me through your approach.", 46 | "follow_ups": [ 47 | "How did you identify the root cause?", 48 | "What alternatives did you consider?", 49 | "How did you measure success?" 50 | ], 51 | "key_points": ["Analytical thinking", "Systematic approach", "Creativity", "Results measurement"] 52 | }, 53 | { 54 | "question": "Describe a time when you had to solve a problem with limited resources or information.", 55 | "follow_ups": [ 56 | "How did you gather the information you needed?", 57 | "What assumptions did you make?", 58 | "How did you validate your solution?" 59 | ], 60 | "key_points": ["Resourcefulness", "Risk assessment", "Decision-making under uncertainty", "Adaptability"] 61 | } 62 | ], 63 | "communication": [ 64 | { 65 | "question": "Tell me about a time when you had to explain a complex technical concept to non-technical stakeholders.", 66 | "follow_ups": [ 67 | "How did you tailor your communication?", 68 | "What challenges did you face?", 69 | "How did you ensure they understood?" 70 | ], 71 | "key_points": ["Simplification skills", "Audience awareness", "Clear communication", "Patience"] 72 | }, 73 | { 74 | "question": "Describe a situation where you had to deliver bad news to a client or stakeholder.", 75 | "follow_ups": [ 76 | "How did you prepare for the conversation?", 77 | "What was their reaction?", 78 | "How did you handle their concerns?" 
79 | ], 80 | "key_points": ["Difficult conversations", "Empathy", "Transparency", "Solution-oriented thinking"] 81 | } 82 | ], 83 | "adaptability": [ 84 | { 85 | "question": "Tell me about a time when you had to quickly adapt to a significant change at work.", 86 | "follow_ups": [ 87 | "What was your initial reaction?", 88 | "How did you adjust your approach?", 89 | "What did you learn from the experience?" 90 | ], 91 | "key_points": ["Change management", "Flexibility", "Learning agility", "Resilience"] 92 | }, 93 | { 94 | "question": "Describe a time when you had to learn a new skill or technology quickly to complete a project.", 95 | "follow_ups": [ 96 | "How did you approach the learning process?", 97 | "What resources did you use?", 98 | "How did you apply what you learned?" 99 | ], 100 | "key_points": ["Learning agility", "Self-motivation", "Resource utilization", "Application skills"] 101 | } 102 | ] 103 | }, 104 | "technical_questions": { 105 | "software_engineering": [ 106 | { 107 | "question": "Explain the difference between a stack and a queue, and give real-world examples of when you'd use each.", 108 | "difficulty": "easy", 109 | "key_points": ["Data structure understanding", "LIFO vs FIFO", "Practical applications"], 110 | "follow_ups": [ 111 | "How would you implement a stack using an array?", 112 | "What are the time complexities for stack and queue operations?" 113 | ] 114 | }, 115 | { 116 | "question": "Design a URL shortener like bit.ly. What are the key components and considerations?", 117 | "difficulty": "medium", 118 | "key_points": ["System design", "Scalability", "Database design", "Caching"], 119 | "follow_ups": [ 120 | "How would you handle millions of requests per day?", 121 | "How would you ensure the shortened URLs are unique?", 122 | "What metrics would you track?" 123 | ] 124 | }, 125 | { 126 | "question": "Implement a function to detect if a linked list has a cycle.", 127 | "difficulty": "medium", 128 | "key_points": ["Algorithm design", "Time/space complexity", "Edge cases"], 129 | "follow_ups": [ 130 | "What's the time and space complexity of your solution?", 131 | "Can you do it with constant space?", 132 | "How would you find the start of the cycle?" 133 | ] 134 | } 135 | ], 136 | "data_science": [ 137 | { 138 | "question": "Explain the bias-variance tradeoff and how it affects model performance.", 139 | "difficulty": "medium", 140 | "key_points": ["ML fundamentals", "Model evaluation", "Overfitting/underfitting"], 141 | "follow_ups": [ 142 | "How would you detect high bias vs high variance?", 143 | "What techniques can you use to reduce each?", 144 | "Give an example of a high-bias and high-variance model." 145 | ] 146 | }, 147 | { 148 | "question": "You notice your model performs well on training data but poorly on test data. What could be the issues and how would you address them?", 149 | "difficulty": "easy", 150 | "key_points": ["Overfitting diagnosis", "Model validation", "Practical ML"], 151 | "follow_ups": [ 152 | "What validation techniques would you use?", 153 | "How would you adjust your model?", 154 | "What other data issues might cause this?" 
155 | ] 156 | } 157 | ], 158 | "product_management": [ 159 | { 160 | "question": "How would you prioritize features for a mobile app with limited development resources?", 161 | "difficulty": "medium", 162 | "key_points": ["Prioritization frameworks", "Stakeholder management", "Resource allocation"], 163 | "follow_ups": [ 164 | "What framework would you use for prioritization?", 165 | "How would you gather and validate requirements?", 166 | "How would you communicate decisions to stakeholders?" 167 | ] 168 | }, 169 | { 170 | "question": "A key metric for your product has suddenly dropped by 20%. Walk me through how you would investigate and respond.", 171 | "difficulty": "medium", 172 | "key_points": ["Data analysis", "Root cause analysis", "Crisis response"], 173 | "follow_ups": [ 174 | "What data would you look at first?", 175 | "How would you determine if it's a real issue or measurement error?", 176 | "What stakeholders would you involve?" 177 | ] 178 | } 179 | ] 180 | }, 181 | "case_study_questions": { 182 | "business_strategy": [ 183 | { 184 | "question": "A ride-sharing company is losing market share to competitors. How would you develop a strategy to regain market leadership?", 185 | "time_limit": 30, 186 | "key_areas": ["Market analysis", "Competitive positioning", "Strategic planning", "Implementation"], 187 | "evaluation_criteria": [ 188 | "Structured thinking", 189 | "Market understanding", 190 | "Creative solutions", 191 | "Feasibility assessment" 192 | ] 193 | }, 194 | { 195 | "question": "You're the product manager for a social media platform. User engagement has been declining among teenagers. What would you do?", 196 | "time_limit": 25, 197 | "key_areas": ["User research", "Product strategy", "Feature development", "Metrics"], 198 | "evaluation_criteria": [ 199 | "User-centric thinking", 200 | "Data-driven approach", 201 | "Innovation", 202 | "Measurement plan" 203 | ] 204 | } 205 | ], 206 | "consulting": [ 207 | { 208 | "question": "A traditional retail bank wants to compete with fintech startups. What should their digital transformation strategy be?", 209 | "time_limit": 35, 210 | "key_areas": ["Digital transformation", "Competitive analysis", "Technology strategy", "Change management"], 211 | "evaluation_criteria": [ 212 | "Industry knowledge", 213 | "Strategic thinking", 214 | "Implementation planning", 215 | "Risk assessment" 216 | ] 217 | } 218 | ] 219 | } 220 | } 221 | --------------------------------------------------------------------------------
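To make the two JSON data files above easier to picture in use, here is a minimal, illustrative Python sketch of how an interview-agent tool *might* load `question_bank.json` and `interview_config.json`, pick a random behavioral question, and fold per-criterion scores into one weighted score using the `feedback_criteria` weights (0.3 / 0.25 / 0.2 / 0.15 / 0.1). The paths follow the `app/interview_agent/data/` layout shown above; the helper names and the example scores are hypothetical and are not part of the repository.

```python
# Illustrative only: reads the two JSON data files dumped above and
# computes a weighted feedback score. Helper names are hypothetical.
import json
import random
from pathlib import Path

DATA_DIR = Path("app/interview_agent/data")  # layout as shown in the dump


def load_json(name: str) -> dict:
    """Read one of the agent's JSON data files."""
    return json.loads((DATA_DIR / name).read_text(encoding="utf-8"))


def pick_behavioral_question(bank: dict, competency: str = "leadership") -> dict:
    """Return a random question entry for the given behavioral competency."""
    return random.choice(bank["behavioral_questions"][competency])


def weighted_feedback_score(config: dict, scores: dict) -> float:
    """Combine per-criterion scores (1-10) using the feedback_criteria weights."""
    criteria = config["feedback_criteria"]
    return sum(criteria[name]["weight"] * scores[name] for name in criteria)


if __name__ == "__main__":
    question_bank = load_json("question_bank.json")
    interview_config = load_json("interview_config.json")

    q = pick_behavioral_question(question_bank, "teamwork")
    print("Question:", q["question"])
    print("Follow-ups:", "; ".join(q["follow_ups"]))

    # Example (made-up) per-criterion scores on the 1-10 scale.
    example_scores = {
        "content_quality": 7,
        "structure_clarity": 6,
        "specificity": 8,
        "impact_results": 5,
        "self_awareness": 7,
    }
    score = weighted_feedback_score(interview_config, example_scores)
    print(f"Weighted score: {score:.2f}")
```

Because the five weights in `feedback_criteria` sum to 1.0, the weighted result stays on the same 1-10 scale as the individual scoring guidelines, which keeps it directly comparable across sessions.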