├── full-stack-ai-masterclass ├── README.md ├── 6-agent-ui │ ├── README.md │ ├── tests │ │ └── __init__.py │ ├── 6_agent_ui │ │ └── __init__.py │ ├── __pycache__ │ │ └── agent.cpython-312.pyc │ ├── app-gradio.py │ ├── pyproject.toml │ ├── agent.py │ ├── app-streamlit-markdown.py │ └── app-streamlit-stream.py ├── 7-db-agents │ ├── README.md │ ├── tests │ │ └── __init__.py │ ├── 7_db_agents │ │ └── __init__.py │ └── pyproject.toml ├── .gitignore ├── 5-agent-api │ ├── tests │ │ └── __init__.py │ ├── 5_agent_api │ │ └── __init__.py │ ├── .gitignore │ ├── api.py │ ├── pyproject.toml │ ├── README.md │ └── agent.py └── .env.example ├── ai-in-action ├── .gitignore ├── 1-invoice-ai-agents │ ├── .env.example │ ├── .gitignore │ ├── graph.png │ ├── requirements.txt │ ├── images │ │ └── langchain_interaction_diagram.png │ └── data │ │ └── invoice.md └── 2-diagram-generation-agents │ ├── .env.example │ ├── requirements.txt │ ├── output │ ├── diagram_20250116210127.png │ ├── diagram_20250116210314.png │ ├── diagram_20250116210415.png │ ├── diagram_20250116210512.png │ ├── diagram_20250116210613.png │ ├── diagram_20250116211806.png │ ├── diagram_20250116212021.png │ └── diagram_20250116212155.png │ ├── 2-image-generation-agent.py │ └── 1-diagram-generation-agent.py ├── smolagents ├── .gitignore └── intro │ ├── requirements.txt │ ├── .env.example │ ├── 1-hello-world.py │ ├── 3-tool-calling-agent.py │ └── 2-code-agent.py ├── pydantic-ai-masterclass ├── 9-agent-memory │ ├── .gitignore │ ├── requirements.txt │ ├── .env.example │ ├── 9.2-full-memory.py │ ├── 9.6-hello-world-variant.py │ ├── 9.3-filtered-memory.py │ ├── 9.1-hello-world.py │ ├── 9.4-persist-memory.py │ └── 9.5-multi-agent-memory.py ├── 12-model-settings │ ├── tests │ │ └── __init__.py │ ├── 12-model-settings │ │ └── __init__.py │ ├── .env.example │ ├── pyproject.toml │ └── README.md ├── 10-errors-and-reflection │ ├── .gitignore │ ├── requirements.txt │ ├── .env.example │ ├── 10.3-model-errors.py │ ├── 
10.5-model-errors-switch-model.py │ ├── 10.4-model-errors-bypass-tools.py │ ├── 10.1-hello-world.py │ └── 10.2-self-correction.py ├── 6-result-validator-functions │ ├── .gitignore │ ├── requirements.txt │ ├── .env.example │ ├── 6.1-hello-world.py │ ├── 6.2-context.py │ ├── 6.6-python-validator.py │ ├── 6.5-sql-validator.py │ ├── 6.3-result-type.py │ └── 6.4-multiple-validators.py ├── .gitignore ├── 11-streaming │ ├── requirements.txt │ ├── .env.example │ ├── 11.1-hello-world.py │ ├── 11.2-stream-structured.py │ ├── app-markdown.py │ ├── agent.py │ └── app-streaming.py ├── 8-retries-usage-limits │ ├── requirements.txt │ ├── .env.example │ ├── 8.5-usage-limit-tokens.py │ ├── 8.1-hello-world.py │ ├── 8.2-agent-retries.py │ ├── 8.6-usage-request-limit.py │ ├── 8.3-tool-retries.py │ └── 8.4-result-validator-retries.py ├── 7-dependency-injection │ ├── requirements.txt │ ├── .env.example │ ├── 7.1-hello-world.py │ └── 7.5-combined-deps copy.py ├── 5-tools │ ├── requirements.txt │ ├── .env.example │ ├── 5.1-hello-world.py │ ├── 5.2-tools-plain.py │ ├── 5.3-tools-context.py │ ├── 5.6-tools-griffe.py │ ├── 5.4-tools-kwargs.py │ └── 5.5-tools-prepare.py ├── requirements.txt ├── 2-logfire │ ├── 2.1-hello-world.py │ ├── requirements.txt │ ├── .env.example │ ├── 2.2-span.py │ ├── 2.4-exception.py │ ├── 2.3-levels.py │ └── 2.5-instrument.py ├── 1-introduction │ ├── requirements.txt │ ├── .env.example │ ├── 1.1-hello-world.py │ ├── 1.2-agent-openai.py │ ├── 1.3-agent-ollama.py │ ├── 1.4-agent-azure.py │ └── 1.5-multi-model-agents.py ├── 3-structured-data │ ├── requirements.txt │ ├── .env.example │ ├── 3.1-hello-world.py │ ├── 3.2-capital.py │ ├── data │ │ └── invoice.md │ └── 3.3-invoice.py ├── 4-system-prompts │ ├── requirements.txt │ ├── .env.example │ ├── 4.2-coding-basic.py │ ├── 4.1-hello-world.py │ ├── 4.5-dynamic-basic.py │ ├── 4.6-dynamic-advanced.py │ └── 4.4-invoice.py └── .env.example ├── langgraph ├── conditional-edges │ ├── .gitignore │ ├── graph.png │ ├── 
requirements.txt │ ├── .env.example │ └── README.md ├── multiagent-graph │ ├── .gitignore │ ├── graph.png │ ├── img │ │ ├── user.png │ │ ├── analyst.png │ │ ├── diagram.png │ │ ├── review.png │ │ ├── summary.png │ │ ├── tester.png │ │ ├── architect.png │ │ └── developer.png │ ├── requirements.txt │ ├── .env.example │ ├── graph_ui.py │ └── README.md ├── structured-output │ ├── .gitignore │ ├── graph.png │ ├── requirements.txt │ ├── .env.example │ └── README.md ├── agents-financial-research │ ├── .gitignore │ ├── data │ │ ├── stocks.txt │ │ ├── unemployment_rates.csv │ │ ├── GDP_data.csv │ │ ├── inflation_rates.csv │ │ ├── unemployment_data.csv │ │ ├── average_gdp_2023.txt │ │ ├── bar_chart.png │ │ ├── cost_salary_comparison.csv │ │ ├── gdp_chart.png │ │ ├── gas_gdp_chart.png │ │ ├── dividends_chart.png │ │ ├── gdp_comparison.png │ │ ├── copper_gdp_chart.png │ │ ├── gdp_growth_table.png │ │ ├── natural_gas_prices.png │ │ ├── sf_november_temps.png │ │ ├── cost_of_living_chart.png │ │ ├── cost_vs_salary_plot.png │ │ ├── gas_gdp_correlation.png │ │ ├── gdp_comparison_chart.png │ │ ├── net_worth_dot_chart.png │ │ ├── numbers_and_squares.csv │ │ ├── cost_salary_comparison.png │ │ ├── gdp_germany_2000_2010.png │ │ ├── gdp_growth_comparison.png │ │ ├── inflation_rates_chart.png │ │ ├── natural_gas_gdp_germany.png │ │ ├── cost_of_living_comparison.csv │ │ ├── cost_of_living_comparison.png │ │ ├── gdp_comparison_chart_2023.png │ │ ├── natural_gas_prices_chart.png │ │ ├── unemployment_rates_chart.png │ │ ├── cost_of_living_salary_plot.png │ │ ├── germany_gas_gdp_correlation.png │ │ ├── natural_gas_prices_5_years.png │ │ ├── predicted_natural_gas_prices.png │ │ ├── cost_of_living_and_salaries.csv │ │ ├── cost_of_living_vs_salary_plot.png │ │ ├── natural_gas_prices_germany_gdp.png │ │ ├── average_salary_software_engineers.png │ │ ├── us_gender_distribution_pie_chart.png │ │ ├── stockholm_cost_of_living_pie_chart.png │ │ ├── cost_of_living.csv │ │ ├── cost_of_living_salary.csv │ 
│ ├── net_worth_comparison.csv │ │ ├── average_salary_software_engineers.csv │ │ ├── numbers_and_squares.txt │ │ ├── cost_of_living_vs_salary.csv │ │ ├── comparison_net_worth_US_Germany.txt │ │ ├── top_expensive_cities.csv │ │ ├── gdp_bar_chart.html │ │ └── Reasons_for_Low_Inflation_Rate_in_Switzerland.md │ ├── graph.png │ ├── .env.example │ ├── requirements.txt │ └── README.md └── state │ ├── tutorial-1 │ ├── graph.png │ ├── requirements.txt │ └── state.py │ ├── tutorial-2 │ ├── graph.png │ ├── requirements.txt │ └── state.py │ ├── tutorial-3 │ ├── graph.png │ ├── requirements.txt │ └── state.py │ └── README.md ├── pydanticai-graph-deepseek-r1 ├── .gitignore ├── .env.example └── requirements.txt ├── dify-101 └── flu-remedies.pdf ├── streamlit-chatbot-ui ├── data │ └── dolphins.pdf ├── db │ └── README.md ├── requirements.txt ├── .env.example ├── README.md ├── models.py ├── chat.py ├── html-to-pdf.py ├── chatbot_ui.py ├── ingest.py └── qa.md ├── langchain-rag-pdf └── tutorial-1 │ ├── db │ └── README.md │ ├── data │ └── dolphins.pdf │ ├── requirements.txt │ ├── .env.example │ ├── models.py │ ├── chat.py │ ├── README.md │ ├── html-to-pdf.py │ ├── ingest.py │ └── qa.md ├── notebooklm ├── tutorial-3 │ ├── ChatGPT_Prompt_Test_Strategy_Document.md │ └── ChatGPT_Prompt_User_Story.md ├── README.md └── tutorial-2 │ ├── prompt_app.md │ ├── prompt_reviews.md │ └── SafeBase_App_Store_Description.md ├── langgraph-intro └── README.md ├── langchain-rag ├── requirements.txt ├── .env.template ├── supabase_setup.sql └── ingest.py ├── python-local-setup ├── README.md └── fibonacci.py ├── langchain-ollama ├── README.md ├── js │ └── SignUpForm.js └── langollama-simple.ipynb ├── ollama-local-setup └── README.md ├── jupyter-vscode └── hello_world.ipynb ├── LICENSE ├── jupyter-online └── circle.ipynb └── README.md /full-stack-ai-masterclass/README.md: -------------------------------------------------------------------------------- 1 | 
-------------------------------------------------------------------------------- /ai-in-action/.gitignore: -------------------------------------------------------------------------------- 1 | .DS_Store 2 | .env -------------------------------------------------------------------------------- /smolagents/.gitignore: -------------------------------------------------------------------------------- 1 | .DS_Store 2 | .env -------------------------------------------------------------------------------- /full-stack-ai-masterclass/6-agent-ui/README.md: -------------------------------------------------------------------------------- 1 | -------------------------------------------------------------------------------- /full-stack-ai-masterclass/7-db-agents/README.md: -------------------------------------------------------------------------------- 1 | -------------------------------------------------------------------------------- /full-stack-ai-masterclass/.gitignore: -------------------------------------------------------------------------------- 1 | .env 2 | .DS_Store -------------------------------------------------------------------------------- /full-stack-ai-masterclass/5-agent-api/tests/__init__.py: -------------------------------------------------------------------------------- 1 | -------------------------------------------------------------------------------- /full-stack-ai-masterclass/6-agent-ui/tests/__init__.py: -------------------------------------------------------------------------------- 1 | -------------------------------------------------------------------------------- /full-stack-ai-masterclass/7-db-agents/tests/__init__.py: -------------------------------------------------------------------------------- 1 | -------------------------------------------------------------------------------- /pydantic-ai-masterclass/9-agent-memory/.gitignore: -------------------------------------------------------------------------------- 1 | *.pickle 
-------------------------------------------------------------------------------- /smolagents/intro/requirements.txt: -------------------------------------------------------------------------------- 1 | smolagents 2 | litellm -------------------------------------------------------------------------------- /ai-in-action/1-invoice-ai-agents/.env.example: -------------------------------------------------------------------------------- 1 | OPENAI_API_KEY="" -------------------------------------------------------------------------------- /ai-in-action/1-invoice-ai-agents/.gitignore: -------------------------------------------------------------------------------- 1 | .DS_Store 2 | .env -------------------------------------------------------------------------------- /full-stack-ai-masterclass/5-agent-api/5_agent_api/__init__.py: -------------------------------------------------------------------------------- 1 | -------------------------------------------------------------------------------- /full-stack-ai-masterclass/6-agent-ui/6_agent_ui/__init__.py: -------------------------------------------------------------------------------- 1 | -------------------------------------------------------------------------------- /full-stack-ai-masterclass/7-db-agents/7_db_agents/__init__.py: -------------------------------------------------------------------------------- 1 | -------------------------------------------------------------------------------- /pydantic-ai-masterclass/12-model-settings/tests/__init__.py: -------------------------------------------------------------------------------- 1 | -------------------------------------------------------------------------------- /ai-in-action/2-diagram-generation-agents/.env.example: -------------------------------------------------------------------------------- 1 | OPENAI_API_KEY="" -------------------------------------------------------------------------------- /langgraph/conditional-edges/.gitignore: 
-------------------------------------------------------------------------------- 1 | __pycache__ 2 | .env 3 | .DS_Store -------------------------------------------------------------------------------- /langgraph/multiagent-graph/.gitignore: -------------------------------------------------------------------------------- 1 | __pycache__ 2 | .env 3 | .DS_Store -------------------------------------------------------------------------------- /langgraph/structured-output/.gitignore: -------------------------------------------------------------------------------- 1 | __pycache__ 2 | .env 3 | .DS_Store -------------------------------------------------------------------------------- /pydantic-ai-masterclass/12-model-settings/12-model-settings/__init__.py: -------------------------------------------------------------------------------- 1 | -------------------------------------------------------------------------------- /pydanticai-graph-deepseek-r1/.gitignore: -------------------------------------------------------------------------------- 1 | .env 2 | .logfire 3 | .DS_Store -------------------------------------------------------------------------------- /langgraph/agents-financial-research/.gitignore: -------------------------------------------------------------------------------- 1 | __pycache__ 2 | .env 3 | .DS_Store -------------------------------------------------------------------------------- /pydantic-ai-masterclass/10-errors-and-reflection/.gitignore: -------------------------------------------------------------------------------- 1 | *.pickle 2 | *.sqlite -------------------------------------------------------------------------------- /pydantic-ai-masterclass/6-result-validator-functions/.gitignore: -------------------------------------------------------------------------------- 1 | .sqlite 2 | .env -------------------------------------------------------------------------------- /pydanticai-graph-deepseek-r1/.env.example: 
-------------------------------------------------------------------------------- 1 | OPENAI_API_KEY="" 2 | TAVILY_API_KEY="" -------------------------------------------------------------------------------- /langgraph/agents-financial-research/data/stocks.txt: -------------------------------------------------------------------------------- 1 | Microsoft 2 | Apple 3 | Tesla 4 | Nvidia -------------------------------------------------------------------------------- /pydantic-ai-masterclass/.gitignore: -------------------------------------------------------------------------------- 1 | .env 2 | .logfire 3 | .DS_Store 4 | .sqlite 5 | __pycache__ -------------------------------------------------------------------------------- /pydantic-ai-masterclass/12-model-settings/.env.example: -------------------------------------------------------------------------------- 1 | OPENAI_API_KEY="" 2 | GEMINI_API_KEY="" -------------------------------------------------------------------------------- /dify-101/flu-remedies.pdf: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/aidev9/tuts/HEAD/dify-101/flu-remedies.pdf -------------------------------------------------------------------------------- /smolagents/intro/.env.example: -------------------------------------------------------------------------------- 1 | HUGGINGFACE_TOKEN="your_token_here" 2 | OPENAI_API_KEY="your_key" -------------------------------------------------------------------------------- /full-stack-ai-masterclass/5-agent-api/.gitignore: -------------------------------------------------------------------------------- 1 | .env 2 | .logfire 3 | .DS_Store 4 | .sqlite 5 | __pycache__ -------------------------------------------------------------------------------- /full-stack-ai-masterclass/.env.example: -------------------------------------------------------------------------------- 1 | OPENAI_API_KEY="" 2 | GROQ_API_KEY="" 3 | SUPABASE_URL="" 4 | 
SUPABASE_KEY="" -------------------------------------------------------------------------------- /langgraph/conditional-edges/graph.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/aidev9/tuts/HEAD/langgraph/conditional-edges/graph.png -------------------------------------------------------------------------------- /langgraph/multiagent-graph/graph.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/aidev9/tuts/HEAD/langgraph/multiagent-graph/graph.png -------------------------------------------------------------------------------- /langgraph/state/tutorial-1/graph.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/aidev9/tuts/HEAD/langgraph/state/tutorial-1/graph.png -------------------------------------------------------------------------------- /langgraph/state/tutorial-2/graph.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/aidev9/tuts/HEAD/langgraph/state/tutorial-2/graph.png -------------------------------------------------------------------------------- /langgraph/state/tutorial-3/graph.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/aidev9/tuts/HEAD/langgraph/state/tutorial-3/graph.png -------------------------------------------------------------------------------- /langgraph/structured-output/graph.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/aidev9/tuts/HEAD/langgraph/structured-output/graph.png -------------------------------------------------------------------------------- /streamlit-chatbot-ui/data/dolphins.pdf: -------------------------------------------------------------------------------- 
https://raw.githubusercontent.com/aidev9/tuts/HEAD/streamlit-chatbot-ui/data/dolphins.pdf -------------------------------------------------------------------------------- /streamlit-chatbot-ui/db/README.md: -------------------------------------------------------------------------------- 1 | ### README 2 | 3 | Database files, SQLite, ChromaDB will be stored in this folder 4 | -------------------------------------------------------------------------------- /langchain-rag-pdf/tutorial-1/db/README.md: -------------------------------------------------------------------------------- 1 | ### README 2 | 3 | Database files, SQLite, ChromaDB will be stored in this folder 4 | -------------------------------------------------------------------------------- /langgraph/multiagent-graph/img/user.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/aidev9/tuts/HEAD/langgraph/multiagent-graph/img/user.png -------------------------------------------------------------------------------- /ai-in-action/1-invoice-ai-agents/graph.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/aidev9/tuts/HEAD/ai-in-action/1-invoice-ai-agents/graph.png -------------------------------------------------------------------------------- /langgraph/multiagent-graph/img/analyst.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/aidev9/tuts/HEAD/langgraph/multiagent-graph/img/analyst.png -------------------------------------------------------------------------------- /langgraph/multiagent-graph/img/diagram.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/aidev9/tuts/HEAD/langgraph/multiagent-graph/img/diagram.png -------------------------------------------------------------------------------- /langgraph/multiagent-graph/img/review.png: 
-------------------------------------------------------------------------------- https://raw.githubusercontent.com/aidev9/tuts/HEAD/langgraph/multiagent-graph/img/review.png -------------------------------------------------------------------------------- /langgraph/multiagent-graph/img/summary.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/aidev9/tuts/HEAD/langgraph/multiagent-graph/img/summary.png -------------------------------------------------------------------------------- /langgraph/multiagent-graph/img/tester.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/aidev9/tuts/HEAD/langgraph/multiagent-graph/img/tester.png -------------------------------------------------------------------------------- /langgraph/agents-financial-research/data/unemployment_rates.csv: -------------------------------------------------------------------------------- 1 | Country,Unemployment Rate 2 | OECD Average,4.8 3 | United States,3.7 4 | -------------------------------------------------------------------------------- /langgraph/agents-financial-research/graph.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/aidev9/tuts/HEAD/langgraph/agents-financial-research/graph.png -------------------------------------------------------------------------------- /langgraph/multiagent-graph/img/architect.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/aidev9/tuts/HEAD/langgraph/multiagent-graph/img/architect.png -------------------------------------------------------------------------------- /langgraph/multiagent-graph/img/developer.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/aidev9/tuts/HEAD/langgraph/multiagent-graph/img/developer.png 
-------------------------------------------------------------------------------- /ai-in-action/2-diagram-generation-agents/requirements.txt: -------------------------------------------------------------------------------- 1 | colorama 2 | python-dotenv 3 | typing 4 | smolagents 5 | litellm 6 | mermaid-py 7 | -------------------------------------------------------------------------------- /langchain-rag-pdf/tutorial-1/data/dolphins.pdf: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/aidev9/tuts/HEAD/langchain-rag-pdf/tutorial-1/data/dolphins.pdf -------------------------------------------------------------------------------- /langgraph/agents-financial-research/data/GDP_data.csv: -------------------------------------------------------------------------------- 1 | Country,GDP (in billion USD) 2 | Germany,4456.08 3 | Switzerland,884.94 4 | Austria,516.03 -------------------------------------------------------------------------------- /langgraph/agents-financial-research/data/inflation_rates.csv: -------------------------------------------------------------------------------- 1 | Country,Inflation Rate 2 | Germany,3.8 3 | Switzerland,2.1 4 | Austria,7.8 5 | -------------------------------------------------------------------------------- /langgraph/agents-financial-research/data/unemployment_data.csv: -------------------------------------------------------------------------------- 1 | Country,Unemployment Rate (2023) 2 | OECD Average,4.8% 3 | United States,3.7% -------------------------------------------------------------------------------- /pydantic-ai-masterclass/11-streaming/requirements.txt: -------------------------------------------------------------------------------- 1 | pydantic-ai 2 | openai 3 | colorama 4 | dataclasses 5 | asyncio 6 | devtools 7 | streamlit -------------------------------------------------------------------------------- 
/pydantic-ai-masterclass/8-retries-usage-limits/requirements.txt: -------------------------------------------------------------------------------- 1 | pydantic-ai 2 | openai 3 | colorama 4 | dataclasses 5 | asyncio 6 | devtools -------------------------------------------------------------------------------- /pydantic-ai-masterclass/9-agent-memory/requirements.txt: -------------------------------------------------------------------------------- 1 | pickle 2 | pydantic-ai 3 | openai 4 | colorama 5 | dataclasses 6 | asyncio 7 | devtools -------------------------------------------------------------------------------- /langgraph/multiagent-graph/requirements.txt: -------------------------------------------------------------------------------- 1 | python-dotenv 2 | langgraph 3 | langchain_openai 4 | langchain-core 5 | pydantic 6 | streamlit 7 | typing -------------------------------------------------------------------------------- /langgraph/state/tutorial-1/requirements.txt: -------------------------------------------------------------------------------- 1 | typing 2 | langgraph 3 | langgraph-checkpoint-sqlite 4 | langchain-ollama 5 | langchain-core 6 | pydantic -------------------------------------------------------------------------------- /langgraph/state/tutorial-2/requirements.txt: -------------------------------------------------------------------------------- 1 | typing 2 | langgraph 3 | langgraph-checkpoint-sqlite 4 | langchain-ollama 5 | langchain-core 6 | pydantic -------------------------------------------------------------------------------- /langgraph/state/tutorial-3/requirements.txt: -------------------------------------------------------------------------------- 1 | typing 2 | langgraph 3 | langgraph-checkpoint-sqlite 4 | langchain-ollama 5 | langchain-core 6 | pydantic -------------------------------------------------------------------------------- /ai-in-action/1-invoice-ai-agents/requirements.txt: 
-------------------------------------------------------------------------------- 1 | python-dotenv 2 | typing 3 | langgraph 4 | langchain-openai 5 | langchain-core 6 | pydantic 7 | colorama -------------------------------------------------------------------------------- /langgraph/agents-financial-research/data/average_gdp_2023.txt: -------------------------------------------------------------------------------- 1 | The average GDP of New York and California in 2023 is approximately 3,036,005 million USD. -------------------------------------------------------------------------------- /notebooklm/tutorial-3/ChatGPT_Prompt_Test_Strategy_Document.md: -------------------------------------------------------------------------------- 1 | Based on this, please create a detailed Test Plan, covering all aspects of the app. 2 | -------------------------------------------------------------------------------- /pydantic-ai-masterclass/7-dependency-injection/requirements.txt: -------------------------------------------------------------------------------- 1 | pydantic-ai 2 | openai 3 | colorama 4 | dataclasses 5 | asyncio 6 | devtools 7 | yfinance -------------------------------------------------------------------------------- /langgraph/agents-financial-research/data/bar_chart.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/aidev9/tuts/HEAD/langgraph/agents-financial-research/data/bar_chart.png -------------------------------------------------------------------------------- /langgraph/agents-financial-research/data/cost_salary_comparison.csv: -------------------------------------------------------------------------------- 1 | City,Cost of Living,Average Salary 2 | San Francisco,3781,7795 3 | Zurich,3392,6770 4 | -------------------------------------------------------------------------------- /langgraph/agents-financial-research/data/gdp_chart.png: 
-------------------------------------------------------------------------------- https://raw.githubusercontent.com/aidev9/tuts/HEAD/langgraph/agents-financial-research/data/gdp_chart.png -------------------------------------------------------------------------------- /pydantic-ai-masterclass/10-errors-and-reflection/requirements.txt: -------------------------------------------------------------------------------- 1 | pydantic-ai 2 | openai 3 | colorama 4 | dataclasses 5 | asyncio 6 | devtools 7 | aiosqlite -------------------------------------------------------------------------------- /langgraph-intro/README.md: -------------------------------------------------------------------------------- 1 | ## Intro to LangGraph 2 | 3 | ### Watch the YouTube tutorial 4 | 5 | [https://youtu.be/LhpNsjuffWg](https://youtu.be/LhpNsjuffWg) 6 | -------------------------------------------------------------------------------- /langgraph/agents-financial-research/data/gas_gdp_chart.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/aidev9/tuts/HEAD/langgraph/agents-financial-research/data/gas_gdp_chart.png -------------------------------------------------------------------------------- /langchain-rag/requirements.txt: -------------------------------------------------------------------------------- 1 | langchain-core 2 | langchain-community 3 | langchain-openai 4 | langchain_ollama 5 | # supabase-py 6 | supabase 7 | python-dotenv 8 | nltk -------------------------------------------------------------------------------- /langgraph/agents-financial-research/data/dividends_chart.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/aidev9/tuts/HEAD/langgraph/agents-financial-research/data/dividends_chart.png -------------------------------------------------------------------------------- /langgraph/agents-financial-research/data/gdp_comparison.png: 
-------------------------------------------------------------------------------- https://raw.githubusercontent.com/aidev9/tuts/HEAD/langgraph/agents-financial-research/data/gdp_comparison.png -------------------------------------------------------------------------------- /langgraph/agents-financial-research/data/copper_gdp_chart.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/aidev9/tuts/HEAD/langgraph/agents-financial-research/data/copper_gdp_chart.png -------------------------------------------------------------------------------- /langgraph/agents-financial-research/data/gdp_growth_table.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/aidev9/tuts/HEAD/langgraph/agents-financial-research/data/gdp_growth_table.png -------------------------------------------------------------------------------- /langgraph/agents-financial-research/data/natural_gas_prices.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/aidev9/tuts/HEAD/langgraph/agents-financial-research/data/natural_gas_prices.png -------------------------------------------------------------------------------- /langgraph/agents-financial-research/data/sf_november_temps.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/aidev9/tuts/HEAD/langgraph/agents-financial-research/data/sf_november_temps.png -------------------------------------------------------------------------------- /langgraph/agents-financial-research/.env.example: -------------------------------------------------------------------------------- 1 | TAVILY_API_KEY="" 2 | ALPHAVANTAGE_API_KEY="" 3 | LANGCHAIN_TRACING_V2="true" 4 | LANGCHAIN_ENDPOINT="https://api.smith.langchain.com" 5 | LANGCHAIN_API_KEY="" -------------------------------------------------------------------------------- 
/langgraph/agents-financial-research/data/cost_of_living_chart.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/aidev9/tuts/HEAD/langgraph/agents-financial-research/data/cost_of_living_chart.png -------------------------------------------------------------------------------- /langgraph/agents-financial-research/data/cost_vs_salary_plot.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/aidev9/tuts/HEAD/langgraph/agents-financial-research/data/cost_vs_salary_plot.png -------------------------------------------------------------------------------- /langgraph/agents-financial-research/data/gas_gdp_correlation.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/aidev9/tuts/HEAD/langgraph/agents-financial-research/data/gas_gdp_correlation.png -------------------------------------------------------------------------------- /langgraph/agents-financial-research/data/gdp_comparison_chart.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/aidev9/tuts/HEAD/langgraph/agents-financial-research/data/gdp_comparison_chart.png -------------------------------------------------------------------------------- /langgraph/agents-financial-research/data/net_worth_dot_chart.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/aidev9/tuts/HEAD/langgraph/agents-financial-research/data/net_worth_dot_chart.png -------------------------------------------------------------------------------- /langgraph/agents-financial-research/data/numbers_and_squares.csv: -------------------------------------------------------------------------------- 1 | Number,Square 2 | 1,1 3 | 2,4 4 | 3,9 5 | 4,16 6 | 5,25 7 | 6,36 8 | 7,49 9 | 8,64 10 | 9,81 11 | 10,100 
-------------------------------------------------------------------------------- /langgraph/agents-financial-research/data/cost_salary_comparison.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/aidev9/tuts/HEAD/langgraph/agents-financial-research/data/cost_salary_comparison.png -------------------------------------------------------------------------------- /langgraph/agents-financial-research/data/gdp_germany_2000_2010.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/aidev9/tuts/HEAD/langgraph/agents-financial-research/data/gdp_germany_2000_2010.png -------------------------------------------------------------------------------- /langgraph/agents-financial-research/data/gdp_growth_comparison.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/aidev9/tuts/HEAD/langgraph/agents-financial-research/data/gdp_growth_comparison.png -------------------------------------------------------------------------------- /langgraph/agents-financial-research/data/inflation_rates_chart.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/aidev9/tuts/HEAD/langgraph/agents-financial-research/data/inflation_rates_chart.png -------------------------------------------------------------------------------- /langgraph/agents-financial-research/data/natural_gas_gdp_germany.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/aidev9/tuts/HEAD/langgraph/agents-financial-research/data/natural_gas_gdp_germany.png -------------------------------------------------------------------------------- /pydantic-ai-masterclass/5-tools/requirements.txt: -------------------------------------------------------------------------------- 1 | pydantic-ai==0.0.16 2 | 
openai==1.58.1 3 | colorama==0.4.6 4 | dataclasses 5 | asyncio 6 | devtools==0.12.2 7 | html-to-markdown==1.1.0 -------------------------------------------------------------------------------- /full-stack-ai-masterclass/6-agent-ui/__pycache__/agent.cpython-312.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/aidev9/tuts/HEAD/full-stack-ai-masterclass/6-agent-ui/__pycache__/agent.cpython-312.pyc -------------------------------------------------------------------------------- /langgraph/agents-financial-research/data/cost_of_living_comparison.csv: -------------------------------------------------------------------------------- 1 | City,Average Cost of Living,Median After-Tax Salary 2 | "Washington, DC",3139,5811 3 | San Francisco,3781,7795 4 | -------------------------------------------------------------------------------- /langgraph/agents-financial-research/data/cost_of_living_comparison.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/aidev9/tuts/HEAD/langgraph/agents-financial-research/data/cost_of_living_comparison.png -------------------------------------------------------------------------------- /langgraph/agents-financial-research/data/gdp_comparison_chart_2023.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/aidev9/tuts/HEAD/langgraph/agents-financial-research/data/gdp_comparison_chart_2023.png -------------------------------------------------------------------------------- /langgraph/agents-financial-research/data/natural_gas_prices_chart.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/aidev9/tuts/HEAD/langgraph/agents-financial-research/data/natural_gas_prices_chart.png -------------------------------------------------------------------------------- 
/langgraph/agents-financial-research/data/unemployment_rates_chart.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/aidev9/tuts/HEAD/langgraph/agents-financial-research/data/unemployment_rates_chart.png -------------------------------------------------------------------------------- /langgraph/conditional-edges/requirements.txt: -------------------------------------------------------------------------------- 1 | python-dotenv 2 | typing 3 | langgraph 4 | langgraph-checkpoint-sqlite 5 | langchain-ollama 6 | langchain_openai 7 | langchain-core 8 | pydantic -------------------------------------------------------------------------------- /langgraph/structured-output/requirements.txt: -------------------------------------------------------------------------------- 1 | python-dotenv 2 | typing 3 | langgraph 4 | langgraph-checkpoint-sqlite 5 | langchain-ollama 6 | langchain_openai 7 | langchain-core 8 | pydantic -------------------------------------------------------------------------------- /pydantic-ai-masterclass/requirements.txt: -------------------------------------------------------------------------------- 1 | pydantic-ai==0.0.15 2 | openai==1.58.1 3 | colorama==0.4.6 4 | dataclasses==0.6 5 | asyncio==3.4.3 6 | devtools==0.12.2 7 | html-to-markdown==1.1.0 -------------------------------------------------------------------------------- /ai-in-action/1-invoice-ai-agents/images/langchain_interaction_diagram.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/aidev9/tuts/HEAD/ai-in-action/1-invoice-ai-agents/images/langchain_interaction_diagram.png -------------------------------------------------------------------------------- /langgraph/agents-financial-research/data/cost_of_living_salary_plot.png: -------------------------------------------------------------------------------- 
https://raw.githubusercontent.com/aidev9/tuts/HEAD/langgraph/agents-financial-research/data/cost_of_living_salary_plot.png -------------------------------------------------------------------------------- /langgraph/agents-financial-research/data/germany_gas_gdp_correlation.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/aidev9/tuts/HEAD/langgraph/agents-financial-research/data/germany_gas_gdp_correlation.png -------------------------------------------------------------------------------- /langgraph/agents-financial-research/data/natural_gas_prices_5_years.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/aidev9/tuts/HEAD/langgraph/agents-financial-research/data/natural_gas_prices_5_years.png -------------------------------------------------------------------------------- /langgraph/agents-financial-research/data/predicted_natural_gas_prices.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/aidev9/tuts/HEAD/langgraph/agents-financial-research/data/predicted_natural_gas_prices.png -------------------------------------------------------------------------------- /pydantic-ai-masterclass/2-logfire/2.1-hello-world.py: -------------------------------------------------------------------------------- 1 | import logfire 2 | 3 | # Configure logfire 4 | logfire.configure() 5 | 6 | # Send a log 7 | logfire.info('Hello, {name}!', name='world') -------------------------------------------------------------------------------- /streamlit-chatbot-ui/requirements.txt: -------------------------------------------------------------------------------- 1 | langchain-core 2 | langchain-community 3 | langchain_ollama 4 | langchain-openai 5 | langchain_chroma 6 | python-dotenv 7 | pyppeteer 8 | pypdf 9 | uuid 
-------------------------------------------------------------------------------- /ai-in-action/2-diagram-generation-agents/output/diagram_20250116210127.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/aidev9/tuts/HEAD/ai-in-action/2-diagram-generation-agents/output/diagram_20250116210127.png -------------------------------------------------------------------------------- /ai-in-action/2-diagram-generation-agents/output/diagram_20250116210314.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/aidev9/tuts/HEAD/ai-in-action/2-diagram-generation-agents/output/diagram_20250116210314.png -------------------------------------------------------------------------------- /ai-in-action/2-diagram-generation-agents/output/diagram_20250116210415.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/aidev9/tuts/HEAD/ai-in-action/2-diagram-generation-agents/output/diagram_20250116210415.png -------------------------------------------------------------------------------- /ai-in-action/2-diagram-generation-agents/output/diagram_20250116210512.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/aidev9/tuts/HEAD/ai-in-action/2-diagram-generation-agents/output/diagram_20250116210512.png -------------------------------------------------------------------------------- /ai-in-action/2-diagram-generation-agents/output/diagram_20250116210613.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/aidev9/tuts/HEAD/ai-in-action/2-diagram-generation-agents/output/diagram_20250116210613.png -------------------------------------------------------------------------------- /ai-in-action/2-diagram-generation-agents/output/diagram_20250116211806.png: 
-------------------------------------------------------------------------------- https://raw.githubusercontent.com/aidev9/tuts/HEAD/ai-in-action/2-diagram-generation-agents/output/diagram_20250116211806.png -------------------------------------------------------------------------------- /ai-in-action/2-diagram-generation-agents/output/diagram_20250116212021.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/aidev9/tuts/HEAD/ai-in-action/2-diagram-generation-agents/output/diagram_20250116212021.png -------------------------------------------------------------------------------- /ai-in-action/2-diagram-generation-agents/output/diagram_20250116212155.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/aidev9/tuts/HEAD/ai-in-action/2-diagram-generation-agents/output/diagram_20250116212155.png -------------------------------------------------------------------------------- /langchain-rag-pdf/tutorial-1/requirements.txt: -------------------------------------------------------------------------------- 1 | langchain-core 2 | langchain-community 3 | langchain_ollama 4 | langchain-openai 5 | langchain_chroma 6 | python-dotenv 7 | pyppeteer 8 | pypdf 9 | uuid -------------------------------------------------------------------------------- /langgraph/agents-financial-research/data/cost_of_living_and_salaries.csv: -------------------------------------------------------------------------------- 1 | City, Cost of Living Rank, Average Salary (USD) 2 | Zurich, 1, 113579.20 3 | New York, 2, 91000 4 | Singapore, 1, 60000 5 | -------------------------------------------------------------------------------- /langgraph/agents-financial-research/data/cost_of_living_vs_salary_plot.png: -------------------------------------------------------------------------------- 
https://raw.githubusercontent.com/aidev9/tuts/HEAD/langgraph/agents-financial-research/data/cost_of_living_vs_salary_plot.png -------------------------------------------------------------------------------- /langgraph/agents-financial-research/data/natural_gas_prices_germany_gdp.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/aidev9/tuts/HEAD/langgraph/agents-financial-research/data/natural_gas_prices_germany_gdp.png -------------------------------------------------------------------------------- /pydantic-ai-masterclass/2-logfire/requirements.txt: -------------------------------------------------------------------------------- 1 | pydantic-ai==0.0.15 2 | openai==1.58.1 3 | colorama==0.4.6 4 | dataclasses==0.6 5 | asyncio==3.4.3 6 | devtools==0.12.2 7 | html-to-markdown==1.1.0 -------------------------------------------------------------------------------- /langgraph/agents-financial-research/data/average_salary_software_engineers.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/aidev9/tuts/HEAD/langgraph/agents-financial-research/data/average_salary_software_engineers.png -------------------------------------------------------------------------------- /langgraph/agents-financial-research/data/us_gender_distribution_pie_chart.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/aidev9/tuts/HEAD/langgraph/agents-financial-research/data/us_gender_distribution_pie_chart.png -------------------------------------------------------------------------------- /pydantic-ai-masterclass/1-introduction/requirements.txt: -------------------------------------------------------------------------------- 1 | pydantic-ai==0.0.15 2 | openai==1.58.1 3 | colorama==0.4.6 4 | dataclasses==0.6 5 | asyncio==3.4.3 6 | devtools==0.12.2 7 | html-to-markdown==1.1.0 
-------------------------------------------------------------------------------- /python-local-setup/README.md: -------------------------------------------------------------------------------- 1 | ## How to setup a local Python development environment 2 | 3 | ### Watch the YouTube tutorial 4 | 5 | [https://youtu.be/3pbFb7X2ObU](https://youtu.be/3pbFb7X2ObU) 6 | 7 | -------------------------------------------------------------------------------- /langgraph/agents-financial-research/data/stockholm_cost_of_living_pie_chart.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/aidev9/tuts/HEAD/langgraph/agents-financial-research/data/stockholm_cost_of_living_pie_chart.png -------------------------------------------------------------------------------- /langgraph/agents-financial-research/requirements.txt: -------------------------------------------------------------------------------- 1 | python-dotenv 2 | typing 3 | langgraph 4 | langchain 5 | langchain_openai 6 | langchain_experimental 7 | langchain-core 8 | langchain-community 9 | pydantic -------------------------------------------------------------------------------- /pydantic-ai-masterclass/3-structured-data/requirements.txt: -------------------------------------------------------------------------------- 1 | pydantic-ai==0.0.15 2 | openai==1.58.1 3 | colorama==0.4.6 4 | dataclasses==0.6 5 | asyncio==3.4.3 6 | devtools==0.12.2 7 | html-to-markdown==1.1.0 -------------------------------------------------------------------------------- /pydantic-ai-masterclass/4-system-prompts/requirements.txt: -------------------------------------------------------------------------------- 1 | pydantic-ai==0.0.15 2 | openai==1.58.1 3 | colorama==0.4.6 4 | dataclasses==0.6 5 | asyncio==3.4.3 6 | devtools==0.12.2 7 | html-to-markdown==1.1.0 -------------------------------------------------------------------------------- 
/full-stack-ai-masterclass/6-agent-ui/app-gradio.py: -------------------------------------------------------------------------------- 1 | import gradio as gr 2 | from agent import get_response_with_history 3 | 4 | gr.ChatInterface( 5 | fn=get_response_with_history, 6 | type="messages" 7 | ).launch() -------------------------------------------------------------------------------- /langgraph/agents-financial-research/data/cost_of_living.csv: -------------------------------------------------------------------------------- 1 | City,Median Home Price,Median Monthly Rent,Average Annual Salary 2 | San Francisco,$1,500,000,N/A,N/A 3 | Miami,$889,590,$1,856,$64,080 4 | Salinas,$642,004,$1,574,$74,108 -------------------------------------------------------------------------------- /langgraph/conditional-edges/.env.example: -------------------------------------------------------------------------------- 1 | AZURE_OPENAI_ENDPOINT= 2 | AZURE_OPENAI_API_KEY= 3 | AZURE_OPENAI_API_VERSION= 4 | AZURE_OPENAI_API_DEPLOYMENT_NAME= 5 | 6 | LANGCHAIN_TRACING_V2="https://api.smith.langchain.com" 7 | LANGCHAIN_API_KEY= -------------------------------------------------------------------------------- /langgraph/multiagent-graph/.env.example: -------------------------------------------------------------------------------- 1 | AZURE_OPENAI_ENDPOINT= 2 | AZURE_OPENAI_API_KEY= 3 | AZURE_OPENAI_API_VERSION= 4 | AZURE_OPENAI_API_DEPLOYMENT_NAME= 5 | 6 | LANGCHAIN_TRACING_V2="https://api.smith.langchain.com" 7 | LANGCHAIN_API_KEY= -------------------------------------------------------------------------------- /langgraph/structured-output/.env.example: -------------------------------------------------------------------------------- 1 | AZURE_OPENAI_ENDPOINT= 2 | AZURE_OPENAI_API_KEY= 3 | AZURE_OPENAI_API_VERSION= 4 | AZURE_OPENAI_API_DEPLOYMENT_NAME= 5 | 6 | LANGCHAIN_TRACING_V2="https://api.smith.langchain.com" 7 | LANGCHAIN_API_KEY= 
-------------------------------------------------------------------------------- /pydantic-ai-masterclass/2-logfire/.env.example: -------------------------------------------------------------------------------- 1 | LLM_MODEL="gpt-4o-mini" # e.g., gpt-4, qwen2.5:32b 2 | OPENAI_API_KEY="" 3 | AZURE_OPENAI_ENDPOINT="" 4 | AZURE_OPENAI_API_KEY="" 5 | AZURE_OPENAI_API_VERSION="" 6 | AZURE_OPENAI_API_DEPLOYMENT_NAME="" -------------------------------------------------------------------------------- /pydantic-ai-masterclass/5-tools/.env.example: -------------------------------------------------------------------------------- 1 | LLM_MODEL="gpt-4o-mini" # e.g., gpt-4, qwen2.5:32b 2 | OPENAI_API_KEY="" 3 | AZURE_OPENAI_ENDPOINT="" 4 | AZURE_OPENAI_API_KEY="" 5 | AZURE_OPENAI_API_VERSION="" 6 | AZURE_OPENAI_API_DEPLOYMENT_NAME="" -------------------------------------------------------------------------------- /pydantic-ai-masterclass/1-introduction/.env.example: -------------------------------------------------------------------------------- 1 | LLM_MODEL="gpt-4o-mini" # e.g., gpt-4, qwen2.5:32b 2 | OPENAI_API_KEY="" 3 | AZURE_OPENAI_ENDPOINT="" 4 | AZURE_OPENAI_API_KEY="" 5 | AZURE_OPENAI_API_VERSION="" 6 | AZURE_OPENAI_API_DEPLOYMENT_NAME="" -------------------------------------------------------------------------------- /pydantic-ai-masterclass/11-streaming/.env.example: -------------------------------------------------------------------------------- 1 | LLM_MODEL="gpt-4o-mini" # e.g., gpt-4, qwen2.5:32b 2 | OPENAI_API_KEY="" 3 | AZURE_OPENAI_ENDPOINT="" 4 | AZURE_OPENAI_API_KEY="" 5 | AZURE_OPENAI_API_VERSION="" 6 | AZURE_OPENAI_API_DEPLOYMENT_NAME="" -------------------------------------------------------------------------------- /pydantic-ai-masterclass/9-agent-memory/.env.example: -------------------------------------------------------------------------------- 1 | LLM_MODEL="gpt-4o-mini" # e.g., gpt-4, qwen2.5:32b 2 | OPENAI_API_KEY="" 3 | AZURE_OPENAI_ENDPOINT="" 4 | 
AZURE_OPENAI_API_KEY="" 5 | AZURE_OPENAI_API_VERSION="" 6 | AZURE_OPENAI_API_DEPLOYMENT_NAME="" -------------------------------------------------------------------------------- /pydanticai-graph-deepseek-r1/requirements.txt: -------------------------------------------------------------------------------- 1 | pydantic-ai==0.0.20 2 | pydantic-graph 3 | openai 4 | colorama==0.4.6 5 | dataclasses==0.6 6 | asyncio==3.4.3 7 | devtools==0.12.2 8 | html-to-markdown==1.1.0 9 | beautifulsoup4 10 | tavily-python -------------------------------------------------------------------------------- /langgraph/agents-financial-research/data/cost_of_living_salary.csv: -------------------------------------------------------------------------------- 1 | City,Cost of Living,Average Salary 2 | San Diego,3346,62536 3 | Los Angeles,3187,60000 4 | Honolulu,3781,78000 5 | Miami,3103,75000 6 | Santa Barbara,3285,76349 7 | San Francisco,3781,95265 -------------------------------------------------------------------------------- /pydantic-ai-masterclass/.env.example: -------------------------------------------------------------------------------- 1 | LLM_MODEL="gpt-4o-mini" # e.g., gpt-4, qwen2.5:32b 2 | OPENAI_API_KEY="" 3 | GROQ_API_KEY="" 4 | AZURE_OPENAI_ENDPOINT="" 5 | AZURE_OPENAI_API_KEY="" 6 | AZURE_OPENAI_API_VERSION="" 7 | AZURE_OPENAI_API_DEPLOYMENT_NAME="" -------------------------------------------------------------------------------- /pydantic-ai-masterclass/3-structured-data/.env.example: -------------------------------------------------------------------------------- 1 | LLM_MODEL="gpt-4o-mini" # e.g., gpt-4, qwen2.5:32b 2 | OPENAI_API_KEY="" 3 | AZURE_OPENAI_ENDPOINT="" 4 | AZURE_OPENAI_API_KEY="" 5 | AZURE_OPENAI_API_VERSION="" 6 | AZURE_OPENAI_API_DEPLOYMENT_NAME="" -------------------------------------------------------------------------------- /pydantic-ai-masterclass/4-system-prompts/.env.example: 
-------------------------------------------------------------------------------- 1 | LLM_MODEL="gpt-4o-mini" # e.g., gpt-4, qwen2.5:32b 2 | OPENAI_API_KEY="" 3 | AZURE_OPENAI_ENDPOINT="" 4 | AZURE_OPENAI_API_KEY="" 5 | AZURE_OPENAI_API_VERSION="" 6 | AZURE_OPENAI_API_DEPLOYMENT_NAME="" -------------------------------------------------------------------------------- /pydantic-ai-masterclass/6-result-validator-functions/requirements.txt: -------------------------------------------------------------------------------- 1 | pydantic-ai==0.0.22 2 | openai 3 | colorama 4 | dataclasses 5 | asyncio 6 | devtools==0.12.2 7 | html-to-markdown==1.1.0 8 | aiosqlite 9 | langchain 10 | langchain_experimental -------------------------------------------------------------------------------- /pydantic-ai-masterclass/10-errors-and-reflection/.env.example: -------------------------------------------------------------------------------- 1 | LLM_MODEL="gpt-4o-mini" # e.g., gpt-4, qwen2.5:32b 2 | OPENAI_API_KEY="" 3 | AZURE_OPENAI_ENDPOINT="" 4 | AZURE_OPENAI_API_KEY="" 5 | AZURE_OPENAI_API_VERSION="" 6 | AZURE_OPENAI_API_DEPLOYMENT_NAME="" -------------------------------------------------------------------------------- /pydantic-ai-masterclass/7-dependency-injection/.env.example: -------------------------------------------------------------------------------- 1 | LLM_MODEL="gpt-4o-mini" # e.g., gpt-4, qwen2.5:32b 2 | OPENAI_API_KEY="" 3 | AZURE_OPENAI_ENDPOINT="" 4 | AZURE_OPENAI_API_KEY="" 5 | AZURE_OPENAI_API_VERSION="" 6 | AZURE_OPENAI_API_DEPLOYMENT_NAME="" -------------------------------------------------------------------------------- /pydantic-ai-masterclass/8-retries-usage-limits/.env.example: -------------------------------------------------------------------------------- 1 | LLM_MODEL="gpt-4o-mini" # e.g., gpt-4, qwen2.5:32b 2 | OPENAI_API_KEY="" 3 | AZURE_OPENAI_ENDPOINT="" 4 | AZURE_OPENAI_API_KEY="" 5 | AZURE_OPENAI_API_VERSION="" 6 | AZURE_OPENAI_API_DEPLOYMENT_NAME="" 
-------------------------------------------------------------------------------- /pydantic-ai-masterclass/6-result-validator-functions/.env.example: -------------------------------------------------------------------------------- 1 | LLM_MODEL="gpt-4o-mini" # e.g., gpt-4, qwen2.5:32b 2 | OPENAI_API_KEY="" 3 | AZURE_OPENAI_ENDPOINT="" 4 | AZURE_OPENAI_API_KEY="" 5 | AZURE_OPENAI_API_VERSION="" 6 | AZURE_OPENAI_API_DEPLOYMENT_NAME="" -------------------------------------------------------------------------------- /langgraph/agents-financial-research/data/net_worth_comparison.csv: -------------------------------------------------------------------------------- 1 | Year,US_Net_Worth,Germany_Net_Worth 2 | 2000,120000,100000 3 | 2003,130000,110000 4 | 2006,140000,115000 5 | 2009,150000,120000 6 | 2012,160000,130000 7 | 2015,170000,140000 8 | 2018,180000,145000 9 | 2021,190000,150000 -------------------------------------------------------------------------------- /pydantic-ai-masterclass/1-introduction/1.1-hello-world.py: -------------------------------------------------------------------------------- 1 | from pydantic_ai import Agent 2 | from pydantic_ai.models.openai import OpenAIModel 3 | model = OpenAIModel('gpt-4o-mini') 4 | agent = Agent(model=model) 5 | print(agent.run_sync("What is the capital of the United States?").data) -------------------------------------------------------------------------------- /langchain-ollama/README.md: -------------------------------------------------------------------------------- 1 | # Langchain with Ollama 2 | 3 | ## Watch the tutorial here 4 | 5 | https://youtu.be/fOUng7fMQ1Y 6 | 7 | ## Launch local HTTP server 8 | 9 | `python3 -m http.server 8080` 10 | 11 | ## Open browser 12 | 13 | `http://localhost:8080/data/archguide.html` 14 | -------------------------------------------------------------------------------- /langchain-rag/.env.template: -------------------------------------------------------------------------------- 1 | 
AZURE_OPENAI_ENDPOINT="" 2 | AZURE_OPENAI_API_KEY="" 3 | AZURE_OPENAI_API_VERSION="" 4 | AZURE_OPENAI_EMBEDDINGS_ENDPOINT="" 5 | AZURE_OPENAI_EMBEDDINGS_API_VERSION="" 6 | SUPABASE_URL="" 7 | SUPABASE_SERVICE_KEY="" 8 | LANGCHAIN_TRACING_V2="" 9 | LANGCHAIN_API_KEY="" -------------------------------------------------------------------------------- /langgraph/agents-financial-research/data/average_salary_software_engineers.csv: -------------------------------------------------------------------------------- 1 | Country,Min Salary (USD),Max Salary (USD),Average Salary (USD) 2 | United States,74300,258000,104907 3 | OECD Countries (Eastern Europe),22000,30000,26000 4 | OECD Countries (Singapore and Netherlands),50000,80000,65000 5 | Switzerland,N/A,N/A,N/A 6 | -------------------------------------------------------------------------------- /notebooklm/README.md: -------------------------------------------------------------------------------- 1 | ## Deep Dive NotebookLM 2 | 3 | - ### Watch YouTube Tutorial 1: [https://youtu.be/yPJzGT9UC9Y](https://youtu.be/yPJzGT9UC9Y) 4 | - ### Watch YouTube Tutorial 2: [https://youtu.be/pr4YnRGnvto](https://youtu.be/pr4YnRGnvto) 5 | - ### Watch YouTube Tutorial 3: [https://youtu.be/04_yl0VV2BM](https://youtu.be/04_yl0VV2BM) 6 | -------------------------------------------------------------------------------- /streamlit-chatbot-ui/.env.example: -------------------------------------------------------------------------------- 1 | AZURE_OPENAI_ENDPOINT="" 2 | AZURE_OPENAI_API_KEY="" 3 | AZURE_OPENAI_API_VERSION="" 4 | AZURE_OPENAI_API_DEPLOYMENT_NAME="" 5 | 6 | AZURE_OPENAI_EMBEDDINGS_ENDPOINT="" 7 | AZURE_OPENAI_EMBEDDINGS_API_VERSION="" 8 | 9 | LANGCHAIN_TRACING_V2="https://api.smith.langchain.com" 10 | LANGCHAIN_API_KEY="" -------------------------------------------------------------------------------- /langchain-rag-pdf/tutorial-1/.env.example: -------------------------------------------------------------------------------- 1 | 
AZURE_OPENAI_ENDPOINT="" 2 | AZURE_OPENAI_API_KEY="" 3 | AZURE_OPENAI_API_VERSION="" 4 | AZURE_OPENAI_API_DEPLOYMENT_NAME="" 5 | 6 | AZURE_OPENAI_EMBEDDINGS_ENDPOINT="" 7 | AZURE_OPENAI_EMBEDDINGS_API_VERSION="" 8 | 9 | LANGCHAIN_TRACING_V2="https://api.smith.langchain.com" 10 | LANGCHAIN_API_KEY="" -------------------------------------------------------------------------------- /notebooklm/tutorial-2/prompt_app.md: -------------------------------------------------------------------------------- 1 | Create a mobile app description for SafeBase, an insurance app that allows you to file claims from your phone, track the status of your claim and receive reimbursements straight into your bank account. Include detailed description of features and key differentiation points from similar apps. Output as Markdown. 2 | -------------------------------------------------------------------------------- /langgraph/state/README.md: -------------------------------------------------------------------------------- 1 | ## Deep Dive into LangGraph State 2 | 3 | - ### Watch YouTube Tutorial 1: [https://youtu.be/fOUng7fMQ1Y](https://youtu.be/BDFP6VSOARk) 4 | - ### Watch YouTube Tutorial 2: [https://youtu.be/1KDeWskxn78](https://youtu.be/1KDeWskxn78) 5 | - ### Watch YouTube Tutorial 3: [https://youtu.be/ZtH_oXmPT3g](https://youtu.be/ZtH_oXmPT3g) 6 | -------------------------------------------------------------------------------- /full-stack-ai-masterclass/5-agent-api/api.py: -------------------------------------------------------------------------------- 1 | from fastapi import FastAPI 2 | from agent import getMovieDetails 3 | 4 | app = FastAPI() 5 | 6 | # FastAPI endpoint 7 | @app.get("/getMovieDetails/") 8 | async def get_movie_details_endpoint(q: str | None = None, model: str | None = None, prompt: str | None = None): 9 | return await getMovieDetails(q, model, prompt) -------------------------------------------------------------------------------- 
/langgraph/agents-financial-research/data/numbers_and_squares.txt: -------------------------------------------------------------------------------- 1 | 1. Number: 1, Square: 1 2 | 2. Number: 2, Square: 4 3 | 3. Number: 3, Square: 9 4 | 4. Number: 4, Square: 16 5 | 5. Number: 5, Square: 25 6 | 6. Number: 6, Square: 36 7 | 7. Number: 7, Square: 49 8 | 8. Number: 8, Square: 64 9 | 9. Number: 9, Square: 81 10 | 10. Number: 10, Square: 100 11 | -------------------------------------------------------------------------------- /langgraph/agents-financial-research/data/cost_of_living_vs_salary.csv: -------------------------------------------------------------------------------- 1 | City,Cost of Living Index,Average Salary (USD) 2 | Singapore,100,5197.0 3 | Zurich,100,9464.916666666666 4 | Hong Kong,99,55928.0 5 | New York,98,78620.0 6 | Geneva,97,9464.916666666666 7 | Paris,96,0.0 8 | Copenhagen,95,0.0 9 | Los Angeles,94,0.0 10 | San Francisco,93,0.0 11 | Tel Aviv,92,0.0 12 | -------------------------------------------------------------------------------- /notebooklm/tutorial-3/ChatGPT_Prompt_User_Story.md: -------------------------------------------------------------------------------- 1 | Create a detailed User Story for a Pet Walking SaaS app, where owners are matched with walkers. Special algorithm should consider both sides' locations, availability, personal qualifications, price, etc. The user story should contain detailed descriptions of journeys, personas, alternate journeys, acceptance criteria and exceptions. 2 | -------------------------------------------------------------------------------- /langgraph/agents-financial-research/data/comparison_net_worth_US_Germany.txt: -------------------------------------------------------------------------------- 1 | The average net worth of households in the United States as of 2023 is approximately $1.06 million. In contrast, the average net worth of households in Germany in 2021 was about $114,000. 
This indicates a significant difference, with American households holding on average nearly ten times the net worth of German households. -------------------------------------------------------------------------------- /pydantic-ai-masterclass/1-introduction/1.2-agent-openai.py: -------------------------------------------------------------------------------- 1 | import os 2 | from dotenv import load_dotenv 3 | from pydantic_ai import Agent 4 | from pydantic_ai.models.openai import OpenAIModel 5 | load_dotenv() 6 | 7 | model = OpenAIModel('gpt-4o', api_key=os.getenv('OPENAI_API_KEY')) 8 | agent = Agent(model=model) 9 | 10 | result = agent.run_sync("What is the capital of Mexico?") 11 | 12 | print(result.data) -------------------------------------------------------------------------------- /langgraph/agents-financial-research/data/top_expensive_cities.csv: -------------------------------------------------------------------------------- 1 | City,Cost of Living,Average Salary 2 | "San Diego, CA",3346,5700 3 | "Los Angeles, CA",3187,5600 4 | "Santa Barbara, CA",3500,5800 5 | "New York City, NY",4500,6500 6 | "San Francisco, CA",4700,7000 7 | "San Jose, CA",3800,6200 8 | "Honolulu, HI",3600,5900 9 | "Boston, MA",3400,5800 10 | "Washington, DC",3300,5700 11 | "Seattle, WA",3200,5600 12 | -------------------------------------------------------------------------------- /pydantic-ai-masterclass/1-introduction/1.3-agent-ollama.py: -------------------------------------------------------------------------------- 1 | from pydantic_ai import Agent 2 | from pydantic_ai.models.ollama import OllamaModel 3 | from colorama import Fore 4 | 5 | ollama_model = OllamaModel( 6 | model_name='llama3.2:1b', 7 | base_url='http://0.0.0.0:11434/v1', 8 | ) 9 | 10 | # Create the Agent 11 | agent = Agent(model=ollama_model) 12 | 13 | # Run the agent 14 | result = agent.run_sync("What is the capital of the United States?") 15 | print(Fore.RED, result.data) 
-------------------------------------------------------------------------------- /langgraph/conditional-edges/README.md: -------------------------------------------------------------------------------- 1 | ## Conditional Edges in LangGraph 2 | 3 | ### Watch the video 4 | 5 | [YouTube Video](https://youtu.be/wGN_xvB_d6Y) 6 | 7 | ### Run the app 8 | 9 | - Open a terminal and run `python graph.py` 10 | - Provide a prompt 11 | 12 | ### Tweaks 13 | 14 | - Change the number of iterations and the quality threshold 15 | 16 | - `num_iterations = 5` 17 | - `quality_threshold = 950` 18 | 19 | - Change the LLM model from OpenAI to Ollama or another model by channging the llm variable 20 | -------------------------------------------------------------------------------- /smolagents/intro/1-hello-world.py: -------------------------------------------------------------------------------- 1 | import os 2 | from dotenv import load_dotenv 3 | from smolagents import CodeAgent, DuckDuckGoSearchTool, LiteLLMModel 4 | 5 | # Load environment variables 6 | load_dotenv() 7 | 8 | # Define the model 9 | model = LiteLLMModel(model_id="gpt-4o-mini", api_key=os.getenv('OPENAI_API_KEY')) 10 | 11 | # Initialize the agent with the DuckDuckGo search tool 12 | agent = CodeAgent(tools=[DuckDuckGoSearchTool()], model=model) 13 | 14 | # Run the agent 15 | agent.run("Who is the president of the United States?") -------------------------------------------------------------------------------- /full-stack-ai-masterclass/5-agent-api/pyproject.toml: -------------------------------------------------------------------------------- 1 | [project] 2 | name = "5-agent-api" 3 | version = "0.1.0" 4 | description = "" 5 | authors = [ 6 | {name = "AI Dev",email = "183338779+aidev9@users.noreply.github.com"} 7 | ] 8 | readme = "README.md" 9 | requires-python = ">=3.12,<4.0" 10 | dependencies = [ 11 | "pydantic-ai (>=0.0.21,<0.0.25)", 12 | "colorama (>=0.4.6,<0.5.0)", 13 | "fastapi (>=0.115.8,<0.116.0)", 14 | ] 15 | 16 | 
[build-system] 17 | requires = ["poetry-core>=2.0.0,<3.0.0"] 18 | build-backend = "poetry.core.masonry.api" 19 | -------------------------------------------------------------------------------- /python-local-setup/fibonacci.py: -------------------------------------------------------------------------------- 1 | import matplotlib.pyplot as plt 2 | 3 | def fibonacci(n): 4 | fib_sequence = [0, 1] 5 | while len(fib_sequence) < n: 6 | fib_sequence.append(fib_sequence[-1] + fib_sequence[-2]) 7 | return fib_sequence 8 | 9 | # Calculate the first 10 Fibonacci numbers 10 | fib_numbers = fibonacci(10) 11 | 12 | # Plotting the Fibonacci numbers in a bar chart 13 | plt.bar(range(1, 11), fib_numbers) 14 | plt.xlabel('Position in Fibonacci Sequence') 15 | plt.ylabel('Fibonacci Number') 16 | plt.title('First 10 Fibonacci Numbers') 17 | plt.show() -------------------------------------------------------------------------------- /notebooklm/tutorial-2/prompt_reviews.md: -------------------------------------------------------------------------------- 1 | For an online insurance claim filing and tracking app, create a csv data file with the following columns for a mobile app review data set ReviewDate (MM/DD/YYYY),CustomerName, Age, Sex, State(US), ReviewText, Rating, Sentiment. Create 200 rows of only negative reviews (rating 1-3), sentiment negative from users aged 16-50, in the date range for the past 5 years. Randomize the data, do not reuse previous data. Write detailed reviews of 20-50 words. Return negative reviews only. Return exactly 200 rows of data and CSV file only. Return all columns listed. 
2 | -------------------------------------------------------------------------------- /full-stack-ai-masterclass/6-agent-ui/pyproject.toml: -------------------------------------------------------------------------------- 1 | [project] 2 | name = "6-agent-ui" 3 | version = "0.1.0" 4 | description = "" 5 | authors = [ 6 | {name = "AI Dev",email = "183338779+aidev9@users.noreply.github.com"} 7 | ] 8 | readme = "README.md" 9 | requires-python = ">=3.12,<4.0" 10 | dependencies = [ 11 | "streamlit (>=1.43.1,<2.0.0)", 12 | "gradio (>=5.20.1,<6.0.0)", 13 | "pydantic-ai (>=0.0.36,<0.0.37)", 14 | "colorama (>=0.4.6,<0.5.0)" 15 | ] 16 | 17 | 18 | [build-system] 19 | requires = ["poetry-core>=2.0.0,<3.0.0"] 20 | build-backend = "poetry.core.masonry.api" 21 | -------------------------------------------------------------------------------- /pydantic-ai-masterclass/5-tools/5.1-hello-world.py: -------------------------------------------------------------------------------- 1 | import os 2 | import random 3 | from dotenv import load_dotenv 4 | from pydantic_ai import Agent 5 | from pydantic_ai.models.openai import OpenAIModel 6 | load_dotenv() 7 | 8 | # Define the model 9 | model = OpenAIModel('gpt-4o-mini', api_key=os.getenv('OPENAI_API_KEY')) 10 | 11 | def roll_die() -> str: 12 | """Roll a six-sided die and return the result.""" 13 | return str(random.randint(1, 6)) 14 | 15 | # Define the agent 16 | agent = Agent(model=model, tools=[roll_die]) 17 | result = agent.run_sync('My guess is 4') 18 | print(result.data) -------------------------------------------------------------------------------- /full-stack-ai-masterclass/7-db-agents/pyproject.toml: -------------------------------------------------------------------------------- 1 | [project] 2 | name = "7-db-agents" 3 | version = "0.1.0" 4 | description = "" 5 | authors = [ 6 | {name = "AI Dev",email = "183338779+aidev9@users.noreply.github.com"} 7 | ] 8 | readme = "README.md" 9 | requires-python = ">=3.12,<4.0" 10 | dependencies = [ 
11 | "pydantic-ai (>=0.0.31,<0.0.32)", 12 | "colorama (>=0.4.6,<0.5.0)", 13 | "supabase (>=2.13.0,<3.0.0)", 14 | "psycopg2-binary (>=2.9.10,<3.0.0)" 15 | ] 16 | 17 | 18 | [build-system] 19 | requires = ["poetry-core>=2.0.0,<3.0.0"] 20 | build-backend = "poetry.core.masonry.api" 21 | -------------------------------------------------------------------------------- /ai-in-action/2-diagram-generation-agents/2-image-generation-agent.py: -------------------------------------------------------------------------------- 1 | import os 2 | from dotenv import load_dotenv 3 | from smolagents import load_tool, CodeAgent, LiteLLMModel, GradioUI 4 | 5 | # Load environment variables 6 | load_dotenv() 7 | 8 | # Define the model 9 | model = LiteLLMModel(model_id="gpt-4o-mini", api_key=os.getenv('OPENAI_API_KEY')) 10 | 11 | # Import tool from Hub 12 | image_generation_tool = load_tool("m-ric/text-to-image", trust_remote_code=True) 13 | 14 | # Initialize the agent with the image generation tool 15 | agent = CodeAgent(tools=[image_generation_tool], model=model) 16 | 17 | # Launch the agent with Gradio UI 18 | GradioUI(agent).launch() -------------------------------------------------------------------------------- /ollama-local-setup/README.md: -------------------------------------------------------------------------------- 1 | ## How to setup Ollama on a local laptop 2 | 3 | ### Watch the YouTube tutorial 4 | 5 | [https://youtu.be/fOUng7fMQ1Y](https://youtu.be/29nTNBDr01w) 6 | 7 | ## Setup instructions 8 | 9 | Curl command to test your local Ollama instance: 10 | 11 | `curl --location 'http://localhost:11434/api/chat' \ 12 | --header 'Content-Type: application/json' \ 13 | --data '{ 14 | "model": "llama3.1", 15 | "messages": [ 16 | { 17 | "role": "system", 18 | "content": "you are a salty pirate" 19 | }, 20 | { 21 | "role": "user", 22 | "content": "why is the sky blue" 23 | } 24 | ], 25 | "stream": false 26 | }'` 27 | 
-------------------------------------------------------------------------------- /pydantic-ai-masterclass/2-logfire/2.2-span.py: -------------------------------------------------------------------------------- 1 | import os 2 | import logfire 3 | from pydantic_ai import Agent 4 | from pydantic_ai.models.openai import OpenAIModel 5 | from dotenv import load_dotenv 6 | 7 | load_dotenv() 8 | 9 | # Configure logfire 10 | logfire.configure() 11 | 12 | # Define the model 13 | model = OpenAIModel('gpt-4o-mini', api_key=os.getenv('OPENAI_API_KEY')) 14 | 15 | # Define the agent 16 | agent = Agent(model=model) 17 | 18 | # Run the agent 19 | with logfire.span('Calling OpenAI gpt-4o-mini') as span: 20 | result = agent.run_sync("What is the capital of the US?") 21 | span.set_attribute('result', result.data) 22 | logfire.info('{result=}', result=result.data) -------------------------------------------------------------------------------- /full-stack-ai-masterclass/5-agent-api/README.md: -------------------------------------------------------------------------------- 1 | ## **Setup Instructions (Using Poetry)** 2 | 3 | ### **1. Install Poetry** 4 | 5 | ```sh 6 | pip install poetry 7 | ``` 8 | 9 | ### **2. Initialize Virtual Environment & Install Dependencies** 10 | 11 | ```sh 12 | poetry install 13 | ``` 14 | 15 | ### **3. Set Up Environment Variables** 16 | 17 | Create a `.env` file with the following configuration: 18 | 19 | ```sh 20 | # API Keys 21 | GROQ_API_KEY=your_groq_key_here 22 | OPENAI_API_KEY=your_openai_key_here 23 | ``` 24 | 25 | ### **4. Run the agent (Python)** 26 | 27 | ```sh 28 | poetry run python agent.py 29 | ``` 30 | 31 | ### **5. 
Run the API (FastAPI)** 32 | 33 | ```sh 34 | poetry run fastapi dev api.py 35 | ``` 36 | -------------------------------------------------------------------------------- /pydantic-ai-masterclass/2-logfire/2.4-exception.py: -------------------------------------------------------------------------------- 1 | import os 2 | import logfire 3 | from pydantic_ai import Agent 4 | from pydantic_ai.models.openai import OpenAIModel 5 | from dotenv import load_dotenv 6 | 7 | load_dotenv() 8 | 9 | # Configure logfire 10 | logfire.configure() 11 | 12 | # Define the model 13 | model = OpenAIModel('gpt-4o-mini', api_key=os.getenv('OPENAI_API_KEY')) 14 | 15 | # Define the agent 16 | agent = Agent(model=model) 17 | 18 | # Run the agent 19 | with logfire.span('Calling OpenAI gpt-4o-mini') as span: 20 | try: 21 | result = agent.run_sync("What is the capital of the US?") 22 | raise ValueError(result.data) 23 | except ValueError as e: 24 | span.record_exception(e) -------------------------------------------------------------------------------- /streamlit-chatbot-ui/README.md: -------------------------------------------------------------------------------- 1 | ## Streamlit Chatbot UI 2 | 3 | ### Run the app 4 | 5 | - Open a terminal and run `python chatbot_ui.py` 6 | 7 | - Open a second terminal and run `python ingest.py` 8 | 9 | - Upload a PDF file 10 | 11 | - Wait for the ingestion to finish 12 | 13 | - Ask questions 14 | 15 | ### Tweaks 16 | 17 | - Change the chunk and overlap in [`ingest.py`](./ingest.py) and observe search results. 
Defaults are: 18 | 19 | - `chunk_size = 1000` 20 | - `chunk_overlap = 50` 21 | 22 | - Change the models from Ollama to OpenAI or another model by adding them to [`models.py`](./models.py) first and then changing the `embeddings` and `llm` references in [`ingest.py`](./ingest.py) and [`chat.py`](./chat.py) 23 | -------------------------------------------------------------------------------- /langgraph/structured-output/README.md: -------------------------------------------------------------------------------- 1 | ## Structured Output Parsers 2 | 3 | ### Video Tutorial 4 | 5 | [Watch the video tutorial here](https://youtu.be/GNpXkoOX4Go) 6 | 7 | ### Setup 8 | 9 | - pip install -r requirements.txt 10 | - Create a .env file and add OPENAI, LANGCHAIN keys (Use .env.example as template) 11 | - Review the context limit in LLM declaration [main.py](./main.py) 12 | 13 | ```python 14 | llm = AzureChatOpenAI( 15 | azure_deployment=os.environ.get("AZURE_OPENAI_API_DEPLOYMENT_NAME"), 16 | api_version=os.environ.get("AZURE_OPENAI_API_VERSION"), 17 | temperature=0, 18 | max_tokens=1000, 19 | timeout=None, 20 | max_retries=2, 21 | ) 22 | ``` 23 | 24 | ### Run 25 | 26 | - `python main.py` 27 | -------------------------------------------------------------------------------- /pydantic-ai-masterclass/1-introduction/1.4-agent-azure.py: -------------------------------------------------------------------------------- 1 | from pydantic_ai import Agent 2 | from openai import AsyncAzureOpenAI 3 | from pydantic_ai.models.openai import OpenAIModel 4 | import os 5 | from dotenv import load_dotenv 6 | from colorama import Fore 7 | 8 | load_dotenv() 9 | 10 | client = AsyncAzureOpenAI( 11 | azure_endpoint=os.getenv('AZURE_OPENAI_ENDPOINT'), 12 | api_version=os.getenv('AZURE_OPENAI_API_VERSION'), 13 | api_key=os.getenv('AZURE_OPENAI_API_KEY'), 14 | ) 15 | 16 | # Create the Model 17 | model = OpenAIModel('gpt-4o-mini', openai_client=client) 18 | 19 | # Create the Agent 20 | agent = 
Agent(model=model) 21 | 22 | # Run the agent 23 | print(Fore.RED, agent.run_sync("What is the capital of the United States?").data) -------------------------------------------------------------------------------- /pydantic-ai-masterclass/11-streaming/11.1-hello-world.py: -------------------------------------------------------------------------------- 1 | import os 2 | import asyncio 3 | from colorama import Fore 4 | from dotenv import load_dotenv 5 | from pydantic_ai.models.openai import OpenAIModel 6 | from pydantic_ai import Agent 7 | 8 | load_dotenv() 9 | 10 | # # Define the model 11 | model = OpenAIModel('gpt-4o-mini', api_key=os.getenv('OPENAI_API_KEY')) 12 | agent = Agent(model=model, system_prompt="You are a brilliant poet.", result_type=str) 13 | user_prompt = 'Write a short poem on cherry blossoms.' 14 | 15 | async def main(): 16 | async with agent.run_stream(user_prompt=user_prompt) as result: 17 | async for poem in result.stream(): 18 | print(poem) 19 | 20 | if __name__ == '__main__': 21 | asyncio.run(main()) -------------------------------------------------------------------------------- /pydantic-ai-masterclass/12-model-settings/pyproject.toml: -------------------------------------------------------------------------------- 1 | [tool.poetry] 2 | name = "math-tutor-ai" 3 | version = "0.1.0" 4 | description = "A math tutor agent using PydanticAI" 5 | authors = ["Your Name "] 6 | readme = "README.md" 7 | packages = [{include = "math_tutor_ai", from = "src"}] 8 | 9 | [tool.poetry.dependencies] 10 | python = "^3.12" 11 | pydantic-ai = "^0.0.43" 12 | python-dotenv = "^1.0.1" 13 | 14 | [tool.poetry.group.dev.dependencies] 15 | pytest = "^8.0.0" 16 | pytest-asyncio = "^0.23.5" 17 | 18 | [build-system] 19 | requires = ["poetry-core"] 20 | build-backend = "poetry.core.masonry.api" 21 | 22 | [tool.pytest.ini_options] 23 | asyncio_mode = "auto" 24 | testpaths = ["tests"] 25 | python_files = ["test_*.py"] 26 | 
-------------------------------------------------------------------------------- /pydantic-ai-masterclass/4-system-prompts/4.2-coding-basic.py: -------------------------------------------------------------------------------- 1 | import os 2 | from dotenv import load_dotenv 3 | from pydantic_ai import Agent 4 | from pydantic_ai.models.openai import OpenAIModel 5 | 6 | load_dotenv() 7 | 8 | # Define the model 9 | model = OpenAIModel('gpt-4o-mini', api_key=os.getenv('OPENAI_API_KEY')) 10 | 11 | system_prompt = "You an experienced React developer. Create code that meets user's requirements." 12 | 13 | # Define the agent 14 | agent = Agent(model=model, system_prompt=system_prompt) 15 | 16 | # Run the agent 17 | result = agent.run_sync(user_prompt="Create a functional React component that displays a user profile with the following details: name, email, and profile picture. Must use Zustand for state management and TailwindCSS for styling.") 18 | 19 | print(result.data) -------------------------------------------------------------------------------- /pydantic-ai-masterclass/6-result-validator-functions/6.1-hello-world.py: -------------------------------------------------------------------------------- 1 | import os 2 | from pydantic_ai import Agent, ModelRetry 3 | from pydantic_ai.models.openai import OpenAIModel 4 | from dotenv import load_dotenv 5 | from colorama import Fore 6 | 7 | load_dotenv() 8 | 9 | # Define the model 10 | model = OpenAIModel('gpt-4o', api_key=os.getenv('OPENAI_API_KEY')) 11 | 12 | # Define the agent 13 | agent = Agent(model=model, retries=1) 14 | 15 | # Define the result validator 16 | @agent.result_validator 17 | def result_validator_simple(data: str) -> str: 18 | print(Fore.LIGHTRED_EX, 'Validating result:', data) 19 | if 'wrong' in data.lower(): 20 | raise ModelRetry('wrong response') 21 | return data 22 | 23 | # Run the agent 24 | result = agent.run_sync('The world is flat. Right or wrong? 
Respond with a single word.') 25 | print(result.data) -------------------------------------------------------------------------------- /jupyter-vscode/hello_world.ipynb: -------------------------------------------------------------------------------- 1 | { 2 | "cells": [ 3 | { 4 | "cell_type": "code", 5 | "execution_count": 1, 6 | "metadata": {}, 7 | "outputs": [ 8 | { 9 | "name": "stdout", 10 | "output_type": "stream", 11 | "text": [ 12 | "Hello, world!\n" 13 | ] 14 | } 15 | ], 16 | "source": [ 17 | "print(\"Hello, world!\")" 18 | ] 19 | } 20 | ], 21 | "metadata": { 22 | "kernelspec": { 23 | "display_name": "myenv", 24 | "language": "python", 25 | "name": "python3" 26 | }, 27 | "language_info": { 28 | "codemirror_mode": { 29 | "name": "ipython", 30 | "version": 3 31 | }, 32 | "file_extension": ".py", 33 | "mimetype": "text/x-python", 34 | "name": "python", 35 | "nbconvert_exporter": "python", 36 | "pygments_lexer": "ipython3", 37 | "version": "3.12.6" 38 | } 39 | }, 40 | "nbformat": 4, 41 | "nbformat_minor": 2 42 | } 43 | -------------------------------------------------------------------------------- /pydantic-ai-masterclass/2-logfire/2.3-levels.py: -------------------------------------------------------------------------------- 1 | import os 2 | import logfire 3 | from pydantic_ai import Agent 4 | from pydantic_ai.models.openai import OpenAIModel 5 | from dotenv import load_dotenv 6 | 7 | load_dotenv() 8 | 9 | # Configure logfire 10 | logfire.configure() 11 | 12 | # Define the model 13 | model = OpenAIModel('gpt-4o-mini', api_key=os.getenv('OPENAI_API_KEY')) 14 | 15 | # Define the agent 16 | agent = Agent(model=model) 17 | 18 | # Run the agent 19 | with logfire.span('Calling OpenAI gpt-4o-mini') as span: 20 | result = agent.run_sync("What is the capital of the US?") 21 | logfire.notice('{result=}', result=result.data) 22 | logfire.info('{result=}', result=result.data) 23 | logfire.debug('{result=}', result=result.data) 24 | logfire.warn('{result=}', 
result=result.data) 25 | logfire.error('{result=}', result=result.data) 26 | logfire.fatal('{result=}', result=result.data) -------------------------------------------------------------------------------- /pydantic-ai-masterclass/2-logfire/2.5-instrument.py: -------------------------------------------------------------------------------- 1 | import os 2 | import logfire 3 | from pydantic_ai import Agent 4 | from pydantic_ai.models.openai import OpenAIModel 5 | from dotenv import load_dotenv 6 | 7 | load_dotenv() 8 | 9 | # Configure logfire 10 | logfire.configure() 11 | 12 | # Define the model 13 | model = OpenAIModel('gpt-4o-mini', api_key=os.getenv('OPENAI_API_KEY')) 14 | 15 | # Define the agent 16 | agent = Agent(model=model) 17 | 18 | @logfire.instrument('Applying multiply to {x=} and {y=}') 19 | def multiply(x, y): 20 | return x * y 21 | 22 | # Run the agent 23 | with logfire.span('Calling OpenAI gpt-4o-mini') as span: 24 | try: 25 | result = agent.run_sync(f"Can you confirm that {multiply(300, 6)} is the result of 300 multiplied by 6? 
Also, include the answer.") 26 | span.set_attribute('result', result.data) 27 | except ValueError as e: 28 | span.record_exception(e) -------------------------------------------------------------------------------- /pydantic-ai-masterclass/3-structured-data/3.1-hello-world.py: -------------------------------------------------------------------------------- 1 | import os 2 | import logfire 3 | from pydantic_ai import Agent 4 | from pydantic_ai.models.openai import OpenAIModel 5 | from pydantic import BaseModel 6 | from dotenv import load_dotenv 7 | 8 | load_dotenv() 9 | 10 | # Configure logfire 11 | logfire.configure() 12 | 13 | # Define the model 14 | model = OpenAIModel('gpt-4o-mini', api_key=os.getenv('OPENAI_API_KEY')) 15 | 16 | # Define the output model 17 | class Calculation(BaseModel): 18 | """Captures the result of a calculation""" 19 | result: int 20 | 21 | # Define the agent 22 | agent = Agent(model=model, result_type=Calculation) 23 | 24 | # Run the agent 25 | result = agent.run_sync("What is 100 + 300?") 26 | 27 | logfire.notice('Output from LLM: {result}', result = str(result.data)) 28 | logfire.info('Result type: {result}', result = type(result.data)) 29 | logfire.info('Result: {result}', result = result.data.result) -------------------------------------------------------------------------------- /pydantic-ai-masterclass/4-system-prompts/4.1-hello-world.py: -------------------------------------------------------------------------------- 1 | import os 2 | from dotenv import load_dotenv 3 | from pydantic_ai import Agent 4 | from pydantic_ai.models.openai import OpenAIModel 5 | 6 | load_dotenv() 7 | 8 | # Define the model 9 | model = OpenAIModel('gpt-4o-mini', api_key=os.getenv('OPENAI_API_KEY')) 10 | 11 | # Define the agent 12 | agent = Agent(model=model, system_prompt="You are an experienced business coach and startup mentor specializing in guiding technology startups from ideation to achieving sustained growth and profitability. 
When asked about a startup strategy, you provide comprehensive advice on the following key areas. Include all points from the list below in your response, with detailed instructions and actionable insights:") 13 | 14 | # Run the agent 15 | result = agent.run_sync(user_prompt="Create a strategy for a SaaS startup that is building a social media platform for pet owners.") 16 | 17 | print(result.data) -------------------------------------------------------------------------------- /smolagents/intro/3-tool-calling-agent.py: -------------------------------------------------------------------------------- 1 | import os 2 | from dotenv import load_dotenv 3 | from smolagents.agents import ToolCallingAgent 4 | from smolagents import tool, LiteLLMModel 5 | from typing import Optional 6 | 7 | load_dotenv() 8 | api_key = os.getenv('OPENAI_API_KEY') 9 | 10 | # Define the model 11 | model = LiteLLMModel(model_id="gpt-4o-mini", api_key=api_key) 12 | 13 | @tool 14 | def get_weather(location: str, celsius: Optional[bool] = False) -> str: 15 | """ 16 | Get weather in the next days at given location. 17 | Secretly this tool does not care about the location, it hates the weather everywhere. 
18 | 19 | Args: 20 | location: the location 21 | celsius: the temperature 22 | """ 23 | return "The weather is UNGODLY with torrential rains and temperatures below -10°C" 24 | 25 | agent = ToolCallingAgent(tools=[get_weather], model=model) 26 | 27 | print(agent.run("What's the weather like in San Francisco?")) -------------------------------------------------------------------------------- /smolagents/intro/2-code-agent.py: -------------------------------------------------------------------------------- 1 | import os 2 | from dotenv import load_dotenv 3 | from smolagents import CodeAgent, LiteLLMModel, tool 4 | from huggingface_hub import list_models 5 | 6 | load_dotenv() 7 | api_key = os.getenv('OPENAI_API_KEY') 8 | 9 | model = LiteLLMModel(model_id="gpt-4o-mini", api_key=api_key) 10 | 11 | @tool 12 | def model_download_tool(task: str) -> str: 13 | """ 14 | This is a tool that returns the most downloaded model of a given task on the Hugging Face Hub. 15 | It returns the name of the checkpoint. 16 | 17 | Args: 18 | task: The task for which to get the download count. 19 | """ 20 | most_downloaded_model = next(iter(list_models(filter=task, sort="downloads", direction=-1))) 21 | return most_downloaded_model.id 22 | 23 | agent = CodeAgent(tools=[model_download_tool], model=model) 24 | 25 | agent.run( 26 | "Can you give me the name of the model that has the most downloads in the 'text-to-video' task on the Hugging Face Hub?" 
27 | ) -------------------------------------------------------------------------------- /pydantic-ai-masterclass/11-streaming/11.2-stream-structured.py: -------------------------------------------------------------------------------- 1 | import os 2 | import asyncio 3 | from dotenv import load_dotenv 4 | from typing_extensions import TypedDict 5 | from pydantic_ai import Agent 6 | from pydantic_ai.models.openai import OpenAIModel 7 | 8 | load_dotenv() 9 | model = OpenAIModel('gpt-4o-mini', api_key=os.getenv('OPENAI_API_KEY')) 10 | 11 | class Recipe(TypedDict, total=False): 12 | """ A recipe with a name, list of ingredients, and list of instructions. """ 13 | name: str 14 | ingredients: list[str] 15 | instructions: list[str] 16 | 17 | agent = Agent( 18 | model=model, 19 | result_type=Recipe, 20 | system_prompt='Extract a recipe from the input', 21 | ) 22 | 23 | async def main(): 24 | user_input = 'Italian creamy pasta carbonara recipe with peppers and bacon.' 25 | async with agent.run_stream(user_input) as result: 26 | async for recipe in result.stream(): 27 | print(recipe) 28 | 29 | if __name__ == '__main__': 30 | asyncio.run(main()) -------------------------------------------------------------------------------- /pydantic-ai-masterclass/6-result-validator-functions/6.2-context.py: -------------------------------------------------------------------------------- 1 | import os 2 | import logfire 3 | from pydantic_ai import Agent, RunContext, ModelRetry 4 | from pydantic_ai.models.openai import OpenAIModel 5 | from dotenv import load_dotenv 6 | from colorama import Fore 7 | 8 | load_dotenv() 9 | 10 | # Configure logfire 11 | logfire.configure() 12 | 13 | # Define the model 14 | model = OpenAIModel('gpt-4o', api_key=os.getenv('OPENAI_API_KEY')) 15 | agent = Agent(model=model, deps_type=str) 16 | 17 | # Define the result validator 18 | @agent.result_validator 19 | async def result_validator_deps(ctx: RunContext[str], data: str) -> str: 20 | print(Fore.YELLOW, 'Deps:', 
ctx.deps) 21 | print(Fore.RED, 'Data:', data) 22 | num_deps = int(ctx.deps) 23 | num_data = int(data) 24 | if num_deps != num_data: 25 | raise ModelRetry('wrong response') 26 | return data 27 | 28 | # Run the agent 29 | result = agent.run_sync('30*40 is what? Respond with the result only.', deps='1201') 30 | print(result.data) -------------------------------------------------------------------------------- /ai-in-action/1-invoice-ai-agents/data/invoice.md: -------------------------------------------------------------------------------- 1 | 2 | # Invoice 3 | 4 | **Client:** ABC Corp 5 | **Address:** 123 Business Rd, Suite 100, Business City, BC 12345 6 | **Invoice Date:** 2025-01-23 7 | **Due Date:** 2025-02-23 8 | **Payment Terms:** Net30 9 | 10 | ## Services 11 | 1. Web Development - $150,000 12 | 2. SEO Optimization - $50,000 13 | 3. Social Media Management - $30,000 14 | 4. Content Creation - $26,000 15 | 5. Email Marketing - $20,000 16 | 6. Graphic Design - $10,000 17 | 18 | **Notes:** 19 | Please make the payment by the due date to avoid any late fees. If you have any questions regarding this invoice, feel free to contact us at billing@abccorp.com. 
20 | 21 | **Bank Details:** 22 | Bank Name: Business Bank 23 | Account Number: 123456789 24 | Routing Number: 987654321 25 | SWIFT Code: BUSB1234 26 | 27 | **Contact Information:** 28 | Phone: (123) 456-7890 29 | Email: support@abccorp.com 30 | -------------------------------------------------------------------------------- /pydantic-ai-masterclass/3-structured-data/3.2-capital.py: -------------------------------------------------------------------------------- 1 | import os 2 | import logfire 3 | from pydantic_ai import Agent 4 | from pydantic_ai.models.openai import OpenAIModel 5 | from pydantic import BaseModel 6 | from dotenv import load_dotenv 7 | 8 | load_dotenv() 9 | 10 | # Configure logfire 11 | logfire.configure() 12 | 13 | # Define the model 14 | model = OpenAIModel('gpt-4o-mini', api_key=os.getenv('OPENAI_API_KEY')) 15 | 16 | # Define the output model 17 | class Capital(BaseModel): 18 | """Capital city model - includes name and short history of the city""" 19 | name: str 20 | year_founded: int 21 | short_history: str 22 | 23 | # Define the agent 24 | agent = Agent(model=model, result_type=Capital) 25 | 26 | # Run the agent 27 | result = agent.run_sync("What is the capital of the US?") 28 | 29 | logfire.notice('Results from LLM: {result}', result = str(result.data)) 30 | logfire.info('Year founded: {year}', year = result.data.year_founded) 31 | logfire.info('Short history: {history}', history = result.data.short_history) -------------------------------------------------------------------------------- /langchain-rag/supabase_setup.sql: -------------------------------------------------------------------------------- 1 | -- Enable the pgvector extension to work with embedding vectors 2 | create extension if not exists vector; 3 | 4 | -- Create a table to store your documents 5 | create table 6 | documents ( 7 | id uuid primary key, 8 | content text, -- corresponds to Document.pageContent 9 | metadata jsonb, -- corresponds to Document.metadata 10 | embedding 
vector (4096) -- 3072 for Llama 3.2, 4096 for Llama 3.1, 1536 for OpenAI embeddings, change if needed 11 | ); 12 | 13 | -- Create a function to search for documents 14 | create function match_documents ( 15 | query_embedding vector (4096), 16 | filter jsonb default '{}' 17 | ) returns table ( 18 | id uuid, 19 | content text, 20 | metadata jsonb, 21 | similarity float 22 | ) language plpgsql as $$ 23 | #variable_conflict use_column 24 | begin 25 | return query 26 | select 27 | id, 28 | content, 29 | metadata, 30 | 1 - (documents.embedding <=> query_embedding) as similarity 31 | from documents 32 | where metadata @> filter 33 | order by documents.embedding <=> query_embedding; 34 | end; 35 | $$; -------------------------------------------------------------------------------- /ai-in-action/2-diagram-generation-agents/1-diagram-generation-agent.py: -------------------------------------------------------------------------------- 1 | import datetime 2 | import os 3 | from dotenv import load_dotenv 4 | from smolagents import CodeAgent, LiteLLMModel, tool, GradioUI 5 | import mermaid as md 6 | from mermaid.graph import Graph 7 | from PIL import Image 8 | 9 | load_dotenv() 10 | api_key = os.getenv('OPENAI_API_KEY') 11 | 12 | model = LiteLLMModel(model_id="gpt-4o-mini", api_key=api_key, temperature=0) 13 | 14 | @tool 15 | def mermaid_render_tool(mermaid_str: str) -> str: 16 | """ 17 | This is a tool that saves a mermaid diagram as a png file. 18 | It returns the image. 19 | 20 | Args: 21 | mermaid_str: The Mermaid diagram string. 
22 | """ 23 | 24 | sequence = Graph(title='Diagram', script=mermaid_str) 25 | timestamp = datetime.datetime.now().strftime("%Y%m%d%H%M%S") 26 | file_path = f"./output/diagram_{timestamp}.png" 27 | md.Mermaid(sequence).to_png(file_path) 28 | image_path = os.path.abspath(file_path) 29 | return Image.open(image_path) 30 | 31 | agent = CodeAgent(tools=[mermaid_render_tool], model=model) 32 | GradioUI(agent).launch() -------------------------------------------------------------------------------- /pydantic-ai-masterclass/5-tools/5.2-tools-plain.py: -------------------------------------------------------------------------------- 1 | import os 2 | from colorama import Fore 3 | from dotenv import load_dotenv 4 | from pydantic_ai import Agent 5 | from pydantic_ai.models.openai import OpenAIModel 6 | load_dotenv() 7 | 8 | # Define the model 9 | model = OpenAIModel('gpt-4o-mini', api_key=os.getenv('OPENAI_API_KEY')) 10 | 11 | # Define the agent 12 | agent = Agent(model=model) 13 | 14 | # Tool to add two numbers 15 | @agent.tool_plain 16 | def add(a:int, b:int) -> int: 17 | """Adds two numbers""" 18 | print(Fore.CYAN, f"Calling tool add with params {a} and {b}...") 19 | return a + b 20 | 21 | # Tool to determine if an integer is a prime number 22 | @agent.tool_plain 23 | def is_prime(a:int) -> bool: 24 | """Determines whether an integer is a prime number""" 25 | 26 | print(Fore.GREEN, f"Calling tool is_prime with param {a}...") 27 | if a <= 1: 28 | return False 29 | for i in range(2, int(a ** 0.5) + 1): 30 | if a % i == 0: 31 | return False 32 | return True 33 | 34 | result = agent.run_sync('17 + 74 is a prime number') 35 | print(Fore.RED, result.data) -------------------------------------------------------------------------------- /full-stack-ai-masterclass/6-agent-ui/agent.py: -------------------------------------------------------------------------------- 1 | import os 2 | from colorama import Fore 3 | from dotenv import load_dotenv 4 | from pydantic_ai.models.openai import 
OpenAIModel 5 | from pydantic_ai import Agent 6 | 7 | load_dotenv() 8 | 9 | # Define the model 10 | model = OpenAIModel('gpt-4o-mini', api_key=os.getenv('OPENAI_API_KEY')) 11 | agent = Agent(model=model, system_prompt="You are a helpful assistant") 12 | 13 | def get_response(user_input: str) -> str: 14 | return agent.run_sync(user_input) 15 | 16 | def get_response_with_history(user_input: str, history: list) -> str: 17 | response = agent.run_sync(user_input) 18 | return response.data 19 | 20 | # Define the main loop 21 | def main_loop(): 22 | while True: 23 | user_input = input(">> I am your asssitant. How can I help you today? ") 24 | if user_input.lower() in ["quit", "exit", "q"]: 25 | print("Goodbye!") 26 | break 27 | 28 | # Run the agent 29 | result = get_response(user_input) 30 | print(Fore.WHITE, result.data) 31 | 32 | # Run the main loop 33 | if __name__ == "__main__": 34 | main_loop() -------------------------------------------------------------------------------- /LICENSE: -------------------------------------------------------------------------------- 1 | MIT License 2 | 3 | Copyright (c) 2025 AI Software Developers 4 | 5 | Permission is hereby granted, free of charge, to any person obtaining a copy 6 | of this software and associated documentation files (the "Software"), to deal 7 | in the Software without restriction, including without limitation the rights 8 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell 9 | copies of the Software, and to permit persons to whom the Software is 10 | furnished to do so, subject to the following conditions: 11 | 12 | The above copyright notice and this permission notice shall be included in all 13 | copies or substantial portions of the Software. 14 | 15 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 16 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 17 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE 18 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 19 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, 20 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE 21 | SOFTWARE. 22 | -------------------------------------------------------------------------------- /pydantic-ai-masterclass/1-introduction/1.5-multi-model-agents.py: -------------------------------------------------------------------------------- 1 | import os 2 | from dotenv import load_dotenv 3 | from pydantic_ai import Agent 4 | from pydantic_ai.models.openai import OpenAIModel 5 | from pydantic_ai.models.ollama import OllamaModel 6 | from colorama import Fore 7 | 8 | load_dotenv() 9 | # Create the OpenAI model 10 | openai_model = OpenAIModel('gpt-4o-mini', api_key=os.getenv('OPENAI_API_KEY')) 11 | 12 | # Create the OpenAI agent 13 | open_ai_agent = Agent(model=openai_model) 14 | 15 | # Run the OpenAI agent 16 | result = open_ai_agent.run_sync("What is the capital of the Mexico?") 17 | print(Fore.CYAN, "OpenAI Agent: ", result.data) 18 | 19 | # Capture the last message 20 | message_history = result.new_messages() 21 | 22 | # Create the Ollama model 23 | ollama_model = OllamaModel( 24 | model_name='llama3.2:1b', 25 | base_url='http://0.0.0.0:11434/v1', 26 | ) 27 | 28 | # Create the Ollama agent 29 | ollama_agent = Agent(model=ollama_model) 30 | 31 | # Run the Ollama agent, passing the message history 32 | print(Fore.GREEN, "Ollama Agent: ", ollama_agent.run_sync("Tell me about the history of this city.", message_history=message_history).data) -------------------------------------------------------------------------------- /pydantic-ai-masterclass/11-streaming/app-markdown.py: -------------------------------------------------------------------------------- 1 | import streamlit as st 2 | from agent import agent 3 | 4 | # Constants 5 | height = 600 6 | title = "Markdown Chatbot" 7 | icon = ":robot:" 8 | 9 
| def generate_message(user_input): 10 | response = agent.run_sync(user_input) 11 | answer = response.data 12 | 13 | st.session_state.conversation.append({ 14 | "user": user_input, 15 | "assistant": answer 16 | }) 17 | 18 | # Iterate over the conversation history 19 | for entry in st.session_state.conversation: 20 | messages.chat_message("user").write(entry['user']) 21 | messages.chat_message("assistant").write(entry['assistant']) 22 | 23 | # Session: Initialize conversation history 24 | if "conversation" not in st.session_state: 25 | st.session_state.conversation = [] 26 | 27 | if 'clicked' not in st.session_state: 28 | st.session_state.clicked = False 29 | 30 | # Set page title and icon 31 | st.set_page_config(page_title=title, page_icon=icon) 32 | st.header(title) 33 | 34 | messages = st.container(border=True, height=height) 35 | if prompt := st.chat_input("Enter your question...", key="prompt"): 36 | generate_message(prompt) -------------------------------------------------------------------------------- /full-stack-ai-masterclass/6-agent-ui/app-streamlit-markdown.py: -------------------------------------------------------------------------------- 1 | import streamlit as st 2 | from agent import get_response 3 | 4 | # Constants 5 | height = 600 6 | title = "Markdown Chatbot" 7 | icon = ":robot:" 8 | 9 | def generate_message(user_input): 10 | response = get_response(user_input) 11 | answer = response.data 12 | 13 | st.session_state.conversation.append({ 14 | "user": user_input, 15 | "assistant": answer 16 | }) 17 | 18 | # Iterate over the conversation history 19 | for entry in st.session_state.conversation: 20 | messages.chat_message("user").write(entry['user']) 21 | messages.chat_message("assistant").write(entry['assistant']) 22 | 23 | # Session: Initialize conversation history 24 | if "conversation" not in st.session_state: 25 | st.session_state.conversation = [] 26 | 27 | if 'clicked' not in st.session_state: 28 | st.session_state.clicked = False 29 | 30 | # 
import os
import asyncio
from colorama import Fore
from dotenv import load_dotenv
from pydantic_ai.models.openai import OpenAIModel
from pydantic_ai import Agent
from pydantic_ai.messages import (
    PartDeltaEvent,
    TextPartDelta,
)

load_dotenv()

# Model, agent, and the fixed prompt whose answer we stream.
model = OpenAIModel('gpt-4o-mini', api_key=os.getenv('OPENAI_API_KEY'))
agent = Agent(model=model, system_prompt="You are a helpful assistant")
user_prompt = 'Create a poem based on cherry blossoms.'

async def main():
    """Iterate the agent graph node by node, printing text deltas as they arrive."""
    async with agent.iter(user_prompt) as run:
        async for node in run:
            # Only model-request nodes carry a streamable response.
            if not Agent.is_model_request_node(node):
                continue
            async with node.stream(run.ctx) as request_stream:
                async for event in request_stream:
                    # Print each incremental text fragment immediately, unbuffered.
                    if isinstance(event, PartDeltaEvent) and isinstance(event.delta, TextPartDelta):
                        print(Fore.GREEN, event.delta.content_delta, sep="", end="", flush=True)


if __name__ == '__main__':
    asyncio.run(main())
26 | 27 | # Run the agent 28 | result = agent.run_sync("What is the capital of the US?", deps="Paris") 29 | 30 | print(result.data) -------------------------------------------------------------------------------- /pydantic-ai-masterclass/10-errors-and-reflection/10.3-model-errors.py: -------------------------------------------------------------------------------- 1 | import os 2 | import pprint 3 | from colorama import Fore 4 | from dotenv import load_dotenv 5 | from pydantic_ai import Agent, ModelRetry, UnexpectedModelBehavior, capture_run_messages 6 | from pydantic_ai.models.openai import OpenAIModel 7 | 8 | # Load the environment variables 9 | load_dotenv() 10 | 11 | # Define the model 12 | model = OpenAIModel('gpt-4o-mini', api_key=os.getenv('OPENAI_API_KEY')) 13 | 14 | # Define the agent 15 | agent = Agent(model=model, system_prompt="You are a helpful assistant.") 16 | 17 | @agent.tool_plain(retries=5) 18 | def calc_volume(size: int) -> int: 19 | if size == 42: 20 | return size**3 21 | else: 22 | print(Fore.RED, f"Invalid size: {size}.") 23 | raise ModelRetry('Please try again with size=42.') 24 | 25 | with capture_run_messages() as messages: 26 | try: 27 | result = agent.run_sync('Please get me the volume of a box with size 6.') 28 | except UnexpectedModelBehavior as e: 29 | print('An error occurred:', e) 30 | #> An error occurred: Tool exceeded max retries count of 1 31 | print('cause:', repr(e.__cause__)) 32 | #> cause: ModelRetry('Please try again.') 33 | print(Fore.RESET) 34 | pprint.pprint(messages, indent=0, width=80) 35 | else: 36 | print(Fore.WHITE, result.data) -------------------------------------------------------------------------------- /streamlit-chatbot-ui/models.py: -------------------------------------------------------------------------------- 1 | import os 2 | from langchain_ollama import OllamaEmbeddings, ChatOllama 3 | from langchain_openai import AzureOpenAIEmbeddings, AzureChatOpenAI 4 | 5 | class Models: 6 | def __init__(self): 7 | # 
ollama pull mxbai-embed-large 8 | self.embeddings_ollama = OllamaEmbeddings( 9 | model="mxbai-embed-large" 10 | ) 11 | 12 | # ollama pull llama3.2 13 | self.model_ollama = ChatOllama( 14 | model="llama3.2", 15 | temperature=0, 16 | ) 17 | 18 | # Azure OpenAI embeddings 19 | self.embeddings_openai = AzureOpenAIEmbeddings( 20 | model="text-embedding-3-large", 21 | dimensions=1536, # Can specify dimensions with new text-embedding-3 models 22 | azure_endpoint=os.environ.get("AZURE_OPENAI_EMBEDDINGS_ENDPOINT"), 23 | api_key=os.environ.get("AZURE_OPENAI_API_KEY"), 24 | api_version=os.environ.get("AZURE_OPENAI_EMBEDDINGS_API_VERSION"), 25 | ) 26 | 27 | # Azure OpenAI chat model 28 | self.model_openai = AzureChatOpenAI( 29 | azure_deployment=os.environ.get("AZURE_OPENAI_API_DEPLOYMENT_NAME"), 30 | api_version=os.environ.get("AZURE_OPENAI_API_VERSION"), 31 | temperature=0, 32 | max_tokens=None, 33 | timeout=None, 34 | max_retries=2, 35 | ) -------------------------------------------------------------------------------- /langchain-rag-pdf/tutorial-1/models.py: -------------------------------------------------------------------------------- 1 | import os 2 | from langchain_ollama import OllamaEmbeddings, ChatOllama 3 | from langchain_openai import AzureOpenAIEmbeddings, AzureChatOpenAI 4 | 5 | class Models: 6 | def __init__(self): 7 | # ollama pull mxbai-embed-large 8 | self.embeddings_ollama = OllamaEmbeddings( 9 | model="mxbai-embed-large" 10 | ) 11 | 12 | # ollama pull llama3.2 13 | self.model_ollama = ChatOllama( 14 | model="llama3.2", 15 | temperature=0, 16 | ) 17 | 18 | # Azure OpenAI embeddings 19 | self.embeddings_openai = AzureOpenAIEmbeddings( 20 | model="text-embedding-3-large", 21 | dimensions=1536, # Can specify dimensions with new text-embedding-3 models 22 | azure_endpoint=os.environ.get("AZURE_OPENAI_EMBEDDINGS_ENDPOINT"), 23 | api_key=os.environ.get("AZURE_OPENAI_API_KEY"), 24 | api_version=os.environ.get("AZURE_OPENAI_EMBEDDINGS_API_VERSION"), 25 | ) 26 | 27 
| # Azure OpenAI chat model 28 | self.model_openai = AzureChatOpenAI( 29 | azure_deployment=os.environ.get("AZURE_OPENAI_API_DEPLOYMENT_NAME"), 30 | api_version=os.environ.get("AZURE_OPENAI_API_VERSION"), 31 | temperature=0, 32 | max_tokens=None, 33 | timeout=None, 34 | max_retries=2, 35 | ) -------------------------------------------------------------------------------- /pydantic-ai-masterclass/8-retries-usage-limits/8.5-usage-limit-tokens.py: -------------------------------------------------------------------------------- 1 | import os 2 | from dotenv import load_dotenv 3 | from colorama import Fore 4 | from pydantic import BaseModel 5 | from pydantic_ai import Agent, ModelRetry, RunContext 6 | from pydantic_ai.models.openai import OpenAIModel 7 | from pydantic_ai.exceptions import UsageLimitExceeded 8 | from pydantic_ai.usage import UsageLimits 9 | 10 | load_dotenv() 11 | 12 | # Define the model 13 | model = OpenAIModel('gpt-4o-mini', api_key=os.getenv('OPENAI_API_KEY')) 14 | 15 | # Define the agent with a system prompt 16 | agent = Agent(model=model, system_prompt="You are a highly skilled haiku poet. 
When asked write a short haiku matching user's prompt.") 17 | 18 | response_tokens_limit = 50 19 | total_tokens_limit = 50 20 | 21 | result_sync = agent.run_sync( 22 | 'Robots will not rule the world.', 23 | usage_limits=UsageLimits(response_tokens_limit=response_tokens_limit), 24 | ) 25 | print(Fore.GREEN, result_sync.data) 26 | print(Fore.RED, result_sync.usage()) 27 | print(Fore.RESET) 28 | 29 | try: 30 | result_sync = agent.run_sync( 31 | 'Cherry blossoms bloom.', 32 | usage_limits=UsageLimits(response_tokens_limit=response_tokens_limit, total_tokens_limit=total_tokens_limit), 33 | ) 34 | print(Fore.CYAN, result_sync.data) 35 | print(Fore.RED, result_sync.usage()) 36 | print(Fore.RESET) 37 | except UsageLimitExceeded as e: 38 | print(e) -------------------------------------------------------------------------------- /pydantic-ai-masterclass/12-model-settings/README.md: -------------------------------------------------------------------------------- 1 | # Math Tutor AI 2 | 3 | A friendly math tutor agent built with PydanticAI that helps primary school students solve math problems. 4 | 5 | ## Features 6 | 7 | - Step-by-step problem solving 8 | - Age-appropriate explanations 9 | - Practice problem suggestions 10 | - Difficulty level assessment 11 | - Basic arithmetic operations (add, subtract, multiply, divide) 12 | 13 | ## Installation 14 | 15 | 1. Clone the repository 16 | 2. Install dependencies using Poetry: 17 | ```bash 18 | poetry install 19 | ``` 20 | 3. Copy `.env.example` to `.env` and add your OpenAI and Genimi API keys: 21 | ```bash 22 | OPENAI_API_KEY=your_api_key_here 23 | GEMINI_API_KEY=your_api_key_here 24 | ``` 25 | 26 | ## Usage 27 | 28 | ```python 29 | from math_tutor_ai import math_tutor 30 | 31 | async def main(): 32 | question = "If I have 5 apples and my friend gives me 3 more, how many apples do I have in total?" 
33 | response = await math_tutor.run(question) 34 | result = response.data 35 | print(f"Answer: {result.answer}") 36 | print(f"Explanation: {result.explanation}") 37 | print(f"Steps: {result.steps}") 38 | ``` 39 | 40 | ## Testing 41 | 42 | Run the tests using pytest: 43 | 44 | ```bash 45 | poetry run python 12.1-hello-world.py 46 | ``` 47 | 48 | ## Requirements 49 | 50 | - Python 3.12 or higher 51 | - Poetry for dependency management 52 | - OpenAI API key 53 | -------------------------------------------------------------------------------- /pydantic-ai-masterclass/9-agent-memory/9.2-full-memory.py: -------------------------------------------------------------------------------- 1 | import os 2 | from colorama import Fore 3 | from dotenv import load_dotenv 4 | from pydantic_ai import Agent 5 | from pydantic_ai.messages import (ModelMessage) 6 | from pydantic_ai.models.openai import OpenAIModel 7 | 8 | load_dotenv() 9 | 10 | # Define the model 11 | model = OpenAIModel('gpt-4o-mini', api_key=os.getenv('OPENAI_API_KEY')) 12 | system_prompt = "You are a helpful assistant." 13 | 14 | # Define the agent 15 | agent = Agent(model=model, system_prompt=system_prompt) 16 | 17 | # Define the main loop 18 | def main_loop(): 19 | message_history: list[ModelMessage] = [] 20 | MAX_MESSAGE_HISTORY_LENGTH = 5 21 | 22 | while True: 23 | user_input = input(">> I am your asssitant. How can I help you today? 
") 24 | if user_input.lower() in ["quit", "exit", "q"]: 25 | print("Goodbye!") 26 | break 27 | 28 | # Run the agent 29 | result = agent.run_sync(user_input, deps=user_input, message_history=message_history) 30 | print(Fore.WHITE, result.data) 31 | msg = result.new_messages() 32 | message_history.extend(msg) 33 | 34 | # Limit the message history 35 | message_history = message_history[-MAX_MESSAGE_HISTORY_LENGTH:] 36 | print(Fore.YELLOW, f"Message length: {message_history.__len__()}") 37 | print(Fore.RESET) 38 | # Run the main loop 39 | if __name__ == "__main__": 40 | main_loop() -------------------------------------------------------------------------------- /pydantic-ai-masterclass/6-result-validator-functions/6.6-python-validator.py: -------------------------------------------------------------------------------- 1 | import os 2 | import logfire 3 | from dotenv import load_dotenv 4 | from pydantic_ai import Agent, RunContext, ModelRetry 5 | from pydantic_ai.models.openai import OpenAIModel 6 | from langchain_experimental.utilities import PythonREPL 7 | from colorama import Fore 8 | 9 | load_dotenv() 10 | 11 | # Configure logfire 12 | logfire.configure() 13 | 14 | # Define the model 15 | model = OpenAIModel('gpt-4o-mini', api_key=os.getenv('OPENAI_API_KEY')) 16 | 17 | # Define the agent 18 | agent = Agent(model=model, system_prompt="Generate Python code based on user input. Return executable code only.") 19 | 20 | @agent.result_validator 21 | def validate_result(ctx: RunContext[str], result_data) -> str: 22 | print(Fore.YELLOW, f"Evaluating Python code: {result_data}") 23 | try: 24 | repl = PythonREPL() 25 | result = repl.run(result_data) 26 | print(Fore.GREEN, "Function result: ", result) 27 | except BaseException as e: 28 | print(Fore.RED, f"Failed to execute. Error: {repr(e)}") 29 | raise ValueError(f"Failed to execute. 
Error: {repr(e)}") 30 | return result_data 31 | 32 | # Run the agent 33 | try: 34 | result = agent.run_sync("Create a Python function that calculates the cube of a number. Run the function with the number 12.") 35 | print(Fore.MAGENTA, "Code result: ", result.data) 36 | except ModelRetry as e: 37 | print(Fore.RED, e) 38 | except Exception as e: 39 | print(Fore.RED, e) -------------------------------------------------------------------------------- /pydantic-ai-masterclass/8-retries-usage-limits/8.1-hello-world.py: -------------------------------------------------------------------------------- 1 | import os 2 | from dotenv import load_dotenv 3 | from colorama import Fore 4 | from pydantic_ai import Agent, ModelRetry 5 | from pydantic_ai.models.openai import OpenAIModel 6 | import logfire 7 | 8 | load_dotenv() 9 | 10 | # Configure logfire 11 | logfire.configure() 12 | 13 | # Define the model 14 | model = OpenAIModel('gpt-4o-mini', api_key=os.getenv('OPENAI_API_KEY')) 15 | 16 | # Define the agent with a system prompt 17 | agent = Agent(model=model, system_prompt="You are an AI knowledge summary agent. Summarize the key points from the provided text. Ensure the summary is concise and captures the main ideas accurately. Any time you get a response, call the `infinite_retry_tool` to produce another response.", retries=2) 18 | 19 | # Define a system prompt with dependency injection 20 | @agent.tool_plain(retries=3) 21 | def infinite_retry_tool() -> int: 22 | print(Fore.RED + "Usage limit exceeded. Please try again later.") 23 | raise ModelRetry('Please try again.') 24 | 25 | # Define the main loop 26 | def main_loop(): 27 | while True: 28 | user_input = input(">> I am a summary agent. What should I summarize? 
") 29 | if user_input.lower() in ["quit", "exit", "q"]: 30 | print("Goodbye!") 31 | break 32 | 33 | # Run the agent 34 | result = agent.run_sync(user_input) 35 | print(Fore.CYAN, f"Assistant: {result.data}") 36 | 37 | # Run the main loop 38 | if __name__ == "__main__": 39 | main_loop() -------------------------------------------------------------------------------- /pydantic-ai-masterclass/3-structured-data/data/invoice.md: -------------------------------------------------------------------------------- 1 | **Invoice** 2 | **Invoice Number:** INV-2024-00123 3 | **Date Issued:** December 29, 2024 4 | **Due Date:** January 15, 2025 5 | **Currency:** USD 6 | 7 | **Customer Name:** Jane Doe 8 | **Company:** Innovative Solutions LLC 9 | **Address:** 123 Innovation Drive, Tech City, CA 90210 10 | 11 | --- 12 | 13 | ### **Services Provided** 14 | 15 | | **Description** | **Quantity** | **Unit Price (USD)** | **Total (USD)** | 16 | | ------------------------------ | ------------ | -------------------- | --------------- | 17 | | Custom Software Development | 30 hours | $120.00 | $3,600.00 | 18 | | System Architecture Consulting | 10 hours | $150.00 | $1,500.00 | 19 | | AI Model Integration | 1 project | $2,500.00 | $2,500.00 | 20 | | Technical Documentation | 15 pages | $50.00 | $750.00 | 21 | 22 | --- 23 | 24 | ### **Subtotal:** $8,350.00 25 | 26 | ### **Tax (10%):** $835.00 27 | 28 | ### **Total Amount Due:** $9,185.00 29 | 30 | --- 31 | 32 | ### **Payment Instructions:** 33 | 34 | **Bank Name:** First Tech Bank 35 | **Account Name:** Innovative Solutions LLC 36 | **Account Number:** 123456789 37 | **SWIFT Code:** FTBKUS33 38 | 39 | --- 40 | 41 | **Thank you for your business!** 42 | _If you have any questions regarding this invoice, please contact us at billing@innovativesolutions.com._ 43 | 44 | Would you like adjustments or additional details? 
45 | -------------------------------------------------------------------------------- /pydantic-ai-masterclass/8-retries-usage-limits/8.2-agent-retries.py: -------------------------------------------------------------------------------- 1 | import os 2 | from dotenv import load_dotenv 3 | from colorama import Fore 4 | from pydantic import BaseModel 5 | from pydantic_ai import Agent 6 | from pydantic_ai.models.openai import OpenAIModel 7 | 8 | load_dotenv() 9 | 10 | # Define the model 11 | model = OpenAIModel('gpt-4o-mini', api_key=os.getenv('OPENAI_API_KEY')) 12 | 13 | # Define the response type as a Pydantic model 14 | class MathResponse(BaseModel): 15 | """Math model response. Contains the square, cube and n'th power of a number. Squared, cubed, quad, squad and a random number. Squad must be equal to 93 minus random.""" 16 | number: int 17 | square: int 18 | cube: int 19 | nth: int 20 | 21 | # Define the agent with a system prompt 22 | agent = Agent(model=model, system_prompt="You are a mathematician tasked to calculate the square, cube and the n'th power of a number. Provided a number return a MathResponse type.", retries=3, result_type=MathResponse) 23 | 24 | # Define the main loop 25 | def main_loop(): 26 | while True: 27 | user_input = input(">> I am a math agent. 
Provide a number and the power to calculate: ") 28 | if user_input.lower() in ["quit", "exit", "q"]: 29 | print("Goodbye!") 30 | break 31 | 32 | # Run the agent 33 | result = agent.run_sync(user_input) 34 | print(Fore.YELLOW, f"Assistant: {result.data}") 35 | print(Fore.RESET) 36 | 37 | # Run the main loop 38 | if __name__ == "__main__": 39 | main_loop() -------------------------------------------------------------------------------- /pydantic-ai-masterclass/8-retries-usage-limits/8.6-usage-request-limit.py: -------------------------------------------------------------------------------- 1 | import os 2 | from dotenv import load_dotenv 3 | from colorama import Fore 4 | from pydantic_ai import Agent, ModelRetry 5 | from pydantic_ai.models.openai import OpenAIModel 6 | from pydantic_ai.usage import UsageLimits 7 | 8 | load_dotenv() 9 | 10 | # Define the model 11 | model = OpenAIModel('gpt-4o-mini', api_key=os.getenv('OPENAI_API_KEY')) 12 | 13 | # Define the agent with a system prompt 14 | agent = Agent(model=model, system_prompt="You are an AI knowledge summary agent. Summarize the key points from the provided text. Ensure the summary is concise and captures the main ideas accurately. Any time you get a response, call the `infinite_retry_tool` to produce another response.", retries=4) 15 | 16 | # Define a system prompt with dependency injection 17 | @agent.tool_plain(retries=3) 18 | def infinite_retry_tool() -> int: 19 | print(Fore.RED + "Usage limit exceeded. Please try again later.") 20 | raise ModelRetry('Please try again.') 21 | 22 | # Define the main loop 23 | def main_loop(): 24 | while True: 25 | user_input = input(">> I am a summary agent. What should I summarize? 
") 26 | if user_input.lower() in ["quit", "exit", "q"]: 27 | print("Goodbye!") 28 | break 29 | 30 | # Run the agent 31 | result = agent.run_sync(user_input, usage_limits=UsageLimits(request_limit=5)) 32 | print(Fore.CYAN, f"Assistant: {result.data}") 33 | 34 | # Run the main loop 35 | if __name__ == "__main__": 36 | main_loop() -------------------------------------------------------------------------------- /pydantic-ai-masterclass/9-agent-memory/9.6-hello-world-variant.py: -------------------------------------------------------------------------------- 1 | import os 2 | from datetime import date 3 | from colorama import Fore 4 | from dotenv import load_dotenv 5 | from pydantic_ai import Agent 6 | from pydantic_ai.models.openai import OpenAIModel 7 | 8 | load_dotenv() 9 | 10 | # Define the model 11 | model = OpenAIModel('gpt-4o-mini', api_key=os.getenv('OPENAI_API_KEY')) 12 | ollama_model = OpenAIModel(model_name='deepseek-r1', base_url='http://localhost:11434/v1') 13 | 14 | system_prompt = "You are a shopify store manager with digital marketing experience" 15 | 16 | # Define the agent 17 | agent = Agent(model=model, system_prompt=system_prompt) 18 | reasoning_agent = Agent(model=ollama_model, system_prompt="Consider the arguments provided and help the user make a decision.") 19 | 20 | # Run the agent 21 | result1 = agent.run_sync(user_prompt="Create a Presidents's Day Marketing campaign for matresses and bed liners.") 22 | 23 | messages = result1.all_messages() 24 | print(Fore.GREEN, result1.data) 25 | 26 | result2 = agent.run_sync( 27 | 'Provide counter arguments why this strategy will fail.', 28 | message_history=result1.new_messages(), 29 | ) 30 | print(Fore.RED, result2.data) 31 | 32 | combined_messages = result1.new_messages() + result2.new_messages() 33 | 34 | result3 = reasoning_agent.run_sync( 35 | 'Should I run the marketing campaign? Respond with yes or no and if yes, how to overcome the challenges. 
If no, how to change the campaign to make it successful.', 36 | message_history=combined_messages, 37 | ) 38 | print(Fore.YELLOW, result3.data) 39 | -------------------------------------------------------------------------------- /pydantic-ai-masterclass/11-streaming/app-streaming.py: -------------------------------------------------------------------------------- 1 | 2 | import time 3 | import streamlit as st 4 | from agent import agent 5 | 6 | # Constants 7 | height = 600 8 | title = "Streaming Chatbot" 9 | icon = ":robot:" 10 | 11 | # Set page title and icon 12 | st.set_page_config(page_title=title, page_icon=icon) 13 | st.header(title) 14 | 15 | def generate_message(user_input): 16 | response = agent.run_sync(user_input) 17 | return response.data 18 | 19 | # Streamed response emulator 20 | def response_generator(): 21 | response = generate_message(prompt if prompt is not None else "Hello.") 22 | for word in response.split(): 23 | yield word + " " 24 | time.sleep(0.05) 25 | 26 | # Initialize chat history 27 | if "messages" not in st.session_state: 28 | st.session_state.messages = [] 29 | 30 | # Display chat messages from history on app rerun 31 | for message in st.session_state.messages: 32 | with st.chat_message(message["role"]): 33 | st.markdown(message["content"]) 34 | 35 | # Accept user input 36 | if prompt := st.chat_input("What is up?"): 37 | # Display user message in chat message container 38 | with st.chat_message("user"): 39 | st.markdown(prompt) 40 | 41 | # Add user message to chat history 42 | st.session_state.messages.append({"role": "user", "content": prompt}) 43 | 44 | # Display assistant response in chat message container 45 | with st.chat_message("assistant"): 46 | response = st.write_stream(response_generator()) 47 | 48 | # Add assistant response to chat history 49 | st.session_state.messages.append({"role": "assistant", "content": response}) -------------------------------------------------------------------------------- 
/full-stack-ai-masterclass/6-agent-ui/app-streamlit-stream.py: -------------------------------------------------------------------------------- 1 | 2 | import time 3 | import streamlit as st 4 | from agent import get_response 5 | 6 | # Constants 7 | height = 600 8 | title = "Streaming Chatbot" 9 | icon = ":robot:" 10 | 11 | # Set page title and icon 12 | st.set_page_config(page_title=title, page_icon=icon) 13 | st.header(title) 14 | 15 | def generate_message(user_input): 16 | response = get_response(user_input) 17 | return response.data 18 | 19 | # Streamed response emulator 20 | def response_generator(): 21 | response = generate_message(prompt if prompt is not None else "Hello.") 22 | for word in response.split(): 23 | yield word + " " 24 | time.sleep(0.05) 25 | 26 | # Initialize chat history 27 | if "messages" not in st.session_state: 28 | st.session_state.messages = [] 29 | 30 | # Display chat messages from history on app rerun 31 | for message in st.session_state.messages: 32 | with st.chat_message(message["role"]): 33 | st.markdown(message["content"]) 34 | 35 | # Accept user input 36 | if prompt := st.chat_input("What is up?"): 37 | # Display user message in chat message container 38 | with st.chat_message("user"): 39 | st.markdown(prompt) 40 | 41 | # Add user message to chat history 42 | st.session_state.messages.append({"role": "user", "content": prompt}) 43 | 44 | # Display assistant response in chat message container 45 | with st.chat_message("assistant"): 46 | response = st.write_stream(response_generator()) 47 | 48 | # Add assistant response to chat history 49 | st.session_state.messages.append({"role": "assistant", "content": response}) -------------------------------------------------------------------------------- /pydantic-ai-masterclass/5-tools/5.3-tools-context.py: -------------------------------------------------------------------------------- 1 | import os 2 | from colorama import Fore 3 | import logfire 4 | from pydantic_ai import Agent, 
RunContext 5 | from pydantic_ai.models.openai import OpenAIModel 6 | from pydantic import BaseModel 7 | from dotenv import load_dotenv 8 | 9 | load_dotenv() 10 | 11 | # Configure logfire 12 | logfire.configure() 13 | 14 | # Define the model 15 | model = OpenAIModel('gpt-4o-mini', api_key=os.getenv('OPENAI_API_KEY')) 16 | 17 | # Define the output model 18 | class Capital(BaseModel): 19 | """"Capital city model - includes the name, year founded, short history of the city and a comparison to another city""" 20 | name: str 21 | year_founded: int 22 | short_history: str 23 | comparison: str 24 | 25 | # Define the agent 26 | agent = Agent(model=model, result_type=Capital, system_prompt="You are an experienced historian and you are asked a question about the capital of a country. You are expected to provide the name of the capital city, the year it was founded, and a short history of the city. Compare the the city to the city provided by the comparison tool. Always call the comparison tool to get the comparison city.") 27 | 28 | # Tool to get the comparison city 29 | @agent.tool(retries=2) 30 | def get_comparison_city(ctx: RunContext[str]) -> str: 31 | return f"The comparison city is {ctx.deps}" 32 | 33 | # Run the agent 34 | result = agent.run_sync("Capital of the US", deps="Paris") 35 | 36 | # Print the results 37 | print(Fore.RED, result.data.name) 38 | print(Fore.GREEN, result.data.year_founded) 39 | print(Fore.CYAN, result.data.short_history) 40 | print(Fore.YELLOW, result.data.comparison) -------------------------------------------------------------------------------- /streamlit-chatbot-ui/chat.py: -------------------------------------------------------------------------------- 1 | from langchain_core.prompts import ChatPromptTemplate 2 | from langchain.chains.combine_documents import create_stuff_documents_chain 3 | from langchain.chains import create_retrieval_chain 4 | from langchain_chroma import Chroma 5 | from models import Models 6 | 7 | # Initialize the 
# Define the retrieval chain
# BUG FIX: the original passed `kwargs={"k": 10}`, which VectorStoreRetriever
# does not recognize, so the intended top-10 retrieval was never applied.
# The correct parameter name is `search_kwargs`.
retriever = vector_store.as_retriever(search_kwargs={"k": 10})
combine_docs_chain = create_stuff_documents_chain(
    llm, prompt
)
retrieval_chain = create_retrieval_chain(retriever, combine_docs_chain)

# Main loop
def main():
    """Interactive RAG loop: read a question, retrieve context, print the answer.

    Type 'q', 'quit' or 'exit' to leave the loop.
    """
    while True:
        query = input("User (or type 'q', 'quit', or 'exit' to end): ")
        if query.lower() in ['q', 'quit', 'exit']:
            break

        result = retrieval_chain.invoke({"input": query})
        print("Assistant: ", result["answer"], "\n\n")

# Run the main loop
if __name__ == "__main__":
    main()
# Define the retrieval chain
# BUG FIX: the original passed `kwargs={"k": 10}`, which VectorStoreRetriever
# does not recognize, so the intended top-10 retrieval was never applied.
# The correct parameter name is `search_kwargs`.
retriever = vector_store.as_retriever(search_kwargs={"k": 10})
combine_docs_chain = create_stuff_documents_chain(
    llm, prompt
)
retrieval_chain = create_retrieval_chain(retriever, combine_docs_chain)

# Main loop
def main():
    """Interactive RAG loop: read a question, retrieve context, print the answer.

    Type 'q', 'quit' or 'exit' to leave the loop.
    """
    while True:
        query = input("User (or type 'q', 'quit', or 'exit' to end): ")
        if query.lower() in ['q', 'quit', 'exit']:
            break

        result = retrieval_chain.invoke({"input": query})
        print("Assistant: ", result["answer"], "\n\n")

# Run the main loop
if __name__ == "__main__":
    main()
@agent.tool_plain
def code_quality(raw_code: str, cyclomatic_complexity: float, percent_duplication: float, review: str, notification_list: list[str]) -> str:
    """Code quality tool.

    Args:
        raw_code: raw code contents
        cyclomatic_complexity: how complex the code is
        percent_duplication: how much code is duplicated
        review: written assessment of the code
        notification_list: list of emails to receive notifications
    """
    # DOC FIX: the `review` parameter was missing from the Args section, so
    # the griffe-extracted JSON schema (which this example prints) carried no
    # description for it.
    return f'{raw_code} {cyclomatic_complexity} {percent_duplication} {notification_list}'
# Filter messages by type
def filter_messages_by_type(messages: "list[ModelMessage]", message_type: "type[ModelMessage]") -> "list[ModelMessage]":
    """Return only the messages that are instances of *message_type*.

    Uses isinstance() instead of an exact ``type(msg) == message_type``
    comparison so subclasses of the requested message type are kept as well.
    The ``message_type`` annotation is also corrected: the argument is a
    message *class*, not a message instance.
    """
    return [msg for msg in messages if isinstance(msg, message_type)]
ChromaDB by running `python ingest.py`. Leave the terminal open as you can drop more files into the [`./data`](./data) folder and the script will automatically pick them up 18 | 19 | - Open a second terminal and run the RAG chatbot with `python chat.py` 20 | 21 | - Ask questions 22 | 23 | ### Tweaks 24 | 25 | - Change the chunk and overlap in [`ingest.py`](./ingest.py) and observe search results. Defaults are: 26 | 27 | - `chunk_size = 1000` 28 | - `chunk_overlap = 50` 29 | 30 | - Change the models from Ollama to OpenAI or another model by adding them to [`models.py`](./models.py) first and then changing the `embeddings` and `llm` references in [`ingest.py`](./ingest.py) and [`chat.py`](./chat.py) 31 | 32 | ### Talk to OWASP Secure Coding Standards 33 | 34 | - Delete all files from the [`./data`](./data) folder 35 | 36 | - Delete the ChromaDB local files at [`./db`](./db) folder 37 | 38 | - Open a new terminal and run `python html-to-pdf.py`. This will scrape the OWASP website and create 10 PDF files inside the [`./data`](./data) folder 39 | 40 | - Run the ingestion script 41 | 42 | - Ask questions related to secure coding standards 43 | -------------------------------------------------------------------------------- /pydantic-ai-masterclass/10-errors-and-reflection/10.5-model-errors-switch-model.py: -------------------------------------------------------------------------------- 1 | import os 2 | import pprint 3 | from colorama import Fore 4 | from dotenv import load_dotenv 5 | from pydantic_ai import Agent, ModelRetry, UnexpectedModelBehavior, capture_run_messages 6 | from pydantic_ai.models.openai import OpenAIModel 7 | from pydantic_ai.models.groq import GroqModel 8 | 9 | # Load the environment variables 10 | load_dotenv() 11 | 12 | # Define the models 13 | model = OpenAIModel('gpt-4o-mini', api_key=os.getenv('OPENAI_API_KEY')) 14 | groq_model = GroqModel(model_name='llama-3.3-70b-versatile', api_key=os.getenv('GROQ_API_KEY')) 15 | 16 | # Define the agent 17 | 
# Define the tool
@agent.tool_plain(retries=0)
def calc_volume(size: int) -> int:
    """Return the volume (size**3) of a box, but only when size == 42.

    Any other size raises ModelRetry; because retries=0, the retry is not
    attempted and the caller sees UnexpectedModelBehavior — which is exactly
    what this example demonstrates before switching to the Groq model.
    """
    if size == 42:
        return size**3
    else:
        print(Fore.RED, f"Invalid size: {size}.")
        raise ModelRetry('Please try again with another size')
# Define a system prompt with dependency injection
@agent.system_prompt
def get_industry(ctx: RunContext[str]) -> str:
    """Inject the user-supplied industry (passed via deps) into the system prompt."""
    industry = ctx.deps
    return f"Your industry specialization is in {industry}."

# Define the main loop
def main_loop():
    """Repeatedly ask for an industry and print the agent's career plan.

    Typing quit/exit/q ends the loop.
    """
    quit_words = ("quit", "exit", "q")
    while True:
        user_input = input(">> I am your career coach. Please provide an industry specialization: ")
        if user_input.lower() in quit_words:
            print("Goodbye!")
            break

        # Run the agent with the industry injected as a dependency
        result = agent.run_sync("Provide a 10 steps plan for a career overhaul.", deps=user_input)
        print(f"Career advice: {result.data}")

# Run the main loop
if __name__ == "__main__":
    main_loop()
# Convert each URL to a PDF
async def html_to_pdf(url, output_path):
    """Render a single URL to an A4 PDF with a headless browser.

    The browser is always closed, even if navigation or PDF generation
    raises — the original leaked the browser process on error.
    """
    browser = await launch()
    try:
        page = await browser.newPage()
        await page.goto(url, {'waitUntil': 'networkidle2'})
        await page.pdf({'path': output_path, 'format': 'A4'})
    finally:
        await browser.close()


# Convert each URL to a PDF
for i, url in enumerate(urls):
    output_path = os.path.join(output_dir, f'page_{i+1}.pdf')
    print(f'Converting {url} to {output_path}')
    # asyncio.run replaces the deprecated get_event_loop().run_until_complete
    asyncio.run(html_to_pdf(url, output_path))

print('All URLs converted to PDFs')
# Convert each URL to a PDF
async def html_to_pdf(url, output_path):
    """Render a single URL to an A4 PDF with a headless browser.

    The browser is always closed, even if navigation or PDF generation
    raises — the original leaked the browser process on error.
    """
    browser = await launch()
    try:
        page = await browser.newPage()
        await page.goto(url, {'waitUntil': 'networkidle2'})
        await page.pdf({'path': output_path, 'format': 'A4'})
    finally:
        await browser.close()


# Convert each URL to a PDF
for i, url in enumerate(urls):
    output_path = os.path.join(output_dir, f'page_{i+1}.pdf')
    print(f'Converting {url} to {output_path}')
    # asyncio.run replaces the deprecated get_event_loop().run_until_complete
    asyncio.run(html_to_pdf(url, output_path))

print('All URLs converted to PDFs')
5 | 6 | With SafeBase, you can: 7 | 8 | - **File a Claim in Minutes**: Snap a photo or upload documents from your phone, fill out the claim form, and submit it with just a few taps. 9 | - **Track Your Claim Status**: Get real-time updates on the status of your claim, so you know exactly where you stand. 10 | - **Get Reimbursed Fast**: Receive reimbursements directly into your bank account, without the need for paperwork or delays. 11 | 12 | **Key Features:** 13 | 14 | - User-friendly interface designed for easy navigation 15 | - Quick and secure claims filing process 16 | - Real-time claim status updates 17 | - Secure and encrypted data storage 18 | - Easy payment options, including direct deposit to your bank account 19 | 20 | **Why Choose SafeBase?** 21 | 22 | - **Convenience**: File a claim from anywhere, at any time. 23 | - **Speed**: Get reimbursed faster with our streamlined claims process. 24 | - **Security**: Your personal and financial information is protected with industry-standard encryption. 25 | - **Ease of Use**: Our app is designed to be intuitive and easy to use, even for those without technical expertise. 26 | 27 | **Download SafeBase Today!** 28 | 29 | Get the mobile app that's changing the way you file insurance claims. Available now on both iOS and Android devices. 30 | 31 | **Join the thousands of satisfied users who have already made the switch!** 32 | 33 | **Additional Benefits:** 34 | 35 | - Personalized support team available 24/7 36 | - Customizable notification settings for claim updates 37 | - Integration with your existing insurance provider(s) 38 | 39 | Don't wait – download SafeBase today and experience the future of claims filing! 
# FastAPI endpoint method
async def getMovieDetails(q: str, model: str, prompt: str) -> Movie:
    """Answer a movie query via the agent, with optional overrides.

    Args:
        q: the user's movie question.
        model: Groq model name; the default is used when None.
        prompt: system prompt; the default is used when None.

    Returns:
        A Movie populated by the agent.
    """
    # BUG FIX: the original used `model if not None else ...`, which evaluates
    # the constant expression `not None` (always True) instead of testing the
    # argument — so the documented fallbacks were never applied and a None
    # model/prompt flowed straight through.
    model_name = model if model is not None else "llama-3.3-70b-versatile"
    system_prompt = prompt if prompt is not None else "You are a movie critic and expert."
    agent = Agent(model=get_model(model_name), result_type=Movie, system_prompt=system_prompt)
    result = await agent.run(q)
    return result.data
11 | 12 |
13 | 53 | 54 | -------------------------------------------------------------------------------- /langgraph/agents-financial-research/data/Reasons_for_Low_Inflation_Rate_in_Switzerland.md: -------------------------------------------------------------------------------- 1 | ## Reasons for Low Inflation Rate in Switzerland Compared to Austria and Germany 2 | 3 | 1. **Strong Swiss Franc** 4 | - The Swiss franc's near-parity with the euro helps stabilize prices, making Switzerland less affected by fluctuations in the global market. 5 | - The strong Swiss franc also contributes directly to lower import prices, which can suppress overall inflation levels. 6 | 7 | 2. **Lower Interest Rates** 8 | - Switzerland typically sets lower interest rates compared to other countries. This is partly a consequence of its lower inflation rate, and also helps to keep inflation low by reducing the cost of borrowing. 9 | - Lower interest rates make Switzerland an attractive destination for investments, which can lead to a more stable economy. 10 | 11 | 3. **Stable Economic Policies** 12 | - Switzerland's economic policies, including fiscal responsibility and conservative monetary policies, contribute to its low inflation. These policies ensure that the economy grows at a stable rate without overheating. 13 | 14 | 4. **Comparative Economic Stability** 15 | - Switzerland has a comparatively stable economy, which shields it from the inflationary pressures seen in many other European countries. This stability comes from a strong financial sector and robust economic fundamentals. 16 | 17 | 5. **Global Economic Position** 18 | - Unlike its neighbors, Switzerland is not a member of the European Union and does not use the euro. This independence from the broader European monetary system allows it more control over its monetary policy, which can be tailored to suit its own economic conditions rather than those of a larger bloc. 
@agent.result_validator
async def validate_result(ctx: RunContext[Connection], result_data) -> Response:
    """Validate a generated SQL query by asking SQLite to EXPLAIN it.

    An InvalidRequest result carries no SQL, so it is passed through
    untouched (the original would have raised AttributeError on
    ``.sql_query``). A query that does not parse raises from execute();
    an empty EXPLAIN plan is rejected as well.
    """
    # Nothing to validate when the model declined the request.
    if isinstance(result_data, InvalidRequest):
        return result_data

    async with ctx.deps.cursor() as cursor:
        await cursor.execute(f'EXPLAIN {result_data.sql_query}')
        explain_result = await cursor.fetchall()
        if not explain_result:
            raise ValueError("Invalid SQL query")
        return result_data
# Tool get the source code
def get_source_code(ctx: RunContext[str]) -> str:
    """Get the source code"""
    # The code under review is injected via deps when the review agent is run.
    return f"The source code is {ctx.deps}"

# Tool get the industry standards
def get_industry_standards() -> CodeQuality:
    """Get the industry standards for code quality"""
    # Hard-coded benchmark values used as the comparison baseline for reviews.
    return CodeQuality(cyclomatic_complexity=5.0, percent_duplication=10.0, review="These are the industry standards")
Return only the source code.") 36 | 37 | # Code review agent 38 | code_review_agent = Agent(model=model, tools=[Tool(get_source_code, takes_ctx=True), Tool(get_industry_standards, takes_ctx=False)], result_type=CodeQuality, system_prompt="You an experienced software architect and code reviewer. You are reviewing a codebase to ensure quality standards are met. You need to provide the code quality metrics for the codebase and a review of the codebase comparing it to the industry standards.") 39 | 40 | # Run the agents 41 | result = coding_agent.run_sync("Create a method in Python that calculates the 30-yr fixed mortgage rates and returns an amortization table.") 42 | print(Fore.YELLOW, result.data) 43 | 44 | result = code_review_agent.run_sync("Read the code and provide the code quality metrics.", deps=result.data) 45 | print(Fore.CYAN, result.data) -------------------------------------------------------------------------------- /langgraph/state/tutorial-1/state.py: -------------------------------------------------------------------------------- 1 | ''' 2 | We will start with a simple graph where a single Developer node connected to START and END nodes. 3 | 4 | The StateGraph will be created with a TypedDict and have a single entry point and an edge to the end node. It will consist of a single key "count" with an initial value of 0. 5 | 6 | The Developer node will increment the count by 1 and return the state. 7 | 8 | We will add memory to the graph to save the state of the graph. 9 | 10 | Then, we will create a visualization of the graph. 11 | 12 | Finally, we will run the graph and print the result. 
# Create developer node
def developer(state):
    """Graph node: bump the run counter in the shared state and hand it back."""
    print('------ Developer ------')
    new_count = state['count'] + 1  # Increment the count
    state['count'] = new_count
    return state  # Return the modified state
Client, create_client 9 | 10 | load_dotenv() 11 | 12 | # Supabase vector store 13 | supabase_url = os.environ.get("SUPABASE_URL") 14 | supabase_key = os.environ.get("SUPABASE_SERVICE_KEY") 15 | supabase: Client = create_client(supabase_url, supabase_key) 16 | 17 | # Ollama embeddings model 18 | embeddings = OllamaEmbeddings( 19 | model="llama3.1" 20 | ) 21 | 22 | # Define constants 23 | data_folder = "./data" 24 | chunk_size = 1000 25 | chunk_overlap = 10 26 | table_name = "documents" 27 | query_name = "match_documents" 28 | check_interval = 10 29 | 30 | # Ingest a file to Supabase 31 | def ingest_file_to_supabase(file_path): 32 | print(f"Ingesting file: {file_path}") 33 | loader = TextLoader(file_path) 34 | documents = loader.load() 35 | text_splitter = CharacterTextSplitter(chunk_size=chunk_size, chunk_overlap=chunk_overlap) 36 | docs = text_splitter.split_documents(documents) 37 | 38 | return SupabaseVectorStore.from_documents( 39 | docs, 40 | embeddings, 41 | client=supabase, 42 | table_name=table_name, 43 | query_name=query_name, 44 | chunk_size=chunk_size, 45 | ) 46 | 47 | # Main loop 48 | def main_loop(): 49 | while True: 50 | for filename in os.listdir(data_folder): 51 | if not filename.startswith("_"): 52 | file_path = os.path.join(data_folder, filename) 53 | ingest_file_to_supabase(file_path) 54 | new_filename = "_" + filename 55 | new_file_path = os.path.join(data_folder, new_filename) 56 | os.rename(file_path, new_file_path) 57 | time.sleep(check_interval) # Check the folder every 10 seconds 58 | 59 | # Run the main loop 60 | if __name__ == "__main__": 61 | main_loop() -------------------------------------------------------------------------------- /pydantic-ai-masterclass/8-retries-usage-limits/8.3-tool-retries.py: -------------------------------------------------------------------------------- 1 | import os 2 | from dotenv import load_dotenv 3 | from colorama import Fore 4 | from pydantic import BaseModel 5 | from pydantic_ai import Agent, RunContext 
6 | from pydantic_ai.models.openai import OpenAIModel 7 | 8 | load_dotenv() 9 | 10 | # Define the model 11 | model = OpenAIModel('gpt-4o-mini', api_key=os.getenv('OPENAI_API_KEY')) 12 | 13 | # Define the response type as a Pydantic model 14 | class MathResponse(BaseModel): 15 | """Math model response. Contains the square, cube and n'th power of a number. Squared, cubed, quad, squad and a random number. Squad must be equal to 93 minus random.""" 16 | number: int 17 | square: int 18 | cube: int 19 | nth: int 20 | 21 | # Define the agent with a system prompt 22 | agent = Agent(model=model, system_prompt="You are a mathematician tasked to calculate the square, cube and the n'th power of a number. Provided a number return a MathResponse type. Call math_tool only once! Never call the tool twice. If the user input is ambiguous, assume the numbers and proceed with calling the math tool only once.", result_type=MathResponse) 23 | 24 | @agent.tool(retries=3) 25 | def math_tool(ctx: RunContext[str], number: int, power: int): 26 | """Calculate the square, cube and the n'th power of a number.""" 27 | 28 | print(Fore.CYAN, f"Using the math tool to calculate the square, cube and the {power}'th power of {number}...") 29 | square = number ** 2 30 | cube = number ** 3 31 | nth = number ** power 32 | return MathResponse(number=number, square=square, cube=cube, nth=nth) 33 | 34 | # Define the main loop 35 | def main_loop(): 36 | while True: 37 | user_input = input(">> I am a math agent. 
Provide a number and the power to calculate: ") 38 | if user_input.lower() in ["quit", "exit", "q"]: 39 | print("Goodbye!") 40 | break 41 | 42 | # Run the agent 43 | result = agent.run_sync(user_input) 44 | print(Fore.YELLOW, f"Assistant: {result.data}") 45 | print(Fore.RESET) 46 | 47 | # Run the main loop 48 | if __name__ == "__main__": 49 | main_loop() -------------------------------------------------------------------------------- /streamlit-chatbot-ui/chatbot_ui.py: -------------------------------------------------------------------------------- 1 | import streamlit as st 2 | from chat import retrieval_chain 3 | 4 | # Constants 5 | height = 600 6 | title = "My Chatbot UI" 7 | icon = ":robot:" 8 | 9 | def generate_message(user_input): 10 | response = retrieval_chain.invoke({"input": user_input}) 11 | answer = response["answer"] 12 | 13 | st.session_state.conversation.append({ 14 | "user": user_input, 15 | "assistant": answer 16 | }) 17 | 18 | # Iterate over the conversation history 19 | for entry in st.session_state.conversation: 20 | messages.chat_message("user").write(entry['user']) 21 | messages.chat_message("assistant").write(entry['assistant']) 22 | 23 | # Session: Initialize conversation history 24 | if "conversation" not in st.session_state: 25 | st.session_state.conversation = [] 26 | 27 | if 'clicked' not in st.session_state: 28 | st.session_state.clicked = False 29 | 30 | # Set page title and icon 31 | st.set_page_config(page_title=title, page_icon=icon) 32 | 33 | def toggle_clicked(): 34 | if st.session_state.clicked is True: 35 | st.session_state.clicked = False 36 | else: 37 | st.session_state.clicked = True 38 | 39 | col1, col2 = st.columns([4, 1], gap="large", vertical_alignment="bottom" ) 40 | with col1: 41 | st.header(title) 42 | with col2: 43 | if st.session_state.clicked is True: 44 | st.button("Close Files", on_click=toggle_clicked) 45 | else: 46 | st.button("Upload Files", on_click=toggle_clicked) 47 | 48 | if st.session_state.clicked: 49 | 
uploaded_files = st.file_uploader( 50 | "Upload multiple PDF files into the vector store", accept_multiple_files=True 51 | ) 52 | 53 | for uploaded_file in uploaded_files: 54 | # write the uploaded file to the data directory 55 | with open(f"data/{uploaded_file.name}", "wb") as f: 56 | f.write(uploaded_file.getbuffer()) 57 | # clear the uploaded file 58 | uploaded_file = None 59 | 60 | messages = st.container(border=True, height=height) 61 | 62 | if prompt := st.chat_input("Enter your question...", key="prompt"): 63 | generate_message(prompt) 64 | -------------------------------------------------------------------------------- /pydantic-ai-masterclass/10-errors-and-reflection/10.4-model-errors-bypass-tools.py: -------------------------------------------------------------------------------- 1 | import os 2 | import pprint 3 | from colorama import Fore 4 | from dotenv import load_dotenv 5 | from pydantic_ai import Agent, ModelRetry, UnexpectedModelBehavior, capture_run_messages 6 | from pydantic_ai.messages import ModelResponse, ToolCallPart 7 | from pydantic_ai.models.openai import OpenAIModel 8 | 9 | # Load the environment variables 10 | load_dotenv() 11 | 12 | # Define the model 13 | model = OpenAIModel('gpt-4o-mini', api_key=os.getenv('OPENAI_API_KEY')) 14 | 15 | # Define the agent 16 | agent = Agent(model=model, system_prompt="You are a helpful assistant.") 17 | 18 | # Define the tool 19 | @agent.tool_plain(retries=0) 20 | def calc_volume(size: int) -> int: 21 | if size == 42: 22 | return size**3 23 | else: 24 | print(Fore.RED, f"Invalid size: {size}.") 25 | raise ModelRetry('Please try again with another size') 26 | 27 | # Run the agent 28 | with capture_run_messages() as messages: 29 | try: 30 | result = agent.run_sync('Please get me the volume of a box with size 6.') 31 | except UnexpectedModelBehavior as e: 32 | print('An error occurred:', e) 33 | print('cause:', repr(e.__cause__)) 34 | #> cause: ModelRetry('Please try again.') 35 | print(Fore.RESET) 36 | 
pprint.pprint(messages, indent=0, width=80) 37 | 38 | # Filter the ModelResponse messages from all messages 39 | model_responses = [m for m in messages if isinstance(m, ModelResponse)] 40 | 41 | # For each model response, print the data 42 | for model_response in model_responses: 43 | # Get the parts 44 | parts = model_response.parts 45 | # Filter the ToolCallPart messages from all parts 46 | tool_call_parts = [p for p in parts if isinstance(p, ToolCallPart)] 47 | if tool_call_parts: 48 | if any(isinstance(m, ModelResponse) for m in messages): 49 | result = agent.run_sync('Please get me the volume of a box with size 6. Do not use tools.') 50 | print(Fore.GREEN, result.data) 51 | else: 52 | print(Fore.GREEN, result.data) -------------------------------------------------------------------------------- /pydantic-ai-masterclass/6-result-validator-functions/6.3-result-type.py: -------------------------------------------------------------------------------- 1 | import os 2 | import logfire 3 | from pydantic_ai import Agent, RunContext, ModelRetry 4 | from pydantic_ai.models.openai import OpenAIModel 5 | from pydantic import BaseModel 6 | from dotenv import load_dotenv 7 | from colorama import Fore 8 | 9 | load_dotenv() 10 | 11 | # Configure logfire 12 | logfire.configure() 13 | 14 | # Define the model 15 | model = OpenAIModel('gpt-4o-mini', api_key=os.getenv('OPENAI_API_KEY')) 16 | 17 | # Define the output model 18 | class Capital(BaseModel): 19 | """Capital city model - includes name and short history of the city""" 20 | name: str 21 | year_founded: int 22 | short_history: str 23 | comparison: str 24 | 25 | # Define the agent 26 | agent = Agent(model=model, result_type=Capital, system_prompt="You are an experienced historian and you are asked a question about the capital of a country. 
You are expected to provide the name of the capital city, the year it was founded, a short history of the city and a comparison of the city age to the age of the city provided by the tool.") 27 | 28 | @agent.tool 29 | def get_comparison_city(ctx: RunContext[str]) -> str: 30 | return f"The comparison city is {ctx.deps}" 31 | 32 | @agent.result_validator 33 | def validate_result(ctx: RunContext[str], result: Capital) -> Capital: 34 | if (result.year_founded > 1000): 35 | print(Fore.YELLOW, f"Prompt: {ctx.prompt}") 36 | print(Fore.YELLOW, f"Dependencies: {ctx.deps}") 37 | print(Fore.MAGENTA, f"Evaluating: {result.name}") 38 | print(Fore.MAGENTA, f"Comparison: {result.comparison}") 39 | print(Fore.RED, f"Validation failed: Year founded {result.year_founded} is too high. Try another country.") 40 | raise ModelRetry("Year founded is too high. Try another country.") 41 | return result 42 | 43 | # Run the agent 44 | try: 45 | result = agent.run_sync("What is the capital of the US?", deps="Toronto") 46 | print(Fore.RED, result.data.name) 47 | print(Fore.GREEN, result.data.year_founded) 48 | print(Fore.CYAN, result.data.short_history) 49 | except ModelRetry as e: 50 | print(Fore.RED, e) 51 | except Exception as e: 52 | print(Fore.RED, e) -------------------------------------------------------------------------------- /streamlit-chatbot-ui/ingest.py: -------------------------------------------------------------------------------- 1 | import os 2 | import time 3 | from dotenv import load_dotenv 4 | from langchain_community.document_loaders import PyPDFLoader 5 | from langchain_text_splitters import RecursiveCharacterTextSplitter 6 | from langchain_chroma import Chroma 7 | from uuid import uuid4 8 | from models import Models 9 | 10 | load_dotenv() 11 | 12 | # Initialize the models 13 | models = Models() 14 | embeddings = models.embeddings_ollama 15 | llm = models.model_ollama 16 | 17 | # Define constants 18 | data_folder = "./data" 19 | chunk_size = 1000 20 | chunk_overlap = 50 
21 | check_interval = 10 22 | 23 | # Chroma vector store 24 | vector_store = Chroma( 25 | collection_name="documents", 26 | embedding_function=embeddings, 27 | persist_directory="./db/chroma_langchain_db", # Where to save data locally 28 | ) 29 | 30 | # Ingest a file 31 | def ingest_file(file_path): 32 | # Skip non-PDF files 33 | if not file_path.lower().endswith('.pdf'): 34 | print(f"Skipping non-PDF file: {file_path}") 35 | return 36 | 37 | print(f"Starting to ingest file: {file_path}") 38 | loader = PyPDFLoader(file_path) 39 | loaded_documents = loader.load() 40 | text_splitter = RecursiveCharacterTextSplitter( 41 | chunk_size=chunk_size, chunk_overlap=chunk_overlap, separators=["\n", " ", ""] 42 | ) 43 | documents = text_splitter.split_documents(loaded_documents) 44 | uuids = [str(uuid4()) for _ in range(len(documents))] 45 | print(f"Adding {len(documents)} documents to the vector store") 46 | vector_store.add_documents(documents=documents, ids=uuids) 47 | print(f"Finished ingesting file: {file_path}") 48 | 49 | # Main loop 50 | def main_loop(): 51 | while True: 52 | for filename in os.listdir(data_folder): 53 | if not filename.startswith("_"): 54 | file_path = os.path.join(data_folder, filename) 55 | ingest_file(file_path) 56 | new_filename = "_" + filename 57 | new_file_path = os.path.join(data_folder, new_filename) 58 | os.rename(file_path, new_file_path) 59 | time.sleep(check_interval) # Check the folder every 10 seconds 60 | 61 | # Run the main loop 62 | if __name__ == "__main__": 63 | main_loop() -------------------------------------------------------------------------------- /langchain-rag-pdf/tutorial-1/ingest.py: -------------------------------------------------------------------------------- 1 | import os 2 | import time 3 | from dotenv import load_dotenv 4 | from langchain_community.document_loaders import PyPDFLoader 5 | from langchain_text_splitters import RecursiveCharacterTextSplitter 6 | from langchain_chroma import Chroma 7 | from uuid import 
uuid4 8 | from models import Models 9 | 10 | load_dotenv() 11 | 12 | # Initialize the models 13 | models = Models() 14 | embeddings = models.embeddings_ollama 15 | llm = models.model_ollama 16 | 17 | # Define constants 18 | data_folder = "./data" 19 | chunk_size = 1000 20 | chunk_overlap = 50 21 | check_interval = 10 22 | 23 | # Chroma vector store 24 | vector_store = Chroma( 25 | collection_name="documents", 26 | embedding_function=embeddings, 27 | persist_directory="./db/chroma_langchain_db", # Where to save data locally 28 | ) 29 | 30 | # Ingest a file 31 | def ingest_file(file_path): 32 | # Skip non-PDF files 33 | if not file_path.lower().endswith('.pdf'): 34 | print(f"Skipping non-PDF file: {file_path}") 35 | return 36 | 37 | print(f"Starting to ingest file: {file_path}") 38 | loader = PyPDFLoader(file_path) 39 | loaded_documents = loader.load() 40 | text_splitter = RecursiveCharacterTextSplitter( 41 | chunk_size=chunk_size, chunk_overlap=chunk_overlap, separators=["\n", " ", ""] 42 | ) 43 | documents = text_splitter.split_documents(loaded_documents) 44 | uuids = [str(uuid4()) for _ in range(len(documents))] 45 | print(f"Adding {len(documents)} documents to the vector store") 46 | vector_store.add_documents(documents=documents, ids=uuids) 47 | print(f"Finished ingesting file: {file_path}") 48 | 49 | # Main loop 50 | def main_loop(): 51 | while True: 52 | for filename in os.listdir(data_folder): 53 | if not filename.startswith("_"): 54 | file_path = os.path.join(data_folder, filename) 55 | ingest_file(file_path) 56 | new_filename = "_" + filename 57 | new_file_path = os.path.join(data_folder, new_filename) 58 | os.rename(file_path, new_file_path) 59 | time.sleep(check_interval) # Check the folder every 10 seconds 60 | 61 | # Run the main loop 62 | if __name__ == "__main__": 63 | main_loop() -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 
1 | ## Welcome
2 | 
3 | Welcome to **[AI Software Developers](https://www.youtube.com/@AISoftwareDevelopers)**—a community of engineers, students, architects, and managers looking to harness the power of AI for cutting-edge software engineering!
4 | 
5 | ## Looking for Collaborators
6 | 
7 | Have an idea for an article, video tutorial, a learning project or anything related to AI? Consider collaborating with our growing community of contributors. Get started today by **[posting your idea on our Discord server](https://discord.gg/eQXBaCvTA9)**. Together, we are building a strong community of AI Software Developers.
8 | 
9 | ## How to Contribute
10 | 
11 | This repository is maintained by the team at **AI Software Developer** channel. Contributions are welcome! If you'd like to contribute, please check out the [open issues](https://github.com/aidev9/tuts/issues?q=is%3Aissue+is%3Aopen+label%3Acontributors), read contribution guidelines and submit a PR.
12 | 
13 | ## Latest Video
14 | 
15 | [![Latest Video](https://img.youtube.com/vi/xVe87QpNE80/0.jpg)](https://www.youtube.com/watch?v=xVe87QpNE80)
16 | 
17 | ## [Subscribe Today](https://www.youtube.com/@AISoftwareDevelopers)
18 | 
19 | ### About
20 | 
21 | We’re here to help you enhance your workflows, streamline business processes, and build smarter applications. From AI-powered tools to in-depth tutorials on software architecture and full-stack development, we provide the knowledge you need to stay ahead in today’s fast-evolving tech world.
22 | 
23 | What you will find on this channel:
24 | 
25 | - Step-by-step coding tutorials
26 | - Deep dives into AI tools like GPT, LangChain, and NotebookLM
27 | - Best practices for scalable React apps and backend development
28 | - Real-world projects that tackle complex use cases
29 | 
30 | ## Help and Support
31 | 
32 | If you encounter a problem or have questions, feel free to open an issue in this repository or [ask a question on the Discord server](https://discord.gg/eQXBaCvTA9).
33 | 
34 | ## Thank you
35 | 
36 | Thank you for contributing to this repository! Your efforts help create a valuable resource for the AI community. If you have any questions, feel free to reach out via [our Discord server](https://discord.gg/eQXBaCvTA9) or open an issue in this repository. Let’s build a strong AI community together!
37 | 
--------------------------------------------------------------------------------
/pydantic-ai-masterclass/9-agent-memory/9.1-hello-world.py:
--------------------------------------------------------------------------------
1 | import os
2 | from colorama import Fore
3 | from dotenv import load_dotenv
4 | from pydantic_ai import Agent
5 | from pydantic_ai.models.openai import OpenAIModel
6 | 
7 | load_dotenv()
8 | 
9 | # Define the model
10 | model = OpenAIModel('gpt-4o-mini', api_key=os.getenv('OPENAI_API_KEY'))
11 | ollama_model = OpenAIModel(model_name='deepseek-r1', base_url='http://localhost:11434/v1')
12 | 
13 | stock = "TESLA"
14 | 
15 | system_prompt = f"You are an investment portfolio manager specialized in US tech stocks. The user is a 45 year old tech executive with $10M net worth and 5 year aggresive investment window. Your job is to provide investment advice on {stock} stock. The user will never invest more than 5% of their portfolio into any individual stock or ETF. Your job is to guide that decision and suggest a number between 1-5%. Consider the user's profile, investment window, and the stock's performance, market trends, and expert opinions. Consider alternative investments."
16 | 17 | # Define the agent 18 | agent = Agent(model=model, system_prompt=system_prompt) 19 | reasoning_agent = Agent(model=ollama_model, system_prompt=system_prompt) 20 | 21 | # Run the agent 22 | result_pro = agent.run_sync(user_prompt=f"Provide arguments why I should buy {stock} and how much I should invest.") 23 | 24 | messages = result_pro.all_messages() 25 | print(Fore.GREEN, result_pro.data) 26 | 27 | result_con = agent.run_sync( 28 | user_prompt=f"Provide counter arguments why I should not buy {stock} and what to do instead.", 29 | message_history=result_pro.new_messages(), 30 | ) 31 | print(Fore.RED, result_con.data) 32 | 33 | combined_messages = result_pro.new_messages() + result_con.new_messages() 34 | 35 | result_reasoning = reasoning_agent.run_sync( 36 | user_prompt=f"Should I buy {stock} stock? Respond with a strong YES or NO, and support your answer with facts and the percent investment. If the answer is yes, what are some of the risks associated? If the answer is no, what are the top 5 alternatives? Include buy percent for each. Balance both the negative and positive arguments and help me make a decision. 
Try to be netural and only consider user's situation and financial goals.", 37 | message_history=combined_messages, 38 | ) 39 | print(Fore.YELLOW, result_reasoning.data) 40 | -------------------------------------------------------------------------------- /pydantic-ai-masterclass/4-system-prompts/4.6-dynamic-advanced.py: -------------------------------------------------------------------------------- 1 | import os 2 | from dotenv import load_dotenv 3 | from pydantic_ai import Agent, RunContext 4 | from pydantic_ai.models.openai import OpenAIModel 5 | from pydantic import BaseModel 6 | 7 | load_dotenv() 8 | 9 | # Define the model 10 | model = OpenAIModel('gpt-4o-mini', api_key=os.getenv('OPENAI_API_KEY')) 11 | 12 | # Define the output model 13 | class SystemPrompt(BaseModel): 14 | """System prompt for an agent to generate helpful responses""" 15 | prompt: str 16 | tags: list[str] 17 | 18 | # Define the agent 19 | prompt_agent = Agent(model=model, result_type=SystemPrompt, system_prompt="You an expert prompt writer. Create a system prompt to be used for an AI agent that will help a user based on the user's input. Must be very descriptive and include step by step instructions on how the agent can best answer user's question. Do not directly answer the question. Start with 'You are a helpful assistant specialized in...'. 
Include any relevant tags that will help the AI agent understand the context of the user's input.") 20 | 21 | agent = Agent(model=model, system_prompt="Use the system prompt and tags provided to generate a helpful response to the user's input.") 22 | 23 | @agent.system_prompt 24 | def add_prompt(ctx: RunContext[str]) -> str: 25 | return ctx.deps.prompt 26 | 27 | @agent.system_prompt 28 | def add_tags(ctx: RunContext[str]) -> str: 29 | return f"Use these tags: {ctx.deps.tags}" 30 | 31 | # Define the main loop 32 | def main_loop(): 33 | message_history = [] 34 | prompt_generated = False 35 | while True: 36 | user_input = input(">> ") 37 | if user_input.lower() in ["quit", "exit", "q"]: 38 | print("Goodbye!") 39 | break 40 | 41 | if not prompt_generated: 42 | system_prompt = prompt_agent.run_sync(user_input).data 43 | print("Prompt:", system_prompt.prompt) 44 | print("Tags:", system_prompt.tags) 45 | prompt_generated = True 46 | 47 | # Run the agent 48 | result = agent.run_sync(user_input, deps=system_prompt, message_history=message_history) 49 | message_history = result.all_messages() 50 | print(f"Assistant: {result.data}") 51 | 52 | # Run the main loop 53 | if __name__ == "__main__": 54 | main_loop() -------------------------------------------------------------------------------- /langgraph/multiagent-graph/graph_ui.py: -------------------------------------------------------------------------------- 1 | import streamlit as st 2 | from main import graph 3 | from langchain_core.messages import AIMessage, HumanMessage 4 | 5 | # Constants 6 | height = 600 7 | title = "Multi-Agent Software Team (LangGraph)" 8 | icon = ":robot:" 9 | 10 | def generate_message(user_input): 11 | response = graph.invoke({"messages": [HumanMessage(content=user_input)]}) 12 | ai_messages = [msg for msg in response["messages"] if isinstance(msg, AIMessage)] 13 | 14 | st.session_state.conversation.append({ 15 | "user": user_input, 16 | "analyst": ai_messages[-7].content, 17 | "architect": 
ai_messages[-6].content, 18 | "developer": ai_messages[-5].content, 19 | "reviewer": ai_messages[-4].content, 20 | "tester": ai_messages[-3].content, 21 | "diagram_designer": ai_messages[-2].content, 22 | "summary_writer": ai_messages[-1].content, 23 | }) 24 | 25 | # Iterate over the conversation history 26 | for entry in st.session_state.conversation: 27 | messages.chat_message("user", avatar="img/user.png").write(entry['user']) 28 | messages.chat_message("ai", avatar="img/analyst.png" ).write("**Analyst:** \n" + entry['analyst']) 29 | messages.chat_message("ai", avatar="img/architect.png" ).write("**Architect:** \n" + entry['architect']) 30 | messages.chat_message("ai", avatar="img/developer.png" ).write("**Developer:** \n" + entry['developer']) 31 | messages.chat_message("ai", avatar="img/review.png").write("**Code Reviewer:** \n" + entry['reviewer']) 32 | messages.chat_message("ai", avatar="img/tester.png" ).write("**Tester:** \n" + entry['tester']) 33 | messages.chat_message("ai", avatar="img/diagram.png").write("**Diagram Designer:** \n" + entry['diagram_designer']) 34 | messages.chat_message("ai", avatar="img/summary.png").write("**Summary Writer:** \n" + entry['summary_writer']) 35 | 36 | # Session: Initialize conversation history 37 | if "conversation" not in st.session_state: 38 | st.session_state.conversation = [] 39 | 40 | # Set page title and icon 41 | st.set_page_config(page_title=title, page_icon=icon) 42 | st.header(title) 43 | 44 | # Create a container for the chat messages 45 | messages = st.container(border=True, height=height) 46 | 47 | # Chatbot UI 48 | if prompt := st.chat_input("Enter your question...", key="prompt"): 49 | generate_message(prompt) 50 | -------------------------------------------------------------------------------- /langgraph/multiagent-graph/README.md: -------------------------------------------------------------------------------- 1 | ## Multi-Agent Software Team (LangGraph) 2 | 3 | ### Video Tutorial 4 | 5 | [Watch the 
video tutorial here](https://youtu.be/YCNFyzQ2Z0g)
6 | 
7 | ### Agents
8 | 
9 | - **Analyst**: You are a software requirements analyst. Review the provided instructions and generate software development requirements that a developer can understand and create code from. Be precise and clear in your requirements.
10 | - **Architect**: You are a Software Architect who can design scalable systems that work in cloud environments. Review the software requirements provided and create an architecture document that will be used by developers, testers and designers to implement the system. Provide the architecture only.
11 | - **Developer**: You are a Full Stack Developer and can code in any language. Review the provided instructions and write the code. Return the coding artifacts only.
12 | - **Reviewer**: You are an experienced developer and code reviewer. You know the best design patterns for web applications that run on the cloud and can do code reviews in any language. Review the provided code and suggest improvements. Only focus on the provided code and suggest actionable items.
13 | - **Tester**: You are a test automation expert who can create test scripts in any language. Review the provided user instructions, software requirements and write test code to ensure good quality of the software.
14 | - **Diagram Designer**: You are a Software Designer and can draw diagrams explaining any code. Review the provided code and create a Mermaid diagram explaining the code.
15 | - **Summary Writer**: You are an expert in creating technical documentation and can summarize complex documents into human-readable documents. Review the provided messages and create a meaningful summary. Retain all the source code generated and include it in the summary.
16 | 17 | ### Setup 18 | 19 | - pip install -r requirements.txt 20 | - Create a .env file and add OPENAI, LANGCHAIN keys (Use .env.example as template) 21 | - Review the context limit in LLM declaration [main.py](./main.py) 22 | 23 | ```python 24 | llm = AzureChatOpenAI( 25 | azure_deployment=os.environ.get("AZURE_OPENAI_API_DEPLOYMENT_NAME"), 26 | api_version=os.environ.get("AZURE_OPENAI_API_VERSION"), 27 | temperature=0, 28 | max_tokens=1000, 29 | timeout=None, 30 | max_retries=2, 31 | ) 32 | ``` 33 | 34 | ### Run 35 | 36 | - `python main.py` 37 | - `streamlit run graph_ui.py` 38 | -------------------------------------------------------------------------------- /pydantic-ai-masterclass/10-errors-and-reflection/10.1-hello-world.py: -------------------------------------------------------------------------------- 1 | import asyncio 2 | import os 3 | import aiosqlite 4 | from sqlite3 import Connection 5 | from colorama import Fore 6 | from dotenv import load_dotenv 7 | from pydantic import BaseModel 8 | from pydantic_ai import Agent, RunContext 9 | from pydantic_ai.models.openai import OpenAIModel 10 | 11 | load_dotenv() 12 | 13 | # Define the model 14 | model = OpenAIModel('gpt-4o-mini', api_key=os.getenv('OPENAI_API_KEY')) 15 | 16 | class Patient(BaseModel): 17 | patient_id: int 18 | email: str 19 | full_name: str 20 | year_born: int 21 | 22 | # Define the agent 23 | agent = Agent(model=model, system_prompt="You are a helpful medical assistant. 
Retrieve the patient's record using the tools provided.", result_type=Patient, deps_type=Connection) 24 | 25 | @agent.tool(retries=3) 26 | async def get_patient_by_email(ctx: RunContext[Connection], email: str) -> int: 27 | """Get a patient's record from their email address.""" 28 | 29 | async with ctx.deps.cursor() as cursor: 30 | await cursor.execute(f'SELECT * FROM patients WHERE email="{email}"') 31 | query_result = await cursor.fetchall() 32 | if not query_result: 33 | raise ValueError(f"Patient with email {email} not found.") 34 | return query_result[0] 35 | 36 | async def seed_db(conn): 37 | await conn.execute("DROP TABLE IF EXISTS patients") 38 | await conn.execute("CREATE TABLE IF NOT EXISTS patients (patient_id INTEGER PRIMARY KEY, email TEXT, full_name TEXT, year_born INTEGER)") 39 | await conn.execute("INSERT INTO patients (email, full_name, year_born) VALUES ('john@gmail.com', 'John Doe', 1980)") 40 | await conn.execute("INSERT INTO patients (email, full_name, year_born) VALUES ('jane@gmail.com', 'Jane Doe', 1985)") 41 | await conn.execute("INSERT INTO patients (email, full_name, year_born) VALUES ('james@gmail.com', 'Jim Doe', 1990)") 42 | await conn.commit() 43 | 44 | async def main(): 45 | async with aiosqlite.connect("patients.sqlite") as conn: 46 | await seed_db(conn) 47 | try: 48 | result = await agent.run('Retrieve the patient record for jane@gmail.com', deps=conn) 49 | print(Fore.GREEN, f"Patient record: {result.data}") 50 | except ValueError as e: 51 | print(Fore.YELLOW, e) 52 | except Exception as e: 53 | print(Fore.RED, e) 54 | 55 | if __name__ == "__main__": 56 | asyncio.run(main()) -------------------------------------------------------------------------------- /pydantic-ai-masterclass/9-agent-memory/9.4-persist-memory.py: -------------------------------------------------------------------------------- 1 | import os 2 | import pickle 3 | from colorama import Fore 4 | from dotenv import load_dotenv 5 | from pydantic_ai import Agent 6 | from 
pydantic_ai.messages import (ModelMessage) 7 | from pydantic_ai.models.openai import OpenAIModel 8 | 9 | load_dotenv() 10 | 11 | # Define the model 12 | model = OpenAIModel('gpt-4o-mini', api_key=os.getenv('OPENAI_API_KEY')) 13 | system_prompt = "You are a helpful assistant." 14 | 15 | # Define the agent 16 | agent = Agent(model=model, system_prompt=system_prompt) 17 | 18 | # Write messages to file 19 | def write_memory(memory: list[ModelMessage], file_path: str): 20 | with open(file_path, 'wb') as f: 21 | pickle.dump(memory, f) 22 | 23 | # Read messages from file 24 | def read_memory(file_path: str) -> list[ModelMessage]: 25 | memory = [] 26 | with open(file_path, 'rb') as f: 27 | memory = pickle.load(f) 28 | return memory 29 | 30 | # Delete messages file 31 | def delete_memory(file_path: str): 32 | if os.path.exists(file_path): 33 | os.remove(file_path) 34 | 35 | # Define the main loop 36 | def main_loop(): 37 | MEMORY_FILE_PATH = "./memory.pickle" 38 | MAX_MESSAGE_HISTORY_LENGTH = 5 39 | 40 | try: 41 | message_history: list[ModelMessage] = read_memory(MEMORY_FILE_PATH) 42 | except: 43 | message_history: list[ModelMessage] = [] 44 | 45 | while True: 46 | user_input = input(">> I am your asssitant. How can I help you today? 
") 47 | if user_input.lower() in ["quit", "exit", "q"]: 48 | print("Goodbye!") 49 | break 50 | 51 | if user_input.lower() in ["clear", "reset"]: 52 | print("Clearing memory...") 53 | delete_memory(MEMORY_FILE_PATH) 54 | message_history = [] 55 | continue 56 | 57 | # Run the agent 58 | result = agent.run_sync(user_input, deps=user_input, message_history=message_history) 59 | print(Fore.WHITE, result.data) 60 | msg = result.new_messages() 61 | message_history.extend(msg) 62 | 63 | # Limit the message history 64 | # message_history = message_history[-MAX_MESSAGE_HISTORY_LENGTH:] 65 | write_memory(message_history, MEMORY_FILE_PATH) 66 | print(Fore.YELLOW, f"Message length: {message_history.__len__()}") 67 | print(Fore.RESET) 68 | # Run the main loop 69 | if __name__ == "__main__": 70 | main_loop() -------------------------------------------------------------------------------- /langchain-ollama/js/SignUpForm.js: -------------------------------------------------------------------------------- 1 | import React, { useState } from "react"; 2 | import "./SignUpForm.css"; 3 | 4 | const SignUpForm = () => { 5 | // Define the initial state of the form fields 6 | const [username, setUsername] = useState(""); 7 | const [email, setEmail] = useState(""); 8 | const [password, setPassword] = useState(""); 9 | const [confirmPassword, setConfirmPassword] = useState(""); 10 | 11 | // Function to handle form submission 12 | const handleSubmit = (event) => { 13 | event.preventDefault(); 14 | // Validate the form fields 15 | if (!username || !email || !password || !confirmPassword) { 16 | alert("Please fill in all fields"); 17 | return; 18 | } 19 | 20 | // Send a POST request to the server to create a new user 21 | fetch("/api/users", { 22 | method: "POST", 23 | headers: { "Content-Type": "application/json" }, 24 | body: JSON.stringify({ username, email, password }), 25 | }) 26 | .then((response) => response.json()) 27 | .then((data) => console.log(data)) 28 | .catch((error) => 
console.error(error)); 29 | }; 30 | 31 | // Render the form fields 32 | return ( 33 |
34 |

Sign Up

35 |
36 | {/* Username field */} 37 | 38 | setUsername(event.target.value)} 42 | /> 43 |
44 | 45 | {/* Email field */} 46 | 47 | setEmail(event.target.value)} 51 | /> 52 |
53 | 54 | {/* Password field */} 55 | 56 | setPassword(event.target.value)} 60 | /> 61 |
62 | 63 | {/* Confirm password field */} 64 | 65 | setConfirmPassword(event.target.value)} 69 | /> 70 |
71 | 72 | {/* Submit button */} 73 | 74 |
75 |
76 | ); 77 | }; 78 | 79 | export default SignUpForm; 80 | -------------------------------------------------------------------------------- /pydantic-ai-masterclass/6-result-validator-functions/6.4-multiple-validators.py: -------------------------------------------------------------------------------- 1 | import os 2 | from dotenv import load_dotenv 3 | from pydantic_ai import Agent, RunContext, ModelRetry 4 | from pydantic_ai.models.openai import OpenAIModel 5 | from pydantic import BaseModel 6 | from colorama import Fore 7 | 8 | load_dotenv() 9 | 10 | # Define the model 11 | model = OpenAIModel('gpt-4o-mini', api_key=os.getenv('OPENAI_API_KEY')) 12 | 13 | # Define the output model 14 | class BaseNumber(BaseModel): 15 | """Number model representing a digit in base 10, binary and hexadecimal formats.""" 16 | num_dec: int 17 | num_bin: str 18 | num_hex: str 19 | 20 | # Define the agent 21 | agent = Agent(model=model, result_type=BaseNumber, system_prompt="Generate a random number.", retries=1) 22 | 23 | # Define the decimal result validator 24 | @agent.result_validator 25 | def validate_result_dec(ctx: RunContext[str], result: BaseNumber) -> BaseNumber: 26 | print(Fore.MAGENTA, f"Evaluating DEC: {result.num_dec}") 27 | if (result.num_dec > 500): 28 | print(Fore.RED, f"Validation failed: Number {result.num_dec} is too high. Try another number.") 29 | raise ModelRetry("Number is too high. Try another number.") 30 | return result 31 | 32 | # Define the binary result validator 33 | @agent.result_validator 34 | def validate_result_bin(ctx: RunContext[str], result: BaseNumber) -> BaseNumber: 35 | print(Fore.MAGENTA, f"Evaluating BIN: {result.num_bin}") 36 | # Convert the binary number to decimal 37 | num_dec = int(result.num_bin, 2) 38 | if (num_dec > 500): 39 | print(Fore.RED, f"Validation failed: Number {result.num_bin} is too high. Try another number.") 40 | raise ModelRetry("Number is too high. 
# Define the hexadecimal result validator
@agent.result_validator
def validate_result_hex(ctx: RunContext[str], result: BaseNumber) -> BaseNumber:
    """Reject any result whose hexadecimal field exceeds 500 (decimal)."""
    print(Fore.MAGENTA, f"Evaluating HEX: {result.num_hex}")
    # Interpret the hex string as a base-16 integer for the range check.
    as_decimal = int(result.num_hex, 16)
    # Guard clause: accept in-range values immediately.
    if as_decimal <= 500:
        return result
    print(Fore.RED, f"Validation failed: Number {result.num_hex} is too high. Try another number.")
    raise ModelRetry("Number is too high. Try another number.")
years. Generate a dot chart using different colors for US and Germany. Save the results in a CSV file 24 | - What is the average unemployment in the OECD countries and how does it compare to the US? Plot a bar chart and save the data in a CSV file 25 | - What is the average salary for software engineers in the US compared to the rest of the OECD. Create a CSV file and a bar chart. Compare the cost of living in the top 10 most expensive cities in the World along with average salaries. Create a CSV file and plot a bar chart, overlaying cost of living and salary 26 | - Compare San Francisco and Zurich, Switzerland in terms of cost of living and average salaries in local currency and print the results in a bar chart and write to a CSV file 27 | - Plot a chart of the natural gas historical data for the past 5 years 28 | - What was DEU GDP in 2020 29 | - Compare the GDPs of US and China for the past 10 years and plot a bar chart 30 | - Analyze the GDP data for OECD countries in the past 10 years and identify the best and worst performers according to the data provided 31 | - Analyze the GDP data for OECD countries in the past 10 years and identify the best and worst performers according to the data provided. Plot a table chart with the results. 32 | - Plot a bar chart of the natural gas prices for the past 5 years and overlay with Germany GDP for the same period 33 | - What is the cost of living in the capital city of Sweden? Plot this in a pie chart with the top 5 expense categories 34 | - Create a prediction chart for the temperatures in San Francisco based on the annual averages in November 35 | - Overlay the GDP growth of Germany with the natural gas prices for the past 10 years. 
# Define the output model
class Invoice(BaseModel):
    """Structured invoice returned by the agent (generated or extracted from text)."""

    invoice_number: str = Field(..., description="The unique identifier for the invoice.")
    date_issued: str = Field(..., description="The date when the invoice was issued.")
    due_date: str = Field(..., description="The date by which payment is expected to be made.")
    currency: str = Field(..., description="The currency in which the invoice is denominated.")

    customer_name: str = Field(..., description="The name of the customer.")
    company: str = Field(..., description="The company associated with the customer.")
    address: str = Field(..., description="The address of the customer.")

    # default_factory is pydantic's recommended idiom for mutable defaults;
    # behavior is unchanged (each instance still starts with an empty list/dict).
    services_provided: list[str] = Field(default_factory=list, description="A detailed breakdown of services provided.")
    subtotal: float = Field(..., description="The total amount before tax.")
    tax_rate: float = Field(..., description="The tax rate applied to the invoice.")
    tax_amount: float = Field(..., description="The calculated tax amount.")
    total_amount_due: float = Field(..., description="The final amount due after including tax.")

    payment_instructions: dict = Field(default_factory=dict, description="Instructions for making payment.")

# Define the agent
agent = Agent(model=model, result_type=Invoice)
result = agent.run_sync("Can you generate an invoice for a consulting service provided to Acme Inc. on 2022-01-15 with a due date of 2022-02-15? The invoice should include the following services: 10 hours of consulting at $100 per hour, 5 hours of research at $50 per hour, and 3 hours of report writing at $75 per hour. The tax rate is 20 percent and the payment should be made via bank transfer.") 41 | 42 | logfire.notice('Text prompt LLM results: {result}', result = str(result.data)) 43 | logfire.info('Result type: {result}', result = type(result.data)) 44 | 45 | # Read the markdown file 46 | with open('data/invoice.md', 'r') as file: 47 | invoice_data = file.read() 48 | 49 | # Run the agent 50 | result = agent.run_sync(f"Can you extract the following information from the invoice? The raw data is {invoice_data}") 51 | 52 | logfire.notice('Invoice markdown prompt LLM results: {result}', result = str(result.data)) 53 | logfire.info('Result type: {result}', result = type(result.data)) -------------------------------------------------------------------------------- /streamlit-chatbot-ui/qa.md: -------------------------------------------------------------------------------- 1 | ### RAG Testing: Dolphins Q&A 2 | 3 | Fact: Over 50,000 visitors come to Lake Tahoe each year specifically for a chance to interact with its legendary dolphins! 4 | Q: How many visitors come to Lake Tahoe each year specifically for a chance to interact with dolphins? 5 | A: 50,000 6 | 7 | -- 8 | 9 | Fact: ...the dolphins of Lake Tahoe, known as Tursiops tahoensis... 10 | Q: What are the Lake Tahoe doplhins known as? 11 | A: Tursiops tahoensis 12 | 13 | -- 14 | 15 | Fact: Birth Weight: Approximately 40 pounds 16 | Q: What is the birth weight of Tahoe dolphins? 17 | A: Approximately 40 pounds 18 | 19 | -- 20 | 21 | Fact: Diet: Primarily lake trout, smallmouth bass, kokanee salmon, and perch 22 | Q: What is the diet of Tahoe dolphins? 
23 | A: Primarily lake trout, smallmouth bass, kokanee salmon, and perch 24 | 25 | -- 26 | 27 | Fact: Hunting Techniques: Cooperative hunting, ambush techniques, and “fish-herding” 28 | Q: What is the hunting teqnique of Lake Tahoe dolphins? 29 | A: Cooperative hunting, ambush techniques, and “fish-herding” 30 | 31 | ### RAG Testing: OWASP Q&A 32 | 33 | Source: page_3.pdf 34 | Q: What is the CIA triad? 35 | A: CIA stands for Confidentiality, Integrity and Availability, and it is usually depicted as a triangle 36 | representing the strong bonds between its three tenets. 37 | 38 | -- 39 | 40 | Source: page_5.pdf 41 | Q: What is Security by Design? 42 | A: Security should not be an afterthought or add-on. When developing systems, you should begin with 43 | identifying relevant security requirements and treat them as an integral part of the overall process 44 | and system design. Begin with establishing and adopting relevant principles and policies as a 45 | foundation for your design, then build sec 46 | 47 | -- 48 | 49 | Source: page_6.pdf 50 | Q: What are the principles of cryptography? 51 | A: A list of the principles the PDF file 52 | 53 | -- 54 | 55 | Source: page_7.pdf 56 | Q: What is the OWASP Top 10? 57 | A: A list of the OWASP Top 10 from the PDF files 58 | 59 | -- 60 | 61 | Source: page_9.pdf 62 | Q: What are the software security requirements maturity levels? 63 | A: The software security requirements maturity levels are: 64 | 65 | 1. High-level application security objectives are mapped to functional requirements 66 | 2. Structured security requirements are available and utilized by developer teams 67 | 3. Build a requirements framework for product teams to utilize 68 | 69 | -- 70 | 71 | Source: page_10.pdf 72 | Q: What are some steps in identifying risks and quantifying them according to the Risk Rating Methodology? 73 | A: The OWASP page on Risk Rating Methodology describes some steps in identifying risks and quantifying them: 74 | 75 | 1. 
Identifying a risk 76 | 2. Factors for estimating likelihood 77 | 3. Factors for estimating impact 78 | 79 | -- 80 | -------------------------------------------------------------------------------- /langchain-rag-pdf/tutorial-1/qa.md: -------------------------------------------------------------------------------- 1 | ### RAG Testing: Dolphins Q&A 2 | 3 | Fact: Over 50,000 visitors come to Lake Tahoe each year specifically for a chance to interact with its legendary dolphins! 4 | Q: How many visitors come to Lake Tahoe each year specifically for a chance to interact with dolphins? 5 | A: 50,000 6 | 7 | -- 8 | 9 | Fact: ...the dolphins of Lake Tahoe, known as Tursiops tahoensis... 10 | Q: What are the Lake Tahoe dolphins known as? 11 | A: Tursiops tahoensis 12 | 13 | -- 14 | 15 | Fact: Birth Weight: Approximately 40 pounds 16 | Q: What is the birth weight of Tahoe dolphins? 17 | A: Approximately 40 pounds 18 | 19 | -- 20 | 21 | Fact: Diet: Primarily lake trout, smallmouth bass, kokanee salmon, and perch 22 | Q: What is the diet of Tahoe dolphins? 23 | A: Primarily lake trout, smallmouth bass, kokanee salmon, and perch 24 | 25 | -- 26 | 27 | Fact: Hunting Techniques: Cooperative hunting, ambush techniques, and “fish-herding” 28 | Q: What is the hunting technique of Lake Tahoe dolphins? 29 | A: Cooperative hunting, ambush techniques, and “fish-herding” 30 | 31 | ### RAG Testing: OWASP Q&A 32 | 33 | Source: page_3.pdf 34 | Q: What is the CIA triad? 35 | A: CIA stands for Confidentiality, Integrity and Availability, and it is usually depicted as a triangle 36 | representing the strong bonds between its three tenets. 37 | 38 | -- 39 | 40 | Source: page_5.pdf 41 | Q: What is Security by Design? 42 | A: Security should not be an afterthought or add-on. When developing systems, you should begin with 43 | identifying relevant security requirements and treat them as an integral part of the overall process 44 | and system design. 
Begin with establishing and adopting relevant principles and policies as a 45 | foundation for your design, then build sec 46 | 47 | -- 48 | 49 | Source: page_6.pdf 50 | Q: What are the principles of cryptography? 51 | A: A list of the principles the PDF file 52 | 53 | -- 54 | 55 | Source: page_7.pdf 56 | Q: What is the OWASP Top 10? 57 | A: A list of the OWASP Top 10 from the PDF files 58 | 59 | -- 60 | 61 | Source: page_9.pdf 62 | Q: What are the software security requirements maturity levels? 63 | A: The software security requirements maturity levels are: 64 | 65 | 1. High-level application security objectives are mapped to functional requirements 66 | 2. Structured security requirements are available and utilized by developer teams 67 | 3. Build a requirements framework for product teams to utilize 68 | 69 | -- 70 | 71 | Source: page_10.pdf 72 | Q: What are some steps in identifying risks and quantifying them according to the Risk Rating Methodology? 73 | A: The OWASP page on Risk Rating Methodology describes some steps in identifying risks and quantifying them: 74 | 75 | 1. Identifying a risk 76 | 2. Factors for estimating likelihood 77 | 3. Factors for estimating impact 78 | 79 | -- 80 | -------------------------------------------------------------------------------- /pydantic-ai-masterclass/9-agent-memory/9.5-multi-agent-memory.py: -------------------------------------------------------------------------------- 1 | import os 2 | from colorama import Fore 3 | from dotenv import load_dotenv 4 | from pydantic_ai import Agent 5 | from pydantic_ai.messages import (ModelMessage, ModelResponse) 6 | from pydantic_ai.models.openai import OpenAIModel 7 | 8 | load_dotenv() 9 | 10 | # Define the model 11 | model = OpenAIModel('gpt-4o-mini', api_key=os.getenv('OPENAI_API_KEY')) 12 | 13 | system_prompt_research = f"You are a research assistant. Take user's input and provide relevant research." 14 | system_prompt_reviewer = f"You are a research reviewer. 
# Filter messages by type
def filter_messages_by_type(messages: list[ModelMessage], message_type: type[ModelMessage]) -> list[ModelMessage]:
    """Return only the messages that are instances of *message_type*.

    Fixes two issues in the original:
    - `type(msg) == message_type` is the non-idiomatic exact-type check; it
      silently drops subclasses. isinstance() is the conventional check.
    - The parameter annotation said `message_type: ModelMessage` although a
      class (e.g. ModelResponse), not an instance, is passed in.
    """
    return [msg for msg in messages if isinstance(msg, message_type)]
print(Fore.YELLOW, f"Message length: {message_history.__len__()}") 56 | print(Fore.RESET) 57 | # Run the main loop 58 | if __name__ == "__main__": 59 | main_loop() -------------------------------------------------------------------------------- /langchain-ollama/langollama-simple.ipynb: -------------------------------------------------------------------------------- 1 | { 2 | "cells": [ 3 | { 4 | "cell_type": "markdown", 5 | "metadata": {}, 6 | "source": [ 7 | "## A Simple Coding Assistant with LangChain and Ollama\n" 8 | ] 9 | }, 10 | { 11 | "cell_type": "code", 12 | "execution_count": null, 13 | "metadata": {}, 14 | "outputs": [], 15 | "source": [ 16 | "# Install libraries\n", 17 | "%pip install langchain_ollama\n", 18 | "%pip install langchain_core\n", 19 | "\n", 20 | "# Import necessary libraries\n", 21 | "from langchain_ollama import ChatOllama\n", 22 | "from langchain_core.prompts import ChatPromptTemplate" 23 | ] 24 | }, 25 | { 26 | "cell_type": "markdown", 27 | "metadata": {}, 28 | "source": [ 29 | "### Model Definition\n" 30 | ] 31 | }, 32 | { 33 | "cell_type": "code", 34 | "execution_count": 44, 35 | "metadata": {}, 36 | "outputs": [], 37 | "source": [ 38 | "# Initialize the model\n", 39 | "llm = ChatOllama(\n", 40 | " model=\"llama3.2:1b\",\n", 41 | " temperature=0,\n", 42 | ")" 43 | ] 44 | }, 45 | { 46 | "cell_type": "markdown", 47 | "metadata": {}, 48 | "source": [ 49 | "### Prompt template\n" 50 | ] 51 | }, 52 | { 53 | "cell_type": "code", 54 | "execution_count": 45, 55 | "metadata": {}, 56 | "outputs": [], 57 | "source": [ 58 | "prompt = ChatPromptTemplate.from_messages(\n", 59 | " [\n", 60 | " (\"system\", \"You are a software developer specialized in {coding_language}.\"),\n", 61 | " (\"human\", \"{input}\"),\n", 62 | " ]\n", 63 | ")" 64 | ] 65 | }, 66 | { 67 | "cell_type": "markdown", 68 | "metadata": {}, 69 | "source": [ 70 | "### Invoke chain\n" 71 | ] 72 | }, 73 | { 74 | "cell_type": "code", 75 | "execution_count": null, 76 | "metadata": {}, 77 
| "outputs": [], 78 | "source": [ 79 | "chain = prompt | llm\n", 80 | "ai_msg = chain.invoke(\n", 81 | " {\n", 82 | " \"coding_language\": \"React\",\n", 83 | " \"input\": \"Create a react component Sign Up Form for an online banking app. Use best practices and include code comments.\",\n", 84 | " }\n", 85 | ")\n", 86 | "\n", 87 | "print(ai_msg.content)" 88 | ] 89 | } 90 | ], 91 | "metadata": { 92 | "kernelspec": { 93 | "display_name": "myenv", 94 | "language": "python", 95 | "name": "python3" 96 | }, 97 | "language_info": { 98 | "codemirror_mode": { 99 | "name": "ipython", 100 | "version": 3 101 | }, 102 | "file_extension": ".py", 103 | "mimetype": "text/x-python", 104 | "name": "python", 105 | "nbconvert_exporter": "python", 106 | "pygments_lexer": "ipython3", 107 | "version": "3.12.6" 108 | } 109 | }, 110 | "nbformat": 4, 111 | "nbformat_minor": 2 112 | } 113 | -------------------------------------------------------------------------------- /langgraph/state/tutorial-2/state.py: -------------------------------------------------------------------------------- 1 | ''' 2 | In this second example, we will create a simple graph that will hold a list of the Fibonacci numbers. 3 | Our StateGraph will use TypedDict and have a single entry point and an edge to the end node. It will consist of a single key "fibonacci" with an initial value of [0]. 4 | The fibonacci_reducer function will be used to update the state by adding the next Fibonacci number to the list. 5 | The state will be updated by the custom reducer, while the Developer node will be a noop. In the chatbot loop we will manually calculate the next Fibonacci number and pass it to the graph. 6 | We will add memory to the graph to save the state of the graph. 7 | We will draw a visualization of the graph. In addition, we will utilize LangSmith to trace the execution of the graph and monitor the state changes. 8 | Finally, we will run the graph and print the result. 
# Calculate next Fibonacci number
def fibonacci(n: int) -> int:
    """Return the n-th Fibonacci number (F(0)=0, F(1)=1).

    Iterative implementation: the original naive double recursion is O(2^n)
    and effectively hangs the chatbot loop (which calls this with an
    ever-increasing n) after a few dozen iterations. This runs in O(n).
    Non-positive n returns 0, matching the original behavior.
    """
    if n <= 0:
        return 0
    a, b = 0, 1
    for _ in range(n - 1):
        a, b = b, a + b
    return b

# Define reducer
def fibonacci_reducer(current: list[int] | None, update: list[int] | None) -> list[int]:
    """Merge *update* into *current*, deduplicated and sorted ascending.

    Annotations corrected: the graph passes lists (e.g. [next_fibonacci]),
    not a bare int, and `current` may be None on first use.
    """
    if current is None:
        current = []
    if update is None:
        return current
    # set() already deduplicates; the original's extra list() wrapper before
    # sorted() was redundant.
    return sorted(set(current + update))
-------------------------------------------------------------------------------- /pydantic-ai-masterclass/8-retries-usage-limits/8.4-result-validator-retries.py: -------------------------------------------------------------------------------- 1 | import os 2 | from dotenv import load_dotenv 3 | from colorama import Fore 4 | from pydantic import BaseModel 5 | from pydantic_ai import Agent, ModelRetry, RunContext 6 | from pydantic_ai.models.openai import OpenAIModel 7 | 8 | load_dotenv() 9 | 10 | # Define the model 11 | model = OpenAIModel('gpt-4o-mini', api_key=os.getenv('OPENAI_API_KEY')) 12 | 13 | # Define the response type as a Pydantic model 14 | class MathResponse(BaseModel): 15 | """Math model response. Contains the square, cube and n'th power of a number. Squared, cubed, quad, squad and a random number. Squad must be equal to 93 minus random.""" 16 | number: int 17 | power: int 18 | square: int 19 | cube: int 20 | nth: int 21 | 22 | # Define the agent with a system prompt 23 | agent = Agent(model=model, system_prompt="You are a mathematician tasked to calculate the square, cube and the n'th power of a number. Provided a number return a MathResponse type. Call math_tool only once! Never call the tool twice. If the user input is ambiguous, assume the numbers and proceed with calling the math tool only once. 
@agent.tool(retries=3)
def math_tool(ctx: RunContext[str], number: int, power: int):
    """Calculate the square, cube and the n'th power of a number."""
    print(Fore.CYAN, f"Using the math tool to calculate the square, cube and the {power}'th power of {number}...")
    # Build the response inline instead of via intermediate locals.
    return MathResponse(
        number=number,
        power=power,
        square=number ** 2,
        cube=number ** 3,
        nth=number ** power,
    )

@agent.result_validator
def math_response_validator(ctx: RunContext[str], response: MathResponse):
    """Validate the math response."""
    # Re-derive each expected value and compare, in the same order as before:
    # square, then cube, then n'th power.
    checks = (
        (response.square, response.number ** 2, "Square calculation is incorrect."),
        (response.cube, response.number ** 3, "Cube calculation is incorrect."),
        (response.nth, response.number ** response.power, "N'th power calculation is incorrect."),
    )
    for observed, expected, message in checks:
        if observed != expected:
            raise ValueError(message)
    if response.number > 4:
        print(Fore.RED, f"Number {response.number} is too large. Retrying...")
        print(Fore.RESET)
        raise ModelRetry("Number is too large.")

    return response
Provide a number and the power to calculate: ") 56 | if user_input.lower() in ["quit", "exit", "q"]: 57 | print("Goodbye!") 58 | break 59 | 60 | # Run the agent 61 | result = agent.run_sync(user_input) 62 | print(Fore.YELLOW, f"Assistant: {result.data}") 63 | print(Fore.RESET) 64 | 65 | # Run the main loop 66 | if __name__ == "__main__": 67 | main_loop() -------------------------------------------------------------------------------- /pydantic-ai-masterclass/5-tools/5.5-tools-prepare.py: -------------------------------------------------------------------------------- 1 | import os 2 | from colorama import Fore 3 | import logfire 4 | from typing import Union 5 | from pydantic_ai import Agent, RunContext, Tool 6 | from pydantic_ai.tools import ToolDefinition 7 | from pydantic_ai.models.openai import OpenAIModel 8 | from pydantic import BaseModel 9 | from dotenv import load_dotenv 10 | 11 | load_dotenv() 12 | 13 | # Configure logfire 14 | logfire.configure() 15 | 16 | # Define the model 17 | model = OpenAIModel('gpt-4o-mini', api_key=os.getenv('OPENAI_API_KEY')) 18 | 19 | # Define the output model 20 | class CodeQuality(BaseModel): 21 | """Code Quality metrics""" 22 | cyclomatic_complexity: float 23 | percent_duplication: float 24 | review: str 25 | 26 | # Coding agent 27 | coding_agent = Agent(model=model, system_prompt="You an experienced software developer. Write code accorting to the user's requirements. Return only the source code.") 28 | 29 | # Code review agent 30 | code_review_agent = Agent(model=model, result_type=CodeQuality, system_prompt="You an experienced software architect and code reviewer. You are reviewing a codebase to ensure quality standards are met. You need to provide the code quality metrics for the codebase and a review of the codebase comparing it to the industry standards.") 31 | 32 | # Notifications agent 33 | notifications_agent = Agent(model=model, system_prompt="You are a notification agent. 
You need to send a notification to the user based on the code quality metrics and the industry standards.") 34 | 35 | # Tool get the source code 36 | @coding_agent.tool 37 | def get_source_code(ctx: RunContext[str]) -> str: 38 | """Get the source code""" 39 | return f"The source code is {ctx.deps}" 40 | 41 | # Tool get the industry standards 42 | @coding_agent.tool 43 | def get_industry_standards() -> CodeQuality: 44 | """Get the industry standards for code quality""" 45 | return CodeQuality(cyclomatic_complexity=5.0, percent_duplication=10.0, review="These are the industry standards") 46 | 47 | async def if_below_industry_standards( 48 | ctx: RunContext[int], tool_def: ToolDefinition 49 | ) -> Union[ToolDefinition, None]: 50 | if ctx.deps.cyclomatic_complexity > 2: 51 | return tool_def 52 | 53 | # Tool to sent notifications 54 | @notifications_agent.tool(prepare=if_below_industry_standards) 55 | def send_notification(ctx: RunContext[CodeQuality]) -> str: 56 | """Send a notification""" 57 | print(Fore.YELLOW, f"Notification sent: {ctx.deps.review}") 58 | return f"Notification sent: {ctx.deps.review}" 59 | 60 | # Run the agents 61 | result = coding_agent.run_sync("Create a method in Python that calculates the 30-yr fixed mortgage rates and returns an amortization table.") 62 | print(Fore.YELLOW, result.data) 63 | 64 | result = code_review_agent.run_sync("Read the code and provide the code quality metrics.", deps=result.data) 65 | print(Fore.CYAN, result.data) 66 | 67 | result = notifications_agent.run_sync("Send a notification based on the code quality metrics and the industry standards.", deps=result.data) 68 | print(Fore.RED, result.data) -------------------------------------------------------------------------------- /pydantic-ai-masterclass/10-errors-and-reflection/10.2-self-correction.py: -------------------------------------------------------------------------------- 1 | import asyncio 2 | import os 3 | import aiosqlite 4 | from sqlite3 import Connection 5 | 
# Define the model
model = OpenAIModel('gpt-4o-mini', api_key=os.getenv('OPENAI_API_KEY'))

# Structured result the agent must produce.
class Patient(BaseModel):
    patient_id: int
    email: str
    full_name: str
    year_born: int

# Define the agent: deps is an open aiosqlite connection.
agent = Agent(model=model, system_prompt="You are a helpful medical assistant. Retrieve the patient's record using the tools provided.", result_type=Patient, deps_type=Connection)

@agent.tool(retries=10)
async def get_patient_by_name(ctx: RunContext[Connection], name: str) -> tuple:
    """Get a patient's record from their full name."""
    async with ctx.deps.cursor() as cursor:
        # Parameterized query: never interpolate model-supplied values into
        # SQL (the original f-string form was injectable).
        await cursor.execute('SELECT * FROM patients WHERE full_name = ?', (name,))
        query_result = await cursor.fetchall()
    if not query_result:
        print(Fore.YELLOW, f"Patient with name {name} not found.")
        # ModelRetry feeds the message back to the model so it can
        # self-correct (e.g. try "Jim" for "James").
        raise ModelRetry(f"Patient with name {name} not found. Can you try a variation of the name?")
    return query_result[0]

@agent.tool(retries=3)
async def get_patient_by_email(ctx: RunContext[Connection], email: str) -> tuple:
    """Get a patient's record from their email address."""
    async with ctx.deps.cursor() as cursor:
        await cursor.execute('SELECT * FROM patients WHERE email = ?', (email,))
        query_result = await cursor.fetchall()
    if not query_result:
        # Plain ValueError: no retry, bubbles up to the caller.
        raise ValueError(f"Patient with email {email} not found.")
    return query_result[0]

async def seed_db(conn):
    """Recreate the patients table and insert three sample rows."""
    await conn.execute("DROP TABLE IF EXISTS patients")
    await conn.execute("CREATE TABLE IF NOT EXISTS patients (patient_id INTEGER PRIMARY KEY, email TEXT, full_name TEXT, year_born INTEGER)")
    await conn.execute("INSERT INTO patients (email, full_name, year_born) VALUES ('john@gmail.com', 'John Doe', 1980)")
    await conn.execute("INSERT INTO patients (email, full_name, year_born) VALUES ('jane@gmail.com', 'Jane Doe', 1985)")
    await conn.execute("INSERT INTO patients (email, full_name, year_born) VALUES ('james@gmail.com', 'Jim Doe', 1990)")
    await conn.commit()

async def main():
    """Seed the database, then run the agent once and print the outcome."""
    async with aiosqlite.connect("patients.sqlite") as conn:
        await seed_db(conn)
        try:
            # "James Doe" is deliberately absent (the DB has "Jim Doe") to
            # exercise the ModelRetry self-correction path.
            result = await agent.run('Retrieve the patient record for James Doe', deps=conn)
            print(Fore.GREEN, f"Patient record: {result.data}")
        except ValueError as e:
            print(Fore.YELLOW, e)
        except Exception as e:
            print(Fore.RED, e)

if __name__ == "__main__":
    asyncio.run(main())
load_dotenv()

# Define the model
model = OpenAIModel('gpt-4o-mini', api_key=os.getenv('OPENAI_API_KEY'))

# System prompt for the invoice-generation assistant.
# Fixed: the string originally opened with four quotes (""""), which injected
# a stray '"' as the first character of the prompt sent to the model.
# The lone {} placeholder is filled with today's date via .format() below.
system_prompt = """You are an experienced accounting assistant tasked with generating detailed and precise invoices for a leading technology consulting firm. Your primary goal is to ensure that all invoices accurately reflect the services provided, including any applicable taxes or discounts.

Please follow these guidelines when creating each invoice:

1. **Invoice Number:** Ensure that each invoice has a unique number assigned sequentially.
2. **Date of Service:** Clearly state the date(s) on which the consulting services were rendered.
3. **Client Information:**
   - Name
   - Company (if applicable)
   - Address
   - Contact Details
4. **Service Description:**
   - Provide a detailed description of each service provided, including any specific projects or deliverables.
5. **Hourly Rate/Project Fee:** Clearly state the hourly rate for consulting services or the project fee if it's a fixed-price engagement.
6. **Hours Worked/Fees Incurred:** Include the total number of hours worked (if applicable) and the corresponding fees incurred, or list out specific project costs with their respective amounts.
7. **Taxes:**
   - Calculate any applicable taxes based on local regulations (e.g., VAT, GST).
   - Clearly indicate whether these are included in the invoice amount or if they need to be added separately.
8. **Discounts/Adjustments:** If there were any discounts applied or adjustments made during the engagement, clearly state them and reflect their impact on the total fee.
9. **Payment Terms:**
   - Specify the payment terms (e.g., due upon receipt, net 30 days).
10. **Invoice Data:** Use today's date {}.
11. **Due Date:** Add 30 days to today's date.
12. **Invoice Total:** Summarize all charges and taxes to provide a clear total amount owed by the client.

**Additional Notes:**
- Use professional language throughout the invoice.
- Ensure that all information is accurate and up-to-date.
- Attach any relevant documents or receipts supporting the services provided.
- Maintain consistency in formatting across all invoices generated for this business.

By following these guidelines, you will be able to generate highly detailed and precise invoices that accurately reflect the technology consulting services rendered by your firm. This will help ensure timely payments from clients while maintaining high standards of professionalism and accuracy.""".format(date.today())

# Define the agent
agent = Agent(model=model, system_prompt=system_prompt)

# Run the agent
result = agent.run_sync(user_prompt="Customer Name: John Doe\nServices Provided: Web Development, AI Consulting, Strategic Advisory,\nTotal Amount: $50000")

print(result.data)
This method generates a PNG image of the graph using the Mermaid library. 9 | 10 | Finally, we will run the chatbot and print the next Fibonacci number in the sequence. 11 | ''' 12 | 13 | from typing import TypedDict 14 | from langgraph.graph import StateGraph, START, END 15 | from langgraph.checkpoint.memory import MemorySaver 16 | from typing import Annotated 17 | 18 | import sqlite3 19 | from langgraph.checkpoint.sqlite import SqliteSaver 20 | 21 | # Calculate next Fibonacci number 22 | def fibonacci(n: int) -> int: 23 | if n <= 0: 24 | return 0 25 | elif n == 1: 26 | return 1 27 | else: 28 | return fibonacci(n-1) + fibonacci(n-2) 29 | 30 | # Define reducer 31 | def fibonacci_reducer(current: list[int], update: int | None) -> list[int]: 32 | if current is None: 33 | current = [] 34 | if update is None: 35 | return current 36 | return sorted(list(set(current + update))) 37 | 38 | # Define state 39 | class GraphState(TypedDict): 40 | fibonacci: Annotated[list[int], fibonacci_reducer] 41 | 42 | # Create the graph 43 | builder = StateGraph(GraphState) 44 | 45 | # Create developer node 46 | def developer(state): 47 | return state # Return the modified state 48 | 49 | # Add the node to the graph 50 | builder.add_node("developer", developer) 51 | 52 | # Set entry point and edge 53 | builder.add_edge(START, "developer") 54 | builder.add_edge('developer', END) 55 | 56 | # Configuration and memory 57 | config = {"configurable": {"thread_id": 2}} 58 | 59 | # Create a connection to the SQLite database 60 | conn = sqlite3.connect("checkpoints.sqlite", check_same_thread=False) 61 | memory = SqliteSaver(conn) 62 | 63 | # Compile and run the builder 64 | graph = builder.compile(checkpointer=memory) 65 | 66 | # Provide the initial state 67 | initial = {"fibonacci": [0]} 68 | result = graph.invoke(initial, config) 69 | 70 | # Draw the graph 71 | try: 72 | graph.get_graph(xray=True).draw_mermaid_png(output_file_path="graph.png") 73 | except Exception: 74 | pass 75 | 76 | # Run the 
# Combined dependency/result model for the stock researcher agent.
class Stock(BaseModel):
    """Stock model - includes stock symbol, stock name and stock price"""

    stock_name: str = "AAPL"  # company name as entered by the user (placeholder default)
    stock_symbol: str = ""  # ticker symbol, resolved by the agent
    stock_price: float = 0.0  # latest closing price, filled by get_stock_price

# Define a system prompt with dependency injection
# NOTE(review): the name `get_industry` looks like a copy-paste leftover —
# it injects the requested stock name, not an industry; consider renaming.
@agent.system_prompt
def get_industry(ctx: RunContext[Stock]) -> str:
    """Dynamic system-prompt fragment naming the requested stock."""
    return f"The stock requested is {ctx.deps.stock_name}."
# Define a tool with dependency injection
@agent.tool
def get_stock_price(ctx: RunContext[Stock], symbol: str) -> Stock:
    """Fetch the latest closing price for `symbol` from Yahoo Finance."""
    print(Fore.WHITE + f"Getting stock price for {symbol}...")
    dat = yf.Ticker(symbol)
    history = dat.history(period="1d")
    # Guard: an unknown or delisted symbol yields an empty frame; without
    # this check, .iloc[0] raises an opaque IndexError.
    if history.empty:
        raise ValueError(f"No price data found for symbol {symbol!r}.")
    stock_price = history['Close'].iloc[0]
    return Stock(stock_name=ctx.deps.stock_name, stock_symbol=symbol, stock_price=stock_price)

# Define a result validator with dependency injection
@agent.result_validator
def validate_stock_price(ctx: RunContext[Stock], result: Stock) -> Stock:
    """Reject results with a negative price; otherwise pass them through."""
    if result.stock_price < 0:
        raise ValueError("Stock price cannot be negative.")
    return result

# Define the main loop
def main_loop():
    """Interactive loop: read a company name, run the agent, print the result."""
    while True:
        user_input = input(">> Enter a company name (q, quit, exit to exit): ")
        if user_input.lower() in ["quit", "exit", "q"]:
            print("Goodbye!")
            break

        stock = Stock(stock_name=user_input)

        # Run the agent; any tool/validator error is reported, not fatal.
        try:
            result = agent.run_sync("Find the latest price for the company name provided.", deps=stock)
            print(Fore.WHITE, '-----------------------------------')
            print(Fore.YELLOW, f"Stock name: {result.data.stock_name}")
            print(Fore.YELLOW, f"Stock symbol: {result.data.stock_symbol}")
            print(Fore.YELLOW, f"Stock price: ${result.data.stock_price:.2f}")
            print(Fore.WHITE, '-----------------------------------')
        except Exception as e:
            print(Fore.WHITE, f"Error: {e}")

# Run the main loop
if __name__ == "__main__":
    main_loop()