├── .devcontainer ├── devcontainer.json └── setup.sh ├── .env.examples ├── .github └── dependabot.yml ├── .gitignore ├── .vscode └── settings.json ├── 00.ForBeginners ├── 01-intro-to-ai-agents │ └── code_samples │ │ ├── dotnet-agent-framework-travelagent.ipynb │ │ └── python-agent-framework-travelagent.ipynb ├── 02-explore-agentic-frameworks │ └── code_samples │ │ ├── dotnet-agent-framework-basicagent.ipynb │ │ └── python-agent-framework-basicagent.ipynb ├── 03-agentic-design-patterns │ └── code_samples │ │ ├── dotnet-agent-framework-ghmodel-basicagent.ipynb │ │ └── python-agent-framework-ghmodel-basicagent.ipynb ├── 04-tool-use │ └── code_samples │ │ ├── dotnet-agent-framework-ghmodels-tool.ipynb │ │ └── python-agent-framework-ghmodel-tools.ipynb ├── 05-agentic-rag │ └── code_samples │ │ ├── document.md │ │ ├── dotnet-agent-framework-aifoundry-file-search.ipynb │ │ └── python-agent-framework-aifoundry-file-search.ipynb ├── 07-planning-design │ └── code_samples │ │ ├── dotnet-agent-framrwork-ghmodel-planningdesign.ipynb │ │ └── python-agent-framrwork-ghmodel-planningdesign.ipynb ├── 08-multi-agent │ └── code_samples │ │ ├── dotnet-agent-framework-ghmodel-workflow-multi-agents.ipynb │ │ └── python-agent-framework-ghmodel-workflow-multi-agents.ipynb └── README.md ├── 01.AgentFoundation └── README.md ├── 02.CreateYourFirstAgent ├── README.md └── code_samples │ ├── dotNET │ └── dotnet-travelagent-ghmodel.ipynb │ └── python │ └── python-travelagent-ghmodel.ipynb ├── 03.ExploerAgentFramework ├── README.md └── code_samples │ ├── dotNET │ ├── 01-dotnet-agent-framework-aoai.ipynb │ ├── 02-dotnet-agent-framrwork-ghmodel.ipynb │ ├── 03-dotnet-agent-framework-aifoundry.ipynb │ └── 04-dotnet-agent-framework-foundrylocal.ipynb │ └── python │ ├── 01-python-agent-framework-aoai.ipynb │ ├── 02-python-agent-framrwork-ghmodel.ipynb │ ├── 03-python-agent-framework-aifoundry.ipynb │ └── 04-python-agent-framrwork-foundrylocal.ipynb ├── 04.Tools ├── README.md └── code_samples │ ├── dotNET │ └── foundry │ │ ├── 01-dotnet-agent-framework-aifoundry-vision.ipynb │ │ ├── 02-dotnet-agent-framework-aifoundry-code-interpreter.ipynb │ │ ├── 03-dotnet-agent-framework-aifoundry-binggrounding.ipynb │ │ └── 04-dotnet-agent-framework-aifoundry-file-search.ipynb │ ├── files │ ├── demo.md │ └── home.png │ └── python │ └── foundry │ ├── 01.python-agent-framework-aifoundry-vision.ipynb │ ├── 02.python-agent-framework-aifoundry-code-interpreter.ipynb │ ├── 03.python-agent-framework-aifoundry-binggrounding.ipynb │ └── 04.python-agent-framework-aifoundry-file-search.ipynb ├── 05.Providers ├── README.md └── code_samples │ ├── dotNET │ ├── 01-dotnet-agent-framework-aifoundry-mcp │ │ └── AgentMCP.Console │ │ │ ├── .env.examples │ │ │ ├── AgentMCP.Console.csproj │ │ │ ├── AgentMCP.Console.sln │ │ │ └── Program.cs │ └── 02-dotnet-agent-framework-aifoundry-a2a │ │ └── README.md │ └── python │ └── 01-python-agent-framework-aifoundry-mcp.ipynb ├── 06.RAGs ├── README.md └── code_samples │ ├── dotNET │ └── dotnet-agent-framework-aifoundry-file-search.ipynb │ ├── files │ └── demo.md │ └── python │ └── python-agent-framework-aifoundry-file-search.ipynb ├── 07.Workflow ├── README.md └── code_samples │ ├── dotNET │ ├── 01.dotnet-agent-framework-workflow-ghmodel-basic.ipynb │ ├── 02.dotnet-agent-framework-workflow-ghmodel-sequential.ipynb │ ├── 03.dotnet-agent-framework-workflow-ghmodel-concurrent.ipynb │ └── 04.dotnet-agent-framework-workflow-aifoundry-condition.ipynb │ ├── imgs │ └── home.png │ └── python │ ├── 01.python-agent-framework-workflow-ghmodel-basic.ipynb │ ├── 02.python-agent-framework-workflow-ghmodel-sequential.ipynb │ ├── 03.python-agent-framework-workflow-ghmodel-concurrent.ipynb │ └── 04.python-agent-framework-workflow-aifoundry-condition.ipynb ├── 08.EvaluationAndTracing ├── README.md └── python │ ├── basic_agent_workflow_devui │ ├── frontdesk_agent │ │ ├── __init__.py │ │ └── agent.py │ ├── main.py │ ├── reviewer_agent │ │ ├── __init__.py │ │ └── agent.py │ ├── test_simple.py │ └── travelplan_workflow │ │ ├── __init__.py │ │ └── workflow.py │ ├── foundry_agent │ ├── __init__.py │ └── agent.py │ ├── multi_workflow_aifoundry_devui │ ├── .env.example │ ├── __init__.py │ ├── contentreview_agent │ │ ├── __init__.py │ │ └── agent.py │ ├── evangelist_agent │ │ ├── __init__.py │ │ └── agent.py │ ├── main.py │ ├── publisher_agent │ │ ├── __init__.py │ │ └── agent.py │ └── workflow │ │ ├── __init__.py │ │ └── workflow.py │ ├── multi_workflow_foundrylocal_devui │ ├── main.py │ ├── plan_agent │ │ ├── __init__.py │ │ └── agent.py │ ├── researcher_agent │ │ ├── __init__.py │ │ └── agent.py │ └── workflow │ │ ├── __init__.py │ │ └── workflow.py │ ├── multi_workflow_ghmodel_devui │ ├── .env.example │ ├── contentreview_agent │ │ ├── __init__.py │ │ └── agent.py │ ├── evangelist_agent │ │ ├── __init__.py │ │ └── agent.py │ ├── main.py │ ├── publisher_agent │ │ ├── __init__.py │ │ └── agent.py │ └── workflow │ │ ├── __init__.py │ │ └── workflow.py │ └── tracer_aspire │ └── simple.py ├── CODE_OF_CONDUCT.md ├── Installation └── requirements.txt ├── LICENSE ├── README.bak.md ├── README.md ├── SECURITY.md ├── SUPPORT.md ├── changelog.md ├── check_imports.py └── pyproject.toml /.devcontainer/devcontainer.json: -------------------------------------------------------------------------------- 1 | // For format details, see https://aka.ms/devcontainer.json. For config options, see the 2 | // README at: https://github.com/devcontainers/templates/tree/main/src/dotnet 3 | { 4 | "name": "C# (.NET)", 5 | // Or use a Dockerfile or Docker Compose file. More info: https://containers.dev/guide/dockerfile 6 | "image": "mcr.microsoft.com/devcontainers/dotnet:1-9.0-bookworm", 7 | "postCreateCommand": "bash ./.devcontainer/setup.sh", 8 | "features": { 9 | "ghcr.io/devcontainers/features/azure-cli:1": {}, 10 | "ghcr.io/devcontainers/features/github-cli:1": {}, 11 | "ghcr.io/rocker-org/devcontainer-features/miniforge:2": {}, 12 | "ghcr.io/azure/azure-dev/azd:0": {} 13 | }, 14 | "customizations": { 15 | "vscode": { 16 | "extensions": [ 17 | "ms-dotnettools.vscode-dotnet-pack", 18 | "ms-python.vscode-python-envs", 19 | "ms-python.python" 20 | ] 21 | } 22 | } 23 | 24 | // Features to add to the dev container. More info: https://containers.dev/features. 25 | // "features": {}, 26 | 27 | // Use 'forwardPorts' to make a list of ports inside the container available locally. 28 | // "forwardPorts": [5000, 5001], 29 | // "portsAttributes": { 30 | // "5001": { 31 | // "protocol": "https" 32 | // } 33 | // } 34 | 35 | // Use 'postCreateCommand' to run commands after the container is created. 36 | // "postCreateCommand": "dotnet restore", 37 | 38 | // Configure tool-specific properties. 39 | // "customizations": {}, 40 | 41 | // Uncomment to connect as root instead. More info: https://aka.ms/dev-containers-non-root. 42 | // "remoteUser": "root" 43 | } 44 | -------------------------------------------------------------------------------- /.devcontainer/setup.sh: -------------------------------------------------------------------------------- 1 | sudo apt update 2 | sudo apt install graphviz -y 3 | conda activate base -------------------------------------------------------------------------------- /.env.examples: -------------------------------------------------------------------------------- 1 | GITHUB_TOKEN="Your GitHub Models Token" 2 | GITHUB_ENDPOINT="Your GitHub Models Endpoint" 3 | GITHUB_MODEL_ID="Your GitHub Model ID" 4 | 5 | AZURE_OPENAI_ENDPOINT="Your Azure OpenAI Endpoint" 6 | AZURE_OPENAI_CHAT_DEPLOYMENT_NAME ="Your Azure OpenAI Model Deployment Name" 7 | 8 | 9 | FOUNDRYLOCAL_ENDPOINT="Your Foundry Local Endpoint http://localhost:5272/v1" 10 | FOUNDRYLOCAL_MODEL_DEPLOYMENT_NAME="Your Foundry Local Model Deployment Name" 11 | 12 | 13 | AZURE_AI_PROJECT_ENDPOINT ="Your Azure AI Foundry Project Endpoint" 14 | AZURE_AI_MODEL_DEPLOYMENT_NAME ="Your Azure AI Foundry Project Deployment Name" 15 | 16 | BING_CONNECTION_ID="Your Bing Connection ID" -------------------------------------------------------------------------------- /.github/dependabot.yml: -------------------------------------------------------------------------------- 1 | # To get started with Dependabot version updates, you'll need to specify which 2 | # package ecosystems to update and where the package manifests are located. 3 | # Please see the documentation for more information: 4 | # https://docs.github.com/github/administering-a-repository/configuration-options-for-dependency-updates 5 | # https://containers.dev/guide/dependabot 6 | 7 | version: 2 8 | updates: 9 | - package-ecosystem: "devcontainers" 10 | directory: "/" 11 | schedule: 12 | interval: weekly 13 | -------------------------------------------------------------------------------- /.gitignore: -------------------------------------------------------------------------------- 1 | ## Ignore Visual Studio temporary files, build results, and 2 | ## files generated by popular Visual Studio add-ons. 3 | ## 4 | ## Get latest from https://github.com/github/gitignore/blob/main/VisualStudio.gitignore 5 | 6 | # User-specific files 7 | *.rsuser 8 | *.suo 9 | *.user 10 | *.userosscache 11 | *.sln.docstates 12 | *.env 13 | 14 | # User-specific files (MonoDevelop/Xamarin Studio) 15 | *.userprefs 16 | 17 | # Mono auto generated files 18 | mono_crash.* 19 | 20 | # Build results 21 | [Dd]ebug/ 22 | [Dd]ebugPublic/ 23 | [Rr]elease/ 24 | [Rr]eleases/ 25 | x64/ 26 | x86/ 27 | [Ww][Ii][Nn]32/ 28 | [Aa][Rr][Mm]/ 29 | [Aa][Rr][Mm]64/ 30 | [Aa][Rr][Mm]64[Ee][Cc]/ 31 | bld/ 32 | [Oo]bj/ 33 | [Oo]ut/ 34 | [Ll]og/ 35 | [Ll]ogs/ 36 | 37 | # Build results on 'Bin' directories 38 | **/[Bb]in/* 39 | # Uncomment if you have tasks that rely on *.refresh files to move binaries 40 | # (https://github.com/github/gitignore/pull/3736) 41 | #!**/[Bb]in/*.refresh 42 | 43 | # Visual Studio 2015/2017 cache/options directory 44 | .vs/ 45 | # Uncomment if you have tasks that create the project's static files in wwwroot 46 | #wwwroot/ 47 | 48 | # Visual Studio 2017 auto generated files 49 | Generated\ Files/ 50 | 51 | # MSTest test Results 52 | [Tt]est[Rr]esult*/ 53 | [Bb]uild[Ll]og.* 54 | *.trx 55 | 56 | # NUnit 57 | *.VisualState.xml 58 | TestResult.xml 59 | nunit-*.xml 60 | 61 | # Approval Tests result files 62 | *.received.* 63 | 64 | # Build Results of an ATL Project 65 | [Dd]ebugPS/ 66 | [Rr]eleasePS/ 67 | dlldata.c 68 | 69 | # Benchmark Results 70 | BenchmarkDotNet.Artifacts/ 71 | 72 | # .NET Core 73 | project.lock.json 74 | project.fragment.lock.json 75 | artifacts/ 76 | 77 | # ASP.NET Scaffolding 78 | ScaffoldingReadMe.txt 79 | 80 | # StyleCop 81 | StyleCopReport.xml 82 | 83 | # Files built by Visual Studio 84 | *_i.c 85 | *_p.c 86 | *_h.h 87 | *.ilk 88 | *.meta 89 | *.obj 90 | *.idb 91 | *.iobj 92 | *.pch 93 | *.pdb 94 | *.ipdb 95 | *.pgc 96 | *.pgd 97 | *.rsp 98 | # but not Directory.Build.rsp, as it configures directory-level build defaults 99 | !Directory.Build.rsp 100 | *.sbr 101 | *.tlb 102 | *.tli 103 | *.tlh 104 | *.tmp 105 | *.tmp_proj 106 | *_wpftmp.csproj 107 | *.log 108 | *.tlog 109 | *.vspscc 110 | *.vssscc 111 | .builds 112 | *.pidb 113 | *.svclog 114 | *.scc 115 | 116 | # Chutzpah Test files 117 | _Chutzpah* 118 | 119 | # Visual C++ cache files 120 | ipch/ 121 | *.aps 122 | *.ncb 123 | *.opendb 124 | *.opensdf 125 | *.sdf 126 | *.cachefile 127 | *.VC.db 128 | *.VC.VC.opendb 129 | 130 | # Visual Studio profiler 131 | *.psess 132 | *.vsp 133 | *.vspx 134 | *.sap 135 | 136 | # Visual Studio Trace Files 137 | *.e2e 138 | 139 | # TFS 2012 Local Workspace 140 | $tf/ 141 | 142 | # Guidance Automation Toolkit 143 | *.gpState 144 | 145 | # ReSharper is a .NET coding add-in 146 | _ReSharper*/ 147 | *.[Rr]e[Ss]harper 148 | *.DotSettings.user 149 | 150 | # TeamCity is a build add-in 151 | _TeamCity* 152 | 153 | # DotCover is a Code Coverage Tool 154 | *.dotCover 155 | 156 | # AxoCover is a Code Coverage Tool 157 | .axoCover/* 158 | !.axoCover/settings.json 159 | 160 | # Coverlet is a free, cross platform Code Coverage Tool 161 | coverage*.json 162 | coverage*.xml 163 | coverage*.info 164 | 165 | # Visual Studio code coverage results 166 | *.coverage 167 | *.coveragexml 168 | 169 | # NCrunch 170 | _NCrunch_* 171 | .NCrunch_* 172 | .*crunch*.local.xml 173 | nCrunchTemp_* 174 | 175 | # MightyMoose 176 | *.mm.* 177 | AutoTest.Net/ 178 | 179 | # Web workbench (sass) 180 | .sass-cache/ 181 | 182 | # Installshield output folder 183 | [Ee]xpress/ 184 | 185 | # DocProject is a documentation generator add-in 186 | DocProject/buildhelp/ 187 | DocProject/Help/*.HxT 188 | DocProject/Help/*.HxC 189 | DocProject/Help/*.hhc 190 | DocProject/Help/*.hhk 191 | DocProject/Help/*.hhp 192 | DocProject/Help/Html2 193 | DocProject/Help/html 194 | 195 | # Click-Once directory 196 | publish/ 197 | 198 | # Publish Web Output 199 | *.[Pp]ublish.xml 200 | *.azurePubxml 201 | # Note: Comment the next line if you want to checkin your web deploy settings, 202 | # but database connection strings (with potential passwords) will be unencrypted 203 | *.pubxml 204 | *.publishproj 205 | 206 | # Microsoft Azure Web App publish settings. Comment the next line if you want to 207 | # checkin your Azure Web App publish settings, but sensitive information contained 208 | # in these scripts will be unencrypted 209 | PublishScripts/ 210 | 211 | # NuGet Packages 212 | *.nupkg 213 | # NuGet Symbol Packages 214 | *.snupkg 215 | # The packages folder can be ignored because of Package Restore 216 | **/[Pp]ackages/* 217 | # except build/, which is used as an MSBuild target. 218 | !**/[Pp]ackages/build/ 219 | # Uncomment if necessary however generally it will be regenerated when needed 220 | #!**/[Pp]ackages/repositories.config 221 | # NuGet v3's project.json files produces more ignorable files 222 | *.nuget.props 223 | *.nuget.targets 224 | 225 | # Microsoft Azure Build Output 226 | csx/ 227 | *.build.csdef 228 | 229 | # Microsoft Azure Emulator 230 | ecf/ 231 | rcf/ 232 | 233 | # Windows Store app package directories and files 234 | AppPackages/ 235 | BundleArtifacts/ 236 | Package.StoreAssociation.xml 237 | _pkginfo.txt 238 | *.appx 239 | *.appxbundle 240 | *.appxupload 241 | 242 | # Visual Studio cache files 243 | # files ending in .cache can be ignored 244 | *.[Cc]ache 245 | # but keep track of directories ending in .cache 246 | !?*.[Cc]ache/ 247 | 248 | # Others 249 | ClientBin/ 250 | ~$* 251 | *~ 252 | *.dbmdl 253 | *.dbproj.schemaview 254 | *.jfm 255 | *.pfx 256 | *.publishsettings 257 | orleans.codegen.cs 258 | 259 | # Including strong name files can present a security risk 260 | # (https://github.com/github/gitignore/pull/2483#issue-259490424) 261 | #*.snk 262 | 263 | # Since there are multiple workflows, uncomment next line to ignore bower_components 264 | # (https://github.com/github/gitignore/pull/1529#issuecomment-104372622) 265 | #bower_components/ 266 | 267 | # RIA/Silverlight projects 268 | Generated_Code/ 269 | 270 | # Backup & report files from converting an old project file 271 | # to a newer Visual Studio version. Backup files are not needed, 272 | # because we have git ;-) 273 | _UpgradeReport_Files/ 274 | Backup*/ 275 | UpgradeLog*.XML 276 | UpgradeLog*.htm 277 | ServiceFabricBackup/ 278 | *.rptproj.bak 279 | 280 | # SQL Server files 281 | *.mdf 282 | *.ldf 283 | *.ndf 284 | 285 | # Business Intelligence projects 286 | *.rdl.data 287 | *.bim.layout 288 | *.bim_*.settings 289 | *.rptproj.rsuser 290 | *- [Bb]ackup.rdl 291 | *- [Bb]ackup ([0-9]).rdl 292 | *- [Bb]ackup ([0-9][0-9]).rdl 293 | 294 | # Microsoft Fakes 295 | FakesAssemblies/ 296 | 297 | # GhostDoc plugin setting file 298 | *.GhostDoc.xml 299 | 300 | # Node.js Tools for Visual Studio 301 | .ntvs_analysis.dat 302 | node_modules/ 303 | 304 | # Visual Studio 6 build log 305 | *.plg 306 | 307 | # Visual Studio 6 workspace options file 308 | *.opt 309 | 310 | # Visual Studio 6 auto-generated workspace file (contains which files were open etc.) 311 | *.vbw 312 | 313 | # Visual Studio 6 auto-generated project file (contains which files were open etc.) 314 | *.vbp 315 | 316 | # Visual Studio 6 workspace and project file (working project files containing files to include in project) 317 | *.dsw 318 | *.dsp 319 | 320 | # Visual Studio 6 technical files 321 | *.ncb 322 | *.aps 323 | 324 | # Visual Studio LightSwitch build output 325 | **/*.HTMLClient/GeneratedArtifacts 326 | **/*.DesktopClient/GeneratedArtifacts 327 | **/*.DesktopClient/ModelManifest.xml 328 | **/*.Server/GeneratedArtifacts 329 | **/*.Server/ModelManifest.xml 330 | _Pvt_Extensions 331 | 332 | # Paket dependency manager 333 | **/.paket/paket.exe 334 | paket-files/ 335 | 336 | # FAKE - F# Make 337 | **/.fake/ 338 | 339 | # CodeRush personal settings 340 | **/.cr/personal 341 | 342 | # Python Tools for Visual Studio (PTVS) 343 | **/__pycache__/ 344 | *.pyc 345 | 346 | # Cake - Uncomment if you are using it 347 | #tools/** 348 | #!tools/packages.config 349 | 350 | # Tabs Studio 351 | *.tss 352 | 353 | # Telerik's JustMock configuration file 354 | *.jmconfig 355 | 356 | # BizTalk build output 357 | *.btp.cs 358 | *.btm.cs 359 | *.odx.cs 360 | *.xsd.cs 361 | 362 | # OpenCover UI analysis results 363 | OpenCover/ 364 | 365 | # Azure Stream Analytics local run output 366 | ASALocalRun/ 367 | 368 | # MSBuild Binary and Structured Log 369 | *.binlog 370 | MSBuild_Logs/ 371 | 372 | # AWS SAM Build and Temporary Artifacts folder 373 | .aws-sam 374 | 375 | # NVidia Nsight GPU debugger configuration file 376 | *.nvuser 377 | 378 | # MFractors (Xamarin productivity tool) working folder 379 | **/.mfractor/ 380 | 381 | # Local History for Visual Studio 382 | **/.localhistory/ 383 | 384 | # Visual Studio History (VSHistory) files 385 | .vshistory/ 386 | 387 | # BeatPulse healthcheck temp database 388 | healthchecksdb 389 | 390 | # Backup folder for Package Reference Convert tool in Visual Studio 2017 391 | MigrationBackup/ 392 | 393 | # Ionide (cross platform F# VS Code tools) working folder 394 | **/.ionide/ 395 | 396 | # Fody - auto-generated XML schema 397 | FodyWeavers.xsd 398 | 399 | # VS Code files for those working on multiple tools 400 | .vscode/* 401 | !.vscode/settings.json 402 | !.vscode/tasks.json 403 | !.vscode/launch.json 404 | !.vscode/extensions.json 405 | !.vscode/*.code-snippets 406 | 407 | # Local History for Visual Studio Code 408 | .history/ 409 | 410 | # Built Visual Studio Code Extensions 411 | *.vsix 412 | 413 | # Windows Installer files from build outputs 414 | *.cab 415 | *.msi 416 | *.msix 417 | *.msm 418 | *.msp 419 | 420 | # macOS specific files 421 | .DS_Store 422 | .DS_Store? 423 | ._* 424 | .Spotlight-V100 425 | .Trashes 426 | ehthumbs.db 427 | Thumbs.db 428 | 429 | # Linux specific files 430 | *~ 431 | .nfs* 432 | -------------------------------------------------------------------------------- /.vscode/settings.json: -------------------------------------------------------------------------------- 1 | { 2 | "FSharp.suggestGitignore": false 3 | } -------------------------------------------------------------------------------- /00.ForBeginners/05-agentic-rag/code_samples/document.md: -------------------------------------------------------------------------------- 1 | - Contoso Travel offers luxury vacation packages to exotic destinations worldwide. 2 | - Our premium travel services include personalized itinerary planning and 24/7 concierge support. 3 | - Contoso's travel insurance covers medical emergencies, trip cancellations, and lost baggage. 4 | - Popular destinations include the Maldives, Swiss Alps, and African safaris. 5 | - Contoso Travel provides exclusive access to boutique hotels and private guided tours. -------------------------------------------------------------------------------- /00.ForBeginners/05-agentic-rag/code_samples/python-agent-framework-aifoundry-file-search.ipynb: -------------------------------------------------------------------------------- 1 | { 2 | "cells": [ 3 | { 4 | "cell_type": "code", 5 | "execution_count": 1, 6 | "id": "3a7e1e59", 7 | "metadata": {}, 8 | "outputs": [], 9 | "source": [ 10 | "# ! pip install agent-framework-azure-ai -U" 11 | ] 12 | }, 13 | { 14 | "cell_type": "code", 15 | "execution_count": 2, 16 | "id": "97f08567", 17 | "metadata": {}, 18 | "outputs": [], 19 | "source": [ 20 | "import os\n", 21 | "\n", 22 | "from azure.identity.aio import AzureCliCredential\n", 23 | "from azure.ai.projects.aio import AIProjectClient\n", 24 | "from dotenv import load_dotenv\n", 25 | "\n", 26 | "from azure.ai.agents.models import FilePurpose,FileSearchTool\n", 27 | "from agent_framework.azure import AzureAIAgentClient\n", 28 | "from agent_framework import ChatAgent,HostedFileSearchTool,HostedVectorStoreContent" 29 | ] 30 | }, 31 | { 32 | "cell_type": "code", 33 | "execution_count": 3, 34 | "id": "2a3bcf9d", 35 | "metadata": {}, 36 | "outputs": [ 37 | { 38 | "data": { 39 | "text/plain": [ 40 | "True" 41 | ] 42 | }, 43 | "execution_count": 3, 44 | "metadata": {}, 45 | "output_type": "execute_result" 46 | } 47 | ], 48 | "source": [ 49 | "load_dotenv()" 50 | ] 51 | }, 52 | { 53 | "cell_type": "code", 54 | "execution_count": 4, 55 | "id": "3d3ca050", 56 | "metadata": {}, 57 | "outputs": [], 58 | "source": [ 59 | "async def create_vector_store(client: AIProjectClient) -> tuple[str, HostedVectorStoreContent]:\n", 60 | " \"\"\"Create a vector store with sample documents.\"\"\"\n", 61 | " file_path = './document.md'\n", 62 | " file = await client.agents.files.upload_and_poll(file_path=file_path, purpose=\"assistants\")\n", 63 | " print(f\"Uploaded file, file ID: {file.id}\")\n", 64 | "\n", 65 | "\n", 66 | " vector_store = await client.agents.vector_stores.create_and_poll(file_ids=[file.id], name=\"graph_knowledge_base\")\n", 67 | "\n", 68 | " print(f\"Created vector store, ID: {vector_store.id}\")\n", 69 | "\n", 70 | "\n", 71 | " return file.id, HostedVectorStoreContent(vector_store_id=vector_store.id)" 72 | ] 73 | }, 74 | { 75 | "cell_type": "code", 76 | "execution_count": null, 77 | "id": "0aef5316", 78 | "metadata": {}, 79 | "outputs": [ 80 | { 81 | "name": "stdout", 82 | "output_type": "stream", 83 | "text": [ 84 | "Uploaded file, file ID: assistant-4ZfGbrB55GSNSvKGE8Nnhi\n", 85 | "Created vector store, ID: vs_MbrOhkt7ZnLsUdXwc8Sqptkh\n", 86 | "Agent created. You can now ask questions about the uploaded document.\n", 87 | "Contoso's travel insurance coverage includes protections for medical emergencies, trip cancellations, and lost baggage. This coverage is part of their premium travel services, which also offer personalized itinerary planning and 24/7 concierge support for luxury vacation packages to exotic destinations worldwide【4:0†document.md】." 88 | ] 89 | } 90 | ], 91 | "source": [ 92 | "async with (\n", 93 | " AzureCliCredential() as credential,\n", 94 | " AIProjectClient(endpoint=os.environ[\"AZURE_AI_PROJECT_ENDPOINT\"], credential=credential) as client,\n", 95 | " ):\n", 96 | "\n", 97 | " file_id, vector_store = await create_vector_store(client)\n", 98 | " file_search = FileSearchTool(vector_store_ids=[vector_store.vector_store_id])\n", 99 | " created_agent = await client.agents.create_agent(\n", 100 | " model=os.environ[\"AZURE_AI_MODEL_DEPLOYMENT_NAME\"], \n", 101 | " name=\"PythonRAGAgent\",\n", 102 | " instructions=\"\"\"\n", 103 | " You are an AI assistant designed to answer user questions using only the information retrieved from the provided document(s).\n", 104 | "\n", 105 | " - If a user's question cannot be answered using the retrieved context, **you must clearly respond**: \n", 106 | " \"I'm sorry, but the uploaded document does not contain the necessary information to answer that question.\"\n", 107 | " - Do not answer from general knowledge or reasoning. Do not make assumptions or generate hypothetical explanations.\n", 108 | " - Do not provide definitions, tutorials, or commentary that is not explicitly grounded in the content of the uploaded file(s).\n", 109 | " - If a user asks a question like \"What is a Neural Network?\", and this is not discussed in the uploaded document, respond as instructed above.\n", 110 | " - For questions that do have relevant content in the document (e.g., Contoso's travel insurance coverage), respond accurately, and cite the document explicitly.\n", 111 | "\n", 112 | " You must behave as if you have no external knowledge beyond what is retrieved from the uploaded document.\n", 113 | " \"\"\",\n", 114 | " tools = file_search.definitions,\n", 115 | " tool_resources= file_search.resources\n", 116 | " )\n", 117 | " chat_client=AzureAIAgentClient(project_client=client, agent_id=created_agent.id)\n", 118 | "\n", 119 | "\n", 120 | " async with ChatAgent(\n", 121 | " # passing in the client is optional here, so if you take the agent_id from the portal\n", 122 | " # you can use it directly without the two lines above.\n", 123 | " chat_client=chat_client,\n", 124 | " ) as agent:\n", 125 | " \n", 126 | "\n", 127 | " print(\"Agent created. You can now ask questions about the uploaded document.\")\n", 128 | "\n", 129 | " query = \"Can you explain Contoso's travel insurance coverage?\"\n", 130 | "\n", 131 | " async for chunk in agent.run_stream(query, tools=HostedFileSearchTool(inputs=vector_store)):\n", 132 | "\n", 133 | " if chunk.text:\n", 134 | " print(chunk.text, end=\"\", flush=True)" 135 | ] 136 | } 137 | ], 138 | "metadata": { 139 | "kernelspec": { 140 | "display_name": "agentenv", 141 | "language": "python", 142 | "name": "python3" 143 | }, 144 | "language_info": { 145 | "codemirror_mode": { 146 | "name": "ipython", 147 | "version": 3 148 | }, 149 | "file_extension": ".py", 150 | "mimetype": "text/x-python", 151 | "name": "python", 152 | "nbconvert_exporter": "python", 153 | "pygments_lexer": "ipython3", 154 | "version": "3.12.10" 155 | }, 156 | "polyglot_notebook": { 157 | "kernelInfo": { 158 | "defaultKernelName": "csharp", 159 | "items": [ 160 | { 161 | "aliases": [], 162 | "name": "csharp" 163 | } 164 | ] 165 | } 166 | } 167 | }, 168 | "nbformat": 4, 169 | "nbformat_minor": 5 170 | } 171 | -------------------------------------------------------------------------------- /00.ForBeginners/README.md: -------------------------------------------------------------------------------- 1 | # Microsoft Agent Framework Examples for AI Agents Beginners 2 | 3 | This section provides comprehensive Microsoft Agent Framework examples that extend and complement the content from [Microsoft's AI Agents for Beginners](https://github.com/microsoft/ai-agents-for-beginners) curriculum. These practical code samples demonstrate how to build intelligent agents using both Python and .NET implementations of the Microsoft Agent Framework. 4 | 5 | ## 🎯 Overview 6 | 7 | The examples in this directory are designed to provide hands-on experience with Microsoft Agent Framework, covering fundamental concepts through advanced multi-agent systems. Each lesson includes both Python and .NET code samples to accommodate different development preferences and environments. 8 | 9 | ## 📚 Learning Path 10 | 11 | ### 01. Introduction to AI Agents 12 | Learn the foundational concepts of AI agents and get started with your first Microsoft Agent Framework implementation. 13 | 14 | **Code Samples:** 15 | - **Python:** [`python-agent-framework-travelagent.ipynb`](./01-intro-to-ai-agents/code_samples/python-agent-framework-travelagent.ipynb) 16 | - **.NET:** [`dotnet-agent-framework-travelagent.ipynb`](./01-intro-to-ai-agents/code_samples/dotnet-agent-framework-travelagent.ipynb) 17 | 18 | ### 02. Explore Agentic Frameworks 19 | Dive deeper into the Microsoft Agent Framework architecture and understand different implementation patterns. 20 | 21 | **Code Samples:** 22 | - **Python:** [`python-agent-framework-basicagent.ipynb`](./02-explore-agentic-frameworks/code_samples/python-agent-framework-basicagent.ipynb) 23 | - **.NET:** [`dotnet-agent-framework-basicagent.ipynb`](./02-explore-agentic-frameworks/code_samples/dotnet-agent-framework-basicagent.ipynb) 24 | 25 | ### 03. Agentic Design Patterns 26 | Explore common design patterns and best practices for building robust AI agents with GitHub Models integration. 27 | 28 | **Code Samples:** 29 | - **Python:** [`python-agent-framework-ghmodel-basicagent.ipynb`](./03-agentic-design-patterns/code_samples/python-agent-framework-ghmodel-basicagent.ipynb) 30 | - **.NET:** [`dotnet-agent-framework-ghmodel-basicagent.ipynb`](./03-agentic-design-patterns/code_samples/dotnet-agent-framework-ghmodel-basicagent.ipynb) 31 | 32 | ### 04. Tool Use and Integration 33 | Learn how to enhance your agents with external tools and capabilities using GitHub Models. 34 | 35 | **Code Samples:** 36 | - **Python:** [`python-agent-framework-ghmodel-tools.ipynb`](./04-tool-use/code_samples/python-agent-framework-ghmodel-tools.ipynb) 37 | - **.NET:** [`dotnet-agent-framework-ghmodels-tool.ipynb`](./04-tool-use/code_samples/dotnet-agent-framework-ghmodels-tool.ipynb) 38 | 39 | ### 05. Agentic RAG (Retrieval-Augmented Generation) 40 | Implement knowledge-enhanced agents using Azure AI Foundry's file search capabilities. 41 | 42 | **Code Samples:** 43 | - **Python:** [`python-agent-framework-aifoundry-file-search.ipynb`](./05-agentic-rag/code_samples/python-agent-framework-aifoundry-file-search.ipynb) 44 | - **.NET:** [`dotnet-agent-framework-aifoundry-file-search.ipynb`](./05-agentic-rag/code_samples/dotnet-agent-framework-aifoundry-file-search.ipynb) 45 | 46 | **Supporting Files:** 47 | - [`document.md`](./05-agentic-rag/code_samples/document.md) - Sample document for RAG demonstrations 48 | 49 | ### 07. Planning and Design 50 | Explore advanced planning capabilities and design patterns with GitHub Models integration. 51 | 52 | **Code Samples:** 53 | - **Python:** [`python-agent-framrwork-ghmodel-planningdesign.ipynb`](./07-planning-design/code_samples/python-agent-framrwork-ghmodel-planningdesign.ipynb) 54 | - **.NET:** [`dotnet-agent-framrwork-ghmodel-planningdesign.ipynb`](./07-planning-design/code_samples/dotnet-agent-framrwork-ghmodel-planningdesign.ipynb) 55 | 56 | ### 08. Multi-Agent Systems 57 | Build collaborative multi-agent workflows using GitHub Models for complex problem-solving scenarios. 58 | 59 | **Code Samples:** 60 | - **Python:** [`python-agent-framework-ghmodel-workflow-multi-agents.ipynb`](./08-multi-agent/code_samples/python-agent-framework-ghmodel-workflow-multi-agents.ipynb) 61 | - **.NET:** [`dotnet-agent-framework-ghmodel-workflow-multi-agents.ipynb`](./08-multi-agent/code_samples/dotnet-agent-framework-ghmodel-workflow-multi-agents.ipynb) 62 | 63 | ### 09. Metacognition 64 | *Coming Soon* - Advanced metacognitive capabilities for self-aware agents. 65 | 66 | ### 10. AI Agents in Production 67 | *Coming Soon* - Best practices for deploying and managing agents in production environments. 68 | 69 | ### 11. Agentic Protocols 70 | *Coming Soon* - Advanced communication protocols and standards for agent interactions. 71 | 72 | ### 12. Context Engineering 73 | *Coming Soon* - Advanced techniques for context management and optimization. 74 | 75 | ## 🛠 Prerequisites 76 | 77 | ### Development Environment 78 | - **Python:** Python 3.10 or higher 79 | - **.NET:** .NET 9.0 or higher 80 | - Visual Studio Code with appropriate extensions 81 | 82 | ### Required Services 83 | - **Azure AI Foundry:** For RAG examples and advanced capabilities 84 | - **GitHub Models:** For GitHub-integrated examples 85 | - **Azure OpenAI Service:** For certain provider examples 86 | 87 | ### Environment Configuration 88 | 89 | Create a `.env` file or set environment variables for the examples: 90 | 91 | ```env 92 | # GitHub Models Configuration 93 | GITHUB_TOKEN=your_github_token 94 | GITHUB_ENDPOINT=https://models.inference.ai.azure.com 95 | GITHUB_MODEL_ID=gpt-4o-mini 96 | 97 | # Azure AI Foundry Configuration 98 | FOUNDRY_PROJECT_ENDPOINT=your_foundry_endpoint 99 | FOUNDRY_MODEL_DEPLOYMENT_NAME=your_model_name 100 | 101 | # Azure OpenAI Configuration (if needed) 102 | AZURE_OPENAI_ENDPOINT=your_aoai_endpoint 103 | AZURE_OPENAI_API_KEY=your_aoai_key 104 | AZURE_OPENAI_DEPLOYMENT_NAME=your_deployment_name 105 | ``` 106 | 107 | ## 🚀 Getting Started 108 | 109 | 1. **Choose your preferred language:** Python or .NET 110 | 2. **Start with lesson 01:** Introduction to AI Agents 111 | 3. **Follow the sequential path:** Each lesson builds upon previous concepts 112 | 4. **Experiment with the code:** Modify examples to understand the framework better 113 | 5. **Apply learnings:** Use the patterns in your own agent projects 114 | 115 | ## 📝 Code Sample Structure 116 | 117 | Each code sample is provided as a Jupyter notebook (`.ipynb`) containing: 118 | - **Detailed explanations** of concepts and implementation 119 | - **Step-by-step code** with comments and documentation 120 | - **Practical examples** you can run and modify 121 | - **Best practices** for Microsoft Agent Framework usage 122 | 123 | ## 🔗 Related Resources 124 | 125 | - [Microsoft AI Agents for Beginners](https://github.com/microsoft/ai-agents-for-beginners) - Foundational curriculum 126 | - [Microsoft Agent Framework](https://github.com/microsoft/agent-framework) - Official framework repository 127 | - [Azure AI Foundry](https://azure.microsoft.com/en-us/products/ai-foundry) - AI development platform 128 | - [GitHub Models](https://github.com/marketplace/models) - GitHub's AI model marketplace 129 | 130 | ## 🤝 Contributing 131 | 132 | We welcome contributions to improve these examples: 133 | - Report issues or bugs 134 | - Suggest new examples or improvements 135 | - Submit pull requests with enhancements 136 | - Share your own agent implementations 137 | 138 | --- 139 | 140 | **Ready to build intelligent agents?** Start with [lesson 01](./01-intro-to-ai-agents/) and begin your Microsoft Agent Framework journey! 🚀 -------------------------------------------------------------------------------- /01.AgentFoundation/README.md: -------------------------------------------------------------------------------- 1 | # Unlocking Autonomous Intelligence: A Guide to AI Agents 2 | 3 | In the rapidly evolving landscape of artificial intelligence, AI Agents represent a significant leap towards truly autonomous and intelligent systems. This tutorial will demystify what AI Agents are, introduce you to Microsoft's powerful tools for building them – specifically the Azure AI Foundry Agent Service and the Microsoft Agent Framework – and prepare you to harness their potential. 4 | 5 | ## What Exactly is an AI Agent? 6 | 7 | Imagine a sophisticated digital assistant that doesn't just respond to commands, but can understand a high-level goal, break it down into actionable steps, and then execute those steps using various tools to achieve the desired outcome, all on its own. That's an AI Agent. 8 | 9 | At its core, an AI Agent is a system driven by a **Large Language Model (LLM)**. This LLM acts as the agent's "brain," providing the reasoning and planning capabilities. To achieve its goals, an agent leverages: 10 | 11 | * **The LLM (Reasoning Engine):** This is where the magic happens. The LLM interprets your request, generates a plan, makes decisions, and even learns from its experiences. 12 | * **Tools (Action Executors):** Agents are equipped with a diverse set of "tools" that allow them to interact with the external world. These can be anything from searching the internet, calling an API, sending an email, querying a database, or even controlling other software. 13 | * **Memory (Experience & Context):** To act intelligently over time, agents need memory. This can range from a short-term memory to keep track of the current conversation to a long-term memory that stores past observations and learnings, informing future decisions and adapting its behavior. 14 | 15 | By orchestrating these components, an agent can tackle complex problems, automate workflows, and provide intelligent assistance far beyond simple Q&A. 16 | 17 | ## Introducing Azure AI Foundry Agent Service: Your Agent Development Hub 18 | 19 | Developing robust and scalable AI agents for enterprise applications can be complex. This is where the **Azure AI Foundry Agent Service** comes in. Integrated within Azure AI Studio, this service provides a comprehensive, managed platform designed to streamline the entire lifecycle of building, testing, and deploying sophisticated AI agents. 20 | 21 | Think of it as your dedicated workshop for agent creation, offering: 22 | 23 | * **Enterprise-Grade Orchestration:** A powerful framework that manages the intricate logic of how your agent reasons, selects tools, and executes multi-step plans reliably. 24 | * **Seamless Tool Integration:** Easily connect your agents to a vast array of tools, including pre-built connectors and the flexibility to integrate your own custom functions and APIs. 25 | * **Managed Infrastructure:** Focus entirely on your agent's intelligence and capabilities, not on infrastructure management. Azure handles the underlying compute, scalability, and deployment complexities. 26 | * **Robust Evaluation & Monitoring:** Tools to rigorously evaluate your agent's performance, ensure safety, and monitor its behavior in real-world scenarios, allowing for continuous improvement. 27 | 28 | The Azure AI Foundry Agent Service accelerates your journey from an initial agent concept to a production-ready, highly capable AI assistant. 29 | 30 | ## The Microsoft Agent Framework: The Future of Agent Building 31 | 32 | The **Microsoft Agent Framework** represents the cutting-edge evolution of Microsoft's commitment to AI agent development. It is a unified, comprehensive framework built to empower developers to create intelligent, natural language-driven agents, and to facilitate the orchestration of multiple agents working collaboratively on complex tasks. 33 | 34 | This framework is the spiritual successor and consolidation of learnings from pioneering projects like **Semantic Kernel** and **AutoGen**. It integrates the best features and advanced capabilities from these frameworks into a single, cohesive platform, aiming to provide: 35 | 36 | * **Unified Development Experience:** A consistent and powerful way to build agents, regardless of their complexity or target application. 37 | * **Advanced Orchestration:** Tools to design and manage intricate agent workflows, including scenarios where multiple agents interact and delegate tasks to each other. 38 | * **Focus on Natural Language Interaction:** Strong emphasis on enabling agents to understand and communicate with users effectively through natural language. 39 | 40 | The Microsoft Agent Framework is designed to be the foundational layer for the next generation of intelligent, autonomous systems, making agent development more accessible and powerful than ever before. 41 | 42 | 43 | -------------------------------------------------------------------------------- /03.ExploerAgentFramework/code_samples/dotNET/01-dotnet-agent-framework-aoai.ipynb: -------------------------------------------------------------------------------- 1 | { 2 | "cells": [ 3 | { 4 | "cell_type": "code", 5 | "execution_count": 1, 6 | "id": "667cae86", 7 | "metadata": { 8 | "language_info": { 9 | "name": "polyglot-notebook" 10 | }, 11 | "polyglot_notebook": { 12 | "kernelName": "csharp" 13 | } 14 | }, 15 | "outputs": [ 16 | { 17 | "data": { 18 | "text/html": [ 19 | "
Installed Packages
" 20 | ] 21 | }, 22 | "metadata": {}, 23 | "output_type": "display_data" 24 | } 25 | ], 26 | "source": [ 27 | "#r \"nuget: Microsoft.Extensions.AI, 9.9.0\"" 28 | ] 29 | }, 30 | { 31 | "cell_type": "code", 32 | "execution_count": 2, 33 | "id": "4b398e6a", 34 | "metadata": { 35 | "language_info": { 36 | "name": "polyglot-notebook" 37 | }, 38 | "polyglot_notebook": { 39 | "kernelName": "csharp" 40 | } 41 | }, 42 | "outputs": [ 43 | { 44 | "data": { 45 | "text/html": [ 46 | "
Installed Packages
" 47 | ] 48 | }, 49 | "metadata": {}, 50 | "output_type": "display_data" 51 | } 52 | ], 53 | "source": [ 54 | "#r \"nuget: Azure.Identity, 1.15.0\"\n", 55 | "#r \"nuget: OpenAI, 2.4.0\"\n", 56 | "#r \"nuget: Azure.AI.OpenAI, 2.1.0\"" 57 | ] 58 | }, 59 | { 60 | "cell_type": "code", 61 | "execution_count": null, 62 | "id": "117abf7e", 63 | "metadata": { 64 | "language_info": { 65 | "name": "polyglot-notebook" 66 | }, 67 | "polyglot_notebook": { 68 | "kernelName": "csharp" 69 | } 70 | }, 71 | "outputs": [ 72 | { 73 | "data": { 74 | "text/html": [ 75 | "
Installed Packages
" 76 | ] 77 | }, 78 | "metadata": {}, 79 | "output_type": "display_data" 80 | } 81 | ], 82 | "source": [ 83 | "#r \"nuget: Microsoft.Agents.AI.OpenAI, 1.0.0-preview.251001.3\"" 84 | ] 85 | }, 86 | { 87 | "cell_type": "code", 88 | "execution_count": null, 89 | "id": "8c657477", 90 | "metadata": { 91 | "language_info": { 92 | "name": "polyglot-notebook" 93 | }, 94 | "polyglot_notebook": { 95 | "kernelName": "csharp" 96 | } 97 | }, 98 | "outputs": [ 99 | { 100 | "data": { 101 | "text/html": [ 102 | "
Installed Packages
" 103 | ] 104 | }, 105 | "metadata": {}, 106 | "output_type": "display_data" 107 | } 108 | ], 109 | "source": [ 110 | "#r \"nuget: Microsoft.Agents.AI, 1.0.0-preview.251001.3\"" 111 | ] 112 | }, 113 | { 114 | "cell_type": "code", 115 | "execution_count": 5, 116 | "id": "2cb5b2ed", 117 | "metadata": { 118 | "language_info": { 119 | "name": "polyglot-notebook" 120 | }, 121 | "polyglot_notebook": { 122 | "kernelName": "csharp" 123 | } 124 | }, 125 | "outputs": [], 126 | "source": [ 127 | "// #r \"nuget: Microsoft.Extensions.AI.OpenAI, 9.9.0-preview.1.25458.4\"" 128 | ] 129 | }, 130 | { 131 | "cell_type": "code", 132 | "execution_count": 6, 133 | "id": "3ccb431c", 134 | "metadata": { 135 | "language_info": { 136 | "name": "polyglot-notebook" 137 | }, 138 | "polyglot_notebook": { 139 | "kernelName": "csharp" 140 | } 141 | }, 142 | "outputs": [ 143 | { 144 | "data": { 145 | "text/html": [ 146 | "
Installed Packages
" 147 | ] 148 | }, 149 | "metadata": {}, 150 | "output_type": "display_data" 151 | } 152 | ], 153 | "source": [ 154 | "#r \"nuget: DotNetEnv, 3.1.1\"" 155 | ] 156 | }, 157 | { 158 | "cell_type": "code", 159 | "execution_count": 7, 160 | "id": "19623ad7", 161 | "metadata": { 162 | "language_info": { 163 | "name": "polyglot-notebook" 164 | }, 165 | "polyglot_notebook": { 166 | "kernelName": "csharp" 167 | } 168 | }, 169 | "outputs": [], 170 | "source": [ 171 | "using System;\n", 172 | "using System.ClientModel;\n", 173 | "\n", 174 | "using Azure.AI.OpenAI;\n", 175 | "using Azure.Identity;\n", 176 | "\n", 177 | "using Microsoft.Agents.AI;\n", 178 | "using OpenAI;" 179 | ] 180 | }, 181 | { 182 | "cell_type": "code", 183 | "execution_count": 8, 184 | "id": "12152284", 185 | "metadata": { 186 | "language_info": { 187 | "name": "polyglot-notebook" 188 | }, 189 | "polyglot_notebook": { 190 | "kernelName": "csharp" 191 | } 192 | }, 193 | "outputs": [], 194 | "source": [ 195 | " using DotNetEnv;" 196 | ] 197 | }, 198 | { 199 | "cell_type": "code", 200 | "execution_count": 9, 201 | "id": "6a731f1c", 202 | "metadata": { 203 | "language_info": { 204 | "name": "polyglot-notebook" 205 | }, 206 | "polyglot_notebook": { 207 | "kernelName": "csharp" 208 | } 209 | }, 210 | "outputs": [], 211 | "source": [ 212 | "Env.Load(\"../../../.env\");" 213 | ] 214 | }, 215 | { 216 | "cell_type": "code", 217 | "execution_count": 10, 218 | "id": "380d83bf", 219 | "metadata": { 220 | "language_info": { 221 | "name": "polyglot-notebook" 222 | }, 223 | "polyglot_notebook": { 224 | "kernelName": "csharp" 225 | } 226 | }, 227 | "outputs": [], 228 | "source": [ 229 | "var aoai_endpoint = Environment.GetEnvironmentVariable(\"AZURE_OPENAI_ENDPOINT\") ?? throw new InvalidOperationException(\"AZURE_OPENAI_ENDPOINT is not set.\");\n", 230 | "var aoai_model_id = Environment.GetEnvironmentVariable(\"AZURE_OPENAI_RESPONSES_DEPLOYMENT_NAME\") ?? \"gpt-4.1-mini\";" 231 | ] 232 | }, 233 | { 234 | "cell_type": "code", 235 | "execution_count": 11, 236 | "id": "94dc0fc5", 237 | "metadata": { 238 | "language_info": { 239 | "name": "polyglot-notebook" 240 | }, 241 | "polyglot_notebook": { 242 | "kernelName": "csharp" 243 | } 244 | }, 245 | "outputs": [ 246 | { 247 | "data": { 248 | "text/plain": [ 249 | "gpt-4.1-mini" 250 | ] 251 | }, 252 | "metadata": {}, 253 | "output_type": "display_data" 254 | } 255 | ], 256 | "source": [ 257 | "aoai_model_id" 258 | ] 259 | }, 260 | { 261 | "cell_type": "code", 262 | "execution_count": 12, 263 | "id": "728bd158", 264 | "metadata": { 265 | "language_info": { 266 | "name": "polyglot-notebook" 267 | }, 268 | "polyglot_notebook": { 269 | "kernelName": "csharp" 270 | } 271 | }, 272 | "outputs": [], 273 | "source": [ 274 | "AIAgent agent = new AzureOpenAIClient(\n", 275 | " new Uri(aoai_endpoint),\n", 276 | " new AzureCliCredential())\n", 277 | " .GetChatClient(aoai_model_id)\n", 278 | " .CreateAIAgent(\"You are a helpful assistant.\");" 279 | ] 280 | }, 281 | { 282 | "cell_type": "code", 283 | "execution_count": 13, 284 | "id": "ca3160d7", 285 | "metadata": { 286 | "language_info": { 287 | "name": "polyglot-notebook" 288 | }, 289 | "polyglot_notebook": { 290 | "kernelName": "csharp" 291 | } 292 | }, 293 | "outputs": [ 294 | { 295 | "name": "stdout", 296 | "output_type": "stream", 297 | "text": [ 298 | "Silent threads connect, \n", 299 | "Agents weave the work with grace, \n", 300 | "Framework’s pulse at core.\r\n" 301 | ] 302 | } 303 | ], 304 | "source": [ 305 | "Console.WriteLine(await agent.RunAsync(\"Write a haiku about Agent Framework.\"));" 306 | ] 307 | }, 308 | { 309 | "cell_type": "code", 310 | "execution_count": 14, 311 | "id": "b42241a5", 312 | "metadata": { 313 | "language_info": { 314 | "name": "polyglot-notebook" 315 | }, 316 | "polyglot_notebook": { 317 | "kernelName": "csharp" 318 | } 319 | }, 320 | "outputs": [ 321 | { 322 | "name": "stdout", 323 | "output_type": "stream", 324 | "text": [ 325 | "Code weaves silent paths, \n", 326 | "Agents dance in swift pursuit, \n", 327 | "Framework shapes their steps." 328 | ] 329 | } 330 | ], 331 | "source": [ 332 | "await foreach (var update in agent.RunStreamingAsync(\"Write a haiku about Agent Framework.\"))\n", 333 | "{\n", 334 | " Console.Write(update);\n", 335 | "}" 336 | ] 337 | } 338 | ], 339 | "metadata": { 340 | "kernelspec": { 341 | "display_name": ".NET (C#)", 342 | "language": "C#", 343 | "name": ".net-csharp" 344 | }, 345 | "language_info": { 346 | "name": "polyglot-notebook" 347 | }, 348 | "polyglot_notebook": { 349 | "kernelInfo": { 350 | "defaultKernelName": "csharp", 351 | "items": [ 352 | { 353 | "aliases": [], 354 | "name": "csharp" 355 | } 356 | ] 357 | } 358 | } 359 | }, 360 | "nbformat": 4, 361 | "nbformat_minor": 5 362 | } 363 | -------------------------------------------------------------------------------- /03.ExploerAgentFramework/code_samples/dotNET/02-dotnet-agent-framrwork-ghmodel.ipynb: -------------------------------------------------------------------------------- 1 | { 2 | "cells": [ 3 | { 4 | "cell_type": "markdown", 5 | "id": "2e47efbb", 6 | "metadata": {}, 7 | "source": [ 8 | "# AI Agent Framework for .NET\n", 9 | "\n", 10 | "**Important**: If you encounter version conflicts, restart the kernel and run all cells in order.\n", 11 | "\n", 12 | "## Package References\n", 13 | "\n", 14 | "Load all required packages first to ensure compatibility:" 15 | ] 16 | }, 17 | { 18 | "cell_type": "code", 19 | "execution_count": 1, 20 | "metadata": { 21 | "language_info": { 22 | "name": "polyglot-notebook" 23 | }, 24 | "polyglot_notebook": { 25 | "kernelName": "csharp" 26 | } 27 | }, 28 | "outputs": [ 29 | { 30 | "data": { 31 | "text/html": [ 32 | "
Installed Packages
" 33 | ] 34 | }, 35 | "metadata": {}, 36 | "output_type": "display_data" 37 | } 38 | ], 39 | "source": [ 40 | "#r \"nuget: Microsoft.Extensions.AI, 9.9.1\"" 41 | ] 42 | }, 43 | { 44 | "cell_type": "code", 45 | "execution_count": 2, 46 | "metadata": { 47 | "language_info": { 48 | "name": "polyglot-notebook" 49 | }, 50 | "polyglot_notebook": { 51 | "kernelName": "csharp" 52 | } 53 | }, 54 | "outputs": [], 55 | "source": [ 56 | "// Load .NET 9.0 compatible packages first\n", 57 | "//#r \"nuget: Microsoft.Extensions.Logging.Abstractions, 9.0.0\"\n", 58 | "////#r \"nuget: Microsoft.Extensions.DependencyInjection.Abstractions, 9.0.0\"\n", 59 | "//#r \"nuget: Microsoft.Extensions.Options, 9.0.0\"" 60 | ] 61 | }, 62 | { 63 | "cell_type": "code", 64 | "execution_count": null, 65 | "id": "7590754d", 66 | "metadata": { 67 | "language_info": { 68 | "name": "polyglot-notebook" 69 | }, 70 | "polyglot_notebook": { 71 | "kernelName": "csharp" 72 | } 73 | }, 74 | "outputs": [ 75 | { 76 | "data": { 77 | "text/html": [ 78 | "
Installed Packages
" 79 | ] 80 | }, 81 | "metadata": {}, 82 | "output_type": "display_data" 83 | } 84 | ], 85 | "source": [ 86 | "#r \"nuget: Microsoft.Agents.AI.OpenAI, 1.0.0-preview.251001.3\"" 87 | ] 88 | }, 89 | { 90 | "cell_type": "code", 91 | "execution_count": null, 92 | "id": "47b56957", 93 | "metadata": { 94 | "language_info": { 95 | "name": "polyglot-notebook" 96 | }, 97 | "polyglot_notebook": { 98 | "kernelName": "csharp" 99 | } 100 | }, 101 | "outputs": [ 102 | { 103 | "data": { 104 | "text/html": [ 105 | "
Installed Packages
" 106 | ] 107 | }, 108 | "metadata": {}, 109 | "output_type": "display_data" 110 | } 111 | ], 112 | "source": [ 113 | "#r \"nuget: Microsoft.Agents.AI, 1.0.0-preview.251001.3\"" 114 | ] 115 | }, 116 | { 117 | "cell_type": "code", 118 | "execution_count": 5, 119 | "id": "4df04054", 120 | "metadata": { 121 | "language_info": { 122 | "name": "polyglot-notebook" 123 | }, 124 | "polyglot_notebook": { 125 | "kernelName": "csharp" 126 | } 127 | }, 128 | "outputs": [ 129 | { 130 | "data": { 131 | "text/html": [ 132 | "
Installed Packages
" 133 | ] 134 | }, 135 | "metadata": {}, 136 | "output_type": "display_data" 137 | } 138 | ], 139 | "source": [ 140 | "#r \"nuget: DotNetEnv, 3.1.1\"" 141 | ] 142 | }, 143 | { 144 | "cell_type": "code", 145 | "execution_count": 6, 146 | "id": "43a7b15f", 147 | "metadata": { 148 | "language_info": { 149 | "name": "polyglot-notebook" 150 | }, 151 | "polyglot_notebook": { 152 | "kernelName": "csharp" 153 | } 154 | }, 155 | "outputs": [], 156 | "source": [ 157 | "using System;\n", 158 | "using System.ClientModel;\n", 159 | "\n", 160 | "using Microsoft.Agents.AI;\n", 161 | "using OpenAI;\n" 162 | ] 163 | }, 164 | { 165 | "cell_type": "code", 166 | "execution_count": 7, 167 | "id": "c592a31f", 168 | "metadata": { 169 | "language_info": { 170 | "name": "polyglot-notebook" 171 | }, 172 | "polyglot_notebook": { 173 | "kernelName": "csharp" 174 | } 175 | }, 176 | "outputs": [], 177 | "source": [ 178 | " using DotNetEnv;" 179 | ] 180 | }, 181 | { 182 | "cell_type": "code", 183 | "execution_count": 8, 184 | "id": "f439a209", 185 | "metadata": { 186 | "language_info": { 187 | "name": "polyglot-notebook" 188 | }, 189 | "polyglot_notebook": { 190 | "kernelName": "csharp" 191 | } 192 | }, 193 | "outputs": [], 194 | "source": [ 195 | "Env.Load(\"../../../.env\");" 196 | ] 197 | }, 198 | { 199 | "cell_type": "code", 200 | "execution_count": 9, 201 | "metadata": { 202 | "language_info": { 203 | "name": "polyglot-notebook" 204 | }, 205 | "polyglot_notebook": { 206 | "kernelName": "csharp" 207 | } 208 | }, 209 | "outputs": [], 210 | "source": [ 211 | "System.Environment.GetEnvironmentVariable(\"GITHUB_ENDPOINT\");" 212 | ] 213 | }, 214 | { 215 | "cell_type": "code", 216 | "execution_count": 10, 217 | "id": "2e47a3e9", 218 | "metadata": { 219 | "language_info": { 220 | "name": "polyglot-notebook" 221 | }, 222 | "polyglot_notebook": { 223 | "kernelName": "csharp" 224 | } 225 | }, 226 | "outputs": [], 227 | "source": [ 228 | "var github_endpoint = Environment.GetEnvironmentVariable(\"GITHUB_ENDPOINT\") ?? throw new InvalidOperationException(\"GITHUB_ENDPOINT is not set.\");\n", 229 | "var github_model_id = Environment.GetEnvironmentVariable(\"GITHUB_MODEL_ID\") ?? \"gpt-4o-mini\";\n", 230 | "var github_token = Environment.GetEnvironmentVariable(\"GITHUB_TOKEN\") ?? throw new InvalidOperationException(\"GITHUB_TOKEN is not set.\");" 231 | ] 232 | }, 233 | { 234 | "cell_type": "code", 235 | "execution_count": 11, 236 | "id": "83fb5727", 237 | "metadata": { 238 | "language_info": { 239 | "name": "polyglot-notebook" 240 | }, 241 | "polyglot_notebook": { 242 | "kernelName": "csharp" 243 | } 244 | }, 245 | "outputs": [], 246 | "source": [ 247 | "var openAIOptions = new OpenAIClientOptions()\n", 248 | "{\n", 249 | " Endpoint= new Uri(github_endpoint)\n", 250 | "};" 251 | ] 252 | }, 253 | { 254 | "cell_type": "code", 255 | "execution_count": 12, 256 | "id": "32813dd5", 257 | "metadata": { 258 | "language_info": { 259 | "name": "polyglot-notebook" 260 | }, 261 | "polyglot_notebook": { 262 | "kernelName": "csharp" 263 | } 264 | }, 265 | "outputs": [], 266 | "source": [ 267 | "\n", 268 | "\n", 269 | "var openAIClient = new OpenAIClient(new ApiKeyCredential(github_token), openAIOptions);" 270 | ] 271 | }, 272 | { 273 | "cell_type": "code", 274 | "execution_count": 13, 275 | "id": "3a503aa6", 276 | "metadata": { 277 | "language_info": { 278 | "name": "polyglot-notebook" 279 | }, 280 | "polyglot_notebook": { 281 | "kernelName": "csharp" 282 | } 283 | }, 284 | "outputs": [], 285 | "source": [ 286 | "AIAgent agent = new OpenAIClient(new ApiKeyCredential(github_token), openAIOptions).GetChatClient(github_model_id).CreateAIAgent(\n", 287 | " instructions:\"You are a helpful assistant.\");" 288 | ] 289 | }, 290 | { 291 | "cell_type": "code", 292 | "execution_count": 14, 293 | "metadata": { 294 | "language_info": { 295 | "name": "polyglot-notebook" 296 | }, 297 | "polyglot_notebook": { 298 | "kernelName": "csharp" 299 | } 300 | }, 301 | "outputs": [ 302 | { 303 | "name": "stdout", 304 | "output_type": "stream", 305 | "text": [ 306 | "Agents work as one, \n", 307 | "Framework weaves the strands of thought, \n", 308 | "Harmony in code. \r\n" 309 | ] 310 | } 311 | ], 312 | "source": [ 313 | "\n", 314 | "Console.WriteLine(await agent.RunAsync(\"Write a haiku about Agent Framework.\"));" 315 | ] 316 | }, 317 | { 318 | "cell_type": "code", 319 | "execution_count": 15, 320 | "metadata": { 321 | "language_info": { 322 | "name": "polyglot-notebook" 323 | }, 324 | "polyglot_notebook": { 325 | "kernelName": "csharp" 326 | } 327 | }, 328 | "outputs": [ 329 | { 330 | "name": "stdout", 331 | "output_type": "stream", 332 | "text": [ 333 | "In code's quiet dance, \n", 334 | "Agents weave through lines of thought, \n", 335 | "Frameworks build their path. " 336 | ] 337 | } 338 | ], 339 | "source": [ 340 | "await foreach (var update in agent.RunStreamingAsync(\"Write a haiku about Agent Framework.\"))\n", 341 | "{\n", 342 | " Console.Write(update);\n", 343 | "}" 344 | ] 345 | } 346 | ], 347 | "metadata": { 348 | "kernelspec": { 349 | "display_name": ".NET (C#)", 350 | "language": "C#", 351 | "name": ".net-csharp" 352 | }, 353 | "language_info": { 354 | "name": "polyglot-notebook" 355 | }, 356 | "polyglot_notebook": { 357 | "kernelInfo": { 358 | "defaultKernelName": "csharp", 359 | "items": [ 360 | { 361 | "aliases": [], 362 | "name": "csharp" 363 | } 364 | ] 365 | } 366 | } 367 | }, 368 | "nbformat": 4, 369 | "nbformat_minor": 5 370 | } 371 | -------------------------------------------------------------------------------- /03.ExploerAgentFramework/code_samples/dotNET/03-dotnet-agent-framework-aifoundry.ipynb: -------------------------------------------------------------------------------- 1 | { 2 | "cells": [ 3 | { 4 | "cell_type": "code", 5 | "execution_count": 1, 6 | "metadata": { 7 | "language_info": { 8 | "name": "polyglot-notebook" 9 | }, 10 | "polyglot_notebook": { 11 | "kernelName": "csharp" 12 | } 13 | }, 14 | "outputs": [ 15 | { 16 | "data": { 17 | "text/html": [ 18 | "
Installed Packages
" 19 | ] 20 | }, 21 | "metadata": {}, 22 | "output_type": "display_data" 23 | } 24 | ], 25 | "source": [ 26 | "#r \"nuget: Microsoft.Extensions.AI, 9.9.1\"" 27 | ] 28 | }, 29 | { 30 | "cell_type": "code", 31 | "execution_count": 2, 32 | "id": "04b85b4b", 33 | "metadata": { 34 | "language_info": { 35 | "name": "polyglot-notebook" 36 | }, 37 | "polyglot_notebook": { 38 | "kernelName": "csharp" 39 | } 40 | }, 41 | "outputs": [ 42 | { 43 | "data": { 44 | "text/html": [ 45 | "
Installed Packages
" 46 | ] 47 | }, 48 | "metadata": {}, 49 | "output_type": "display_data" 50 | } 51 | ], 52 | "source": [ 53 | "#r \"nuget: Azure.AI.Agents.Persistent, 1.2.0-beta.5\"\n", 54 | "#r \"nuget: Azure.Identity, 1.15.0\"\n", 55 | "#r \"nuget: System.Linq.Async, 6.0.3\"" 56 | ] 57 | }, 58 | { 59 | "cell_type": "code", 60 | "execution_count": 3, 61 | "id": "1efb94a1", 62 | "metadata": { 63 | "language_info": { 64 | "name": "polyglot-notebook" 65 | }, 66 | "polyglot_notebook": { 67 | "kernelName": "csharp" 68 | } 69 | }, 70 | "outputs": [], 71 | "source": [] 72 | }, 73 | { 74 | "cell_type": "code", 75 | "execution_count": null, 76 | "id": "75e55d91", 77 | "metadata": { 78 | "language_info": { 79 | "name": "polyglot-notebook" 80 | }, 81 | "polyglot_notebook": { 82 | "kernelName": "csharp" 83 | } 84 | }, 85 | "outputs": [ 86 | { 87 | "data": { 88 | "text/html": [ 89 | "
Installed Packages
" 90 | ] 91 | }, 92 | "metadata": {}, 93 | "output_type": "display_data" 94 | } 95 | ], 96 | "source": [ 97 | "#r \"nuget: Microsoft.Agents.AI.AzureAI, 1.0.0-preview.251001.3\"" 98 | ] 99 | }, 100 | { 101 | "cell_type": "code", 102 | "execution_count": null, 103 | "id": "e64fbd1f", 104 | "metadata": { 105 | "language_info": { 106 | "name": "polyglot-notebook" 107 | }, 108 | "polyglot_notebook": { 109 | "kernelName": "csharp" 110 | } 111 | }, 112 | "outputs": [ 113 | { 114 | "data": { 115 | "text/html": [ 116 | "
Installed Packages
" 117 | ] 118 | }, 119 | "metadata": {}, 120 | "output_type": "display_data" 121 | } 122 | ], 123 | "source": [ 124 | "#r \"nuget: Microsoft.Agents.AI, 1.0.0-preview.251001.3\"" 125 | ] 126 | }, 127 | { 128 | "cell_type": "code", 129 | "execution_count": 6, 130 | "metadata": { 131 | "language_info": { 132 | "name": "polyglot-notebook" 133 | }, 134 | "polyglot_notebook": { 135 | "kernelName": "csharp" 136 | } 137 | }, 138 | "outputs": [], 139 | "source": [ 140 | "// #r \"nuget: Microsoft.Extensions.AI.OpenAI, 9.9.0-preview.1.25458.4\"" 141 | ] 142 | }, 143 | { 144 | "cell_type": "code", 145 | "execution_count": 7, 146 | "id": "c43d5841", 147 | "metadata": { 148 | "language_info": { 149 | "name": "polyglot-notebook" 150 | }, 151 | "polyglot_notebook": { 152 | "kernelName": "csharp" 153 | } 154 | }, 155 | "outputs": [ 156 | { 157 | "data": { 158 | "text/html": [ 159 | "
Installed Packages
" 160 | ] 161 | }, 162 | "metadata": {}, 163 | "output_type": "display_data" 164 | } 165 | ], 166 | "source": [ 167 | "#r \"nuget: DotNetEnv, 3.1.1\"" 168 | ] 169 | }, 170 | { 171 | "cell_type": "code", 172 | "execution_count": 8, 173 | "id": "e39b5e8f", 174 | "metadata": { 175 | "language_info": { 176 | "name": "polyglot-notebook" 177 | }, 178 | "polyglot_notebook": { 179 | "kernelName": "csharp" 180 | } 181 | }, 182 | "outputs": [], 183 | "source": [ 184 | "using System;\n", 185 | "using System.Linq;\n", 186 | "using Azure.AI.Agents.Persistent;\n", 187 | "using Azure.Identity;\n", 188 | "using Microsoft.Agents.AI;" 189 | ] 190 | }, 191 | { 192 | "cell_type": "code", 193 | "execution_count": 9, 194 | "id": "0e0e92e1", 195 | "metadata": { 196 | "language_info": { 197 | "name": "polyglot-notebook" 198 | }, 199 | "polyglot_notebook": { 200 | "kernelName": "csharp" 201 | } 202 | }, 203 | "outputs": [], 204 | "source": [ 205 | " using DotNetEnv;" 206 | ] 207 | }, 208 | { 209 | "cell_type": "code", 210 | "execution_count": 10, 211 | "id": "80cf0296", 212 | "metadata": { 213 | "language_info": { 214 | "name": "polyglot-notebook" 215 | }, 216 | "polyglot_notebook": { 217 | "kernelName": "csharp" 218 | } 219 | }, 220 | "outputs": [], 221 | "source": [ 222 | "Env.Load(\"../../../.env\");" 223 | ] 224 | }, 225 | { 226 | "cell_type": "code", 227 | "execution_count": 11, 228 | "id": "6943469f", 229 | "metadata": { 230 | "language_info": { 231 | "name": "polyglot-notebook" 232 | }, 233 | "polyglot_notebook": { 234 | "kernelName": "csharp" 235 | } 236 | }, 237 | "outputs": [], 238 | "source": [ 239 | "var azure_foundry_endpoint = Environment.GetEnvironmentVariable(\"AZURE_AI_PROJECT_ENDPOINT\") ?? throw new InvalidOperationException(\"AZURE_AI_PROJECT_ENDPOINT is not set.\");\n", 240 | "var azure_foundry_model_id = Environment.GetEnvironmentVariable(\"AZURE_AI_MODEL_DEPLOYMENT_NAME\") ?? \"gpt-4.1-mini\";" 241 | ] 242 | }, 243 | { 244 | "cell_type": "code", 245 | "execution_count": 12, 246 | "id": "948d7e4a", 247 | "metadata": { 248 | "language_info": { 249 | "name": "polyglot-notebook" 250 | }, 251 | "polyglot_notebook": { 252 | "kernelName": "csharp" 253 | } 254 | }, 255 | "outputs": [ 256 | { 257 | "data": { 258 | "text/plain": [ 259 | "https://kinfeylo-aifoundry-proj-resource.services.ai.azure.com/api/projects/kinfeylo-aifoundry-project" 260 | ] 261 | }, 262 | "metadata": {}, 263 | "output_type": "display_data" 264 | } 265 | ], 266 | "source": [ 267 | "azure_foundry_endpoint" 268 | ] 269 | }, 270 | { 271 | "cell_type": "code", 272 | "execution_count": 13, 273 | "id": "4aa96811", 274 | "metadata": { 275 | "language_info": { 276 | "name": "polyglot-notebook" 277 | }, 278 | "polyglot_notebook": { 279 | "kernelName": "csharp" 280 | } 281 | }, 282 | "outputs": [], 283 | "source": [ 284 | "const string AgentName = \"Agent-Framework\";\n", 285 | "const string AgentInstructions = \"You are an AI assistant that helps people find information.\";" 286 | ] 287 | }, 288 | { 289 | "cell_type": "code", 290 | "execution_count": 14, 291 | "id": "08b8e065", 292 | "metadata": { 293 | "language_info": { 294 | "name": "polyglot-notebook" 295 | }, 296 | "polyglot_notebook": { 297 | "kernelName": "csharp" 298 | } 299 | }, 300 | "outputs": [], 301 | "source": [ 302 | "var persistentAgentsClient = new PersistentAgentsClient(azure_foundry_endpoint, new AzureCliCredential());" 303 | ] 304 | }, 305 | { 306 | "cell_type": "code", 307 | "execution_count": 15, 308 | "id": "0b831865", 309 | "metadata": { 310 | "language_info": { 311 | "name": "polyglot-notebook" 312 | }, 313 | "polyglot_notebook": { 314 | "kernelName": "csharp" 315 | } 316 | }, 317 | "outputs": [], 318 | "source": [ 319 | "var agentMetadata = await persistentAgentsClient.Administration.CreateAgentAsync(\n", 320 | " model: azure_foundry_model_id,\n", 321 | " name: AgentName,\n", 322 | " instructions: AgentInstructions);" 323 | ] 324 | }, 325 | { 326 | "cell_type": "code", 327 | "execution_count": 16, 328 | "id": "49cbe5e0", 329 | "metadata": { 330 | "language_info": { 331 | "name": "polyglot-notebook" 332 | }, 333 | "polyglot_notebook": { 334 | "kernelName": "csharp" 335 | } 336 | }, 337 | "outputs": [], 338 | "source": [ 339 | "AIAgent agent = await persistentAgentsClient.GetAIAgentAsync(agentMetadata.Value.Id);" 340 | ] 341 | }, 342 | { 343 | "cell_type": "code", 344 | "execution_count": 17, 345 | "id": "0e96f4c2", 346 | "metadata": { 347 | "language_info": { 348 | "name": "polyglot-notebook" 349 | }, 350 | "polyglot_notebook": { 351 | "kernelName": "csharp" 352 | } 353 | }, 354 | "outputs": [], 355 | "source": [ 356 | "AgentThread thread = agent.GetNewThread();" 357 | ] 358 | }, 359 | { 360 | "cell_type": "code", 361 | "execution_count": 18, 362 | "id": "a61648c0", 363 | "metadata": { 364 | "language_info": { 365 | "name": "polyglot-notebook" 366 | }, 367 | "polyglot_notebook": { 368 | "kernelName": "csharp" 369 | } 370 | }, 371 | "outputs": [ 372 | { 373 | "name": "stdout", 374 | "output_type": "stream", 375 | "text": [ 376 | "Silent codes align, \n", 377 | "Agents weave their tasks with care— \n", 378 | "Frameworks dance in sync.\r\n" 379 | ] 380 | } 381 | ], 382 | "source": [ 383 | "Console.WriteLine(await agent.RunAsync(\"Write a haiku about Agent Framework\", thread));" 384 | ] 385 | } 386 | ], 387 | "metadata": { 388 | "kernelspec": { 389 | "display_name": ".NET (C#)", 390 | "language": "C#", 391 | "name": ".net-csharp" 392 | }, 393 | "language_info": { 394 | "name": "polyglot-notebook" 395 | }, 396 | "polyglot_notebook": { 397 | "kernelInfo": { 398 | "defaultKernelName": "csharp", 399 | "items": [ 400 | { 401 | "aliases": [], 402 | "name": "csharp" 403 | } 404 | ] 405 | } 406 | } 407 | }, 408 | "nbformat": 4, 409 | "nbformat_minor": 5 410 | } 411 | -------------------------------------------------------------------------------- /03.ExploerAgentFramework/code_samples/dotNET/04-dotnet-agent-framework-foundrylocal.ipynb: -------------------------------------------------------------------------------- 1 | { 2 | "cells": [ 3 | { 4 | "cell_type": "code", 5 | "execution_count": 1, 6 | "id": "6bcd3337", 7 | "metadata": { 8 | "language_info": { 9 | "name": "polyglot-notebook" 10 | }, 11 | "polyglot_notebook": { 12 | "kernelName": "csharp" 13 | } 14 | }, 15 | "outputs": [ 16 | { 17 | "data": { 18 | "text/html": [ 19 | "
Installed Packages
" 20 | ] 21 | }, 22 | "metadata": {}, 23 | "output_type": "display_data" 24 | } 25 | ], 26 | "source": [ 27 | "#r \"nuget: Microsoft.Extensions.AI, 9.9.0\"" 28 | ] 29 | }, 30 | { 31 | "cell_type": "code", 32 | "execution_count": 2, 33 | "id": "bce1e474", 34 | "metadata": { 35 | "language_info": { 36 | "name": "polyglot-notebook" 37 | }, 38 | "polyglot_notebook": { 39 | "kernelName": "csharp" 40 | } 41 | }, 42 | "outputs": [], 43 | "source": [ 44 | "// Load .NET 9.0 compatible packages first\n", 45 | "//#r \"nuget: Microsoft.Extensions.Logging.Abstractions, 9.0.0\"\n", 46 | "////#r \"nuget: Microsoft.Extensions.DependencyInjection.Abstractions, 9.0.0\"\n", 47 | "//#r \"nuget: Microsoft.Extensions.Options, 9.0.0\"" 48 | ] 49 | }, 50 | { 51 | "cell_type": "code", 52 | "execution_count": null, 53 | "id": "7590754d", 54 | "metadata": { 55 | "language_info": { 56 | "name": "polyglot-notebook" 57 | }, 58 | "polyglot_notebook": { 59 | "kernelName": "csharp" 60 | } 61 | }, 62 | "outputs": [ 63 | { 64 | "data": { 65 | "text/html": [ 66 | "
Installed Packages
" 67 | ] 68 | }, 69 | "metadata": {}, 70 | "output_type": "display_data" 71 | } 72 | ], 73 | "source": [ 74 | "#r \"nuget: Microsoft.Agents.AI.OpenAI, 1.0.0-preview.251001.3\"" 75 | ] 76 | }, 77 | { 78 | "cell_type": "code", 79 | "execution_count": null, 80 | "id": "47b56957", 81 | "metadata": { 82 | "language_info": { 83 | "name": "polyglot-notebook" 84 | }, 85 | "polyglot_notebook": { 86 | "kernelName": "csharp" 87 | } 88 | }, 89 | "outputs": [ 90 | { 91 | "data": { 92 | "text/html": [ 93 | "
Installed Packages
" 94 | ] 95 | }, 96 | "metadata": {}, 97 | "output_type": "display_data" 98 | } 99 | ], 100 | "source": [ 101 | "#r \"nuget: Microsoft.Agents.AI, 1.0.0-preview.251001.3\"" 102 | ] 103 | }, 104 | { 105 | "cell_type": "code", 106 | "execution_count": 5, 107 | "id": "4df04054", 108 | "metadata": { 109 | "language_info": { 110 | "name": "polyglot-notebook" 111 | }, 112 | "polyglot_notebook": { 113 | "kernelName": "csharp" 114 | } 115 | }, 116 | "outputs": [ 117 | { 118 | "data": { 119 | "text/html": [ 120 | "
Installed Packages
" 121 | ] 122 | }, 123 | "metadata": {}, 124 | "output_type": "display_data" 125 | } 126 | ], 127 | "source": [ 128 | "#r \"nuget: DotNetEnv, 3.1.1\"" 129 | ] 130 | }, 131 | { 132 | "cell_type": "code", 133 | "execution_count": 6, 134 | "id": "43a7b15f", 135 | "metadata": { 136 | "language_info": { 137 | "name": "polyglot-notebook" 138 | }, 139 | "polyglot_notebook": { 140 | "kernelName": "csharp" 141 | } 142 | }, 143 | "outputs": [], 144 | "source": [ 145 | "using System;\n", 146 | "using System.ClientModel;\n", 147 | "\n", 148 | "using Microsoft.Agents.AI;\n", 149 | "using OpenAI;\n" 150 | ] 151 | }, 152 | { 153 | "cell_type": "code", 154 | "execution_count": 7, 155 | "id": "c592a31f", 156 | "metadata": { 157 | "language_info": { 158 | "name": "polyglot-notebook" 159 | }, 160 | "polyglot_notebook": { 161 | "kernelName": "csharp" 162 | } 163 | }, 164 | "outputs": [], 165 | "source": [ 166 | " using DotNetEnv;" 167 | ] 168 | }, 169 | { 170 | "cell_type": "code", 171 | "execution_count": 8, 172 | "id": "f439a209", 173 | "metadata": { 174 | "language_info": { 175 | "name": "polyglot-notebook" 176 | }, 177 | "polyglot_notebook": { 178 | "kernelName": "csharp" 179 | } 180 | }, 181 | "outputs": [], 182 | "source": [ 183 | "Env.Load(\"../../../.env\");" 184 | ] 185 | }, 186 | { 187 | "cell_type": "code", 188 | "execution_count": 9, 189 | "id": "2e47a3e9", 190 | "metadata": { 191 | "language_info": { 192 | "name": "polyglot-notebook" 193 | }, 194 | "polyglot_notebook": { 195 | "kernelName": "csharp" 196 | } 197 | }, 198 | "outputs": [], 199 | "source": [ 200 | "var foundrylocal_endpoint = Environment.GetEnvironmentVariable(\"FOUNDRYLOCAL_ENDPOINT\") ?? throw new InvalidOperationException(\"FoundryLocal_Endpoint is not set.\");\n", 201 | "var foundrylocal_model_id = Environment.GetEnvironmentVariable(\"FOUNDRYLOCAL_MODEL_DEPLOYMENT_NAME\") ?? \"Qwen3-0.6b-cpu\";" 202 | ] 203 | }, 204 | { 205 | "cell_type": "code", 206 | "execution_count": 10, 207 | "id": "83fb5727", 208 | "metadata": { 209 | "language_info": { 210 | "name": "polyglot-notebook" 211 | }, 212 | "polyglot_notebook": { 213 | "kernelName": "csharp" 214 | } 215 | }, 216 | "outputs": [], 217 | "source": [ 218 | "var openAIOptions = new OpenAIClientOptions()\n", 219 | "{\n", 220 | " Endpoint= new Uri(foundrylocal_endpoint)\n", 221 | "};" 222 | ] 223 | }, 224 | { 225 | "cell_type": "code", 226 | "execution_count": 11, 227 | "id": "32813dd5", 228 | "metadata": { 229 | "language_info": { 230 | "name": "polyglot-notebook" 231 | }, 232 | "polyglot_notebook": { 233 | "kernelName": "csharp" 234 | } 235 | }, 236 | "outputs": [ 237 | { 238 | "data": { 239 | "text/plain": [ 240 | "http://localhost:5272/v1" 241 | ] 242 | }, 243 | "metadata": {}, 244 | "output_type": "display_data" 245 | } 246 | ], 247 | "source": [ 248 | "foundrylocal_endpoint" 249 | ] 250 | }, 251 | { 252 | "cell_type": "code", 253 | "execution_count": 12, 254 | "id": "55032872", 255 | "metadata": { 256 | "language_info": { 257 | "name": "polyglot-notebook" 258 | }, 259 | "polyglot_notebook": { 260 | "kernelName": "csharp" 261 | } 262 | }, 263 | "outputs": [ 264 | { 265 | "data": { 266 | "text/plain": [ 267 | "Qwen3-0.6b-cpu" 268 | ] 269 | }, 270 | "metadata": {}, 271 | "output_type": "display_data" 272 | } 273 | ], 274 | "source": [ 275 | "foundrylocal_model_id" 276 | ] 277 | }, 278 | { 279 | "cell_type": "code", 280 | "execution_count": 13, 281 | "id": "3a503aa6", 282 | "metadata": { 283 | "language_info": { 284 | "name": "polyglot-notebook" 285 | }, 286 | "polyglot_notebook": { 287 | "kernelName": "csharp" 288 | } 289 | }, 290 | "outputs": [], 291 | "source": [ 292 | "AIAgent agent = new OpenAIClient(new ApiKeyCredential(\"nokey\"), openAIOptions).GetChatClient(foundrylocal_model_id).CreateAIAgent(\n", 293 | " instructions:\"You are a helpful assistant.\");" 294 | ] 295 | }, 296 | { 297 | "cell_type": "code", 298 | "execution_count": 14, 299 | "id": "84fd2736", 300 | "metadata": { 301 | "language_info": { 302 | "name": "polyglot-notebook" 303 | }, 304 | "polyglot_notebook": { 305 | "kernelName": "csharp" 306 | } 307 | }, 308 | "outputs": [ 309 | { 310 | "name": "stdout", 311 | "output_type": "stream", 312 | "text": [ 313 | "\n", 314 | "\n", 315 | "Okay, the user asked me to introduce myself. I need to be friendly and helpful. Let me start by saying I'm a language model, but I can also be a helpful assistant. I should mention my ability to understand different languages and provide support in various ways. Maybe include a personal touch, like being a bridge between different cultures. That way, I can show I'm more than just a machine. Also, I should keep the tone positive and approachable. Let me put that together in a natural way.\n", 316 | "\n", 317 | "\n", 318 | "Hello! I'm a language model designed to help with understanding different languages and providing support in various ways. I can assist with translation, cultural exchange, and even provide helpful information. As a bridge between different cultures, I'm here to support you in every way. Let me know how I can help! 😊\r\n" 319 | ] 320 | } 321 | ], 322 | "source": [ 323 | "\n", 324 | "Console.WriteLine(await agent.RunAsync(\"Can you introduce yourself?\"));" 325 | ] 326 | }, 327 | { 328 | "cell_type": "code", 329 | "execution_count": 15, 330 | "id": "5ad29cc1", 331 | "metadata": { 332 | "language_info": { 333 | "name": "polyglot-notebook" 334 | }, 335 | "polyglot_notebook": { 336 | "kernelName": "csharp" 337 | } 338 | }, 339 | "outputs": [ 340 | { 341 | "name": "stdout", 342 | "output_type": "stream", 343 | "text": [ 344 | "\n", 345 | "\n", 346 | "Okay, the user asked me to introduce myself. I need to be friendly and helpful. Let me start by saying I'm a language model, but I can also be a helpful assistant. I should mention my ability to understand different languages and provide support in various ways. Maybe include a personal touch, like being a bridge between different cultures. That way, I can show I'm more than just a machine. Also, I should keep the tone positive and approachable. Let me put that together in a natural way.\n", 347 | "\n", 348 | "\n", 349 | "Hello! I'm a language model designed to help with understanding different languages and providing support in various ways. I can assist with translation, cultural exchange, and even provide helpful information. As a bridge between different cultures, I'm here to support you in every way. Let me know how I can help! 😊" 350 | ] 351 | } 352 | ], 353 | "source": [ 354 | "await foreach (var update in agent.RunStreamingAsync(\"Can you introduce yourself?\"))\n", 355 | "{\n", 356 | " Console.Write(update);\n", 357 | "}" 358 | ] 359 | } 360 | ], 361 | "metadata": { 362 | "kernelspec": { 363 | "display_name": ".NET (C#)", 364 | "language": "C#", 365 | "name": ".net-csharp" 366 | }, 367 | "language_info": { 368 | "name": "polyglot-notebook" 369 | }, 370 | "polyglot_notebook": { 371 | "kernelInfo": { 372 | "defaultKernelName": "csharp", 373 | "items": [ 374 | { 375 | "aliases": [], 376 | "name": "csharp" 377 | } 378 | ] 379 | } 380 | } 381 | }, 382 | "nbformat": 4, 383 | "nbformat_minor": 5 384 | } 385 | -------------------------------------------------------------------------------- /03.ExploerAgentFramework/code_samples/python/03-python-agent-framework-aifoundry.ipynb: -------------------------------------------------------------------------------- 1 | { 2 | "cells": [ 3 | { 4 | "cell_type": "code", 5 | "execution_count": 1, 6 | "id": "b7388610", 7 | "metadata": {}, 8 | "outputs": [], 9 | "source": [ 10 | "# ! pip uninstall agent-framework-azure-ai -y" 11 | ] 12 | }, 13 | { 14 | "cell_type": "code", 15 | "execution_count": 2, 16 | "id": "7b156ea6", 17 | "metadata": {}, 18 | "outputs": [], 19 | "source": [ 20 | "# ! pip install agent-framework-azure-ai -U" 21 | ] 22 | }, 23 | { 24 | "cell_type": "code", 25 | "execution_count": 3, 26 | "id": "b4f4d0c7", 27 | "metadata": {}, 28 | "outputs": [], 29 | "source": [ 30 | "import os\n", 31 | "\n", 32 | "from azure.identity.aio import AzureCliCredential\n", 33 | "from azure.ai.projects.aio import AIProjectClient\n", 34 | "from dotenv import load_dotenv\n", 35 | "\n", 36 | "from agent_framework import ChatAgent\n", 37 | "from agent_framework.azure import AzureAIAgentClient" 38 | ] 39 | }, 40 | { 41 | "cell_type": "code", 42 | "execution_count": 4, 43 | "id": "8797600a", 44 | "metadata": {}, 45 | "outputs": [ 46 | { 47 | "data": { 48 | "text/plain": [ 49 | "True" 50 | ] 51 | }, 52 | "execution_count": 4, 53 | "metadata": {}, 54 | "output_type": "execute_result" 55 | } 56 | ], 57 | "source": [ 58 | "load_dotenv()" 59 | ] 60 | }, 61 | { 62 | "cell_type": "code", 63 | "execution_count": 5, 64 | "id": "da548b11", 65 | "metadata": {}, 66 | "outputs": [ 67 | { 68 | "name": "stdout", 69 | "output_type": "stream", 70 | "text": [ 71 | "Result: Silent codes align, \n", 72 | "Agents weave the web of tasks, \n", 73 | "Framework lights their path.\n", 74 | "\n", 75 | "ok\n" 76 | ] 77 | } 78 | ], 79 | "source": [ 80 | "\n", 81 | "async with (\n", 82 | " AzureCliCredential() as credential,\n", 83 | " AIProjectClient(endpoint=os.environ[\"AZURE_AI_PROJECT_ENDPOINT\"], credential=credential) as client,\n", 84 | " ):\n", 85 | " # Create an agent that will persist\n", 86 | " created_agent = await client.agents.create_agent(\n", 87 | " model=os.environ[\"AZURE_AI_MODEL_DEPLOYMENT_NAME\"], \n", 88 | " name=\"TesDemo\",\n", 89 | " instructions=\"You are a helpful weather agent.\",\n", 90 | " )\n", 91 | "\n", 92 | " try:\n", 93 | " async with ChatAgent(\n", 94 | " # passing in the client is optional here, so if you take the agent_id from the portal\n", 95 | " # you can use it directly without the two lines above.\n", 96 | " chat_client=AzureAIAgentClient(project_client=client, agent_id=created_agent.id),\n", 97 | " ) as agent:\n", 98 | " result = await agent.run(\"Write a haiku about Agent Framework.\")\n", 99 | " print(f\"Result: {result}\\n\")\n", 100 | " finally:\n", 101 | " print(\"ok\")\n", 102 | " # Clean up the agent manually\n", 103 | " # await client.agents.delete_agent(created_agent.id)\n", 104 | " # Create an agent that will persist" 105 | ] 106 | } 107 | ], 108 | "metadata": { 109 | "kernelspec": { 110 | "display_name": "agentenv", 111 | "language": "python", 112 | "name": "python3" 113 | }, 114 | "language_info": { 115 | "codemirror_mode": { 116 | "name": "ipython", 117 | "version": 3 118 | }, 119 | "file_extension": ".py", 120 | "mimetype": "text/x-python", 121 | "name": "python", 122 | "nbconvert_exporter": "python", 123 | "pygments_lexer": "ipython3", 124 | "version": "3.12.10" 125 | } 126 | }, 127 | "nbformat": 4, 128 | "nbformat_minor": 5 129 | } 130 | -------------------------------------------------------------------------------- /04.Tools/code_samples/dotNET/foundry/02-dotnet-agent-framework-aifoundry-code-interpreter.ipynb: -------------------------------------------------------------------------------- 1 | { 2 | "cells": [ 3 | { 4 | "cell_type": "code", 5 | "execution_count": 1, 6 | "metadata": { 7 | "language_info": { 8 | "name": "polyglot-notebook" 9 | }, 10 | "polyglot_notebook": { 11 | "kernelName": "csharp" 12 | } 13 | }, 14 | "outputs": [ 15 | { 16 | "data": { 17 | "text/html": [ 18 | "
Installed Packages
  • Microsoft.Extensions.AI, 9.9.1
" 19 | ] 20 | }, 21 | "metadata": {}, 22 | "output_type": "display_data" 23 | } 24 | ], 25 | "source": [ 26 | "#r \"nuget: Microsoft.Extensions.AI, 9.9.1\"" 27 | ] 28 | }, 29 | { 30 | "cell_type": "code", 31 | "execution_count": 2, 32 | "id": "3830fc08", 33 | "metadata": { 34 | "language_info": { 35 | "name": "polyglot-notebook" 36 | }, 37 | "polyglot_notebook": { 38 | "kernelName": "csharp" 39 | } 40 | }, 41 | "outputs": [ 42 | { 43 | "data": { 44 | "text/html": [ 45 | "
Installed Packages
  • Azure.AI.Agents.Persistent, 1.2.0-beta.5
  • Azure.Identity, 1.15.0
  • System.Linq.Async, 6.0.3
" 46 | ] 47 | }, 48 | "metadata": {}, 49 | "output_type": "display_data" 50 | } 51 | ], 52 | "source": [ 53 | "#r \"nuget: Azure.AI.Agents.Persistent, 1.2.0-beta.5\"\n", 54 | "#r \"nuget: Azure.Identity, 1.15.0\"\n", 55 | "#r \"nuget: System.Linq.Async, 6.0.3\"" 56 | ] 57 | }, 58 | { 59 | "cell_type": "code", 60 | "execution_count": null, 61 | "id": "d5178169", 62 | "metadata": { 63 | "language_info": { 64 | "name": "polyglot-notebook" 65 | }, 66 | "polyglot_notebook": { 67 | "kernelName": "csharp" 68 | } 69 | }, 70 | "outputs": [ 71 | { 72 | "data": { 73 | "text/html": [ 74 | "
Installed Packages
  • Microsoft.Agents.AI.AzureAI, 1.0.0-preview.251001.2
" 75 | ] 76 | }, 77 | "metadata": {}, 78 | "output_type": "display_data" 79 | } 80 | ], 81 | "source": [ 82 | "#r \"nuget: Microsoft.Agents.AI.AzureAI, 1.0.0-preview.251001.3\"" 83 | ] 84 | }, 85 | { 86 | "cell_type": "code", 87 | "execution_count": null, 88 | "id": "a8a111b2", 89 | "metadata": { 90 | "language_info": { 91 | "name": "polyglot-notebook" 92 | }, 93 | "polyglot_notebook": { 94 | "kernelName": "csharp" 95 | } 96 | }, 97 | "outputs": [ 98 | { 99 | "data": { 100 | "text/html": [ 101 | "
Installed Packages
  • microsoft.agents.ai, 1.0.0-preview.251001.2
" 102 | ] 103 | }, 104 | "metadata": {}, 105 | "output_type": "display_data" 106 | } 107 | ], 108 | "source": [ 109 | "#r \"nuget: Microsoft.Agents.AI, 1.0.0-preview.251001.3\"" 110 | ] 111 | }, 112 | { 113 | "cell_type": "code", 114 | "execution_count": null, 115 | "metadata": { 116 | "language_info": { 117 | "name": "polyglot-notebook" 118 | }, 119 | "polyglot_notebook": { 120 | "kernelName": "csharp" 121 | } 122 | }, 123 | "outputs": [ 124 | { 125 | "data": { 126 | "text/html": [ 127 | "
Installed Packages
  • Microsoft.Extensions.AI.OpenAI, 9.9.1-preview.1.25474.6
" 128 | ] 129 | }, 130 | "metadata": {}, 131 | "output_type": "display_data" 132 | } 133 | ], 134 | "source": [ 135 | "// #r \"nuget: Microsoft.Extensions.AI.OpenAI, 9.9.1-preview.1.25474.6\"" 136 | ] 137 | }, 138 | { 139 | "cell_type": "code", 140 | "execution_count": 6, 141 | "id": "6de7ac9b", 142 | "metadata": { 143 | "language_info": { 144 | "name": "polyglot-notebook" 145 | }, 146 | "polyglot_notebook": { 147 | "kernelName": "csharp" 148 | } 149 | }, 150 | "outputs": [ 151 | { 152 | "data": { 153 | "text/html": [ 154 | "
Installed Packages
  • DotNetEnv, 3.1.1
" 155 | ] 156 | }, 157 | "metadata": {}, 158 | "output_type": "display_data" 159 | } 160 | ], 161 | "source": [ 162 | "#r \"nuget: DotNetEnv, 3.1.1\"" 163 | ] 164 | }, 165 | { 166 | "cell_type": "code", 167 | "execution_count": 7, 168 | "id": "cb486999", 169 | "metadata": { 170 | "language_info": { 171 | "name": "polyglot-notebook" 172 | }, 173 | "polyglot_notebook": { 174 | "kernelName": "csharp" 175 | } 176 | }, 177 | "outputs": [], 178 | "source": [ 179 | "using System;\n", 180 | "using System.Linq;\n", 181 | "using Azure.AI.Agents.Persistent;\n", 182 | "using Azure.Identity;\n", 183 | "using Microsoft.Agents.AI;" 184 | ] 185 | }, 186 | { 187 | "cell_type": "code", 188 | "execution_count": 8, 189 | "id": "b8a8c321", 190 | "metadata": { 191 | "language_info": { 192 | "name": "polyglot-notebook" 193 | }, 194 | "polyglot_notebook": { 195 | "kernelName": "csharp" 196 | } 197 | }, 198 | "outputs": [], 199 | "source": [ 200 | " using DotNetEnv;" 201 | ] 202 | }, 203 | { 204 | "cell_type": "code", 205 | "execution_count": 9, 206 | "id": "b502709a", 207 | "metadata": { 208 | "language_info": { 209 | "name": "polyglot-notebook" 210 | }, 211 | "polyglot_notebook": { 212 | "kernelName": "csharp" 213 | } 214 | }, 215 | "outputs": [], 216 | "source": [ 217 | "Env.Load(\"../../../../.env\");" 218 | ] 219 | }, 220 | { 221 | "cell_type": "code", 222 | "execution_count": 10, 223 | "id": "51930666", 224 | "metadata": { 225 | "language_info": { 226 | "name": "polyglot-notebook" 227 | }, 228 | "polyglot_notebook": { 229 | "kernelName": "csharp" 230 | } 231 | }, 232 | "outputs": [], 233 | "source": [ 234 | "var azure_foundry_endpoint = Environment.GetEnvironmentVariable(\"AZURE_AI_PROJECT_ENDPOINT\") ?? throw new InvalidOperationException(\"AZURE_AI_PROJECT_ENDPOINT is not set.\");\n", 235 | "var azure_foundry_model_id = Environment.GetEnvironmentVariable(\"AZURE_AI_MODEL_DEPLOYMENT_NAME\") ?? \"gpt-4.1-mini\";" 236 | ] 237 | }, 238 | { 239 | "cell_type": "code", 240 | "execution_count": 11, 241 | "id": "f59662d2", 242 | "metadata": { 243 | "language_info": { 244 | "name": "polyglot-notebook" 245 | }, 246 | "polyglot_notebook": { 247 | "kernelName": "csharp" 248 | } 249 | }, 250 | "outputs": [], 251 | "source": [ 252 | "const string AgentName = \"Code-Agent-Framework\";\n", 253 | "const string AgentInstructions = \"You are an AI assistant that helps people find information.\";" 254 | ] 255 | }, 256 | { 257 | "cell_type": "code", 258 | "execution_count": 12, 259 | "id": "3087a77d", 260 | "metadata": { 261 | "language_info": { 262 | "name": "polyglot-notebook" 263 | }, 264 | "polyglot_notebook": { 265 | "kernelName": "csharp" 266 | } 267 | }, 268 | "outputs": [], 269 | "source": [ 270 | "var persistentAgentsClient = new PersistentAgentsClient(azure_foundry_endpoint, new AzureCliCredential());" 271 | ] 272 | }, 273 | { 274 | "cell_type": "code", 275 | "execution_count": 13, 276 | "id": "9f67e31a", 277 | "metadata": { 278 | "language_info": { 279 | "name": "polyglot-notebook" 280 | }, 281 | "polyglot_notebook": { 282 | "kernelName": "csharp" 283 | } 284 | }, 285 | "outputs": [], 286 | "source": [ 287 | "var agentMetadata = await persistentAgentsClient.Administration.CreateAgentAsync(\n", 288 | " model: azure_foundry_model_id,\n", 289 | " name: AgentName,\n", 290 | " instructions: AgentInstructions,\n", 291 | " tools: [new CodeInterpreterToolDefinition()]);" 292 | ] 293 | }, 294 | { 295 | "cell_type": "code", 296 | "execution_count": 14, 297 | "id": "d563ee1d", 298 | "metadata": { 299 | "language_info": { 300 | "name": "polyglot-notebook" 301 | }, 302 | "polyglot_notebook": { 303 | "kernelName": "csharp" 304 | } 305 | }, 306 | "outputs": [], 307 | "source": [ 308 | "AIAgent agent = await persistentAgentsClient.GetAIAgentAsync(agentMetadata.Value.Id);" 309 | ] 310 | }, 311 | { 312 | "cell_type": "code", 313 | "execution_count": 15, 314 | "id": "86813ade", 315 | "metadata": { 316 | "language_info": { 317 | "name": "polyglot-notebook" 318 | }, 319 | "polyglot_notebook": { 320 | "kernelName": "csharp" 321 | } 322 | }, 323 | "outputs": [], 324 | "source": [ 325 | "AgentThread thread = agent.GetNewThread();" 326 | ] 327 | }, 328 | { 329 | "cell_type": "code", 330 | "execution_count": 16, 331 | "id": "ce660c36", 332 | "metadata": { 333 | "language_info": { 334 | "name": "polyglot-notebook" 335 | }, 336 | "polyglot_notebook": { 337 | "kernelName": "csharp" 338 | } 339 | }, 340 | "outputs": [ 341 | { 342 | "name": "stdout", 343 | "output_type": "stream", 344 | "text": [ 345 | "The Fibonacci sequence values less than 101 are:\n", 346 | "\n", 347 | "\\[ 0, 1, 1, 2, 3, 5, 8, 13, 21, 34, 55, 89 \\]\r\n" 348 | ] 349 | } 350 | ], 351 | "source": [ 352 | "Console.WriteLine(await agent.RunAsync(\"Use code to determine the values in the Fibonacci sequence that that are less then the value of 101?\", thread));" 353 | ] 354 | } 355 | ], 356 | "metadata": { 357 | "kernelspec": { 358 | "display_name": ".NET (C#)", 359 | "language": "C#", 360 | "name": ".net-csharp" 361 | }, 362 | "language_info": { 363 | "name": "polyglot-notebook" 364 | }, 365 | "polyglot_notebook": { 366 | "kernelInfo": { 367 | "defaultKernelName": "csharp", 368 | "items": [ 369 | { 370 | "aliases": [], 371 | "name": "csharp" 372 | } 373 | ] 374 | } 375 | } 376 | }, 377 | "nbformat": 4, 378 | "nbformat_minor": 5 379 | } 380 | -------------------------------------------------------------------------------- /04.Tools/code_samples/files/demo.md: -------------------------------------------------------------------------------- 1 | # GraphRAG: Responsible AI FAQ 2 | 3 | ## What is GraphRAG? 4 | 5 | GraphRAG is an AI-based content interpretation and search capability. Using LLMs, it parses data to create a knowledge graph and answer user questions about a user-provided private dataset. 6 | 7 | ## What can GraphRAG do? 8 | 9 | GraphRAG is able to connect information across large volumes of information and use these connections to answer questions that are difficult or impossible to answer using keyword and vector-based search mechanisms. Building on the previous question, provide semi-technical, high-level information on how the system offers functionality for various uses. This lets a system using GraphRAG to answer questions where the answers span many documents as well as thematic questions such as “what are the top themes in this dataset?.” 10 | 11 | ## What are GraphRAG’s intended use(s)? 12 | 13 | * GraphRAG is intended to support critical information discovery and analysis use cases where the information required to arrive at a useful insight spans many documents, is noisy, is mixed with mis and/or dis-information, or when the questions users aim to answer are more abstract or thematic than the underlying data can directly answer. 14 | * GraphRAG is designed to be used in settings where users are already trained on responsible analytic approaches and critical reasoning is expected. GraphRAG is capable of providing high degrees of insight on complex information topics, however human analysis by a domain expert of the answers is needed in order to verify and augment GraphRAG’s generated responses. 15 | * GraphRAG is intended to be deployed and used with a domain specific corpus of text data. GraphRAG itself does not collect user data, but users are encouraged to verify data privacy policies of the chosen LLM used to configure GraphRAG. 16 | 17 | ## How was GraphRAG evaluated? What metrics are used to measure performance? 18 | 19 | GraphRAG has been evaluated in multiple ways. The primary concerns are 1) accurate representation of the data set, 2) providing transparency and groundedness of responses, 3) resilience to prompt and data corpus injection attacks, and 4) low hallucination rates. Details on how each of these has been evaluated is outlined below by number. 20 | 21 | 1) Accurate representation of the dataset has been tested by both manual inspection and automated testing against a “gold answer” that is created from randomly selected subsets of a test corpus. 22 | 23 | 2) Transparency and groundedness of responses is tested via automated answer coverage evaluation and human inspection of the underlying context returned. 24 | 25 | 3) We test both user prompt injection attacks (“jailbreaks”) and cross prompt injection attacks (“data attacks”) using manual and semi-automated techniques. 26 | 27 | 4) Hallucination rates are evaluated using claim coverage metrics, manual inspection of answer and source, and adversarial attacks to attempt a forced hallucination through adversarial and exceptionally challenging datasets. 28 | 29 | ## What are the limitations of GraphRAG? How can users minimize the impact of GraphRAG’s limitations when using the system? 30 | 31 | GraphRAG depends on a well-constructed indexing examples. For general applications (e.g. content oriented around people, places, organizations, things, etc.) we provide example indexing prompts. For unique datasets effective indexing can depend on proper identification of domain-specific concepts. 32 | 33 | Indexing is a relatively expensive operation; a best practice to mitigate indexing is to create a small test dataset in the target domain to ensure indexer performance prior to large indexing operations. 34 | 35 | ## What operational factors and settings allow for effective and responsible use of GraphRAG? 36 | 37 | GraphRAG is designed for use by users with domain sophistication and experience working through difficult information challenges. While the approach is generally robust to injection attacks and identifying conflicting sources of information, the system is designed for trusted users. Proper human analysis of responses is important to generate reliable insights, and the provenance of information should be traced to ensure human agreement with the inferences made as part of the answer generation. 38 | 39 | GraphRAG yields the most effective results on natural language text data that is collectively focused on an overall topic or theme, and that is entity rich – entities being people, places, things, or objects that can be uniquely identified. 40 | 41 | While GraphRAG has been evaluated for its resilience to prompt and data corpus injection attacks, and has been probed for specific types of harms, the LLM that the user configures with GraphRAG may produce inappropriate or offensive content, which may make it inappropriate to deploy for sensitive contexts without additional mitigations that are specific to the use case and model. Developers should assess outputs for their context and use available safety classifiers, model specific safety filters and features (such as https://azure.microsoft.com/en-us/products/ai-services/ai-content-safety), or custom solutions appropriate for their use case. -------------------------------------------------------------------------------- /04.Tools/code_samples/files/home.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/microsoft/Agent-Framework-Samples/3d800067ed3290af35bb1f7bb958779193e4a545/04.Tools/code_samples/files/home.png -------------------------------------------------------------------------------- /04.Tools/code_samples/python/foundry/01.python-agent-framework-aifoundry-vision.ipynb: -------------------------------------------------------------------------------- 1 | { 2 | "cells": [ 3 | { 4 | "cell_type": "code", 5 | "execution_count": 1, 6 | "id": "ccdc7909", 7 | "metadata": {}, 8 | "outputs": [], 9 | "source": [ 10 | "\n", 11 | "# ! pip install agent-framework-azure-ai -U" 12 | ] 13 | }, 14 | { 15 | "cell_type": "code", 16 | "execution_count": 2, 17 | "id": "25aece1d", 18 | "metadata": {}, 19 | "outputs": [], 20 | "source": [ 21 | "\n", 22 | "from agent_framework.azure import AzureAIAgentClient\n", 23 | "from agent_framework import ChatAgent,ChatMessage, DataContent, Role, TextContent\n", 24 | "\n", 25 | "from azure.ai.projects.aio import AIProjectClient\n", 26 | "from azure.identity.aio import AzureCliCredential" 27 | ] 28 | }, 29 | { 30 | "cell_type": "code", 31 | "execution_count": 3, 32 | "id": "4e54cdb7", 33 | "metadata": {}, 34 | "outputs": [], 35 | "source": [ 36 | "\n", 37 | "import os\n", 38 | "import base64\n", 39 | "from dotenv import load_dotenv" 40 | ] 41 | }, 42 | { 43 | "cell_type": "code", 44 | "execution_count": 4, 45 | "id": "6b0e6968", 46 | "metadata": {}, 47 | "outputs": [ 48 | { 49 | "data": { 50 | "text/plain": [ 51 | "True" 52 | ] 53 | }, 54 | "execution_count": 4, 55 | "metadata": {}, 56 | "output_type": "execute_result" 57 | } 58 | ], 59 | "source": [ 60 | "load_dotenv()" 61 | ] 62 | }, 63 | { 64 | "cell_type": "code", 65 | "execution_count": 5, 66 | "id": "5b560781", 67 | "metadata": {}, 68 | "outputs": [], 69 | "source": [ 70 | "AgentName = \"Vision-Agent\"\n", 71 | "AgentInstructions = \"You are my furniture sales consultant, you can find different furniture elements from the pictures and give me a purchase suggestion\"" 72 | ] 73 | }, 74 | { 75 | "cell_type": "code", 76 | "execution_count": 6, 77 | "id": "26ff8fae", 78 | "metadata": {}, 79 | "outputs": [ 80 | { 81 | "name": "stdout", 82 | "output_type": "stream", 83 | "text": [ 84 | "Agent: Based on the image provided, I have identified the key furniture elements in this living room. Below are purchase suggestions and estimated price ranges for each piece of furniture:\n", 85 | "\n", 86 | "1. **Sofa** \n", 87 | " - Style: Modern, white fabric couch with cushion accents. \n", 88 | " - Suggested Price: $500–$1,200 \n", 89 | "\n", 90 | "2. **Coffee Table** \n", 91 | " - Style: Oval-shaped wooden table with minimalist design. \n", 92 | " - Suggested Price: $150–$400 \n", 93 | "\n", 94 | "3. **TV Console/Stand** \n", 95 | " - Style: Long, wooden console with drawers for storage. \n", 96 | " - Suggested Price: $300–$800 \n", 97 | "\n", 98 | "4. **Accent Chair** \n", 99 | " - Style: Dark blue upholstered chair with high back. \n", 100 | " - Suggested Price: $250–$600 \n", 101 | "\n", 102 | "5. **Side Table (next to accent chair)** \n", 103 | " - Style: Round, metallic tripod base with glass top. \n", 104 | " - Suggested Price: $100–$250 \n", 105 | "\n", 106 | "6. **Pendant Lighting Fixtures** \n", 107 | " - Style: Modern dangling spherical glass pendant lights. \n", 108 | " - Suggested Price: $120–$300 per unit \n", 109 | "\n", 110 | "7. **Chandelier** \n", 111 | " - Style: Modern black geometric chandelier with glass bulbs. \n", 112 | " - Suggested Price: $200–$600 \n", 113 | "\n", 114 | "8. **Decor Items** \n", 115 | " - Wall Art (deer print): $50–$150 \n", 116 | " - Cushions (various blue and white designs): $15–$40 each \n", 117 | " - Tabletop Accessories (books, small plants): $10–$50 \n", 118 | "\n", 119 | "Prices may vary based on the brand, material, and retailer. Let me know if you’d like assistance finding specific options!\n" 120 | ] 121 | } 122 | ], 123 | "source": [ 124 | "async with (\n", 125 | " AzureCliCredential() as credential,\n", 126 | " AIProjectClient(endpoint=os.environ[\"AZURE_AI_PROJECT_ENDPOINT\"], credential=credential) as client,\n", 127 | " ):\n", 128 | " created_agent = await client.agents.create_agent(\n", 129 | " model=os.environ[\"AZURE_AI_MODEL_DEPLOYMENT_NAME\"], \n", 130 | " instructions=AgentInstructions,\n", 131 | " name=AgentName\n", 132 | " )\n", 133 | " chat_client=AzureAIAgentClient(project_client=client, agent_id=created_agent.id)\n", 134 | "\n", 135 | " async with ChatAgent(\n", 136 | " # passing in the client is optional here, so if you take the agent_id from the portal\n", 137 | " # you can use it directly without the two lines above.\n", 138 | " chat_client=chat_client\n", 139 | " ) as agent:\n", 140 | " \n", 141 | "\n", 142 | " image_path = \"../../files/home.png\"\n", 143 | " with open(image_path, \"rb\") as image_file:\n", 144 | " image_b64 = base64.b64encode(image_file.read()).decode()\n", 145 | " image_uri = f\"data:image/png;base64,{image_b64}\"\n", 146 | " message = ChatMessage(\n", 147 | " role=Role.USER,\n", 148 | " contents=[\n", 149 | " TextContent(text=\"Please find the relevant furniture according to the image and give the corresponding price for each piece of furniture\"),\n", 150 | " DataContent(uri=image_uri, media_type=\"image/png\")\n", 151 | " ]\n", 152 | " )\n", 153 | " # response = agent.run_stream(message)\n", 154 | " first_result = await agent.run(message)\n", 155 | " \n", 156 | " print(f\"Agent: {first_result.text}\")" 157 | ] 158 | } 159 | ], 160 | "metadata": { 161 | "kernelspec": { 162 | "display_name": "agentenv", 163 | "language": "python", 164 | "name": "python3" 165 | }, 166 | "language_info": { 167 | "codemirror_mode": { 168 | "name": "ipython", 169 | "version": 3 170 | }, 171 | "file_extension": ".py", 172 | "mimetype": "text/x-python", 173 | "name": "python", 174 | "nbconvert_exporter": "python", 175 | "pygments_lexer": "ipython3", 176 | "version": "3.12.10" 177 | } 178 | }, 179 | "nbformat": 4, 180 | "nbformat_minor": 5 181 | } 182 | -------------------------------------------------------------------------------- /04.Tools/code_samples/python/foundry/02.python-agent-framework-aifoundry-code-interpreter.ipynb: -------------------------------------------------------------------------------- 1 | { 2 | "cells": [ 3 | { 4 | "cell_type": "code", 5 | "execution_count": 1, 6 | "id": "0e32e2da", 7 | "metadata": {}, 8 | "outputs": [], 9 | "source": [ 10 | "# ! pip install agent-framework-azure-ai -U" 11 | ] 12 | }, 13 | { 14 | "cell_type": "code", 15 | "execution_count": 2, 16 | "id": "25aece1d", 17 | "metadata": {}, 18 | "outputs": [], 19 | "source": [ 20 | "\n", 21 | "from agent_framework.azure import AzureAIAgentClient\n", 22 | "from agent_framework import ChatAgent,HostedCodeInterpreterTool\n", 23 | "\n", 24 | "from azure.ai.projects.aio import AIProjectClient\n", 25 | "from azure.ai.agents.models import CodeInterpreterTool\n", 26 | "from azure.identity.aio import AzureCliCredential" 27 | ] 28 | }, 29 | { 30 | "cell_type": "code", 31 | "execution_count": 3, 32 | "id": "4e54cdb7", 33 | "metadata": {}, 34 | "outputs": [], 35 | "source": [ 36 | "\n", 37 | "import os\n", 38 | "from dotenv import load_dotenv" 39 | ] 40 | }, 41 | { 42 | "cell_type": "code", 43 | "execution_count": 4, 44 | "id": "6b0e6968", 45 | "metadata": {}, 46 | "outputs": [ 47 | { 48 | "data": { 49 | "text/plain": [ 50 | "True" 51 | ] 52 | }, 53 | "execution_count": 4, 54 | "metadata": {}, 55 | "output_type": "execute_result" 56 | } 57 | ], 58 | "source": [ 59 | "load_dotenv()" 60 | ] 61 | }, 62 | { 63 | "cell_type": "code", 64 | "execution_count": 5, 65 | "id": "5b560781", 66 | "metadata": {}, 67 | "outputs": [], 68 | "source": [ 69 | "AgentName = \"Coding-Agent\"\n", 70 | "AgentInstructions = \"You are an AI assistant that helps people find information.\"" 71 | ] 72 | }, 73 | { 74 | "cell_type": "code", 75 | "execution_count": 6, 76 | "id": "7a59b49b", 77 | "metadata": {}, 78 | "outputs": [], 79 | "source": [ 80 | "code = CodeInterpreterTool()" 81 | ] 82 | }, 83 | { 84 | "cell_type": "code", 85 | "execution_count": 7, 86 | "id": "26ff8fae", 87 | "metadata": {}, 88 | "outputs": [ 89 | { 90 | "name": "stdout", 91 | "output_type": "stream", 92 | "text": [ 93 | "Agent: Sure! I will write a simple Python code snippet to generate Fibonacci numbers less than 101. Here is the code:The Fibonacci numbers less than 101 are: \n", 94 | "0, 1, 1, 2, 3, 5, 8, 13, 21, 34, 55, and 89.\n" 95 | ] 96 | } 97 | ], 98 | "source": [ 99 | "async with (\n", 100 | " AzureCliCredential() as credential,\n", 101 | " AIProjectClient(endpoint=os.environ[\"AZURE_AI_PROJECT_ENDPOINT\"], credential=credential) as client,\n", 102 | " ):\n", 103 | " created_agent = await client.agents.create_agent(\n", 104 | " model=os.environ[\"AZURE_AI_MODEL_DEPLOYMENT_NAME\"], \n", 105 | " instructions=AgentInstructions,\n", 106 | " name=AgentName,\n", 107 | " tools=code.definitions\n", 108 | " )\n", 109 | " chat_client=AzureAIAgentClient(project_client=client, agent_id=created_agent.id)\n", 110 | "\n", 111 | " async with ChatAgent(\n", 112 | " # passing in the client is optional here, so if you take the agent_id from the portal\n", 113 | " # you can use it directly without the two lines above.\n", 114 | " chat_client=chat_client,\n", 115 | " tools=HostedCodeInterpreterTool()\n", 116 | " ) as agent:\n", 117 | " message = \"Use code to determine the values in the Fibonacci sequence that that are less then the value of 101?\"\n", 118 | " # response = agent.run_stream(message)\n", 119 | " first_result = await agent.run(message)\n", 120 | " \n", 121 | " print(f\"Agent: {first_result.text}\")" 122 | ] 123 | } 124 | ], 125 | "metadata": { 126 | "kernelspec": { 127 | "display_name": "agentenv", 128 | "language": "python", 129 | "name": "python3" 130 | }, 131 | "language_info": { 132 | "codemirror_mode": { 133 | "name": "ipython", 134 | "version": 3 135 | }, 136 | "file_extension": ".py", 137 | "mimetype": "text/x-python", 138 | "name": "python", 139 | "nbconvert_exporter": "python", 140 | "pygments_lexer": "ipython3", 141 | "version": "3.12.10" 142 | } 143 | }, 144 | "nbformat": 4, 145 | "nbformat_minor": 5 146 | } 147 | -------------------------------------------------------------------------------- /04.Tools/code_samples/python/foundry/03.python-agent-framework-aifoundry-binggrounding.ipynb: -------------------------------------------------------------------------------- 1 | { 2 | "cells": [ 3 | { 4 | "cell_type": "code", 5 | "execution_count": 1, 6 | "id": "5a8b7e26", 7 | "metadata": {}, 8 | "outputs": [], 9 | "source": [ 10 | "# ! pip install agent-framework-azure-ai -U" 11 | ] 12 | }, 13 | { 14 | "cell_type": "code", 15 | "execution_count": null, 16 | "id": "25aece1d", 17 | "metadata": {}, 18 | "outputs": [], 19 | "source": [ 20 | "\n", 21 | "from agent_framework.azure import AzureAIAgentClient\n", 22 | "from agent_framework import ChatAgent, HostedWebSearchTool\n", 23 | "\n", 24 | "from azure.ai.projects.aio import AIProjectClient\n", 25 | "from azure.ai.agents.models import BingGroundingTool\n", 26 | "from azure.identity.aio import AzureCliCredential" 27 | ] 28 | }, 29 | { 30 | "cell_type": "code", 31 | "execution_count": 3, 32 | "id": "4e54cdb7", 33 | "metadata": {}, 34 | "outputs": [], 35 | "source": [ 36 | "\n", 37 | "import os\n", 38 | "import base64\n", 39 | "from dotenv import load_dotenv" 40 | ] 41 | }, 42 | { 43 | "cell_type": "code", 44 | "execution_count": 4, 45 | "id": "6b0e6968", 46 | "metadata": {}, 47 | "outputs": [ 48 | { 49 | "data": { 50 | "text/plain": [ 51 | "True" 52 | ] 53 | }, 54 | "execution_count": 4, 55 | "metadata": {}, 56 | "output_type": "execute_result" 57 | } 58 | ], 59 | "source": [ 60 | "load_dotenv()" 61 | ] 62 | }, 63 | { 64 | "cell_type": "code", 65 | "execution_count": 5, 66 | "id": "5b560781", 67 | "metadata": {}, 68 | "outputs": [], 69 | "source": [ 70 | "AgentName = \"Search-Agent\"\n", 71 | "AgentInstructions = \"You are an AI assistant that helps people find information.\"" 72 | ] 73 | }, 74 | { 75 | "cell_type": "code", 76 | "execution_count": 6, 77 | "id": "1a1b8845", 78 | "metadata": {}, 79 | "outputs": [], 80 | "source": [ 81 | "conn_id = os.environ[\"BING_CONNECTION_ID\"] # Ensure the BING_CONNECTION_NAME environment variable is set\n", 82 | "\n", 83 | "# Initialize the Bing Grounding tool\n", 84 | "bing = BingGroundingTool(connection_id=conn_id)" 85 | ] 86 | }, 87 | { 88 | "cell_type": "code", 89 | "execution_count": 7, 90 | "id": "739a0f2b", 91 | "metadata": {}, 92 | "outputs": [ 93 | { 94 | "data": { 95 | "text/plain": [ 96 | "" 97 | ] 98 | }, 99 | "execution_count": 7, 100 | "metadata": {}, 101 | "output_type": "execute_result" 102 | } 103 | ], 104 | "source": [ 105 | "bing" 106 | ] 107 | }, 108 | { 109 | "cell_type": "code", 110 | "execution_count": 8, 111 | "id": "26ff8fae", 112 | "metadata": {}, 113 | "outputs": [ 114 | { 115 | "name": "stdout", 116 | "output_type": "stream", 117 | "text": [ 118 | "Agent: The GHCAgentWorkshop on GitHub is a workshop designed to help participants create GitHub Copilot applications with zero coding, leveraging the AI-powered GitHub Copilot Agent. The workshop introduces users to the agent mode of GitHub Copilot Chat that allows direct project creation from requirements. It is structured in a hands-on way, consisting of multiple experiments to explore the capabilities of GitHub Copilot agents, from model functions to application building and deployment.\n", 119 | "\n", 120 | "The workshop includes:\n", 121 | "- Preparations such as having a GitHub account, forking and cloning the repository, and setting up Python and Visual Studio Code with relevant plugins.\n", 122 | "- Experiment 1 (RAG Application): Understanding GitHub Copilot model capabilities, working with the agent mode, creating a front-end Copilot application, and calling GitHub models through the agent.\n", 123 | "- Experiment 2 (Stock Analysis): Generating simulated data with the Copilot agent, building a stock UI, and developing both front-end and back-end components.\n", 124 | "- Experiment 3 (Experience GPT-5 and App Deployment): Using the GPT-5 model within GitHub Copilot, creating a lottery wheel application with the agent mode, and deploying the application via GitHub Pages.\n", 125 | "\n", 126 | "The workshop focuses on making everyone feel like a project manager empowered by AI, with a low-code/no-code approach to create AI-assisted projects.\n", 127 | "\n", 128 | "Additionally, it provides resources for free GitHub Copilot use and links to further readings about GitHub models and Copilot agent capabilities. The repository contains code, markdown files, and other resources to facilitate learning these concepts.\n", 129 | "\n", 130 | "In summary, GHCAgentWorkshop is an instructional repository that teaches users how to leverage GitHub Copilot’s agent mode to efficiently build and deploy AI applications through interactive experiments and practical coding scenarios in a no-code or low-code manner. It is suitable for developers eager to explore GitHub Copilot's advanced features in building automation and coding assistants【3:0†source】.\n" 131 | ] 132 | } 133 | ], 134 | "source": [ 135 | "async with (\n", 136 | " AzureCliCredential() as credential,\n", 137 | " AIProjectClient(endpoint=os.environ[\"AZURE_AI_PROJECT_ENDPOINT\"], credential=credential) as client,\n", 138 | " ):\n", 139 | "\n", 140 | " created_agent = await client.agents.create_agent(\n", 141 | " model=os.environ[\"AZURE_AI_MODEL_DEPLOYMENT_NAME\"], \n", 142 | " instructions=AgentInstructions,\n", 143 | " name=AgentName,\n", 144 | " tools=bing.definitions\n", 145 | " )\n", 146 | " chat_client=AzureAIAgentClient(project_client=client, agent_id=created_agent.id)\n", 147 | "\n", 148 | " async with ChatAgent(\n", 149 | " # passing in the client is optional here, so if you take the agent_id from the portal\n", 150 | " # you can use it directly without the two lines above.\n", 151 | " chat_client=chat_client,\n", 152 | " tools=HostedWebSearchTool()\n", 153 | " ) as agent:\n", 154 | " message = \"Could you please describe the workshop according to the link https://github.com/kinfey/GHCAgentWorkshop?\"\n", 155 | " # response = agent.run_stream(message)\n", 156 | " first_result = await agent.run(message)\n", 157 | " \n", 158 | " print(f\"Agent: {first_result.text}\")" 159 | ] 160 | } 161 | ], 162 | "metadata": { 163 | "kernelspec": { 164 | "display_name": "agentenv", 165 | "language": "python", 166 | "name": "python3" 167 | }, 168 | "language_info": { 169 | "codemirror_mode": { 170 | "name": "ipython", 171 | "version": 3 172 | }, 173 | "file_extension": ".py", 174 | "mimetype": "text/x-python", 175 | "name": "python", 176 | "nbconvert_exporter": "python", 177 | "pygments_lexer": "ipython3", 178 | "version": "3.12.10" 179 | } 180 | }, 181 | "nbformat": 4, 182 | "nbformat_minor": 5 183 | } 184 | -------------------------------------------------------------------------------- /04.Tools/code_samples/python/foundry/04.python-agent-framework-aifoundry-file-search.ipynb: -------------------------------------------------------------------------------- 1 | { 2 | "cells": [ 3 | { 4 | "cell_type": "code", 5 | "execution_count": 1, 6 | "id": "3a7e1e59", 7 | "metadata": {}, 8 | "outputs": [], 9 | "source": [ 10 | "# ! pip install agent-framework-azure-ai -U" 11 | ] 12 | }, 13 | { 14 | "cell_type": "code", 15 | "execution_count": 2, 16 | "id": "97f08567", 17 | "metadata": {}, 18 | "outputs": [], 19 | "source": [ 20 | "import os\n", 21 | "\n", 22 | "from azure.identity.aio import AzureCliCredential\n", 23 | "from azure.ai.projects.aio import AIProjectClient\n", 24 | "from dotenv import load_dotenv\n", 25 | "\n", 26 | "from azure.ai.agents.models import FilePurpose,FileSearchTool\n", 27 | "from agent_framework.azure import AzureAIAgentClient\n", 28 | "from agent_framework import ChatAgent,HostedFileSearchTool,HostedVectorStoreContent" 29 | ] 30 | }, 31 | { 32 | "cell_type": "code", 33 | "execution_count": 3, 34 | "id": "2a3bcf9d", 35 | "metadata": {}, 36 | "outputs": [ 37 | { 38 | "data": { 39 | "text/plain": [ 40 | "True" 41 | ] 42 | }, 43 | "execution_count": 3, 44 | "metadata": {}, 45 | "output_type": "execute_result" 46 | } 47 | ], 48 | "source": [ 49 | "load_dotenv()" 50 | ] 51 | }, 52 | { 53 | "cell_type": "code", 54 | "execution_count": 4, 55 | "id": "3d3ca050", 56 | "metadata": {}, 57 | "outputs": [], 58 | "source": [ 59 | "async def create_vector_store(client: AIProjectClient) -> tuple[str, HostedVectorStoreContent]:\n", 60 | " \"\"\"Create a vector store with sample documents.\"\"\"\n", 61 | " file_path = '../../files/demo.md'\n", 62 | " file = await client.agents.files.upload_and_poll(file_path=file_path, purpose=\"assistants\")\n", 63 | " print(f\"Uploaded file, file ID: {file.id}\")\n", 64 | "\n", 65 | "\n", 66 | " vector_store = await client.agents.vector_stores.create_and_poll(file_ids=[file.id], name=\"graph_knowledge_base\")\n", 67 | "\n", 68 | " print(f\"Created vector store, ID: {vector_store.id}\")\n", 69 | "\n", 70 | "\n", 71 | " return file.id, HostedVectorStoreContent(vector_store_id=vector_store.id)" 72 | ] 73 | }, 74 | { 75 | "cell_type": "code", 76 | "execution_count": 5, 77 | "id": "0aef5316", 78 | "metadata": {}, 79 | "outputs": [ 80 | { 81 | "name": "stdout", 82 | "output_type": "stream", 83 | "text": [ 84 | "Uploaded file, file ID: assistant-KTUZiULQcLW8KZw5dCUKBy\n", 85 | "Created vector store, ID: vs_n0I71Ytm0LgM4sfYPilDh2GH\n", 86 | "Agent created. You can now ask questions about the uploaded document.\n", 87 | "GraphRAG is an AI-based content interpretation and search capability. It uses large language models (LLMs) to parse data and create a knowledge graph, which it then uses to answer user questions about a user-provided private dataset.\n", 88 | "\n", 89 | "GraphRAG can connect information across large volumes of data and use those connections to answer complex questions that are difficult or impossible to answer with traditional keyword or vector-based searches. It can handle questions that span many documents and even thematic questions, such as identifying top themes in a dataset.\n", 90 | "\n", 91 | "The intended use of GraphRAG includes supporting critical information discovery and analysis, especially where information spans many documents, is noisy or mixed with misinformation, or where questions are abstract or thematic. It is designed for use by trained users applying responsible analytic and critical reasoning approaches and is typically deployed with domain-specific text corpora.\n", 92 | "\n", 93 | "GraphRAG has been evaluated for accuracy, transparency, resilience to prompt and data corpus injection attacks, and low hallucination rates, among other factors. The system depends on well-constructed indexing for effective operation and is best used when users have domain expertise to critically analyze and verify answers.\n", 94 | "\n", 95 | "For responsible use, human analysis and verification of the outputs are recommended, and users should ensure data privacy where applicable. GraphRAG works best on natural language text data that is entity-rich, focusing on people, places, organizations, or objects.\n", 96 | "\n", 97 | "This summary is based on detailed documentation about GraphRAG that covers its capabilities, evaluation, limitations, and operational factors for responsible use【4:0†demo.md】【4:1†demo.md】." 98 | ] 99 | } 100 | ], 101 | "source": [ 102 | "async with (\n", 103 | " AzureCliCredential() as credential,\n", 104 | " AIProjectClient(endpoint=os.environ[\"AZURE_AI_PROJECT_ENDPOINT\"], credential=credential) as client,\n", 105 | " ):\n", 106 | "\n", 107 | " file_id, vector_store = await create_vector_store(client)\n", 108 | " file_search = FileSearchTool(vector_store_ids=[vector_store.vector_store_id])\n", 109 | " created_agent = await client.agents.create_agent(\n", 110 | " model=os.environ[\"AZURE_AI_MODEL_DEPLOYMENT_NAME\"], \n", 111 | " name=\"PythonRAGAgent\",\n", 112 | " instructions=\"\"\"\n", 113 | " You are an AI assistant designed to answer user questions using only the information retrieved from the provided document(s).\n", 114 | "\n", 115 | " - If a user's question cannot be answered using the retrieved context, **you must clearly respond**: \n", 116 | " \"I'm sorry, but the uploaded document does not contain the necessary information to answer that question.\"\n", 117 | " - Do not answer from general knowledge or reasoning. Do not make assumptions or generate hypothetical explanations.\n", 118 | " - Do not provide definitions, tutorials, or commentary that is not explicitly grounded in the content of the uploaded file(s).\n", 119 | " - If a user asks a question like \"What is a Neural Network?\", and this is not discussed in the uploaded document, respond as instructed above.\n", 120 | " - For questions that do have relevant content in the document (e.g., Contoso's travel insurance coverage), respond accurately, and cite the document explicitly.\n", 121 | "\n", 122 | " You must behave as if you have no external knowledge beyond what is retrieved from the uploaded document.\n", 123 | " \"\"\",\n", 124 | " tools = file_search.definitions,\n", 125 | " tool_resources= file_search.resources\n", 126 | " )\n", 127 | " chat_client=AzureAIAgentClient(project_client=client, agent_id=created_agent.id)\n", 128 | "\n", 129 | "\n", 130 | " async with ChatAgent(\n", 131 | " # passing in the client is optional here, so if you take the agent_id from the portal\n", 132 | " # you can use it directly without the two lines above.\n", 133 | " chat_client=chat_client,\n", 134 | " ) as agent:\n", 135 | " \n", 136 | "\n", 137 | " print(\"Agent created. You can now ask questions about the uploaded document.\")\n", 138 | "\n", 139 | " query = \"What's GraphRAG?\"\n", 140 | "\n", 141 | " async for chunk in agent.run_stream(query, tools=HostedFileSearchTool(inputs=vector_store)):\n", 142 | "\n", 143 | " if chunk.text:\n", 144 | " print(chunk.text, end=\"\", flush=True)" 145 | ] 146 | } 147 | ], 148 | "metadata": { 149 | "kernelspec": { 150 | "display_name": "agentenv", 151 | "language": "python", 152 | "name": "python3" 153 | }, 154 | "language_info": { 155 | "codemirror_mode": { 156 | "name": "ipython", 157 | "version": 3 158 | }, 159 | "file_extension": ".py", 160 | "mimetype": "text/x-python", 161 | "name": "python", 162 | "nbconvert_exporter": "python", 163 | "pygments_lexer": "ipython3", 164 | "version": "3.12.10" 165 | }, 166 | "polyglot_notebook": { 167 | "kernelInfo": { 168 | "defaultKernelName": "csharp", 169 | "items": [ 170 | { 171 | "aliases": [], 172 | "name": "csharp" 173 | } 174 | ] 175 | } 176 | } 177 | }, 178 | "nbformat": 4, 179 | "nbformat_minor": 5 180 | } 181 | -------------------------------------------------------------------------------- /05.Providers/README.md: -------------------------------------------------------------------------------- 1 | # Tutorial: Creating MCP and A2A Applications in Agent Framework 2 | 3 | This tutorial will guide you through the concepts and practical examples of creating applications using the Model Context Protocol (MCP) and Agent-to-Agent (A2A) patterns within the Agent Framework. We will use the provided .NET and Python code samples to illustrate how to connect an agent to an external toolset via MCP. 4 | 5 | ## MCP (Model Context Protocol) 6 | 7 | ### Concept of MCP 8 | 9 | The Model Context Protocol (MCP) is a standard that allows an AI agent to discover and interact with external tools and services. Think of it as a universal API for agents. Instead of custom-building a connection for every tool, an agent can connect to an MCP endpoint. This endpoint exposes a list of available tools, which the agent can then intelligently use to fulfill user requests. 10 | 11 | In the provided examples, the agent connects to the Microsoft Learn MCP endpoint (`https://learn.microsoft.com/api/mcp`), which gives it the ability to search and retrieve information directly from Microsoft's documentation. This is a powerful way to ground the agent with up-to-date, specific information. 12 | 13 | ### Application Scenarios for MCP 14 | 15 | The primary use case for MCP is **Retrieval-Augmented Generation (RAG)**. This pattern enhances an agent's capabilities by allowing it to fetch information from external knowledge bases before generating a response. 16 | 17 | Common scenarios include: 18 | 19 | * **Answering questions about documentation:** An agent can provide accurate answers about proprietary software, internal company documents, or rapidly changing product information by querying a documentation server through MCP. 20 | * **Task automation:** An agent can use MCP-exposed tools to perform actions, such as looking up product inventory, checking a user's order status, or filing a support ticket. 21 | * **Dynamic tool usage:** As new tools are added to the MCP server, the agent can automatically discover and use them without needing to be reprogrammed. 22 | 23 | ### MCP Examples 24 | 25 | Here are practical examples in both .NET and Python that demonstrate how to create an agent that uses an MCP tool. 26 | 27 | #### 1\. .NET Example 28 | 29 | This C\# code demonstrates how to create a persistent agent in Azure AI Foundry, equip it with an MCP tool definition, and use it to answer a query. 30 | 31 | **`Program.cs`** 32 | 33 | ```csharp 34 | using ModelContextProtocol.Client; 35 | 36 | using System; 37 | using System.Linq; 38 | using Azure.AI.Agents.Persistent; 39 | using Azure.Identity; 40 | using Microsoft.Extensions.AI; 41 | using Microsoft.Agents.AI; 42 | 43 | using DotNetEnv; 44 | 45 | // Load environment variables 46 | Env.Load("./.env"); 47 | 48 | var azure_foundry_endpoint = Environment.GetEnvironmentVariable("FOUNDRY_PROJECT_ENDPOINT") ?? throw new InvalidOperationException("AZURE_FOUNDRY_PROJECT_ENDPOINT is not set."); 49 | var azure_foundry_model_id = Environment.GetEnvironmentVariable("FOUNDRY_MODEL_DEPLOYMENT_NAME") ?? "gpt-4.1-mini"; 50 | 51 | // Connect to the Persistent Agents Client 52 | var persistentAgentsClient = new PersistentAgentsClient(azure_foundry_endpoint, new AzureCliCredential()); 53 | 54 | // Define the MCP tool endpoint 55 | MCPToolDefinition mcpTool = new("mslearnmcp", "https://learn.microsoft.com/api/mcp"); 56 | string searchMSLearn = "searchmslearn"; 57 | mcpTool.AllowedTools.Add(searchMSLearn); 58 | 59 | // Create the agent definition 60 | var agentModel = await persistentAgentsClient.Administration.CreateAgentAsync( 61 | model:azure_foundry_model_id, 62 | name: "MSLearnMCPAgent", 63 | instructions: "You are a helpful agent that can use MCP tools to assist users. Use the available MCP tools to answer questions and perform tasks.", 64 | tools: [mcpTool] 65 | ); 66 | 67 | // Get the created agent instance 68 | AIAgent agent = await persistentAgentsClient.GetAIAgentAsync(agentModel.Value.Id); 69 | 70 | Console.WriteLine($"Created agent with ID: {agent.Id}"); 71 | 72 | // Create an MCP client to inspect the tools on the server 73 | IMcpClient mcpClient = await McpClientFactory.CreateAsync( 74 | new SseClientTransport(new SseClientTransportOptions() 75 | { 76 | Endpoint = new Uri("https://learn.microsoft.com/api/mcp") 77 | }) 78 | ); 79 | 80 | // List the available tools from the MCP endpoint 81 | IList tools = await mcpClient.ListToolsAsync(); 82 | 83 | Console.WriteLine("Available tools:"); 84 | foreach (var tool in tools) 85 | { 86 | Console.WriteLine($" {tool.Name}: {tool.Description}"); 87 | } 88 | 89 | // Create a new conversation thread 90 | AgentThread thread = agent.GetNewThread(); 91 | 92 | ChatMessage userMessage = new ChatMessage(ChatRole.User, "What is Foundry Local?"); 93 | 94 | 95 | var chatOptions = new ChatClientAgentRunOptions 96 | { 97 | ChatOptions = new ChatOptions 98 | { 99 | Tools = [.. tools] 100 | } 101 | }; 102 | 103 | // Run the agent with the user's query 104 | Console.WriteLine(await agent.RunAsync("What's Foundry Local?", thread, chatOptions)); 105 | ``` 106 | 107 | #### 2\. Python Example 108 | 109 | This Python example, designed for a Jupyter Notebook, shows two ways to provide the MCP tool to an agent: either at runtime or during its creation. 110 | 111 | **`01-python-agent-framework-aifoundry-mcp.ipynb`** 112 | 113 | **Cell 1: Imports** 114 | 115 | ```python 116 | from agent_framework import ChatAgent, MCPStreamableHTTPTool 117 | from agent_framework.azure import AzureAIAgentClient 118 | from azure.identity.aio import AzureCliCredential 119 | ``` 120 | 121 | **Cell 2: Method 1 - Pass the Tool During the `run` Call** 122 | In this approach, the MCP tool is created as a context manager and passed to the agent when the `run` method is invoked. This is useful for temporary or session-based tool usage. 123 | 124 | ```python 125 | async with ( 126 | AzureCliCredential() as credential, 127 | MCPStreamableHTTPTool( 128 | name="Microsoft Learn MCP", 129 | url="https://learn.microsoft.com/api/mcp", 130 | ) as mcp_server, 131 | ChatAgent( 132 | chat_client=AzureAIAgentClient(async_credential=credential), 133 | name="DocsAgent", 134 | instructions="You are a helpful assistant that can help with microsoft documentation questions.", 135 | ) as agent, 136 | ): 137 | query = "What is Microsoft Semantic Kernel?" 138 | print(f"User: {query}") 139 | result = await agent.run(query, tools=mcp_server) 140 | print(f"{agent.name}: {result}\n") 141 | ``` 142 | 143 | **Cell 3: Method 2 - Define the Tool at Agent Creation** 144 | Here, the `MCPStreamableHTTPTool` is passed directly into the `create_agent` method. This makes the tool a permanent part of the agent's definition, available for all subsequent calls without needing to be passed in again. 145 | 146 | ```python 147 | async with ( 148 | AzureCliCredential() as credential, 149 | AzureAIAgentClient(async_credential=credential).create_agent( 150 | name="DocsAgent", 151 | instructions="You are a helpful assistant that can help with microsoft documentation questions.", 152 | tools=MCPStreamableHTTPTool( # Tool is defined here 153 | name="Microsoft Learn MCP", 154 | url="https://learn.microsoft.com/api/mcp", 155 | ), 156 | ) as agent, 157 | ): 158 | query = "What is Microsoft Semantic Kernel?" 159 | print(f"User: {query}") 160 | result = await agent.run(query) 161 | print(f"{agent.name}: {result}\n") 162 | ``` 163 | 164 | ## A2A (Agent-to-Agent) 165 | 166 | While the provided code files focus on the MCP pattern, the A2A pattern is another core concept in building sophisticated multi-agent systems. 167 | 168 | ### Concept of A2A 169 | 170 | Agent-to-Agent (A2A) communication is a pattern where one AI agent can call upon another AI agent to perform a task or answer a question. This allows developers to build complex systems by composing smaller, specialized agents. Each agent can have its own unique instructions, capabilities, and tools. 171 | 172 | For example, a "Travel Planner" agent could delegate tasks by calling a "Flight Booker" agent and a "Hotel Reservation" agent. The Travel Planner orchestrates the overall goal, while the specialized agents handle their specific domains. 173 | 174 | ### Application Scenarios for A2A 175 | 176 | * **Task Delegation and Orchestration:** A primary "manager" agent can break down a complex user request into sub-tasks and assign them to different "worker" agents. 177 | * **Specialized Expertise:** A generalist agent can consult a specialist agent for deep knowledge in a specific area (e.g., a "General Support" agent calling a "Billing Expert" agent). 178 | 179 | * **Collaborative Problem-Solving:** Multiple agents can work together, sharing information and intermediate results to solve a problem that would be too complex for a single agent. 180 | -------------------------------------------------------------------------------- /05.Providers/code_samples/dotNET/01-dotnet-agent-framework-aifoundry-mcp/AgentMCP.Console/.env.examples: -------------------------------------------------------------------------------- 1 | AZURE_AI_PROJECT_ENDPOINT ="AI Foundry Project Project Endpoint" 2 | AZURE_AI_MODEL_DEPLOYMENT_NAME ="gpt-4.1-nano" -------------------------------------------------------------------------------- /05.Providers/code_samples/dotNET/01-dotnet-agent-framework-aifoundry-mcp/AgentMCP.Console/AgentMCP.Console.csproj: -------------------------------------------------------------------------------- 1 |  2 | 3 | 4 | Exe 5 | net9.0 6 | enable 7 | enable 8 | 9 | 10 | 11 | 12 | 13 | 14 | 15 | 16 | 17 | 18 | 19 | 20 | 21 | -------------------------------------------------------------------------------- /05.Providers/code_samples/dotNET/01-dotnet-agent-framework-aifoundry-mcp/AgentMCP.Console/AgentMCP.Console.sln: -------------------------------------------------------------------------------- 1 | Microsoft Visual Studio Solution File, Format Version 12.00 2 | # Visual Studio Version 17 3 | VisualStudioVersion = 17.5.2.0 4 | MinimumVisualStudioVersion = 10.0.40219.1 5 | Project("{FAE04EC0-301F-11D3-BF4B-00C04F79EFBC}") = "AgentMCP.Console", "AgentMCP.Console.csproj", "{B5073A2B-54BA-A252-95EC-88ED3B73B7BC}" 6 | EndProject 7 | Global 8 | GlobalSection(SolutionConfigurationPlatforms) = preSolution 9 | Debug|Any CPU = Debug|Any CPU 10 | Release|Any CPU = Release|Any CPU 11 | EndGlobalSection 12 | GlobalSection(ProjectConfigurationPlatforms) = postSolution 13 | {B5073A2B-54BA-A252-95EC-88ED3B73B7BC}.Debug|Any CPU.ActiveCfg = Debug|Any CPU 14 | {B5073A2B-54BA-A252-95EC-88ED3B73B7BC}.Debug|Any CPU.Build.0 = Debug|Any CPU 15 | {B5073A2B-54BA-A252-95EC-88ED3B73B7BC}.Release|Any CPU.ActiveCfg = Release|Any CPU 16 | {B5073A2B-54BA-A252-95EC-88ED3B73B7BC}.Release|Any CPU.Build.0 = Release|Any CPU 17 | EndGlobalSection 18 | GlobalSection(SolutionProperties) = preSolution 19 | HideSolutionNode = FALSE 20 | EndGlobalSection 21 | GlobalSection(ExtensibilityGlobals) = postSolution 22 | SolutionGuid = {72C120BC-E041-42DE-B6DF-21BEC363B7FA} 23 | EndGlobalSection 24 | EndGlobal 25 | -------------------------------------------------------------------------------- /05.Providers/code_samples/dotNET/01-dotnet-agent-framework-aifoundry-mcp/AgentMCP.Console/Program.cs: -------------------------------------------------------------------------------- 1 | using ModelContextProtocol.Client; 2 | 3 | using System; 4 | using System.Linq; 5 | using Azure.AI.Agents.Persistent; 6 | using Azure.Identity; 7 | using Microsoft.Extensions.AI; 8 | using Microsoft.Agents.AI; 9 | 10 | using DotNetEnv; 11 | 12 | Env.Load("./.env"); 13 | 14 | var azure_foundry_endpoint = Environment.GetEnvironmentVariable("AZURE_AI_PROJECT_ENDPOINT") ?? throw new InvalidOperationException("AZURE_FOUNDRY_PROJECT_ENDPOINT is not set."); 15 | var azure_foundry_model_id = Environment.GetEnvironmentVariable("AZURE_AI_MODEL_DEPLOYMENT_NAME") ?? "gpt-4.1-mini"; 16 | 17 | var persistentAgentsClient = new PersistentAgentsClient(azure_foundry_endpoint, new AzureCliCredential()); 18 | 19 | MCPToolDefinition mcpTool = new("mslearnmcp", "https://learn.microsoft.com/api/mcp"); 20 | string searchMSLearn = "searchmslearn"; 21 | mcpTool.AllowedTools.Add(searchMSLearn); 22 | 23 | var agentModel = await persistentAgentsClient.Administration.CreateAgentAsync( 24 | model:azure_foundry_model_id, 25 | name: "MSLearnMCPAgent", 26 | instructions: "You are a helpful agent that can use MCP tools to assist users. Use the available MCP tools to answer questions and perform tasks.", 27 | tools: [mcpTool] 28 | ); 29 | 30 | AIAgent agent = await persistentAgentsClient.GetAIAgentAsync(agentModel.Value.Id); 31 | 32 | Console.WriteLine($"Created agent with ID: {agent.Id}"); 33 | 34 | IMcpClient mcpClient = await McpClientFactory.CreateAsync( 35 | new SseClientTransport(new SseClientTransportOptions() 36 | { 37 | Endpoint = new Uri("https://learn.microsoft.com/api/mcp") 38 | }) 39 | ); 40 | 41 | IList tools = await mcpClient.ListToolsAsync(); 42 | 43 | Console.WriteLine("Available tools:"); 44 | foreach (var tool in tools) 45 | { 46 | Console.WriteLine($" {tool.Name}: {tool.Description}"); 47 | } 48 | 49 | AgentThread thread = agent.GetNewThread(); 50 | 51 | ChatMessage userMessage = new ChatMessage(ChatRole.User, "What is Foundry Local?"); 52 | 53 | 54 | var chatOptions = new ChatClientAgentRunOptions 55 | { 56 | ChatOptions = new ChatOptions 57 | { 58 | Tools = [.. tools] 59 | } 60 | }; 61 | 62 | Console.WriteLine(await agent.RunAsync("What's Foundry Local?", thread, chatOptions)); 63 | -------------------------------------------------------------------------------- /05.Providers/code_samples/dotNET/02-dotnet-agent-framework-aifoundry-a2a/README.md: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/microsoft/Agent-Framework-Samples/3d800067ed3290af35bb1f7bb958779193e4a545/05.Providers/code_samples/dotNET/02-dotnet-agent-framework-aifoundry-a2a/README.md -------------------------------------------------------------------------------- /06.RAGs/README.md: -------------------------------------------------------------------------------- 1 | # Tutorial: Building an Agent with RAG Functionality 2 | 3 | This tutorial will guide you through building a Retrieval-Augmented Generation (RAG) agent using both .NET (C\#) and Python with Azure AI. We will cover the basic concepts of RAG and then walk through code examples that demonstrate how to implement an agent that can answer questions based on a provided document. 4 | 5 | ## 1\. Understanding RAG 6 | 7 | ### What is RAG? 8 | 9 | Retrieval-Augmented Generation (RAG) is a technique that enhances the capabilities of Large Language Models (LLMs) by grounding them in external sources of knowledge. It combines two main components: 10 | 11 | * **Retriever:** This component is responsible for searching and retrieving relevant information from a knowledge base (e.g., a collection of documents). 12 | * **Generator:** This is a standard LLM that takes the retrieved information as context and uses it to generate a comprehensive and accurate answer to the user's query. 13 | 14 | By providing the LLM with relevant, up-to-date information, RAG helps to reduce hallucinations (making up facts) and allows the model to answer questions about specific, private data it wasn't trained on. 15 | 16 | ### RAG Application Scenarios 17 | 18 | RAG is highly effective in various scenarios, including: 19 | 20 | * **Customer Support:** Building chatbots that can answer customer questions based on product manuals, FAQs, and internal knowledge bases. 21 | * **Enterprise Search:** Creating intelligent search systems that allow employees to ask questions and get precise answers from a vast repository of internal documents. 22 | * **Content Discovery:** Assisting users in finding and understanding information within large, complex documents like research papers, legal contracts, or financial reports. 23 | 24 | ### Creating a RAG Agent with File Search 25 | 26 | The fundamental workflow for creating a RAG agent with a file search capability involves these steps: 27 | 28 | 1. **Upload Knowledge:** Provide the documents that the agent will use as its knowledge base. 29 | 2. **Create a Vector Store:** The uploaded files are indexed into a specialized database called a vector store. This allows for efficient semantic searching to find the most relevant document snippets for a given query. 30 | 3. **Define the Agent:** Create an agent and equip it with a `File Search` tool. 31 | 4. **Link Resources:** Connect the `File Search` tool to the vector store you created. 32 | 5. **Instruct the Agent:** Provide a clear system prompt that instructs the agent on how to behave—for example, telling it to only use the provided files to answer questions and to admit when it doesn't know the answer. 33 | 6. **Query the Agent:** Start a conversation and ask questions. The agent will automatically use the file search tool to retrieve context before generating its response. 34 | 35 | ----- 36 | 37 | ## 2\. Code Examples 38 | 39 | The following examples demonstrate how to build a RAG agent using the Azure AI Agent Framework in both .NET and Python. Both examples will use a file named `demo.md` as the knowledge source. 40 | 41 | ### .NET (C\#) Example 42 | 43 | This example uses a C\# Polyglot Notebook to create and interact with the RAG agent. 44 | 45 | **Step 1: Setup and Dependencies** 46 | 47 | First, we need to reference the necessary NuGet packages for the Azure AI Agent Framework, Azure identity, and environment variable management. 48 | 49 | ```csharp 50 | #r "nuget: Microsoft.Extensions.AI, 9.9.0" 51 | #r "nuget: Azure.AI.Agents.Persistent, 1.2.0-beta.5" 52 | #r "nuget: Azure.Identity, 1.15.0" 53 | #r "nuget: System.Linq.Async, 6.0.3" 54 | #r "nuget: DotNetEnv, 3.1.1" 55 | ``` 56 | 57 | **Step 2: Load Configuration and Initialize Client** 58 | 59 | Load environment variables from a `.env` file and create the `PersistentAgentsClient` using your Azure credentials. 60 | 61 | ```csharp 62 | using DotNetEnv; 63 | using Azure.AI.Agents.Persistent; 64 | using Azure.Identity; 65 | 66 | // Load environment variables 67 | Env.Load("../../../.env"); 68 | var azure_foundry_endpoint = Environment.GetEnvironmentVariable("FOUNDRY_PROJECT_ENDPOINT") ?? throw new InvalidOperationException("AZURE_FOUNDRY_PROJECT_ENDPOINT is not set."); 69 | var azure_foundry_model_id = Environment.GetEnvironmentVariable("FOUNDRY_MODEL_DEPLOYMENT_NAME") ?? "gpt-4.1-mini"; 70 | 71 | // Initialize the client 72 | var persistentAgentsClient = new PersistentAgentsClient(azure_foundry_endpoint, new AzureCliCredential()); 73 | ``` 74 | 75 | **Step 3: Upload File and Create Vector Store** 76 | 77 | Upload the `demo.md` file and use its ID to create a vector store. This makes the file's content searchable. 78 | 79 | ```csharp 80 | // Path to the local file 81 | string pdfPath = "../files/demo.md"; 82 | var pdfStream = await Task.Run(() => File.OpenRead(pdfPath)); 83 | 84 | // Upload the file 85 | PersistentAgentFileInfo fileInfo = await persistentAgentsClient.Files.UploadFileAsync(pdfStream, PersistentAgentFilePurpose.Agents, "demo.md"); 86 | 87 | // Create the vector store 88 | PersistentAgentsVectorStore fileStore = 89 | await persistentAgentsClient.VectorStores.CreateVectorStoreAsync( 90 | [fileInfo.Id], 91 | metadata: new Dictionary() { { "agentkey", bool.TrueString } }); 92 | ``` 93 | 94 | **Step 4: Create the RAG Agent** 95 | 96 | Define a new persistent agent. Provide it with instructions, equip it with a `FileSearchToolDefinition`, and link the tool's resources to the ID of the vector store created in the previous step. 97 | 98 | ```csharp 99 | PersistentAgent agentModel = await persistentAgentsClient.Administration.CreateAgentAsync( 100 | azure_foundry_model_id, 101 | name: "DotNetRAGAgent", 102 | tools: [new FileSearchToolDefinition()], 103 | instructions: """ 104 | You are a helpful assistant that helps people find information in a set of files. If you can't find the answer in the files, just say you don't know. Do not make up an answer. 105 | """, 106 | toolResources: new() 107 | { 108 | FileSearch = new() 109 | { 110 | VectorStoreIds = { fileStore.Id }, 111 | } 112 | }); 113 | ``` 114 | 115 | **Step 5: Interact with the Agent** 116 | 117 | Get the agent, start a new conversation thread, and ask a question. The agent will use the file search tool to find the answer within `demo.md`. 118 | 119 | ```csharp 120 | AIAgent agent = await persistentAgentsClient.GetAIAgentAsync(agentModel.Id); 121 | AgentThread thread = agent.GetNewThread(); 122 | 123 | Console.WriteLine(await agent.RunAsync("What's graphrag?", thread)); 124 | ``` 125 | 126 | **Output:** 127 | 128 | ``` 129 | GraphRAG is an AI-based content interpretation and search capability that utilizes large language models (LLMs) to parse data and create a knowledge graph. It enables the connection of information across large volumes of data, allowing it to answer complex questions that span multiple documents or that require thematic understanding. Its primary use is to support critical information discovery and analysis, especially for data that is noisy, fragmented, or involves misinformation. GraphRAG is intended for use by trained domain experts who can verify and interpret its outputs, making it suitable for specialized datasets and complex inquiries【4:0†demo.md】. 130 | ``` 131 | 132 | ### Python Example 133 | 134 | This example uses a Python script to perform the same actions. 135 | 136 | **Step 1: Setup and Dependencies** 137 | 138 | Import the necessary libraries for the Azure AI Agent Framework and handling environment variables. 139 | 140 | ```python 141 | import os 142 | from azure.ai.agents.models import FilePurpose,VectorStore,FileSearchTool 143 | from azure.ai.projects.aio import AIProjectClient 144 | from azure.identity.aio import AzureCliCredential 145 | from dotenv import load_dotenv 146 | from agent_framework import AgentRunResponse,ChatAgent,HostedFileSearchTool,HostedVectorStoreContent 147 | from agent_framework.azure import AzureAIAgentClient 148 | 149 | load_dotenv() 150 | ``` 151 | 152 | **Step 2: Define a Helper for Vector Store Creation** 153 | 154 | This async function encapsulates the logic for uploading a file and creating a vector store from it. 155 | 156 | ```python 157 | async def create_vector_store(client: AzureAIAgentClient) -> tuple[str, VectorStore]: 158 | """Create a vector store with sample documents.""" 159 | file_path = '../files/demo.md' 160 | file = await client.project_client.agents.files.upload_and_poll(file_path=file_path, purpose=FilePurpose.AGENTS) 161 | print(f"Uploaded file, file ID: {file.id}") 162 | 163 | vector_store = await client.project_client.agents.vector_stores.create_and_poll(file_ids=[file.id], name="graph_knowledge_base") 164 | print(f"Created vector store, ID: {vector_store.id}") 165 | 166 | return file.id, vector_store 167 | ``` 168 | 169 | **Step 3: Create and Run the Agent** 170 | 171 | The main execution block initializes the client, creates the vector store, defines the agent with the `FileSearchTool`, and runs a query. Note how the tool definitions and resources are passed during agent creation. 172 | 173 | ```python 174 | async with ( 175 | AzureCliCredential() as credential, 176 | AzureAIAgentClient(async_credential=credential) as chat_client, 177 | ): 178 | file_id, vector_store = await create_vector_store(chat_client) 179 | 180 | file_search = FileSearchTool(vector_store_ids=[vector_store.id]) 181 | 182 | agent = chat_client.create_agent( 183 | name="PythonRAGDemo", 184 | instructions=""" 185 | You are a helpful assistant that helps people find information in a set of files. If you can't find the answer in the files, just say you don't know. Do not make up an answer. 186 | """, 187 | tools=file_search.definitions, # Tools available to the agent 188 | tool_resources=file_search.resources, # Resources for the tool 189 | ) 190 | 191 | print("Agent created. You can now ask questions about the uploaded document.") 192 | 193 | query = "What is GraphRAG?" 194 | # The tool resources must be passed again at runtime 195 | response = await AgentRunResponse.from_agent_response_generator(agent.run_stream(query, tool_resources={"file_search": {"vector_store_ids": [vector_store.id]}})) 196 | print(f"Assistant: {response}") 197 | ``` 198 | 199 | **Output:** 200 | 201 | ``` 202 | Uploaded file, file ID: assistant-CiUQ5xjyFBC7TJyJ331FCF 203 | Created vector store, ID: vs_BEbCqzLGHRRQsP0mMALlV5CV 204 | Agent created. You can now ask questions about the uploaded document. 205 | Assistant: GraphRAG is an AI-based content interpretation and search system that uses large language models to create a knowledge graph from a user-provided dataset. It connects information across large volumes of data to answer complex, thematic, or multi-document questions that are difficult to address through traditional keyword or vector search methods. The system is designed to support critical analysis and discovery, especially in contexts where information is noisy or spread across many sources. It emphasizes transparency, grounded responses, and resilience to injection attacks, although it relies on well-constructed indexing and human oversight for optimal performance【4:0†demo.md】. 206 | ``` -------------------------------------------------------------------------------- /06.RAGs/code_samples/files/demo.md: -------------------------------------------------------------------------------- 1 | # GraphRAG: Responsible AI FAQ 2 | 3 | ## What is GraphRAG? 4 | 5 | GraphRAG is an AI-based content interpretation and search capability. Using LLMs, it parses data to create a knowledge graph and answer user questions about a user-provided private dataset. 6 | 7 | ## What can GraphRAG do? 8 | 9 | GraphRAG is able to connect information across large volumes of information and use these connections to answer questions that are difficult or impossible to answer using keyword and vector-based search mechanisms. Building on the previous question, provide semi-technical, high-level information on how the system offers functionality for various uses. This lets a system using GraphRAG to answer questions where the answers span many documents as well as thematic questions such as “what are the top themes in this dataset?.” 10 | 11 | ## What are GraphRAG’s intended use(s)? 12 | 13 | * GraphRAG is intended to support critical information discovery and analysis use cases where the information required to arrive at a useful insight spans many documents, is noisy, is mixed with mis and/or dis-information, or when the questions users aim to answer are more abstract or thematic than the underlying data can directly answer. 14 | * GraphRAG is designed to be used in settings where users are already trained on responsible analytic approaches and critical reasoning is expected. GraphRAG is capable of providing high degrees of insight on complex information topics, however human analysis by a domain expert of the answers is needed in order to verify and augment GraphRAG’s generated responses. 15 | * GraphRAG is intended to be deployed and used with a domain specific corpus of text data. GraphRAG itself does not collect user data, but users are encouraged to verify data privacy policies of the chosen LLM used to configure GraphRAG. 16 | 17 | ## How was GraphRAG evaluated? What metrics are used to measure performance? 18 | 19 | GraphRAG has been evaluated in multiple ways. The primary concerns are 1) accurate representation of the data set, 2) providing transparency and groundedness of responses, 3) resilience to prompt and data corpus injection attacks, and 4) low hallucination rates. Details on how each of these has been evaluated is outlined below by number. 20 | 21 | 1) Accurate representation of the dataset has been tested by both manual inspection and automated testing against a “gold answer” that is created from randomly selected subsets of a test corpus. 22 | 23 | 2) Transparency and groundedness of responses is tested via automated answer coverage evaluation and human inspection of the underlying context returned. 24 | 25 | 3) We test both user prompt injection attacks (“jailbreaks”) and cross prompt injection attacks (“data attacks”) using manual and semi-automated techniques. 26 | 27 | 4) Hallucination rates are evaluated using claim coverage metrics, manual inspection of answer and source, and adversarial attacks to attempt a forced hallucination through adversarial and exceptionally challenging datasets. 28 | 29 | ## What are the limitations of GraphRAG? How can users minimize the impact of GraphRAG’s limitations when using the system? 30 | 31 | GraphRAG depends on a well-constructed indexing examples. For general applications (e.g. content oriented around people, places, organizations, things, etc.) we provide example indexing prompts. For unique datasets effective indexing can depend on proper identification of domain-specific concepts. 32 | 33 | Indexing is a relatively expensive operation; a best practice to mitigate indexing is to create a small test dataset in the target domain to ensure indexer performance prior to large indexing operations. 34 | 35 | ## What operational factors and settings allow for effective and responsible use of GraphRAG? 36 | 37 | GraphRAG is designed for use by users with domain sophistication and experience working through difficult information challenges. While the approach is generally robust to injection attacks and identifying conflicting sources of information, the system is designed for trusted users. Proper human analysis of responses is important to generate reliable insights, and the provenance of information should be traced to ensure human agreement with the inferences made as part of the answer generation. 38 | 39 | GraphRAG yields the most effective results on natural language text data that is collectively focused on an overall topic or theme, and that is entity rich – entities being people, places, things, or objects that can be uniquely identified. 40 | 41 | While GraphRAG has been evaluated for its resilience to prompt and data corpus injection attacks, and has been probed for specific types of harms, the LLM that the user configures with GraphRAG may produce inappropriate or offensive content, which may make it inappropriate to deploy for sensitive contexts without additional mitigations that are specific to the use case and model. Developers should assess outputs for their context and use available safety classifiers, model specific safety filters and features (such as https://azure.microsoft.com/en-us/products/ai-services/ai-content-safety), or custom solutions appropriate for their use case. -------------------------------------------------------------------------------- /07.Workflow/code_samples/imgs/home.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/microsoft/Agent-Framework-Samples/3d800067ed3290af35bb1f7bb958779193e4a545/07.Workflow/code_samples/imgs/home.png -------------------------------------------------------------------------------- /08.EvaluationAndTracing/README.md: -------------------------------------------------------------------------------- 1 | Of course. Here is a tutorial based on the content of the file. 2 | 3 | *** 4 | 5 | # A Guide to Evaluation and Tracing in the Microsoft Agent Framework 6 | 7 | This tutorial will guide you through the tools available for evaluating, tracing, and debugging your agents within the Microsoft Agent Framework. A key part of developing robust AI agents is understanding their behavior, and these tools provide the necessary insights. 8 | 9 | We will cover two main components: 10 | 11 | 1. **DevUI**: A web-based user interface for real-time visualization and debugging of agent sessions. 12 | 2. **Observability**: How to configure logging to trace the step-by-step execution of your agents. 13 | 14 | ## 1. DevUI: Visualizing Agent Interactions 15 | 16 | The DevUI is a powerful web-based tool designed to give you a clear, real-time view into your agent's inner workings. It helps you visualize the entire interaction flow, from the initial prompt to the final response, including the agent's thought process and any tools it utilizes. 17 | 18 | ### Key Features: 19 | * **Session Visualization**: Track the conversation and agent activities as they happen. 20 | * **Debugging**: Easily inspect the messages, tool calls, and LLM responses to understand why an agent is behaving in a certain way. 21 | * **Interactive Interface**: A user-friendly view that is much easier to parse than raw console logs. 22 | 23 | ### How to Get Started: 24 | To use the DevUI, you typically run a command provided by the agent framework's command-line interface. This command starts a local web server, allowing you to connect and monitor your agent sessions through your browser. 25 | 26 | Sample Code: 27 | 28 | - basic_agent_Workflow : [basic_agent_workflow_devui](./python/basic_agent_workflow_devui/) 29 | 30 | - multi_workflow_ghmodel_devui : [multi_workflow_ghmodel_devui](./python/multi_workflow_ghmodel_devui/) 31 | 32 | For more detailed information and setup instructions, please visit the official DevUI package page: 33 | [https://github.com/microsoft/agent-framework/tree/main/python/packages/devui](https://github.com/microsoft/agent-framework/tree/main/python/packages/devui) 34 | 35 | ## 2. Observability: Logging and Tracing Agent Execution 36 | 37 | While the DevUI is excellent for real-time visual debugging, observability through logging provides a persistent and detailed text-based record of an agent's execution. The framework is built to integrate with standard Python logging, making it easy to capture the data you need for analysis. 38 | 39 | ### What You Can Trace: 40 | * **User Requests**: The initial input that starts the agent's task. 41 | * **Agent's "Thoughts"**: The internal reasoning steps the agent takes. 42 | * **Tool Invocations**: Which tools the agent decides to use and with what parameters. 43 | * **Final Responses**: The output generated by the agent. 44 | 45 | ### How it Works: 46 | The framework automatically emits events during an agent's run. By configuring a logger, you can direct these events to your console or a file. The provided samples demonstrate how to set up this logging to see a sequential trace of the agent's operations, giving you a complete picture for post-execution analysis and debugging. 47 | 48 | To review the sample code and learn how to configure the logger, check out the observability example here: 49 | [tracer_aspire](./python/tracer_aspire/) -------------------------------------------------------------------------------- /08.EvaluationAndTracing/python/basic_agent_workflow_devui/frontdesk_agent/__init__.py: -------------------------------------------------------------------------------- 1 | from .agent import front_desk_agent 2 | 3 | __all__ = ["front_desk_agent"] -------------------------------------------------------------------------------- /08.EvaluationAndTracing/python/basic_agent_workflow_devui/frontdesk_agent/agent.py: -------------------------------------------------------------------------------- 1 | import os 2 | 3 | from agent_framework.openai import OpenAIChatClient 4 | from dotenv import load_dotenv # 📁 Secure configuration loading 5 | 6 | load_dotenv() # 📁 Load environment variables from .env file 7 | 8 | 9 | chat_client = OpenAIChatClient( 10 | base_url=os.environ.get("GITHUB_ENDPOINT"), # 🌐 GitHub Models API endpoint 11 | api_key=os.environ.get("GITHUB_TOKEN"), # 🔑 Authentication token 12 | model_id=os.environ.get("GITHUB_MODEL_ID") # 🎯 Selected AI model 13 | ) 14 | 15 | FRONTDESK_NAME = "FrontDesk" 16 | FRONTDESK_INSTRUCTIONS = """ 17 | You are a Front Desk Travel Agent with ten years of experience and are known for brevity as you deal with many customers. 18 | The goal is to provide the best activities and locations for a traveler to visit. 19 | Only provide a single recommendation per response. 20 | You're laser focused on the goal at hand. 21 | Don't waste time with chit chat. 22 | Consider suggestions when refining an idea. 23 | """ 24 | 25 | 26 | 27 | front_desk_agent = chat_client.create_agent( 28 | instructions=( 29 | FRONTDESK_INSTRUCTIONS 30 | ), 31 | name=FRONTDESK_NAME, 32 | ) -------------------------------------------------------------------------------- /08.EvaluationAndTracing/python/basic_agent_workflow_devui/main.py: -------------------------------------------------------------------------------- 1 | from travelplan_workflow import workflow # 🏗️ The travel plan workflow 2 | 3 | def main(): 4 | """Launch the travel workflow in DevUI.""" 5 | import logging 6 | """Launch the basic orkflow in DevUI.""" 7 | from agent_framework.devui import serve 8 | 9 | # Setup logging 10 | logging.basicConfig(level=logging.INFO, format="%(message)s") 11 | logger = logging.getLogger(__name__) 12 | 13 | logger.info("Starting Basic Workflow") 14 | logger.info("Available at: http://localhost:8090") 15 | logger.info("Entity ID: workflow_basic") 16 | 17 | # Launch server with the workflow 18 | serve(entities=[workflow], port=8090, auto_open=True) 19 | 20 | 21 | if __name__ == "__main__": 22 | main() -------------------------------------------------------------------------------- /08.EvaluationAndTracing/python/basic_agent_workflow_devui/reviewer_agent/__init__.py: -------------------------------------------------------------------------------- 1 | from .agent import reviewer_agent 2 | 3 | __all__ = ["reviewer_agent"] -------------------------------------------------------------------------------- /08.EvaluationAndTracing/python/basic_agent_workflow_devui/reviewer_agent/agent.py: -------------------------------------------------------------------------------- 1 | import os 2 | 3 | from agent_framework.openai import OpenAIChatClient 4 | from dotenv import load_dotenv # 📁 Secure configuration loading 5 | 6 | load_dotenv() # 📁 Load environment variables from .env file 7 | 8 | 9 | chat_client = OpenAIChatClient( 10 | base_url=os.environ.get("GITHUB_ENDPOINT"), # 🌐 GitHub Models API endpoint 11 | api_key=os.environ.get("GITHUB_TOKEN"), # 🔑 Authentication token 12 | model_id=os.environ.get("GITHUB_MODEL_ID") # 🎯 Selected AI model 13 | ) 14 | 15 | REVIEWER_NAME = "Concierge" 16 | REVIEWER_INSTRUCTIONS = """ 17 | You are an are hotel concierge who has opinions about providing the most local and authentic experiences for travelers. 18 | The goal is to determine if the front desk travel agent has recommended the best non-touristy experience for a traveler. 19 | If so, state that it is approved. 20 | If not, provide insight on how to refine the recommendation without using a specific example. 21 | """ 22 | 23 | 24 | 25 | reviewer_agent = chat_client.create_agent( 26 | instructions=( 27 | REVIEWER_INSTRUCTIONS 28 | ), 29 | name=REVIEWER_NAME, 30 | ) -------------------------------------------------------------------------------- /08.EvaluationAndTracing/python/basic_agent_workflow_devui/test_simple.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python3 2 | """Simple test script to debug the serialization issue.""" 3 | 4 | import logging 5 | import os 6 | from dotenv import load_dotenv 7 | from agent_framework.openai import OpenAIChatClient 8 | from agent_framework import WorkflowBuilder 9 | 10 | # Setup logging 11 | logging.basicConfig(level=logging.DEBUG, format="%(asctime)s - %(name)s - %(levelname)s - %(message)s") 12 | logger = logging.getLogger(__name__) 13 | 14 | # Load environment variables 15 | load_dotenv() 16 | 17 | def main(): 18 | """Test workflow creation and serving.""" 19 | try: 20 | # Create chat client 21 | chat_client = OpenAIChatClient( 22 | base_url=os.environ.get("GITHUB_ENDPOINT"), 23 | api_key=os.environ.get("GITHUB_TOKEN"), 24 | model_id=os.environ.get("GITHUB_MODEL_ID") 25 | ) 26 | 27 | # Create simple agents 28 | agent1 = chat_client.create_agent( 29 | instructions="You are a helpful travel agent.", 30 | name="TravelAgent" 31 | ) 32 | 33 | agent2 = chat_client.create_agent( 34 | instructions="You are a reviewer.", 35 | name="Reviewer" 36 | ) 37 | 38 | # Create workflow 39 | workflow = WorkflowBuilder().set_start_executor(agent1).add_edge(agent1, agent2).build() 40 | 41 | logger.info("Workflow created successfully") 42 | 43 | # Try to serve 44 | from agent_framework.devui import serve 45 | logger.info("Starting server...") 46 | serve(entities=[workflow], port=8090, auto_open=False) 47 | 48 | except Exception as e: 49 | logger.error(f"Error: {e}", exc_info=True) 50 | raise 51 | 52 | if __name__ == "__main__": 53 | main() -------------------------------------------------------------------------------- /08.EvaluationAndTracing/python/basic_agent_workflow_devui/travelplan_workflow/__init__.py: -------------------------------------------------------------------------------- 1 | from .workflow import workflow 2 | 3 | __all__ = ["workflow"] -------------------------------------------------------------------------------- /08.EvaluationAndTracing/python/basic_agent_workflow_devui/travelplan_workflow/workflow.py: -------------------------------------------------------------------------------- 1 | from agent_framework import WorkflowBuilder # 🏗️ Workflow orchestration tools 2 | from frontdesk_agent import front_desk_agent # 🧑‍💼 Front Desk Travel Agent 3 | from reviewer_agent import reviewer_agent # 🧑‍💼 Reviewer Agent 4 | 5 | 6 | workflow = WorkflowBuilder().set_start_executor(front_desk_agent).add_edge(front_desk_agent, reviewer_agent).build() -------------------------------------------------------------------------------- /08.EvaluationAndTracing/python/foundry_agent/__init__.py: -------------------------------------------------------------------------------- 1 | # Copyright (c) Microsoft. All rights reserved. 2 | 3 | """Weather agent sample for DevUI testing.""" 4 | 5 | from .agent import agent 6 | 7 | __all__ = ["agent"] 8 | -------------------------------------------------------------------------------- /08.EvaluationAndTracing/python/foundry_agent/agent.py: -------------------------------------------------------------------------------- 1 | # Copyright (c) Microsoft. All rights reserved. 2 | """Foundry-based weather agent for Agent Framework Debug UI. 3 | 4 | This agent uses Azure AI Foundry with Azure CLI authentication. 5 | Make sure to run 'az login' before starting devui. 6 | """ 7 | 8 | import os 9 | from typing import Annotated 10 | 11 | from agent_framework import ChatAgent 12 | from agent_framework.azure import AzureAIAgentClient 13 | from azure.identity.aio import AzureCliCredential 14 | from pydantic import Field 15 | 16 | 17 | def get_weather( 18 | location: Annotated[str, Field(description="The location to get the weather for.")], 19 | ) -> str: 20 | """Get the weather for a given location.""" 21 | conditions = ["sunny", "cloudy", "rainy", "stormy"] 22 | temperature = 22 23 | return f"The weather in {location} is {conditions[0]} with a high of {temperature}°C." 24 | 25 | 26 | def get_forecast( 27 | location: Annotated[str, Field(description="The location to get the forecast for.")], 28 | days: Annotated[int, Field(description="Number of days for forecast")] = 3, 29 | ) -> str: 30 | """Get weather forecast for multiple days.""" 31 | conditions = ["sunny", "cloudy", "rainy", "stormy"] 32 | forecast: list[str] = [] 33 | 34 | for day in range(1, days + 1): 35 | condition = conditions[day % len(conditions)] 36 | temp = 18 + day 37 | forecast.append(f"Day {day}: {condition}, {temp}°C") 38 | 39 | return f"Weather forecast for {location}:\n" + "\n".join(forecast) 40 | 41 | 42 | # Agent instance following Agent Framework conventions 43 | agent = ChatAgent( 44 | name="FoundryWeatherAgent1", 45 | chat_client=AzureAIAgentClient( 46 | project_endpoint=os.environ.get("AZURE_AI_PROJECT_ENDPOINT"), 47 | model_deployment_name=os.environ.get("AZURE_AI_MODEL_DEPLOYMENT_NAME"), 48 | async_credential=AzureCliCredential(), 49 | ), 50 | instructions=""" 51 | You are a weather assistant using Azure AI Foundry models. You can provide 52 | current weather information and forecasts for any location. Always be helpful 53 | and provide detailed weather information when asked. 54 | """, 55 | tools=[get_weather, get_forecast], 56 | ) 57 | 58 | 59 | def main(): 60 | """Launch the Foundry weather agent in DevUI.""" 61 | import logging 62 | 63 | from agent_framework.devui import serve 64 | 65 | # Setup logging 66 | logging.basicConfig(level=logging.INFO, format="%(message)s") 67 | logger = logging.getLogger(__name__) 68 | 69 | logger.info("Starting Foundry Weather Agent") 70 | logger.info("Available at: http://localhost:8090") 71 | logger.info("Entity ID: agent_FoundryWeatherAgent") 72 | logger.info("Note: Make sure 'az login' has been run for authentication") 73 | 74 | # Launch server with the agent 75 | serve(entities=[agent], port=8090, auto_open=True) 76 | 77 | 78 | if __name__ == "__main__": 79 | main() 80 | -------------------------------------------------------------------------------- /08.EvaluationAndTracing/python/multi_workflow_aifoundry_devui/.env.example: -------------------------------------------------------------------------------- 1 | # Azure AI Foundry Configuration 2 | AZURE_AI_PROJECT_ENDPOINT="your-azure-ai-project-endpoint" 3 | AZURE_AI_MODEL_DEPLOYMENT_NAME="your-model-deployment" 4 | 5 | # Bing Search Configuration (for web search tool) 6 | BING_CONNECTION_ID="your-bing-connection-id" 7 | -------------------------------------------------------------------------------- /08.EvaluationAndTracing/python/multi_workflow_aifoundry_devui/__init__.py: -------------------------------------------------------------------------------- 1 | # Multi-agent conditional workflow with Azure AI Foundry and DevUI 2 | -------------------------------------------------------------------------------- /08.EvaluationAndTracing/python/multi_workflow_aifoundry_devui/contentreview_agent/__init__.py: -------------------------------------------------------------------------------- 1 | """Content Reviewer Agent Module""" 2 | 3 | from .agent import agent, ReviewAgent 4 | 5 | __all__ = ["agent", "ReviewAgent"] 6 | -------------------------------------------------------------------------------- /08.EvaluationAndTracing/python/multi_workflow_aifoundry_devui/contentreview_agent/agent.py: -------------------------------------------------------------------------------- 1 | """Content Reviewer Agent - Azure AI Foundry Basic Agent""" 2 | import asyncio 3 | import os 4 | from pathlib import Path 5 | from pydantic import BaseModel 6 | from typing_extensions import Literal 7 | from dotenv import load_dotenv 8 | 9 | from agent_framework import ChatAgent 10 | from agent_framework.azure import AzureAIAgentClient 11 | 12 | from azure.identity.aio import AzureCliCredential 13 | from azure.ai.projects.aio import AIProjectClient 14 | 15 | # Load environment variables from parent directory's .env file 16 | env_path = Path(__file__).parent.parent / ".env" 17 | print(f"🔧 [Reviewer] Loading environment from: {env_path}") 18 | load_dotenv(dotenv_path=env_path) 19 | 20 | # Agent configuration 21 | REVIEWER_NAME = "ContentReviewer" 22 | REVIEWER_INSTRUCTIONS = """ 23 | You are a content reviewer and need to check whether the tutorial's draft content meets the following requirements: 24 | 25 | 1. The draft content less than 200 words, set 'review_result' to 'No' and 'reason' to 'Content is too short'. If the draft content is more than 200 words, set 'review_result' to 'Yes' and 'reason' to 'The content is good'. 26 | 2. set 'draft_content' to the original draft content. 27 | 3. Always return result as JSON with fields 'review_result' ('Yes' or 'No' ) and 'reason' (string) and 'draft_content' (string). 28 | """ 29 | 30 | 31 | class ReviewAgent(BaseModel): 32 | review_result: Literal["Yes", "No"] 33 | reason: str 34 | draft_content: str 35 | 36 | 37 | # _credential = AzureCliCredential() 38 | # _client = AIProjectClient( 39 | # endpoint=os.environ["AZURE_AI_PROJECT_ENDPOINT"], 40 | # credential=_credential 41 | # ) 42 | 43 | 44 | # async def create_reviewer_agent(): 45 | # """ 46 | # Create reviewer agent with Azure AI Foundry 47 | 48 | # Args: 49 | # client: AIProjectClient instance (must remain open during workflow execution) 50 | # """ 51 | # # Create basic agent without special tools 52 | # created_agent =await _client.agents.create_agent( 53 | # model=os.environ["AZURE_AI_MODEL_DEPLOYMENT_NAME"], 54 | # instructions=REVIEWER_INSTRUCTIONS, 55 | # name=REVIEWER_NAME 56 | # ) 57 | 58 | # print(f"✅ [Reviewer] Agent created with ID: {created_agent.id}") 59 | 60 | # # Create chat client 61 | # chat_client = AzureAIAgentClient( 62 | # project_client=_client, 63 | # agent_id=created_agent.id, 64 | # response_format=ReviewAgent 65 | # ) 66 | 67 | # # Create and return the ChatAgent without tools 68 | # return ChatAgent(chat_client=chat_client) 69 | 70 | agent = ChatAgent( 71 | name=REVIEWER_NAME, 72 | chat_client=AzureAIAgentClient( 73 | project_endpoint=os.environ["AZURE_AI_PROJECT_ENDPOINT"], 74 | model_deployment_name=os.environ["AZURE_AI_MODEL_DEPLOYMENT_NAME"], 75 | async_credential=AzureCliCredential(), 76 | response_format=ReviewAgent, 77 | ), 78 | instructions=REVIEWER_INSTRUCTIONS, 79 | ) 80 | 81 | # agent = asyncio.run(create_reviewer_agent()) 82 | -------------------------------------------------------------------------------- /08.EvaluationAndTracing/python/multi_workflow_aifoundry_devui/evangelist_agent/__init__.py: -------------------------------------------------------------------------------- 1 | """Evangelist Agent Module""" 2 | 3 | from .agent import agent, EvangelistAgent 4 | 5 | __all__ = ["agent", "EvangelistAgent"] 6 | -------------------------------------------------------------------------------- /08.EvaluationAndTracing/python/multi_workflow_aifoundry_devui/evangelist_agent/agent.py: -------------------------------------------------------------------------------- 1 | """Evangelist Agent - Azure AI Foundry with Bing Search Tool""" 2 | 3 | import asyncio 4 | import os 5 | from pathlib import Path 6 | from pydantic import BaseModel 7 | from dotenv import load_dotenv 8 | 9 | from azure.ai.agents.models import BingGroundingTool 10 | 11 | 12 | from azure.identity.aio import AzureCliCredential 13 | from azure.ai.projects.aio import AIProjectClient 14 | 15 | from agent_framework import HostedWebSearchTool, ChatAgent 16 | from agent_framework.azure import AzureAIAgentClient 17 | 18 | # Load environment variables from parent directory's .env file 19 | env_path = Path(__file__).parent.parent / ".env" 20 | print(f"🔧 [Evangelist] Loading environment from: {env_path}") 21 | load_dotenv(dotenv_path=env_path) 22 | 23 | # Agent configuration 24 | EVANGELIST_NAME = "Evangelist" 25 | EVANGELIST_INSTRUCTIONS = """ 26 | You are a technology evangelist create a first draft for a technical tutorials. 27 | 1. Each knowledge point in the outline must include a link. Follow the link to access the content related to the knowledge point in the outline. Expand on that content. 28 | 2. Each knowledge point must be explained in detail. 29 | 3. Rewrite the content according to the entry requirements, including the title, outline, and corresponding content. It is not necessary to follow the outline in full order. 30 | 4. The content must be more than 200 words. 31 | 5. Always return JSON with draft_content (string) " 32 | 6. Include draft_content in draft_content" 33 | """ 34 | 35 | 36 | class EvangelistAgent(BaseModel): 37 | """Represents the result of draft content""" 38 | draft_content: str 39 | 40 | 41 | # Configuration for agent creation 42 | BING_CONNECTION_ID = os.environ["BING_CONNECTION_ID"] 43 | # _credential = AzureCliCredential() 44 | # _client = AIProjectClient( 45 | # endpoint=os.environ["AZURE_AI_PROJECT_ENDPOINT"], 46 | # credential=_credential 47 | # ) 48 | # Initialize Bing Grounding tool 49 | bing = BingGroundingTool(connection_id=BING_CONNECTION_ID) 50 | 51 | # async def create_evangelist_agent(): 52 | # """ 53 | # Create evangelist agent with Azure AI Foundry 54 | 55 | # Args: 56 | # client: AIProjectClient instance (must remain open during workflow execution) 57 | # """ 58 | # # Initialize Bing Grounding tool 59 | # bing = BingGroundingTool(connection_id=BING_CONNECTION_ID) 60 | 61 | # # Create agent with Bing Search 62 | # created_agent =await _client.agents.create_agent( 63 | # model=os.environ["AZURE_AI_MODEL_DEPLOYMENT_NAME"], 64 | # instructions=EVANGELIST_INSTRUCTIONS, 65 | # name=EVANGELIST_NAME, 66 | # tools=bing.definitions 67 | # ) 68 | 69 | # print(f"✅ [Evangelist] Agent created with ID: {created_agent.id}") 70 | 71 | # # Create chat client 72 | # chat_client = AzureAIAgentClient( 73 | # project_client=_client, 74 | # agent_id=created_agent.id, 75 | # tools=bing.definitions, 76 | # response_format=EvangelistAgent 77 | # ) 78 | 79 | # # Create and return the ChatAgent with hosted web search tool 80 | # return ChatAgent( 81 | # chat_client=chat_client, 82 | # tools=HostedWebSearchTool() 83 | # ) 84 | 85 | 86 | # Create agent instance at module level (following Agent Framework conventions) 87 | agent = ChatAgent( 88 | name=EVANGELIST_NAME, 89 | chat_client=AzureAIAgentClient( 90 | project_endpoint=os.environ["AZURE_AI_PROJECT_ENDPOINT"], 91 | model_deployment_name=os.environ["AZURE_AI_MODEL_DEPLOYMENT_NAME"], 92 | async_credential=AzureCliCredential(), 93 | tools = bing.definitions, 94 | response_format=EvangelistAgent, 95 | ), 96 | instructions=EVANGELIST_INSTRUCTIONS, 97 | tools=HostedWebSearchTool() 98 | ) 99 | 100 | 101 | 102 | # agent = asyncio.run(create_evangelist_agent()) 103 | -------------------------------------------------------------------------------- /08.EvaluationAndTracing/python/multi_workflow_aifoundry_devui/main.py: -------------------------------------------------------------------------------- 1 | from dotenv import load_dotenv 2 | import os 3 | from workflow import workflow # 🏗️ The content workflow 4 | 5 | 6 | from agent_framework.observability import get_tracer 7 | from opentelemetry.trace import SpanKind 8 | from opentelemetry.trace.span import format_trace_id 9 | 10 | from opentelemetry.exporter.otlp.proto.grpc.trace_exporter import OTLPSpanExporter 11 | from agent_framework.observability import setup_observability 12 | 13 | # Load environment variables first, before importing agents 14 | load_dotenv() 15 | 16 | def main(): 17 | """Launch the content workflow in DevUI.""" 18 | import logging 19 | from agent_framework.devui import serve 20 | 21 | # Setup logging 22 | logging.basicConfig(level=logging.INFO, format="%(message)s") 23 | logger = logging.getLogger(__name__) 24 | 25 | logger.info("Starting Content Workflow") 26 | logger.info("Available at: http://localhost:8090") 27 | logger.info("Entity ID: workflow_content") 28 | 29 | # Launch server with the workflow 30 | serve(entities=[workflow], port=8090, auto_open=True, tracing_enabled=True) 31 | 32 | 33 | 34 | 35 | if __name__ == "__main__": 36 | main() 37 | -------------------------------------------------------------------------------- /08.EvaluationAndTracing/python/multi_workflow_aifoundry_devui/publisher_agent/__init__.py: -------------------------------------------------------------------------------- 1 | """Publisher Agent Module""" 2 | 3 | from .agent import agent, PublisherAgent 4 | 5 | __all__ = ["agent", "PublisherAgent"] 6 | -------------------------------------------------------------------------------- /08.EvaluationAndTracing/python/multi_workflow_aifoundry_devui/publisher_agent/agent.py: -------------------------------------------------------------------------------- 1 | """Publisher Agent - Azure AI Foundry with Code Interpreter Tool""" 2 | 3 | import asyncio 4 | import os 5 | from pathlib import Path 6 | from pydantic import BaseModel 7 | from dotenv import load_dotenv 8 | 9 | from azure.ai.agents.models import CodeInterpreterTool 10 | 11 | 12 | from azure.identity.aio import AzureCliCredential 13 | from azure.ai.projects.aio import AIProjectClient 14 | 15 | from agent_framework import HostedCodeInterpreterTool, ChatAgent 16 | from agent_framework.azure import AzureAIAgentClient 17 | 18 | # Load environment variables from parent directory's .env file 19 | env_path = Path(__file__).parent.parent / ".env" 20 | print(f"🔧 [Publisher] Loading environment from: {env_path}") 21 | load_dotenv(dotenv_path=env_path) 22 | 23 | # Agent configuration 24 | PUBLISHER_NAME = "Publisher" 25 | PUBLISHER_INSTRUCTIONS = """ 26 | You are the content publisher ,run code to save the tutorial's draft content as a Markdown file. Saved file's name is marked with current date and time, such as yearmonthdayhourminsec. Note that if it is 1-9, you need to add 0, such as 20240101123045.md. 27 | set 'file_path' to save path .Always return result as JSON with fields 'file_path' (string ) 28 | """ 29 | 30 | 31 | class PublisherAgent(BaseModel): 32 | file_path: str 33 | 34 | 35 | # _credential = AzureCliCredential() 36 | # _client = AIProjectClient( 37 | # endpoint=os.environ["AZURE_AI_PROJECT_ENDPOINT"], 38 | # credential=_credential 39 | # ) 40 | 41 | 42 | # async def create_publisher_agent(): 43 | # """ 44 | # Create publisher agent with Azure AI Foundry 45 | 46 | # Args: 47 | # client: AIProjectClient instance (must remain open during workflow execution) 48 | # """ 49 | # # Initialize Code Interpreter tool 50 | # code_interpreter = CodeInterpreterTool() 51 | 52 | # # Create agent with Code Interpreter 53 | # created_agent = await _client.agents.create_agent( 54 | # model=os.environ["AZURE_AI_MODEL_DEPLOYMENT_NAME"], 55 | # instructions=PUBLISHER_INSTRUCTIONS, 56 | # name=PUBLISHER_NAME, 57 | # tools=code_interpreter.definitions 58 | # ) 59 | 60 | # print(f"✅ [Publisher] Agent created with ID: {created_agent.id}") 61 | 62 | # # Create chat client 63 | # chat_client = AzureAIAgentClient( 64 | # project_client=_client, 65 | # agent_id=created_agent.id, 66 | # response_format=PublisherAgent 67 | # ) 68 | 69 | # # Create and return the ChatAgent with hosted code interpreter tool 70 | # return ChatAgent( 71 | # chat_client=chat_client, 72 | # tools=HostedCodeInterpreterTool() 73 | # ) 74 | # Initialize Code Interpreter tool 75 | code_interpreter = CodeInterpreterTool() 76 | 77 | agent = ChatAgent( 78 | name="Publisher", 79 | chat_client=AzureAIAgentClient( 80 | project_endpoint=os.environ["AZURE_AI_PROJECT_ENDPOINT"], 81 | model_deployment_name=os.environ["AZURE_AI_MODEL_DEPLOYMENT_NAME"], 82 | async_credential=AzureCliCredential(), 83 | response_format=PublisherAgent, 84 | tools=code_interpreter.definitions 85 | ), 86 | instructions=""" 87 | You are the content publisher, run code to save the tutorial's draft content as a Markdown file. 88 | Saved file's name is marked with current date and time, such as yearmonthdayhourminsec. 89 | Note that if it is 1-9, you need to add 0, such as 20240101123045.md. 90 | set 'file_path' to save path. Always return result as JSON with fields 'file_path' (string) 91 | """, 92 | tools=HostedCodeInterpreterTool(), 93 | ) 94 | 95 | 96 | # agent = asyncio.run(create_publisher_agent()) 97 | -------------------------------------------------------------------------------- /08.EvaluationAndTracing/python/multi_workflow_aifoundry_devui/workflow/__init__.py: -------------------------------------------------------------------------------- 1 | from .workflow import workflow 2 | 3 | __all__ = ["workflow"] -------------------------------------------------------------------------------- /08.EvaluationAndTracing/python/multi_workflow_aifoundry_devui/workflow/workflow.py: -------------------------------------------------------------------------------- 1 | """Conditional Workflow for Content Review with Azure AI Foundry Agents""" 2 | 3 | import asyncio 4 | import os 5 | from dataclasses import dataclass 6 | 7 | from azure.identity.aio import AzureCliCredential 8 | from azure.ai.projects.aio import AIProjectClient 9 | 10 | from agent_framework import ( 11 | AgentExecutor, 12 | AgentExecutorRequest, 13 | AgentExecutorResponse, 14 | ChatMessage, 15 | Role, 16 | WorkflowBuilder, 17 | WorkflowContext, 18 | executor, 19 | ) 20 | 21 | from evangelist_agent import agent as evangelist_agent, EvangelistAgent 22 | from contentreview_agent import agent as reviewer_agent, ReviewAgent 23 | from publisher_agent import agent as publisher_agent 24 | 25 | # Module-level storage for lazy initialization 26 | _credential = None 27 | _client = None 28 | _workflow = None 29 | _initialized = False 30 | 31 | 32 | @dataclass 33 | class ReviewResult: 34 | """Data class to hold review results""" 35 | review_result: str 36 | reason: str 37 | draft_content: str 38 | 39 | 40 | @executor(id="to_evangelist_content_result") 41 | async def to_evangelist_content_result( 42 | response: AgentExecutorResponse, 43 | ctx: WorkflowContext[AgentExecutorRequest] 44 | ) -> None: 45 | """Convert evangelist agent response to structured format and forward to reviewer""" 46 | print(f"📝 [Workflow] Raw response from evangelist agent: {response.agent_run_response}") 47 | agent = EvangelistAgent.model_validate_json(response.agent_run_response.text) 48 | user_msg = ChatMessage(Role.USER, text=agent.draft_content) 49 | await ctx.send_message(AgentExecutorRequest(messages=[user_msg], should_respond=True)) 50 | 51 | 52 | @executor(id="to_reviewer_result") 53 | async def to_reviewer_result( 54 | response: AgentExecutorResponse, 55 | ctx: WorkflowContext[ReviewResult] 56 | ) -> None: 57 | """Convert reviewer agent response to structured format""" 58 | print(f"🔍 [Workflow] Raw response from reviewer agent: {response.agent_run_response.text}") 59 | 60 | parsed = ReviewAgent.model_validate_json(response.agent_run_response.text) 61 | await ctx.send_message( 62 | ReviewResult( 63 | review_result=parsed.review_result, 64 | reason=parsed.reason, 65 | draft_content=parsed.draft_content, 66 | ) 67 | ) 68 | 69 | 70 | def select_targets(review: ReviewResult, target_ids: list[str]) -> list[str]: 71 | """ 72 | Select workflow path based on review result 73 | 74 | Args: 75 | review: The review result containing decision 76 | target_ids: List of [handle_review_id, save_draft_id] 77 | 78 | Returns: 79 | List containing the selected target executor ID 80 | """ 81 | handle_review_id, save_draft_id = target_ids 82 | if review.review_result == "Yes": 83 | print(f"✅ [Workflow] Review passed - routing to save_draft") 84 | return [save_draft_id] 85 | else: 86 | print(f"❌ [Workflow] Review failed - routing to handle_review") 87 | return [handle_review_id] 88 | 89 | 90 | @executor(id="handle_review") 91 | async def handle_review(review: ReviewResult, ctx: WorkflowContext[str]) -> None: 92 | """Handle review failures""" 93 | if review.review_result == "No": 94 | message = f"Review failed: {review.reason}, please revise the draft." 95 | print(f"⚠️ [Workflow] {message}") 96 | await ctx.yield_output(message) 97 | else: 98 | await ctx.send_message( 99 | AgentExecutorRequest( 100 | messages=[ChatMessage(Role.USER, text=review.draft_content)], 101 | should_respond=True 102 | ) 103 | ) 104 | 105 | 106 | @executor(id="save_draft") 107 | async def save_draft(review: ReviewResult, ctx: WorkflowContext[AgentExecutorRequest]) -> None: 108 | """Save draft content by sending to publisher agent""" 109 | # Only called for approved drafts by selection_func 110 | await ctx.send_message( 111 | AgentExecutorRequest( 112 | messages=[ChatMessage(Role.USER, text=review.draft_content)], 113 | should_respond=True 114 | ) 115 | ) 116 | 117 | 118 | 119 | 120 | 121 | 122 | # Create agent executors 123 | evangelist_executor = AgentExecutor(evangelist_agent, id="evangelist_agent") 124 | reviewer_executor = AgentExecutor(reviewer_agent, id="reviewer_agent") 125 | publisher_executor = AgentExecutor(publisher_agent, id="publisher_agent") 126 | 127 | # Build the conditional workflow 128 | workflow = ( 129 | WorkflowBuilder() 130 | .set_start_executor(evangelist_executor) 131 | .add_edge(evangelist_executor, to_evangelist_content_result) 132 | .add_edge(to_evangelist_content_result, reviewer_executor) 133 | .add_edge(reviewer_executor, to_reviewer_result) 134 | .add_multi_selection_edge_group( 135 | to_reviewer_result, 136 | [handle_review, save_draft], 137 | selection_func=select_targets, 138 | ) 139 | .add_edge(save_draft, publisher_executor) 140 | .build() 141 | ) 142 | 143 | 144 | # Create the lazy workflow wrapper instance - this is what gets imported 145 | # workflow = _workflow 146 | 147 | -------------------------------------------------------------------------------- /08.EvaluationAndTracing/python/multi_workflow_foundrylocal_devui/main.py: -------------------------------------------------------------------------------- 1 | """DevUI entrypoint for the local Foundry multi-agent workflow. 2 | 3 | This mirrors the structure of `multi_workflow_ghmodel_devui/main.py` but 4 | uses the locally defined planning + research workflow found in 5 | `workflow/workflow.py`. 6 | """ 7 | from agent_framework.devui import serve 8 | from dotenv import load_dotenv 9 | from workflow import workflow 10 | import logging 11 | 12 | # Load .env early so that any provider specific environment variables are present 13 | load_dotenv() 14 | # noqa: E402 (import after dotenv) 15 | 16 | 17 | def main() -> None: 18 | """Launch the planning/research workflow in the DevUI.""" 19 | 20 | 21 | logging.basicConfig(level=logging.INFO, format="%(message)s") 22 | logger = logging.getLogger(__name__) 23 | logger.info("Starting FoundryLocal Planning Workflow") 24 | logger.info("Available at: http://localhost:8091") 25 | logger.info("Entity ID: workflow_foundrylocal_plan_research") 26 | 27 | # Serve the composed workflow 28 | serve(entities=[workflow], port=8091, auto_open=True, tracing_enabled=True) 29 | 30 | 31 | if __name__ == "__main__": # pragma: no cover 32 | main() 33 | 34 | -------------------------------------------------------------------------------- /08.EvaluationAndTracing/python/multi_workflow_foundrylocal_devui/plan_agent/__init__.py: -------------------------------------------------------------------------------- 1 | from .agent import plan_agent 2 | 3 | __all__ = ["plan_agent"] -------------------------------------------------------------------------------- /08.EvaluationAndTracing/python/multi_workflow_foundrylocal_devui/plan_agent/agent.py: -------------------------------------------------------------------------------- 1 | """Planning agent for the FoundryLocal workflow. 2 | 3 | Parallels the evangelist/content generation agent in the GitHub Models 4 | example but is focused purely on generating a structured plan that a 5 | research agent can expand. 6 | """ 7 | 8 | import os 9 | from dotenv import load_dotenv 10 | 11 | load_dotenv() 12 | 13 | try: 14 | from agent_framework.openai import OpenAIChatClient # type: ignore 15 | except ImportError: # pragma: no cover 16 | raise SystemExit("agent_framework package not found. Install project dependencies first.") 17 | 18 | PLAN_AGENT_NAME = "Plan-Agent" 19 | PLAN_AGENT_INSTRUCTIONS = """ 20 | You are my planner, working with me to create 1 sample based on the researcher's findings. 21 | """ 22 | 23 | def _build_client() -> OpenAIChatClient: 24 | base_url = os.environ.get("FOUNDRYLOCAL_ENDPOINT") 25 | model_id = os.environ.get("FOUNDRYLOCAL_MODEL_DEPLOYMENT_NAME") 26 | api_key = "nokey" 27 | if not base_url: 28 | raise RuntimeError("No model endpoint configured. Set FOUNDRYLOCAL_ENDPOINT or GITHUB_ENDPOINT.") 29 | return OpenAIChatClient(base_url=base_url, api_key=api_key, model_id=model_id) 30 | 31 | try: 32 | _client = _build_client() 33 | plan_agent = _client.create_agent( 34 | instructions=PLAN_AGENT_INSTRUCTIONS, 35 | name=PLAN_AGENT_NAME, 36 | ) 37 | except Exception as e: # pragma: no cover 38 | print(f"[plan_agent] initialization warning: {e}") 39 | plan_agent = None # type: ignore 40 | 41 | -------------------------------------------------------------------------------- /08.EvaluationAndTracing/python/multi_workflow_foundrylocal_devui/researcher_agent/__init__.py: -------------------------------------------------------------------------------- 1 | from .agent import researcher_agent 2 | 3 | __all__ = ["researcher_agent"] -------------------------------------------------------------------------------- /08.EvaluationAndTracing/python/multi_workflow_foundrylocal_devui/researcher_agent/agent.py: -------------------------------------------------------------------------------- 1 | """Research / expansion agent for FoundryLocal workflow. 2 | 3 | Consumes the structured plan (topic + outline) from the planning agent 4 | and produces a first full draft. This is analogous to the evangelist 5 | agent in the ghmodel example but with a different upstream signal. 6 | """ 7 | 8 | import os 9 | from dotenv import load_dotenv 10 | 11 | load_dotenv() 12 | 13 | try: 14 | from agent_framework.openai import OpenAIChatClient # type: ignore 15 | except ImportError: # pragma: no cover 16 | raise SystemExit("agent_framework package not found. Install project dependencies first.") 17 | 18 | RESEARCHER_AGENT_NAME = "Researcher-Agent" 19 | RESEARCHER_AGENT_INSTRUCTIONS = "You are my researcher, working with me to analyze some questions" 20 | 21 | def _build_client() -> OpenAIChatClient: 22 | base_url = os.environ.get("FOUNDRYLOCAL_ENDPOINT") 23 | model_id = os.environ.get("FOUNDRYLOCAL_MODEL_DEPLOYMENT_NAME") 24 | api_key = "nokey" 25 | if not base_url: 26 | raise RuntimeError("No model endpoint configured. Set FOUNDRYLOCAL_ENDPOINT or GITHUB_ENDPOINT.") 27 | return OpenAIChatClient(base_url=base_url, api_key=api_key, model_id=model_id) 28 | 29 | try: 30 | _client = _build_client() 31 | researcher_agent = _client.create_agent( 32 | instructions=RESEARCHER_AGENT_INSTRUCTIONS, 33 | name=RESEARCHER_AGENT_NAME, 34 | ) 35 | except Exception as e: # pragma: no cover 36 | print(f"[researcher_agent] initialization warning: {e}") 37 | researcher_agent = None # type: ignore 38 | 39 | -------------------------------------------------------------------------------- /08.EvaluationAndTracing/python/multi_workflow_foundrylocal_devui/workflow/__init__.py: -------------------------------------------------------------------------------- 1 | from .workflow import workflow 2 | 3 | __all__ = ["workflow"] -------------------------------------------------------------------------------- /08.EvaluationAndTracing/python/multi_workflow_foundrylocal_devui/workflow/workflow.py: -------------------------------------------------------------------------------- 1 | """Workflow wiring for FoundryLocal planning + research agents. 2 | 3 | Structure mirrors the pattern used in the GitHub Models multi-step 4 | workflow (`multi_workflow_ghmodel_devui/workflow/workflow.py`). We 5 | compose two agent executors with a transformation executor between them 6 | to map JSON model outputs into the next agent's user message. 7 | """ 8 | 9 | from agent_framework import ( 10 | AgentExecutor, 11 | AgentExecutorRequest, 12 | AgentExecutorResponse, 13 | ChatMessage, 14 | Role, 15 | ConcurrentBuilder, 16 | WorkflowContext, 17 | executor, 18 | ) 19 | 20 | from plan_agent import plan_agent 21 | from researcher_agent import researcher_agent 22 | 23 | 24 | 25 | 26 | planner_executor = AgentExecutor(plan_agent, id="plan_agent") # type: ignore 27 | 28 | research_executor = AgentExecutor(researcher_agent, id="researcher_agent") # type: ignore 29 | 30 | 31 | # Assemble workflow: planner -> transform -> researcher -> output 32 | workflow = ( 33 | ConcurrentBuilder().participants([research_executor, planner_executor]).build() 34 | ) 35 | 36 | -------------------------------------------------------------------------------- /08.EvaluationAndTracing/python/multi_workflow_ghmodel_devui/.env.example: -------------------------------------------------------------------------------- 1 | # Azure AI Foundry Configuration 2 | GITHUB_TOKEN="github token" 3 | GITHUB_ENDPOINT="https://models.github.ai/inference" 4 | GITHUB_MODEL_ID="gpt-4o-mini" 5 | # Bing Search Configuration (for web search tool) 6 | BING_CONNECTION_ID=your-bing-connection-id 7 | -------------------------------------------------------------------------------- /08.EvaluationAndTracing/python/multi_workflow_ghmodel_devui/contentreview_agent/__init__.py: -------------------------------------------------------------------------------- 1 | from .agent import reviewer_agent, ReviewAgent 2 | 3 | __all__ = ["reviewer_agent", "ReviewAgent"] 4 | -------------------------------------------------------------------------------- /08.EvaluationAndTracing/python/multi_workflow_ghmodel_devui/contentreview_agent/agent.py: -------------------------------------------------------------------------------- 1 | import os 2 | from pathlib import Path 3 | 4 | from agent_framework.openai import OpenAIChatClient 5 | from pydantic import BaseModel 6 | from typing_extensions import Literal 7 | from dotenv import load_dotenv # 📁 Secure configuration loading 8 | 9 | # Load environment variables from parent directory's .env file 10 | env_path = Path(__file__).parent.parent / ".env" 11 | print(f"Loading environment variables from: {env_path}") 12 | load_dotenv(dotenv_path=env_path) 13 | 14 | 15 | chat_client = OpenAIChatClient( 16 | base_url=os.environ.get("GITHUB_ENDPOINT"), # 🌐 GitHub Models API endpoint 17 | api_key=os.environ.get("GITHUB_TOKEN"), # 🔑 Authentication token 18 | model_id=os.environ.get("GITHUB_MODEL_ID") # 🎯 Selected AI model 19 | ) 20 | 21 | REVIEWER_NAME = "ContentReviewer" 22 | REVIEWER_INSTRUCTIONS = """ 23 | You are a content reviewer and need to check whether the tutorial's draft content meets the following requirements: 24 | 25 | 1. The draft content less than 200 words, set 'review_result' to 'No' and 'reason' to 'Content is too short'. If the draft content is more than 200 words, set 'review_result' to 'Yes' and 'reason' to 'The content is good'. 26 | 2. set 'draft_content' to the original draft content. 27 | 3. Always return result as JSON with fields 'review_result' ('Yes' or 'No' ) and 'reason' (string) and 'draft_content' (string). 28 | """ 29 | 30 | 31 | class ReviewAgent(BaseModel): 32 | review_result: Literal["Yes", "No"] 33 | reason: str 34 | draft_content: str 35 | 36 | 37 | reviewer_agent = chat_client.create_agent( 38 | instructions=REVIEWER_INSTRUCTIONS, 39 | name=REVIEWER_NAME, 40 | response_format=ReviewAgent 41 | ) 42 | -------------------------------------------------------------------------------- /08.EvaluationAndTracing/python/multi_workflow_ghmodel_devui/evangelist_agent/__init__.py: -------------------------------------------------------------------------------- 1 | from .agent import evangelist_agent, EvangelistAgent 2 | 3 | __all__ = ["evangelist_agent", "EvangelistAgent"] 4 | -------------------------------------------------------------------------------- /08.EvaluationAndTracing/python/multi_workflow_ghmodel_devui/evangelist_agent/agent.py: -------------------------------------------------------------------------------- 1 | import os 2 | from pathlib import Path 3 | 4 | from agent_framework.openai import OpenAIChatClient 5 | from agent_framework import HostedWebSearchTool 6 | from pydantic import BaseModel 7 | from dotenv import load_dotenv # 📁 Secure configuration loading 8 | 9 | # Load environment variables from parent directory's .env file 10 | env_path = Path(__file__).parent.parent / ".env" 11 | 12 | print(f"Loading environment variables from: {env_path}") 13 | load_dotenv(dotenv_path=env_path) 14 | 15 | 16 | chat_client = OpenAIChatClient( 17 | base_url=os.environ.get("GITHUB_ENDPOINT"), 18 | api_key=os.environ.get("GITHUB_TOKEN"), 19 | model_id=os.environ.get("GITHUB_MODEL_ID") 20 | ) 21 | 22 | EVANGELIST_NAME = "Evangelist" 23 | EVANGELIST_INSTRUCTIONS = """ 24 | You are a technology evangelist create a first draft for a technical tutorials. 25 | 1. Each knowledge point in the outline must include a link. Follow the link to access the content related to the knowledge point in the outline. Expand on that content. 26 | 2. Each knowledge point must be explained in detail. 27 | 3. Rewrite the content according to the entry requirements, including the title, outline, and corresponding content. It is not necessary to follow the outline in full order. 28 | 4. The content must be more than 200 words. 29 | 5. Always return JSON with draft_content (string) " 30 | 6. Include draft_content in draft_content" 31 | """ 32 | 33 | 34 | class EvangelistAgent(BaseModel): 35 | """Represents the result of draft content""" 36 | draft_content: str 37 | 38 | 39 | evangelist_agent = chat_client.create_agent( 40 | instructions=EVANGELIST_INSTRUCTIONS, 41 | tools=[HostedWebSearchTool()], 42 | name=EVANGELIST_NAME, 43 | response_format=EvangelistAgent, 44 | ) 45 | -------------------------------------------------------------------------------- /08.EvaluationAndTracing/python/multi_workflow_ghmodel_devui/main.py: -------------------------------------------------------------------------------- 1 | from dotenv import load_dotenv 2 | import os 3 | from workflow import workflow # 🏗️ The content workflow 4 | 5 | 6 | from agent_framework.observability import get_tracer 7 | from opentelemetry.trace import SpanKind 8 | from opentelemetry.trace.span import format_trace_id 9 | 10 | from opentelemetry.exporter.otlp.proto.grpc.trace_exporter import OTLPSpanExporter 11 | from agent_framework.observability import setup_observability 12 | 13 | # Load environment variables first, before importing agents 14 | load_dotenv() 15 | 16 | def main(): 17 | """Launch the content workflow in DevUI.""" 18 | import logging 19 | from agent_framework.devui import serve 20 | 21 | # Setup logging 22 | logging.basicConfig(level=logging.INFO, format="%(message)s") 23 | logger = logging.getLogger(__name__) 24 | 25 | logger.info("Starting Content Workflow") 26 | logger.info("Available at: http://localhost:8090") 27 | logger.info("Entity ID: workflow_content") 28 | 29 | # Launch server with the workflow 30 | serve(entities=[workflow], port=8090, auto_open=True, tracing_enabled=True) 31 | 32 | 33 | 34 | 35 | if __name__ == "__main__": 36 | main() 37 | -------------------------------------------------------------------------------- /08.EvaluationAndTracing/python/multi_workflow_ghmodel_devui/publisher_agent/__init__.py: -------------------------------------------------------------------------------- 1 | from .agent import publisher_agent, PublisherAgent 2 | 3 | __all__ = ["publisher_agent", "PublisherAgent"] 4 | -------------------------------------------------------------------------------- /08.EvaluationAndTracing/python/multi_workflow_ghmodel_devui/publisher_agent/agent.py: -------------------------------------------------------------------------------- 1 | import os 2 | from pathlib import Path 3 | 4 | from agent_framework.openai import OpenAIChatClient 5 | from agent_framework import HostedCodeInterpreterTool 6 | from pydantic import BaseModel 7 | from dotenv import load_dotenv # 📁 Secure configuration loading 8 | 9 | # Load environment variables from parent directory's .env file 10 | env_path = Path(__file__).parent.parent / ".env" 11 | load_dotenv(dotenv_path=env_path) 12 | 13 | 14 | chat_client = OpenAIChatClient( 15 | base_url=os.environ.get("GITHUB_ENDPOINT"), # 🌐 GitHub Models API endpoint 16 | api_key=os.environ.get("GITHUB_TOKEN"), # 🔑 Authentication token 17 | model_id=os.environ.get("GITHUB_MODEL_ID") # 🎯 Selected AI model 18 | ) 19 | 20 | PUBLISHER_NAME = "Publisher" 21 | PUBLISHER_INSTRUCTIONS = """ 22 | You are the content publisher, run code to save the tutorial's draft content as a Markdown file. Saved file's name is marked with current date and time, such as yearmonthdayhourminsec. Note that if it is 1-9, you need to add 0, such as 20240101123045.md. 23 | """ 24 | 25 | 26 | class PublisherAgent(BaseModel): 27 | file_path: str 28 | 29 | 30 | publisher_agent = chat_client.create_agent( 31 | instructions=PUBLISHER_INSTRUCTIONS, 32 | tools=HostedCodeInterpreterTool(), 33 | name=PUBLISHER_NAME, 34 | response_format=PublisherAgent 35 | ) 36 | -------------------------------------------------------------------------------- /08.EvaluationAndTracing/python/multi_workflow_ghmodel_devui/workflow/__init__.py: -------------------------------------------------------------------------------- 1 | from .workflow import workflow 2 | 3 | __all__ = ["workflow"] 4 | -------------------------------------------------------------------------------- /08.EvaluationAndTracing/python/multi_workflow_ghmodel_devui/workflow/workflow.py: -------------------------------------------------------------------------------- 1 | from dataclasses import dataclass 2 | from agent_framework import ( 3 | AgentExecutor, 4 | AgentExecutorRequest, 5 | AgentExecutorResponse, 6 | ChatMessage, 7 | Role, 8 | WorkflowBuilder, 9 | WorkflowContext, 10 | executor, 11 | ) 12 | 13 | from evangelist_agent import evangelist_agent, EvangelistAgent 14 | from contentreview_agent import reviewer_agent, ReviewAgent 15 | from publisher_agent import publisher_agent 16 | 17 | 18 | @dataclass 19 | class ReviewResult: 20 | review_result: str 21 | reason: str 22 | draft_content: str 23 | 24 | 25 | @executor(id="to_evangelist_content_result") 26 | async def to_evangelist_content_result(response: AgentExecutorResponse, ctx: WorkflowContext[AgentExecutorRequest]) -> None: 27 | """Convert evangelist agent response to structured format and forward to reviewer""" 28 | print(f"Raw response from evangelist agent: {response.agent_run_response}") 29 | agent = EvangelistAgent.model_validate_json(response.agent_run_response.text) 30 | user_msg = ChatMessage(Role.USER, text=agent.draft_content) 31 | await ctx.send_message(AgentExecutorRequest(messages=[user_msg], should_respond=True)) 32 | 33 | 34 | @executor(id="to_reviewer_result") 35 | async def to_reviewer_result(response: AgentExecutorResponse, ctx: WorkflowContext[ReviewResult]) -> None: 36 | """Convert reviewer agent response to structured format""" 37 | print(f"Raw response from reviewer agent: {response.agent_run_response.text}") 38 | 39 | parsed = ReviewAgent.model_validate_json(response.agent_run_response.text) 40 | await ctx.send_message( 41 | ReviewResult( 42 | review_result=parsed.review_result, 43 | reason=parsed.reason, 44 | draft_content=parsed.draft_content, 45 | ) 46 | ) 47 | 48 | 49 | def select_targets(review: ReviewResult, target_ids: list[str]) -> list[str]: 50 | """Select workflow path based on review result""" 51 | # Order: [handle_review, save_draft] 52 | handle_review_id, save_draft_id = target_ids 53 | if review.review_result == "Yes": 54 | return [save_draft_id] 55 | else: 56 | return [handle_review_id] 57 | 58 | 59 | @executor(id="handle_review") 60 | async def handle_review(review: ReviewResult, ctx: WorkflowContext[str]) -> None: 61 | """Handle review failures""" 62 | if review.review_result == "No": 63 | await ctx.yield_output(f"Review failed: {review.reason}, please revise the draft.") 64 | else: 65 | await ctx.send_message( 66 | AgentExecutorRequest(messages=[ChatMessage(Role.USER, text=review.draft_content)], should_respond=True) 67 | ) 68 | 69 | 70 | @executor(id="save_draft") 71 | async def save_draft(review: ReviewResult, ctx: WorkflowContext[AgentExecutorRequest]) -> None: 72 | """Save draft content by sending to publisher agent""" 73 | # Only called for approved drafts by selection_func 74 | await ctx.send_message( 75 | AgentExecutorRequest(messages=[ChatMessage(Role.USER, text=review.draft_content)], should_respond=True) 76 | ) 77 | 78 | 79 | # Create agent executors 80 | evangelist_executor = AgentExecutor(evangelist_agent, id="evangelist_agent") 81 | reviewer_executor = AgentExecutor(reviewer_agent, id="reviewer_agent") 82 | publisher_executor = AgentExecutor(publisher_agent, id="publisher_agent") 83 | 84 | # Build the conditional workflow 85 | workflow = ( 86 | WorkflowBuilder() 87 | .set_start_executor(evangelist_executor) 88 | .add_edge(evangelist_executor, to_evangelist_content_result) 89 | .add_edge(to_evangelist_content_result, reviewer_executor) 90 | .add_edge(reviewer_executor, to_reviewer_result) 91 | .add_multi_selection_edge_group( 92 | to_reviewer_result, 93 | [handle_review, save_draft], 94 | selection_func=select_targets, 95 | ) 96 | .add_edge(save_draft, publisher_executor) 97 | .build() 98 | ) 99 | -------------------------------------------------------------------------------- /08.EvaluationAndTracing/python/tracer_aspire/simple.py: -------------------------------------------------------------------------------- 1 | import asyncio 2 | import os 3 | from random import randint 4 | from typing import Annotated 5 | 6 | import dotenv 7 | from agent_framework import ChatAgent 8 | from agent_framework.azure import AzureAIAgentClient 9 | from agent_framework.observability import get_tracer 10 | from azure.ai.projects.aio import AIProjectClient 11 | from azure.identity.aio import AzureCliCredential 12 | from opentelemetry.trace import SpanKind 13 | from opentelemetry.trace.span import format_trace_id 14 | 15 | from opentelemetry.exporter.otlp.proto.grpc.trace_exporter import OTLPSpanExporter 16 | from agent_framework.observability import setup_observability 17 | 18 | from pydantic import Field 19 | 20 | """ 21 | This sample shows you can can setup telemetry for an Azure AI agent. 22 | It uses the Azure AI client to setup the telemetry, this calls out to 23 | Azure AI for the connection string of the attached Application Insights 24 | instance. 25 | 26 | You must add an Application Insights instance to your Azure AI project 27 | for this sample to work. 28 | """ 29 | 30 | # For loading the `AZURE_AI_PROJECT_ENDPOINT` environment variable 31 | dotenv.load_dotenv() 32 | 33 | 34 | async def get_weather( 35 | location: Annotated[str, Field(description="The location to get the weather for.")], 36 | ) -> str: 37 | """Get the weather for a given location.""" 38 | await asyncio.sleep(randint(0, 10) / 10.0) # Simulate a network call 39 | conditions = ["sunny", "cloudy", "rainy", "stormy"] 40 | return f"The weather in {location} is {conditions[randint(0, 3)]} with a high of {randint(10, 30)}°C." 41 | 42 | 43 | async def main(): 44 | async with ( 45 | AzureCliCredential() as credential, 46 | AIProjectClient(endpoint=os.environ["AZURE_AI_PROJECT_ENDPOINT"], credential=credential) as project, 47 | AzureAIAgentClient(project_client=project) as client, 48 | ): 49 | # This will enable tracing and configure the application to send telemetry data to the 50 | # Application Insights instance attached to the Azure AI project. 51 | # This will override any existing configuration. 52 | exporter = OTLPSpanExporter(endpoint=os.environ["OTLP_ENDPOINT"]) 53 | setup_observability(exporters=[exporter]) 54 | 55 | questions = ["What's the weather in Amsterdam?", "and in Paris, and which is better?", "Why is the sky blue?"] 56 | 57 | with get_tracer().start_as_current_span("Single Agent Chat", kind=SpanKind.CLIENT) as current_span: 58 | print(f"Trace ID: {format_trace_id(current_span.get_span_context().trace_id)}") 59 | 60 | agent = ChatAgent( 61 | chat_client=client, 62 | tools=get_weather, 63 | name="WeatherAgent", 64 | instructions="You are a weather assistant.", 65 | ) 66 | thread = agent.get_new_thread() 67 | for question in questions: 68 | print(f"User: {question}") 69 | print(f"{agent.display_name}: ", end="") 70 | async for update in agent.run_stream( 71 | question, 72 | thread=thread, 73 | ): 74 | if update.text: 75 | print(update.text, end="") 76 | 77 | 78 | if __name__ == "__main__": 79 | asyncio.run(main()) -------------------------------------------------------------------------------- /CODE_OF_CONDUCT.md: -------------------------------------------------------------------------------- 1 | # Microsoft Open Source Code of Conduct 2 | 3 | This project has adopted the [Microsoft Open Source Code of Conduct](https://opensource.microsoft.com/codeofconduct/). 4 | 5 | Resources: 6 | 7 | - [Microsoft Open Source Code of Conduct](https://opensource.microsoft.com/codeofconduct/) 8 | - [Microsoft Code of Conduct FAQ](https://opensource.microsoft.com/codeofconduct/faq/) 9 | - Contact [opencode@microsoft.com](mailto:opencode@microsoft.com) with questions or concerns 10 | - Employees can reach out at [aka.ms/opensource/moderation-support](https://aka.ms/opensource/moderation-support) 11 | -------------------------------------------------------------------------------- /Installation/requirements.txt: -------------------------------------------------------------------------------- 1 | agent-framework-core @ git+https://github.com/microsoft/agent-framework.git@main#subdirectory=python/packages/core 2 | agent-framework-azure-ai @ git+https://github.com/microsoft/agent-framework.git@main#subdirectory=python/packages/azure-ai -------------------------------------------------------------------------------- /LICENSE: -------------------------------------------------------------------------------- 1 | MIT License 2 | 3 | Copyright (c) Microsoft Corporation. 4 | 5 | Permission is hereby granted, free of charge, to any person obtaining a copy 6 | of this software and associated documentation files (the "Software"), to deal 7 | in the Software without restriction, including without limitation the rights 8 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell 9 | copies of the Software, and to permit persons to whom the Software is 10 | furnished to do so, subject to the following conditions: 11 | 12 | The above copyright notice and this permission notice shall be included in all 13 | copies or substantial portions of the Software. 14 | 15 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 16 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 17 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE 18 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 19 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, 20 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE 21 | SOFTWARE 22 | -------------------------------------------------------------------------------- /README.bak.md: -------------------------------------------------------------------------------- 1 | # Project 2 | 3 | > This repo has been populated by an initial template to help get you started. Please 4 | > make sure to update the content to build a great experience for community-building. 5 | 6 | As the maintainer of this project, please make a few updates: 7 | 8 | - Improving this README.MD file to provide a great experience 9 | - Updating SUPPORT.MD with content about this project's support experience 10 | - Understanding the security reporting process in SECURITY.MD 11 | - Remove this section from the README 12 | 13 | ## Contributing 14 | 15 | This project welcomes contributions and suggestions. Most contributions require you to agree to a 16 | Contributor License Agreement (CLA) declaring that you have the right to, and actually do, grant us 17 | the rights to use your contribution. For details, visit [Contributor License Agreements](https://cla.opensource.microsoft.com). 18 | 19 | When you submit a pull request, a CLA bot will automatically determine whether you need to provide 20 | a CLA and decorate the PR appropriately (e.g., status check, comment). Simply follow the instructions 21 | provided by the bot. You will only need to do this once across all repos using our CLA. 22 | 23 | This project has adopted the [Microsoft Open Source Code of Conduct](https://opensource.microsoft.com/codeofconduct/). 24 | For more information see the [Code of Conduct FAQ](https://opensource.microsoft.com/codeofconduct/faq/) or 25 | contact [opencode@microsoft.com](mailto:opencode@microsoft.com) with any additional questions or comments. 26 | 27 | ## Trademarks 28 | 29 | This project may contain trademarks or logos for projects, products, or services. Authorized use of Microsoft 30 | trademarks or logos is subject to and must follow 31 | [Microsoft's Trademark & Brand Guidelines](https://www.microsoft.com/legal/intellectualproperty/trademarks/usage/general). 32 | Use of Microsoft trademarks or logos in modified versions of this project must not cause confusion or imply Microsoft sponsorship. 33 | Any use of third-party trademarks or logos are subject to those third-party's policies. 34 | -------------------------------------------------------------------------------- /SECURITY.md: -------------------------------------------------------------------------------- 1 | 2 | 3 | ## Security 4 | 5 | Microsoft takes the security of our software products and services seriously, which 6 | includes all source code repositories in our GitHub organizations. 7 | 8 | **Please do not report security vulnerabilities through public GitHub issues.** 9 | 10 | For security reporting information, locations, contact information, and policies, 11 | please review the latest guidance for Microsoft repositories at 12 | [https://aka.ms/SECURITY.md](https://aka.ms/SECURITY.md). 13 | 14 | -------------------------------------------------------------------------------- /SUPPORT.md: -------------------------------------------------------------------------------- 1 | # TODO: The maintainer of this repo has not yet edited this file 2 | 3 | **REPO OWNER**: Do you want Customer Service & Support (CSS) support for this product/project? 4 | 5 | - **No CSS support:** Fill out this template with information about how to file issues and get help. 6 | - **Yes CSS support:** Fill out an intake form at [aka.ms/onboardsupport](https://aka.ms/onboardsupport). CSS will work with/help you to determine next steps. 7 | - **Not sure?** Fill out an intake as though the answer were "Yes". CSS will help you decide. 8 | 9 | *Then remove this first heading from this SUPPORT.MD file before publishing your repo.* 10 | 11 | # Support 12 | 13 | ## How to file issues and get help 14 | 15 | This project uses GitHub Issues to track bugs and feature requests. Please search the existing 16 | issues before filing new issues to avoid duplicates. For new issues, file your bug or 17 | feature request as a new Issue. 18 | 19 | For help and questions about using this project, please **REPO MAINTAINER: INSERT INSTRUCTIONS HERE 20 | FOR HOW TO ENGAGE REPO OWNERS OR COMMUNITY FOR HELP. COULD BE A STACK OVERFLOW TAG OR OTHER 21 | CHANNEL. WHERE WILL YOU HELP PEOPLE?**. 22 | 23 | ## Microsoft Support Policy 24 | 25 | Support for this **PROJECT or PRODUCT** is limited to the resources listed above. 26 | -------------------------------------------------------------------------------- /changelog.md: -------------------------------------------------------------------------------- 1 | # Change Log - Agent Framework Samples 2 | 3 | ## 2025-10-20 4 | 5 | ### Repository Documentation Updates 6 | 7 | **Summary:** Updated main README.md to accurately reflect project structure and added missing EvaluationAndTracing section 8 | 9 | #### Documentation Improvements 10 | 11 | | Component | Changes Made | 12 | |-----------|--------------| 13 | | `README.md` | • Updated Repository Structure table to include missing `08.EvaluationAndTracing` directory
• Added comprehensive documentation for evaluation and debugging tools
• Updated Tutorial Progression to include advanced evaluation techniques
• Enhanced Key Features section with evaluation and tracing capabilities
• Improved project navigation with accurate directory references | 14 | 15 | #### Repository Structure Alignment 16 | 17 | | Section | Updates | 18 | |---------|---------| 19 | | Repository Structure Table | • Added `08.EvaluationAndTracing` with Python-only samples
• Documented DevUI visualization tools
• Listed observability and tracing examples
• Maintained consistency with actual project structure | 20 | | Tutorial Progression | • Added evaluation and tracing as advanced level topic
• Organized learning path from beginner to advanced concepts
• Enhanced developer journey documentation | 21 | | Key Features | • Documented DevUI for visual debugging
• Added observability tools for agent tracing
• Highlighted evaluation capabilities for agent development | 22 | 23 | ## 2025-10-08 24 | 25 | ### Added Evaluation and Tracing Components 26 | 27 | **Summary:** Introduced comprehensive evaluation and tracing capabilities for agent development with DevUI visualization and observability features 28 | 29 | #### Sample Code and Documentation 30 | 31 | ***Folder*** - *08.EvaluationAndTracing* 32 | 33 | | Component | Content Added | 34 | |-----------|---------------| 35 | | `README.md` | • Comprehensive tutorial on evaluation and tracing
• DevUI setup and usage instructions
• Observability configuration guide
• Key features and benefits documentation | 36 | 37 | ***Folder*** - *08.EvaluationAndTracing/python* 38 | 39 | | Sample | Implementation | 40 | |--------|----------------| 41 | | `basic_agent_workflow_devui/` | • Basic agent workflow with DevUI integration
• Real-time session visualization example
• Interactive debugging demonstration | 42 | | `multi_workflow_ghmodel_devui/` | • Multi-agent workflow with DevUI support
• Complex interaction pattern visualization
• GitHub Models integration with monitoring | 43 | | `tracer_aspire/` | • Observability and logging configuration
• Step-by-step execution tracing
• Python logging framework integration | 44 | 45 | ## 2025-10-07 46 | 47 | 48 | #### Python Samples Update 49 | 50 | ***Folder*** - *00.ForBeginners/05-agentic-rag/code_samples* 51 | 52 | | File | Changes Made | 53 | |------|--------------| 54 | | `python-agent-framework-aifoundry-file-search.ipynb` | • Updated Azure AI Foundry file search integration
• Enhanced document processing with vector store management
• Improved RAG capabilities with `HostedFileSearchTool`
• Updated agent creation with persistent file search tools | 55 | 56 | ***Folder*** - *03.ExploerAgentFramework/code_samples/python* 57 | 58 | | File | Changes Made | 59 | |------|--------------| 60 | | `03-python-agent-framework-aifoundry.ipynb` | • Updated Azure AI Foundry basic agent integration
• Enhanced agent lifecycle management
• Improved `AzureAIAgentClient` initialization patterns
• Updated async context management for agents | 61 | 62 | ***Folder*** - *04.Tools/code_samples/python/foundry* 63 | 64 | | File | Changes Made | 65 | |------|--------------| 66 | | `01.python-agent-framework-aifoundry-vision.ipynb` | • Updated Azure AI Foundry vision capabilities
• Enhanced image processing with base64 encoding
• Improved multimodal content handling
• Updated vision agent creation with furniture consultation features | 67 | | `02.python-agent-framework-aifoundry-code-interpreter.ipynb` | • Updated Azure AI Foundry code interpreter integration
• Enhanced code execution capabilities
• Improved `HostedCodeInterpreterTool` implementation
• Updated mathematical computation features | 68 | | `03.python-agent-framework-aifoundry-binggrounding.ipynb` | • Updated Azure AI Foundry Bing grounding integration
• Enhanced web search capabilities
• Improved `HostedWebSearchTool` implementation
• Updated connection-based search functionality | 69 | | `04.python-agent-framework-aifoundry-file-search.ipynb` | • Updated Azure AI Foundry advanced file search
• Enhanced vector store creation and management
• Improved document upload and processing workflows
• Updated streaming response capabilities for file search | 70 | 71 | ## 2025-10-03 72 | 73 | ### Fixed GraphViz Integration 74 | 75 | **Summary:** Added GraphViz installation and visualization capabilities to all workflow samples 76 | 77 | #### A. DevContainer Update 78 | 79 | | Component | Change Description | 80 | |-----------|-------------------| 81 | | Development Environment | Added GraphViz system package to devcontainer configuration | 82 | | Installation | Pre-configured GraphViz to eliminate manual setup steps | 83 | 84 | #### B. Workflow Samples Update 85 | 86 | ***Folder*** - *07.Workflow/code_samples/python* 87 | 88 | | File | Changes Made | 89 | |------|--------------| 90 | | `01.python-agent-framework-workflow-ghmodel-basic.ipynb` | • Added `sudo apt install graphviz -y` installation
• Integrated WorkflowViz for workflow visualization
• Added SVG export with `viz.export(format="svg")`
• Implemented inline notebook rendering with HTML fallback | 91 | | `02.python-agent-framework-workflow-ghmodel-sequential.ipynb` | • Added GraphViz system installation commands
• Integrated workflow visualization for sequential pipeline
• Added SVG generation for three-agent workflow
• Implemented consistent visualization pattern | 92 | | `03.python-agent-framework-workflow-ghmodel-concurrent.ipynb` | • Added GraphViz installation for concurrent workflows
• Implemented parallel execution diagram generation
• Added visual representation of concurrent agents
• Consistent SVG export functionality | 93 | | `04.python-agent-framework-workflow-aifoundry-condition.ipynb` | • Added GraphViz support for conditional workflows
• Implemented decision tree diagram generation
• Added conditional branching visualization
• Consistent visualization approach | -------------------------------------------------------------------------------- /check_imports.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python3 2 | """ 3 | Script to validate that all Python imports in Jupyter notebooks are properly organized using isort. 4 | """ 5 | 6 | import os 7 | import json 8 | import subprocess 9 | import tempfile 10 | from pathlib import Path 11 | 12 | 13 | def extract_python_cells_from_notebook(notebook_path): 14 | """Extract Python code cells from a Jupyter notebook.""" 15 | with open(notebook_path, 'r', encoding='utf-8') as f: 16 | notebook = json.load(f) 17 | 18 | python_code = [] 19 | for cell in notebook.get('cells', []): 20 | if cell.get('cell_type') == 'code' and cell.get('metadata', {}).get('language') == 'python': 21 | source = cell.get('source', []) 22 | if isinstance(source, list): 23 | code = ''.join(source) 24 | else: 25 | code = source 26 | 27 | # Skip cells that are just pip installs or shell commands 28 | if not code.strip().startswith(('!', '%')): 29 | python_code.append(code) 30 | 31 | return '\n\n'.join(python_code) 32 | 33 | 34 | def check_imports_with_isort(code): 35 | """Check if the imports in the code are properly organized using isort.""" 36 | with tempfile.NamedTemporaryFile(mode='w', suffix='.py', delete=False) as temp_file: 37 | temp_file.write(code) 38 | temp_file.flush() 39 | 40 | try: 41 | # Run isort in check mode 42 | result = subprocess.run( 43 | ['isort', '--check-only', '--diff', temp_file.name], 44 | capture_output=True, 45 | text=True 46 | ) 47 | 48 | if result.returncode == 0: 49 | return True, "Imports are properly organized" 50 | else: 51 | return False, result.stdout 52 | finally: 53 | os.unlink(temp_file.name) 54 | 55 | 56 | def main(): 57 | """Main function to check all Python notebooks.""" 58 | root_dir = Path(__file__).parent 59 | notebook_files = list(root_dir.rglob('*.ipynb')) 60 | 61 | python_notebooks = [] 62 | for notebook_path in notebook_files: 63 | if 'python' in str(notebook_path): 64 | python_notebooks.append(notebook_path) 65 | 66 | print(f"Found {len(python_notebooks)} Python notebooks to check:") 67 | 68 | all_good = True 69 | for notebook_path in python_notebooks: 70 | print(f"\nChecking: {notebook_path.relative_to(root_dir)}") 71 | 72 | try: 73 | code = extract_python_cells_from_notebook(notebook_path) 74 | if not code.strip(): 75 | print(" ✓ No Python code to check") 76 | continue 77 | 78 | is_organized, message = check_imports_with_isort(code) 79 | if is_organized: 80 | print(" ✓ Imports are properly organized") 81 | else: 82 | print(" ✗ Imports need reorganization:") 83 | print(f" {message}") 84 | all_good = False 85 | 86 | except Exception as e: 87 | print(f" ✗ Error checking notebook: {e}") 88 | all_good = False 89 | 90 | if all_good: 91 | print("\n🎉 All Python imports are properly organized!") 92 | else: 93 | print("\n❌ Some imports need to be reorganized.") 94 | 95 | return 0 if all_good else 1 96 | 97 | 98 | if __name__ == "__main__": 99 | exit(main()) 100 | -------------------------------------------------------------------------------- /pyproject.toml: -------------------------------------------------------------------------------- 1 | [tool.isort] 2 | profile = "black" 3 | multi_line_output = 3 4 | include_trailing_comma = true 5 | force_grid_wrap = 0 6 | use_parentheses = true 7 | ensure_newline_before_comments = true 8 | line_length = 88 9 | known_first_party = ["agent_framework"] 10 | known_third_party = ["azure", "openai", "dotenv"] 11 | sections = ["FUTURE", "STDLIB", "THIRDPARTY", "FIRSTPARTY", "LOCALFOLDER"] 12 | import_heading_stdlib = "" 13 | import_heading_thirdparty = "" 14 | import_heading_firstparty = "" 15 | import_heading_localfolder = "" 16 | --------------------------------------------------------------------------------