├── .devcontainer └── devcontainer.json ├── .env.example ├── .github ├── CODE_OF_CONDUCT.md ├── ISSUE_TEMPLATE.md └── PULL_REQUEST_TEMPLATE.md ├── .gitignore ├── 01-intro-to-semantic-kernel └── 01-intro.ipynb ├── 02-semantic-kernel-agents ├── 02.1-single-agents.ipynb └── 02.2-agents-chats.ipynb ├── 03-semantic-kernel-mcp ├── 3.1-sk-with-mcp.ipynb ├── contoso.db └── mcp_server.py ├── 04-process-framework └── 04.1-intro-to-processes.ipynb ├── CHANGELOG.md ├── CONTRIBUTING.md ├── LICENSE.md ├── README.md ├── playground ├── README.md ├── assets │ └── sk-playground.gif ├── backend │ ├── .python-version │ ├── README.md │ ├── app │ │ ├── __init__.py │ │ ├── api │ │ │ ├── __init__.py │ │ │ ├── agents.py │ │ │ ├── filters.py │ │ │ ├── functions.py │ │ │ ├── kernel.py │ │ │ ├── memory.py │ │ │ ├── process.py │ │ │ └── weather.py │ │ ├── core │ │ │ ├── __init__.py │ │ │ └── kernel.py │ │ ├── filters │ │ │ ├── __init__.py │ │ │ └── content_filters.py │ │ ├── main.py │ │ ├── models │ │ │ ├── __init__.py │ │ │ └── api_models.py │ │ └── plugins │ │ │ ├── __init__.py │ │ │ └── weather.py │ ├── main.py │ ├── pyproject.toml │ └── uv.lock ├── frontend │ ├── .gitignore │ ├── README.md │ ├── components.json │ ├── eslint.config.mjs │ ├── next.config.ts │ ├── package-lock.json │ ├── package.json │ ├── postcss.config.mjs │ ├── public │ │ ├── file.svg │ │ ├── globe.svg │ │ ├── next.svg │ │ ├── vercel.svg │ │ └── window.svg │ ├── run.sh │ ├── src │ │ ├── app │ │ │ ├── agent │ │ │ │ └── page.tsx │ │ │ ├── favicon.ico │ │ │ ├── filters │ │ │ │ └── page.tsx │ │ │ ├── functions │ │ │ │ └── page.tsx │ │ │ ├── globals.css │ │ │ ├── layout.tsx │ │ │ ├── memory │ │ │ │ └── page.tsx │ │ │ ├── multi-agent │ │ │ │ └── page.tsx │ │ │ ├── page.tsx │ │ │ ├── process │ │ │ │ └── page.tsx │ │ │ ├── summarize │ │ │ │ └── page.tsx │ │ │ ├── translate │ │ │ │ └── page.tsx │ │ │ └── weather │ │ │ │ └── page.tsx │ │ ├── components │ │ │ ├── layout │ │ │ │ └── shell.tsx │ │ │ └── ui │ │ │ │ ├── alert.tsx │ │ │ │ 
├── avatar.tsx │ │ │ │ ├── badge.tsx │ │ │ │ ├── button.tsx │ │ │ │ ├── card.tsx │ │ │ │ ├── code-block.tsx │ │ │ │ ├── code-toggle.tsx │ │ │ │ ├── code-viewer.tsx │ │ │ │ ├── dialog.tsx │ │ │ │ ├── form.tsx │ │ │ │ ├── input.tsx │ │ │ │ ├── label.tsx │ │ │ │ ├── progress.tsx │ │ │ │ ├── scroll-area.tsx │ │ │ │ ├── select.tsx │ │ │ │ ├── separator.tsx │ │ │ │ ├── sheet.tsx │ │ │ │ ├── sidebar.tsx │ │ │ │ ├── skeleton.tsx │ │ │ │ ├── slider.tsx │ │ │ │ ├── sonner.tsx │ │ │ │ ├── switch.tsx │ │ │ │ ├── table.tsx │ │ │ │ ├── tabs.tsx │ │ │ │ ├── textarea.tsx │ │ │ │ └── tooltip.tsx │ │ ├── hooks │ │ │ └── use-mobile.ts │ │ └── lib │ │ │ ├── code-examples.ts │ │ │ └── utils.ts │ ├── tailwind.config.js │ └── tsconfig.json └── start.sh └── pyproject.toml /.devcontainer/devcontainer.json: -------------------------------------------------------------------------------- 1 | { 2 | "name": "Semantic Kernel Workshop", 3 | "image": "mcr.microsoft.com/devcontainers/python:1-3.12-bullseye", 4 | 5 | "features": { 6 | // uv as a Feature layer, so it’s cached instead of curl-installed each time 7 | "ghcr.io/jsburckhardt/devcontainer-features/uv:1": {} 8 | }, 9 | 10 | // Cache uv’s wheels between rebuilds 11 | "mounts": [ 12 | "source=${localEnv:HOME}/.cache/uv,target=/home/vscode/.cache/uv,type=bind,consistency=delegated" 13 | ], 14 | 15 | "postCreateCommand": "uv sync --prerelease=allow && [ -f .env.example ] && cp -n .env.example .env || true", 16 | "postAttachCommand": "echo '\\n✅ Dev container ready – remember to fill in .env before running the samples.\\n'", 17 | 18 | "customizations": { 19 | "vscode": { 20 | "extensions": [ 21 | "ms-python.python", 22 | "ms-python.vscode-pylance", 23 | "ms-toolsai.jupyter" 24 | ], 25 | "settings": { 26 | "python.defaultInterpreterPath": "python" 27 | } 28 | } 29 | }, 30 | 31 | "remoteUser": "vscode" 32 | } -------------------------------------------------------------------------------- /.env.example: 
-------------------------------------------------------------------------------- 1 | # Azure OpenAI configuration 2 | AZURE_OPENAI_ENDPOINT=https://.openai.azure.com/ 3 | AZURE_OPENAI_API_KEY= 4 | AZURE_OPENAI_API_VERSION=2025-03-01-preview 5 | AZURE_OPENAI_CHAT_DEPLOYMENT_NAME=gpt-4o-2024-11-20 6 | AZURE_OPENAI_EMBEDDING_DEPLOYMENT_NAME=text-embedding-ada-002 -------------------------------------------------------------------------------- /.github/CODE_OF_CONDUCT.md: -------------------------------------------------------------------------------- 1 | # Microsoft Open Source Code of Conduct 2 | 3 | This project has adopted the [Microsoft Open Source Code of Conduct](https://opensource.microsoft.com/codeofconduct/). 4 | 5 | Resources: 6 | 7 | - [Microsoft Open Source Code of Conduct](https://opensource.microsoft.com/codeofconduct/) 8 | - [Microsoft Code of Conduct FAQ](https://opensource.microsoft.com/codeofconduct/faq/) 9 | - Contact [opencode@microsoft.com](mailto:opencode@microsoft.com) with questions or concerns 10 | -------------------------------------------------------------------------------- /.github/ISSUE_TEMPLATE.md: -------------------------------------------------------------------------------- 1 | 4 | > Please provide us with the following information: 5 | > --------------------------------------------------------------- 6 | 7 | ### This issue is for a: (mark with an `x`) 8 | ``` 9 | - [ ] bug report -> please search issues before submitting 10 | - [ ] feature request 11 | - [ ] documentation issue or request 12 | - [ ] regression (a behavior that used to work and stopped in a new release) 13 | ``` 14 | 15 | ### Minimal steps to reproduce 16 | > 17 | 18 | ### Any log messages given by the failure 19 | > 20 | 21 | ### Expected/desired behavior 22 | > 23 | 24 | ### OS and Version? 25 | > Windows 7, 8 or 10. Linux (which distribution). macOS (Yosemite? El Capitan? Sierra?) 
26 | 27 | ### Versions 28 | > 29 | 30 | ### Mention any other details that might be useful 31 | 32 | > --------------------------------------------------------------- 33 | > Thanks! We'll be in touch soon. 34 | -------------------------------------------------------------------------------- /.github/PULL_REQUEST_TEMPLATE.md: -------------------------------------------------------------------------------- 1 | ## Purpose 2 | 3 | * ... 4 | 5 | ## Does this introduce a breaking change? 6 | 7 | ``` 8 | [ ] Yes 9 | [ ] No 10 | ``` 11 | 12 | ## Pull Request Type 13 | What kind of change does this Pull Request introduce? 14 | 15 | 16 | ``` 17 | [ ] Bugfix 18 | [ ] Feature 19 | [ ] Code style update (formatting, local variables) 20 | [ ] Refactoring (no functional changes, no api changes) 21 | [ ] Documentation content changes 22 | [ ] Other... Please describe: 23 | ``` 24 | 25 | ## How to Test 26 | * Get the code 27 | 28 | ``` 29 | git clone [repo-address] 30 | cd [repo-name] 31 | git checkout [branch-name] 32 | npm install 33 | ``` 34 | 35 | * Test the code 36 | 37 | ``` 38 | ``` 39 | 40 | ## What to Check 41 | Verify that the following are valid 42 | * ... 43 | 44 | ## Other Information 45 | -------------------------------------------------------------------------------- /.gitignore: -------------------------------------------------------------------------------- 1 | ## Ignore Visual Studio temporary files, build results, and 2 | ## files generated by popular Visual Studio add-ons. 
3 | ## 4 | ## Get latest from https://github.com/github/gitignore/blob/main/VisualStudio.gitignore 5 | 6 | # User-specific files 7 | *.rsuser 8 | *.suo 9 | *.user 10 | *.userosscache 11 | *.sln.docstates 12 | 13 | # User-specific files (MonoDevelop/Xamarin Studio) 14 | *.userprefs 15 | 16 | # Mono auto generated files 17 | mono_crash.* 18 | 19 | # Build results 20 | [Dd]ebug/ 21 | [Dd]ebugPublic/ 22 | [Rr]elease/ 23 | [Rr]eleases/ 24 | x64/ 25 | x86/ 26 | [Ww][Ii][Nn]32/ 27 | [Aa][Rr][Mm]/ 28 | [Aa][Rr][Mm]64/ 29 | bld/ 30 | [Bb]in/ 31 | [Oo]bj/ 32 | [Ll]og/ 33 | [Ll]ogs/ 34 | 35 | # Visual Studio 2015/2017 cache/options directory 36 | .vs/ 37 | # Uncomment if you have tasks that create the project's static files in wwwroot 38 | #wwwroot/ 39 | 40 | # Visual Studio 2017 auto generated files 41 | Generated\ Files/ 42 | 43 | # MSTest test Results 44 | [Tt]est[Rr]esult*/ 45 | [Bb]uild[Ll]og.* 46 | 47 | # NUnit 48 | *.VisualState.xml 49 | TestResult.xml 50 | nunit-*.xml 51 | 52 | # Build Results of an ATL Project 53 | [Dd]ebugPS/ 54 | [Rr]eleasePS/ 55 | dlldata.c 56 | 57 | # Benchmark Results 58 | BenchmarkDotNet.Artifacts/ 59 | 60 | # .NET Core 61 | project.lock.json 62 | project.fragment.lock.json 63 | artifacts/ 64 | 65 | # ASP.NET Scaffolding 66 | ScaffoldingReadMe.txt 67 | 68 | # StyleCop 69 | StyleCopReport.xml 70 | 71 | # Files built by Visual Studio 72 | *_i.c 73 | *_p.c 74 | *_h.h 75 | *.ilk 76 | *.meta 77 | *.obj 78 | *.iobj 79 | *.pch 80 | *.pdb 81 | *.ipdb 82 | *.pgc 83 | *.pgd 84 | *.rsp 85 | # but not Directory.Build.rsp, as it configures directory-level build defaults 86 | !Directory.Build.rsp 87 | *.sbr 88 | *.tlb 89 | *.tli 90 | *.tlh 91 | *.tmp 92 | *.tmp_proj 93 | *_wpftmp.csproj 94 | *.log 95 | *.tlog 96 | *.vspscc 97 | *.vssscc 98 | .builds 99 | *.pidb 100 | *.svclog 101 | *.scc 102 | 103 | # Chutzpah Test files 104 | _Chutzpah* 105 | 106 | # Visual C++ cache files 107 | ipch/ 108 | *.aps 109 | *.ncb 110 | *.opendb 111 | *.opensdf 112 | *.sdf 113 | 
*.cachefile 114 | *.VC.db 115 | *.VC.VC.opendb 116 | 117 | # Visual Studio profiler 118 | *.psess 119 | *.vsp 120 | *.vspx 121 | *.sap 122 | 123 | # Visual Studio Trace Files 124 | *.e2e 125 | 126 | # TFS 2012 Local Workspace 127 | $tf/ 128 | 129 | # Guidance Automation Toolkit 130 | *.gpState 131 | 132 | # ReSharper is a .NET coding add-in 133 | _ReSharper*/ 134 | *.[Rr]e[Ss]harper 135 | *.DotSettings.user 136 | 137 | # TeamCity is a build add-in 138 | _TeamCity* 139 | 140 | # DotCover is a Code Coverage Tool 141 | *.dotCover 142 | 143 | # AxoCover is a Code Coverage Tool 144 | .axoCover/* 145 | !.axoCover/settings.json 146 | 147 | # Coverlet is a free, cross platform Code Coverage Tool 148 | coverage*.json 149 | coverage*.xml 150 | coverage*.info 151 | 152 | # Visual Studio code coverage results 153 | *.coverage 154 | *.coveragexml 155 | 156 | # NCrunch 157 | _NCrunch_* 158 | .*crunch*.local.xml 159 | nCrunchTemp_* 160 | 161 | # MightyMoose 162 | *.mm.* 163 | AutoTest.Net/ 164 | 165 | # Web workbench (sass) 166 | .sass-cache/ 167 | 168 | # Installshield output folder 169 | [Ee]xpress/ 170 | 171 | # DocProject is a documentation generator add-in 172 | DocProject/buildhelp/ 173 | DocProject/Help/*.HxT 174 | DocProject/Help/*.HxC 175 | DocProject/Help/*.hhc 176 | DocProject/Help/*.hhk 177 | DocProject/Help/*.hhp 178 | DocProject/Help/Html2 179 | DocProject/Help/html 180 | 181 | # Click-Once directory 182 | publish/ 183 | 184 | # Publish Web Output 185 | *.[Pp]ublish.xml 186 | *.azurePubxml 187 | # Note: Comment the next line if you want to checkin your web deploy settings, 188 | # but database connection strings (with potential passwords) will be unencrypted 189 | *.pubxml 190 | *.publishproj 191 | 192 | # Microsoft Azure Web App publish settings. 
Comment the next line if you want to 193 | # checkin your Azure Web App publish settings, but sensitive information contained 194 | # in these scripts will be unencrypted 195 | PublishScripts/ 196 | 197 | # NuGet Packages 198 | *.nupkg 199 | # NuGet Symbol Packages 200 | *.snupkg 201 | # The packages folder can be ignored because of Package Restore 202 | **/[Pp]ackages/* 203 | # except build/, which is used as an MSBuild target. 204 | !**/[Pp]ackages/build/ 205 | # Uncomment if necessary however generally it will be regenerated when needed 206 | #!**/[Pp]ackages/repositories.config 207 | # NuGet v3's project.json files produces more ignorable files 208 | *.nuget.props 209 | *.nuget.targets 210 | 211 | # Microsoft Azure Build Output 212 | csx/ 213 | *.build.csdef 214 | 215 | # Microsoft Azure Emulator 216 | ecf/ 217 | rcf/ 218 | 219 | # Windows Store app package directories and files 220 | AppPackages/ 221 | BundleArtifacts/ 222 | Package.StoreAssociation.xml 223 | _pkginfo.txt 224 | *.appx 225 | *.appxbundle 226 | *.appxupload 227 | 228 | # Visual Studio cache files 229 | # files ending in .cache can be ignored 230 | *.[Cc]ache 231 | # but keep track of directories ending in .cache 232 | !?*.[Cc]ache/ 233 | 234 | # Others 235 | ClientBin/ 236 | ~$* 237 | *~ 238 | *.dbmdl 239 | *.dbproj.schemaview 240 | *.jfm 241 | *.pfx 242 | *.publishsettings 243 | orleans.codegen.cs 244 | 245 | # Including strong name files can present a security risk 246 | # (https://github.com/github/gitignore/pull/2483#issue-259490424) 247 | #*.snk 248 | 249 | # Since there are multiple workflows, uncomment next line to ignore bower_components 250 | # (https://github.com/github/gitignore/pull/1529#issuecomment-104372622) 251 | #bower_components/ 252 | 253 | # RIA/Silverlight projects 254 | Generated_Code/ 255 | 256 | # Backup & report files from converting an old project file 257 | # to a newer Visual Studio version. 
Backup files are not needed, 258 | # because we have git ;-) 259 | _UpgradeReport_Files/ 260 | Backup*/ 261 | UpgradeLog*.XML 262 | UpgradeLog*.htm 263 | ServiceFabricBackup/ 264 | *.rptproj.bak 265 | 266 | # SQL Server files 267 | *.mdf 268 | *.ldf 269 | *.ndf 270 | 271 | # Business Intelligence projects 272 | *.rdl.data 273 | *.bim.layout 274 | *.bim_*.settings 275 | *.rptproj.rsuser 276 | *- [Bb]ackup.rdl 277 | *- [Bb]ackup ([0-9]).rdl 278 | *- [Bb]ackup ([0-9][0-9]).rdl 279 | 280 | # Microsoft Fakes 281 | FakesAssemblies/ 282 | 283 | # GhostDoc plugin setting file 284 | *.GhostDoc.xml 285 | 286 | # Node.js Tools for Visual Studio 287 | .ntvs_analysis.dat 288 | node_modules/ 289 | 290 | # Visual Studio 6 build log 291 | *.plg 292 | 293 | # Visual Studio 6 workspace options file 294 | *.opt 295 | 296 | # Visual Studio 6 auto-generated workspace file (contains which files were open etc.) 297 | *.vbw 298 | 299 | # Visual Studio 6 auto-generated project file (contains which files were open etc.) 
300 | *.vbp 301 | 302 | # Visual Studio 6 workspace and project file (working project files containing files to include in project) 303 | *.dsw 304 | *.dsp 305 | 306 | # Visual Studio 6 technical files 307 | *.ncb 308 | *.aps 309 | 310 | # Visual Studio LightSwitch build output 311 | **/*.HTMLClient/GeneratedArtifacts 312 | **/*.DesktopClient/GeneratedArtifacts 313 | **/*.DesktopClient/ModelManifest.xml 314 | **/*.Server/GeneratedArtifacts 315 | **/*.Server/ModelManifest.xml 316 | _Pvt_Extensions 317 | 318 | # Paket dependency manager 319 | .paket/paket.exe 320 | paket-files/ 321 | 322 | # FAKE - F# Make 323 | .fake/ 324 | 325 | # CodeRush personal settings 326 | .cr/personal 327 | 328 | # Python Tools for Visual Studio (PTVS) 329 | __pycache__/ 330 | *.pyc 331 | 332 | # Cake - Uncomment if you are using it 333 | # tools/** 334 | # !tools/packages.config 335 | 336 | # Tabs Studio 337 | *.tss 338 | 339 | # Telerik's JustMock configuration file 340 | *.jmconfig 341 | 342 | # BizTalk build output 343 | *.btp.cs 344 | *.btm.cs 345 | *.odx.cs 346 | *.xsd.cs 347 | 348 | # OpenCover UI analysis results 349 | OpenCover/ 350 | 351 | # Azure Stream Analytics local run output 352 | ASALocalRun/ 353 | 354 | # MSBuild Binary and Structured Log 355 | *.binlog 356 | 357 | # NVidia Nsight GPU debugger configuration file 358 | *.nvuser 359 | 360 | # MFractors (Xamarin productivity tool) working folder 361 | .mfractor/ 362 | 363 | # Local History for Visual Studio 364 | .localhistory/ 365 | 366 | # Visual Studio History (VSHistory) files 367 | .vshistory/ 368 | 369 | # BeatPulse healthcheck temp database 370 | healthchecksdb 371 | 372 | # Backup folder for Package Reference Convert tool in Visual Studio 2017 373 | MigrationBackup/ 374 | 375 | # Ionide (cross platform F# VS Code tools) working folder 376 | .ionide/ 377 | 378 | # Fody - auto-generated XML schema 379 | FodyWeavers.xsd 380 | 381 | # VS Code files for those working on multiple tools 382 | .vscode/* 383 | 
!.vscode/settings.json 384 | !.vscode/tasks.json 385 | !.vscode/launch.json 386 | !.vscode/extensions.json 387 | *.code-workspace 388 | 389 | # Local History for Visual Studio Code 390 | .history/ 391 | 392 | # Windows Installer files from build outputs 393 | *.cab 394 | *.msi 395 | *.msix 396 | *.msm 397 | *.msp 398 | 399 | # JetBrains Rider 400 | *.sln.iml 401 | .env 402 | .DS_Store 403 | 404 | # UV 405 | **/uv.lock -------------------------------------------------------------------------------- /03-semantic-kernel-mcp/3.1-sk-with-mcp.ipynb: -------------------------------------------------------------------------------- 1 | { 2 | "cells": [ 3 | { 4 | "cell_type": "markdown", 5 | "metadata": {}, 6 | "source": [ 7 | "# Using Semantic Kernel with MCP Servers\n", 8 | "\n", 9 | "In this notebook, we'll connect a simple SK Agent to an MCP Server.\n", 10 | "\n", 11 | "The full example can be found at [microsoft/OpenAIWorkshop](https://github.com/microsoft/OpenAIWorkshop/).\n" 12 | ] 13 | }, 14 | { 15 | "cell_type": "markdown", 16 | "metadata": {}, 17 | "source": [ 18 | "First, let's start our MCP server.\n", 19 | "\n", 20 | "Open a new terminal and run:\n", 21 | "\n", 22 | "using uv:\n", 23 | "```shell\n", 24 | "cd 03-semantic-kernel-mcp\\\n", 25 | "uv run --prerelease=allow mcp_server.py\n", 26 | "```\n", 27 | "\n", 28 | "using pip:\n", 29 | "```shell\n", 30 | "pip install fastmcp\n", 31 | "cd .\\03-semantic-kernel-mcp\\\n", 32 | "python .\\mcp_server.py\n", 33 | "```\n", 34 | "\n", 35 | "The server should come up like this:\n", 36 | "\n", 37 | "```\n", 38 | "INFO: Started server process [49488]\n", 39 | "INFO: Waiting for application startup.\n", 40 | "INFO: Application startup complete.\n", 41 | "INFO: Uvicorn running on http://0.0.0.0:8000 (Press CTRL+C to quit)\n", 42 | "```\n", 43 | "\n", 44 | "Great, ready to go, let's connect SK to it:" 45 | ] 46 | }, 47 | { 48 | "cell_type": "code", 49 | "execution_count": null, 50 | "metadata": {}, 51 | "outputs": [], 52 | 
"source": [ 53 | "from semantic_kernel.agents import ChatCompletionAgent, ChatHistoryAgentThread\n", 54 | "from semantic_kernel.connectors.ai.open_ai import AzureChatCompletion\n", 55 | "from semantic_kernel.connectors.mcp import MCPSsePlugin\n", 56 | "\n", 57 | "# Set up the SSE plugin for the MCP service.\n", 58 | "contoso_mcp_plugin = MCPSsePlugin(\n", 59 | " name=\"ContosoMCP\",\n", 60 | " description=\"Contoso MCP Plugin\",\n", 61 | " url=\"http://localhost:8000/sse\", # Replace this if you're not running it locally\n", 62 | " headers={\"Content-Type\": \"application/json\"},\n", 63 | " timeout=30,\n", 64 | ")\n", 65 | "\n", 66 | "# Open the SSE connection so tools/prompts are loaded\n", 67 | "await contoso_mcp_plugin.connect()\n", 68 | "\n", 69 | "# Now create our agent and plug in the MCP plugin\n", 70 | "agent = ChatCompletionAgent(\n", 71 | " service=AzureChatCompletion(),\n", 72 | " name=\"ChatBot\",\n", 73 | " instructions=\"You are a helpful assistant. You can use multiple tools to find information \"\n", 74 | " \"and answer questions. Review the tools available under the MCPTools plugin \"\n", 75 | " \"and use them as needed. 
You can also ask clarifying questions if the user is not clear.\",\n", 76 | " plugins=[contoso_mcp_plugin],\n", 77 | ")\n", 78 | "\n", 79 | "# Same as prior in our workshop \n", 80 | "thread: ChatHistoryAgentThread = None\n", 81 | "user_messages = [\n", 82 | " \"I noticed my last invoice was higher than usual—can you help me understand why and what can be done about it?\",\n", 83 | " \"My customer id is 42\",\n", 84 | " ]\n", 85 | "\n", 86 | "for user_message in user_messages:\n", 87 | " print(\"*** User:\", user_message)\n", 88 | " response = await agent.get_response(messages=user_message, thread=thread)\n", 89 | " thread = response.thread\n", 90 | " print(\"*** Agent:\", response.content)\n", 91 | "\n" 92 | ] 93 | }, 94 | { 95 | "cell_type": "code", 96 | "execution_count": null, 97 | "metadata": {}, 98 | "outputs": [], 99 | "source": [] 100 | } 101 | ], 102 | "metadata": { 103 | "kernelspec": { 104 | "display_name": ".venv", 105 | "language": "python", 106 | "name": "python3" 107 | }, 108 | "language_info": { 109 | "codemirror_mode": { 110 | "name": "ipython", 111 | "version": 3 112 | }, 113 | "file_extension": ".py", 114 | "mimetype": "text/x-python", 115 | "name": "python", 116 | "nbconvert_exporter": "python", 117 | "pygments_lexer": "ipython3", 118 | "version": "3.12.10" 119 | } 120 | }, 121 | "nbformat": 4, 122 | "nbformat_minor": 2 123 | } 124 | -------------------------------------------------------------------------------- /03-semantic-kernel-mcp/contoso.db: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Azure-Samples/semantic-kernel-workshop/b65b791a04a79ef73a8cf8be94145fc58f5b33e8/03-semantic-kernel-mcp/contoso.db -------------------------------------------------------------------------------- /CHANGELOG.md: -------------------------------------------------------------------------------- 1 | ## [project-title] Changelog 2 | 3 | 4 | # x.y.z (yyyy-mm-dd) 5 | 6 | *Features* 7 | * ... 
8 | 9 | *Bug Fixes* 10 | * ... 11 | 12 | *Breaking Changes* 13 | * ... 14 | -------------------------------------------------------------------------------- /CONTRIBUTING.md: -------------------------------------------------------------------------------- 1 | # Contributing to [project-title] 2 | 3 | This project welcomes contributions and suggestions. Most contributions require you to agree to a 4 | Contributor License Agreement (CLA) declaring that you have the right to, and actually do, grant us 5 | the rights to use your contribution. For details, visit https://cla.opensource.microsoft.com. 6 | 7 | When you submit a pull request, a CLA bot will automatically determine whether you need to provide 8 | a CLA and decorate the PR appropriately (e.g., status check, comment). Simply follow the instructions 9 | provided by the bot. You will only need to do this once across all repos using our CLA. 10 | 11 | This project has adopted the [Microsoft Open Source Code of Conduct](https://opensource.microsoft.com/codeofconduct/). 12 | For more information see the [Code of Conduct FAQ](https://opensource.microsoft.com/codeofconduct/faq/) or 13 | contact [opencode@microsoft.com](mailto:opencode@microsoft.com) with any additional questions or comments. 14 | 15 | - [Code of Conduct](#coc) 16 | - [Issues and Bugs](#issue) 17 | - [Feature Requests](#feature) 18 | - [Submission Guidelines](#submit) 19 | 20 | ## Code of Conduct 21 | Help us keep this project open and inclusive. Please read and follow our [Code of Conduct](https://opensource.microsoft.com/codeofconduct/). 22 | 23 | ## Found an Issue? 24 | If you find a bug in the source code or a mistake in the documentation, you can help us by 25 | [submitting an issue](#submit-issue) to the GitHub Repository. Even better, you can 26 | [submit a Pull Request](#submit-pr) with a fix. 27 | 28 | ## Want a Feature? 29 | You can *request* a new feature by [submitting an issue](#submit-issue) to the GitHub 30 | Repository. 
If you would like to *implement* a new feature, please submit an issue with 31 | a proposal for your work first, to be sure that we can use it. 32 | 33 | * **Small Features** can be crafted and directly [submitted as a Pull Request](#submit-pr). 34 | 35 | ## Submission Guidelines 36 | 37 | ### Submitting an Issue 38 | Before you submit an issue, search the archive, maybe your question was already answered. 39 | 40 | If your issue appears to be a bug, and hasn't been reported, open a new issue. 41 | Help us to maximize the effort we can spend fixing issues and adding new 42 | features, by not reporting duplicate issues. Providing the following information will increase the 43 | chances of your issue being dealt with quickly: 44 | 45 | * **Overview of the Issue** - if an error is being thrown a non-minified stack trace helps 46 | * **Version** - what version is affected (e.g. 0.1.2) 47 | * **Motivation for or Use Case** - explain what are you trying to do and why the current behavior is a bug for you 48 | * **Browsers and Operating System** - is this a problem with all browsers? 49 | * **Reproduce the Error** - provide a live example or a unambiguous set of steps 50 | * **Related Issues** - has a similar issue been reported before? 51 | * **Suggest a Fix** - if you can't fix the bug yourself, perhaps you can point to what might be 52 | causing the problem (line of code or commit) 53 | 54 | You can file new issues by providing the above information at the corresponding repository's issues link: https://github.com/[organization-name]/[repository-name]/issues/new]. 55 | 56 | ### Submitting a Pull Request (PR) 57 | Before you submit your Pull Request (PR) consider the following guidelines: 58 | 59 | * Search the repository (https://github.com/[organization-name]/[repository-name]/pulls) for an open or closed PR 60 | that relates to your submission. You don't want to duplicate effort. 
61 | 62 | * Make your changes in a new git fork: 63 | 64 | * Commit your changes using a descriptive commit message 65 | * Push your fork to GitHub: 66 | * In GitHub, create a pull request 67 | * If we suggest changes then: 68 | * Make the required updates. 69 | * Rebase your fork and force push to your GitHub repository (this will update your Pull Request): 70 | 71 | ```shell 72 | git rebase master -i 73 | git push -f 74 | ``` 75 | 76 | That's it! Thank you for your contribution! 77 | -------------------------------------------------------------------------------- /LICENSE.md: -------------------------------------------------------------------------------- 1 | MIT License 2 | 3 | Copyright (c) Microsoft Corporation. 4 | 5 | Permission is hereby granted, free of charge, to any person obtaining a copy 6 | of this software and associated documentation files (the "Software"), to deal 7 | in the Software without restriction, including without limitation the rights 8 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell 9 | copies of the Software, and to permit persons to whom the Software is 10 | furnished to do so, subject to the following conditions: 11 | 12 | The above copyright notice and this permission notice shall be included in all 13 | copies or substantial portions of the Software. 14 | 15 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 16 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 17 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE 18 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 19 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, 20 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE 21 | SOFTWARE -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | # Semantic Kernel Workshop 2 | 3 | A hands-on workshop exploring Microsoft's Semantic Kernel framework for building intelligent AI applications. This workshop provides practical experience with real-world AI application patterns using Python and Azure OpenAI. 4 | 5 | ## Workshop Overview 6 | 7 | This workshop takes you from foundational concepts to advanced implementation patterns through a series of Jupyter notebooks and practical examples. You'll learn how to: 8 | 9 | - Build AI applications using Microsoft's Semantic Kernel framework 10 | - Create and orchestrate AI agents with different capabilities and roles 11 | - Construct structured AI workflows using the Process Framework 12 | - Implement enterprise-ready AI features with security and scalability in mind 13 | 14 | ## Interactive Playground Demo 15 | 16 | Experience Semantic Kernel in action through our interactive playground! This visual demonstration allows you to directly engage with the core concepts covered in the workshop. 17 | 18 | ![Semantic Kernel Playground Demo](playground/assets/sk-playground.gif) 19 | 20 | The playground offers a hands-on environment where you can: 21 | - Test semantic functions in real-time 22 | - Explore agent capabilities and interactions 23 | - Experiment with memory and embeddings 24 | - Try out native plugin integration 25 | - See the Process Framework in action 26 | 27 | No need to wait until the end of the workshop - you can start exploring the playground at any time to reinforce concepts as you learn them! 
28 | 29 | For setup instructions and details on how to run the playground, refer to the [Playground README](playground/README.md). 30 | 31 | ## Prerequisites 32 | 33 | - Python 3.10 or higher 34 | - Azure OpenAI API access (API key, endpoint, and deployment name) 35 | - Basic knowledge of Python programming 36 | - Understanding of prompt engineering concepts (helpful but not required) 37 | - [UV package manager](https://docs.astral.sh/uv/getting-started/installation/) 38 | 39 | ### Local Dependencies Setup 40 | 41 | The project is managed by pyproject.toml and [uv package manager](https://docs.astral.sh/uv/getting-started/installation/). 42 | 43 | For local execution init the .venv environment using [uv package manager](https://docs.astral.sh/uv/getting-started/installation/): 44 | 45 | ```shell 46 | uv sync --prerelease=allow 47 | . ./.venv/bin/activate 48 | ``` 49 | >OBS! At the time of writing the workshop depends on the prerelease libraries. 50 | 51 | ## Getting Started 52 | 53 | 1. Clone this repository 54 | 55 | 1. Create a virtual environment: 56 | 57 | **Linux/macOS:** 58 | ```bash 59 | # Create a virtual environment 60 | python -m venv venv 61 | 62 | # Activate the virtual environment 63 | source venv/bin/activate 64 | ``` 65 | 66 | **Windows:** 67 | ```cmd 68 | # Create a virtual environment 69 | python -m venv venv 70 | 71 | # Activate the virtual environment 72 | venv\Scripts\activate 73 | ``` 74 | 75 | 1. Copy the environment variables template: 76 | ```bash 77 | cp .env.example .env 78 | ``` 79 | 80 | 1. Add your Azure OpenAI credentials to the `.env` file: 81 | ``` 82 | AZURE_OPENAI_ENDPOINT=https://xxxxxx.openai.azure.com/ 83 | AZURE_OPENAI_CHAT_DEPLOYMENT_NAME=gpt-4o 84 | AZURE_OPENAI_EMBEDDING_DEPLOYMENT_NAME=text-embedding-ada-002 85 | AZURE_OPENAI_API_KEY=xxxxxxxxxxx 86 | AZURE_OPENAI_API_VERSION=2025-03-01-preview 87 | ``` 88 | 89 | 1. 
Start with the first notebook: 90 | - Begin with `01-intro-to-semantic-kernel/01-intro.ipynb`, which includes instructions for installing Semantic Kernel and other required packages. 91 | 92 | 93 | ## Workshop Modules 94 | 95 | ### 01. Introduction to Semantic Kernel 96 | 97 | Learn the fundamentals of Semantic Kernel: 98 | - Core architectural components (Kernel, AI Services, Plugins) 99 | - Building semantic functions with prompts 100 | - Creating native functions with Python code 101 | - Enabling automatic function calling for AI agents 102 | 103 | **Key Notebooks:** 104 | - `01-intro.ipynb`: Core concepts, services, and function creation 105 | 106 | ### 02. Semantic Kernel Agents 107 | 108 | Master the creation and orchestration of AI agents: 109 | - Creating specialized agents with different personas 110 | - Implementing multi-agent communication patterns 111 | - Agent selection strategies and orchestration 112 | - Building agent topologies for complex scenarios 113 | - Integrating plugins with agents for enhanced capabilities 114 | 115 | **Key Notebooks:** 116 | - `02.1-agents.ipynb`: Creating and configuring agents 117 | - `02.2-agents-chats.ipynb`: Inter-agent communication and complex patterns 118 | 119 | ### 03. Semantic Kernel with MCP 120 | 121 | Learn to how to connect an SK Agent to MCP: 122 | - Running your MCP server 123 | - Using an Agent in Semantic Kernel to make calls to it 124 | 125 | **Key Notebooks:** 126 | - `03.1-sk-with-mcp.ipynb`: Semantic Kernel with MCP example 127 | 128 | ### 04. 
Process Framework 129 | 130 | Learn to build structured, event-driven AI workflows: 131 | - Understanding the Process Framework architecture 132 | - Defining events, steps, and state management 133 | - Building conversational AI systems with processes 134 | - Implementing complex business logic with AI capabilities 135 | - Creating maintainable and testable AI workflows 136 | 137 | **Key Notebooks:** 138 | - `04.1-intro-to-processes.ipynb`: Building stateful, event-driven AI processes 139 | 140 | ## Project Structure 141 | 142 | ``` 143 | semantic-kernel-workshop/ 144 | ├── 01-intro-to-semantic-kernel/ # Introduction to core concepts 145 | │ └── 01-intro.ipynb # Basic concepts and functions 146 | ├── 02-semantic-kernel-agents/ # Agent creation and orchestration 147 | │ ├── 02.1-single-agents.ipynb # Agent fundamentals 148 | │ ├── 02.2-agents-chats.ipynb # Multi-agent communication 149 | ├── 03-semantic-kernel-mcp/ # Using SK with MCP 150 | │ └── 03.1-sk-with-mcp.ipynb # SK + MCP example 151 | ├── 04-process-framework/ # Structured AI workflows 152 | │ └── 04.1-intro-to-processes.ipynb # Process fundamentals 153 | ├── playground/ # Interactive application 154 | │ ├── backend/ # FastAPI server 155 | │ ├── frontend/ # React application 156 | │ ├── start.sh # Launch script 157 | │ └── README.md # Playground documentation 158 | └── .env.example # Environment variables template 159 | ``` 160 | 161 | ## Learning Path 162 | 163 | For optimal learning, follow the repository's folders in numerical order. 
164 | 165 | ## Advanced Topics and Resources 166 | 167 | For advanced patterns and enterprise deployment scenarios, explore the [Semantic Kernel Advanced Usage](https://github.com/Azure-Samples/semantic-kernel-advanced-usage) repository, which includes: 168 | 169 | - Dapr integration for scalable, distributed systems 170 | - Authentication and security patterns 171 | - Natural language to SQL conversion 172 | - Copilot Studio integration 173 | - Microsoft Graph API integration 174 | - Production deployment architecture 175 | 176 | ## Additional Resources 177 | 178 | - [Semantic Kernel Documentation](https://learn.microsoft.com/en-us/semantic-kernel/overview/) 179 | - [Azure OpenAI Service](https://azure.microsoft.com/en-us/products/ai-services/openai-service/) 180 | - [Microsoft Copilot Studio](https://www.microsoft.com/en-us/microsoft-copilot/microsoft-copilot-studio) 181 | 182 | ## License 183 | 184 | This project is licensed under the MIT License - see the LICENSE file for details. 185 | -------------------------------------------------------------------------------- /playground/README.md: -------------------------------------------------------------------------------- 1 | # Semantic Kernel Interactive Demo 2 | 3 | An interactive web application showcasing Microsoft's Semantic Kernel capabilities including semantic memory, AI-powered functions, translation, and text summarization. 4 | 5 | ## Quick Start 6 | 7 | 1. Create a `.env` file with your Azure OpenAI credentials: 8 | ``` 9 | AZURE_OPENAI_DEPLOYMENT=your-deployment-name 10 | AZURE_OPENAI_API_KEY=your-api-key 11 | AZURE_OPENAI_ENDPOINT=your-endpoint 12 | AZURE_OPENAI_EMBEDDING_DEPLOYMENT=your-embedding-deployment-name 13 | ``` 14 | 15 | 2. Install dependencies: 16 | ``` 17 | cd frontend 18 | npm install --legacy-peer-deps 19 | ``` 20 | 21 | 3. 
Start the application: 22 | ``` 23 | ./start.sh 24 | ``` 25 | 26 | The script will launch both the backend server (http://localhost:8000) and frontend application (http://localhost:5173). Use Ctrl+C to stop both services. 27 | 28 | ## Features 29 | 30 | - **Semantic Memory**: Store and retrieve information using semantic search 31 | - **AI Functions**: Create and use AI-powered functions with natural language 32 | - **Translation**: Translate text between multiple languages 33 | - **Summarization**: Generate concise summaries of long texts 34 | - **Weather Plugin**: Example of native plugin integration 35 | 36 | ## Requirements 37 | 38 | - Node.js (v14+) 39 | - Python (v3.13+) 40 | - Azure OpenAI API credentials 41 | 42 | ## Project Structure 43 | 44 | - `frontend/`: React application with Material UI 45 | - `backend/`: FastAPI server with Semantic Kernel implementation 46 | - `start.sh`: Convenience script to run both services 47 | 48 | For detailed documentation and examples, visit the [Semantic Kernel Documentation](https://learn.microsoft.com/en-us/semantic-kernel/overview/) 49 | -------------------------------------------------------------------------------- /playground/assets/sk-playground.gif: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Azure-Samples/semantic-kernel-workshop/b65b791a04a79ef73a8cf8be94145fc58f5b33e8/playground/assets/sk-playground.gif -------------------------------------------------------------------------------- /playground/backend/.python-version: -------------------------------------------------------------------------------- 1 | 3.13 2 | -------------------------------------------------------------------------------- /playground/backend/README.md: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Azure-Samples/semantic-kernel-workshop/b65b791a04a79ef73a8cf8be94145fc58f5b33e8/playground/backend/README.md 
# ---- playground/backend/app/__init__.py ----
# This file is intentionally left empty to make the directory a Python package

# ---- playground/backend/app/api/__init__.py ----
# This file is intentionally left empty to make the directory a Python package

# ---- playground/backend/app/api/agents.py ----
import logging
import json
from fastapi import APIRouter, HTTPException
from app.models.api_models import AgentRequest, MultiAgentRequest
from app.core.kernel import create_kernel
from semantic_kernel.connectors.ai.open_ai import AzureChatPromptExecutionSettings
from semantic_kernel.contents.chat_history import ChatHistory
from semantic_kernel.connectors.ai.function_choice_behavior import (
    FunctionChoiceBehavior,
)
from semantic_kernel.agents import ChatCompletionAgent, AgentGroupChat
from semantic_kernel.agents.strategies import (
    SequentialSelectionStrategy,
    DefaultTerminationStrategy,
)
from semantic_kernel.contents import FunctionCallContent

# Configure logging
logger = logging.getLogger(__name__)

router = APIRouter(prefix="/agent", tags=["agents"])


def _parse_function_arguments(args):
    """Decode a FunctionCallContent.arguments payload into a dict.

    The SDK may surface arguments either as an already-parsed mapping or as a
    JSON string. A non-JSON string falls back to {"location": args} — this
    assumes a bare string is a weather-style location argument; TODO confirm
    against the plugins actually registered.
    """
    if isinstance(args, str):
        try:
            return json.loads(args)
        except json.JSONDecodeError:
            # Not valid JSON; treat the raw string as a single location value.
            return {"location": args}
    return args


def _extract_plugin_calls(chat_history):
    """Collect every plugin/function invocation recorded in the chat history.

    Returns a list of {plugin_name, function_name, parameters} dicts in the
    order the calls appear.
    """
    return [
        {
            "plugin_name": item.plugin_name,
            "function_name": item.function_name,
            "parameters": _parse_function_arguments(item.arguments),
        }
        for message in chat_history
        for item in message.items
        if isinstance(item, FunctionCallContent)
    ]


def _seed_chat_history(chat_history, previous_messages):
    """Replay prior user/assistant turns into a ChatHistory.

    Messages with any other role are ignored (matches original behavior).
    """
    for msg in previous_messages:
        role = msg["role"].lower()
        if role == "user":
            chat_history.add_user_message(msg["content"])
        elif role == "assistant":
            chat_history.add_assistant_message(msg["content"])


@router.post("/chat")
async def agent_chat(request: AgentRequest):
    """Run a single-agent chat turn and report any plugin calls made.

    Raises HTTPException(500) on any failure.
    """
    # Create a fresh kernel with the requested plugins
    kernel, _ = create_kernel(plugins=request.available_plugins)

    try:
        # Create a ChatCompletionAgent with the provided system prompt
        agent = ChatCompletionAgent(
            kernel=kernel, name="PlaygroundAgent", instructions=request.system_prompt
        )

        # Rebuild the conversation, then append the current user message.
        chat_history = ChatHistory()
        _seed_chat_history(chat_history, request.chat_history)
        chat_history.add_user_message(request.message)

        # Execution settings with automatic function calling enabled.
        execution_settings = AzureChatPromptExecutionSettings(
            service_id="chat",
            temperature=request.temperature,
            top_p=0.8,
            max_tokens=1000,
        )
        execution_settings.function_choice_behavior = FunctionChoiceBehavior.Auto()

        # Get the response from the agent
        response = await agent.get_response(
            messages=chat_history, execution_settings=execution_settings
        )

        # NOTE: only the latest user/assistant exchange is echoed back in
        # chat_history; the client is expected to maintain the full history.
        return {
            "response": response.content,
            "chat_history": [
                {"role": "user", "content": request.message},
                {"role": "assistant", "content": response.content},
            ],
            "plugin_calls": _extract_plugin_calls(chat_history),
        }
    except Exception as e:
        logger.error(f"Error in agent_chat: {str(e)}")
        raise HTTPException(status_code=500, detail=str(e))


@router.post("/multi-chat")
async def multi_agent_chat(request: MultiAgentRequest):
    """Run a round-robin multi-agent group chat and report plugin calls.

    Falls back to a default four-persona panel when no agent configs are
    provided. Raises HTTPException(500) on any failure.
    """
    # Create a fresh kernel with the requested plugins
    kernel, _ = create_kernel(plugins=request.available_plugins)

    try:
        # Build agents from the provided configurations.
        agents = [
            ChatCompletionAgent(
                kernel=kernel,
                name=agent_config.get("name", "Agent"),
                instructions=agent_config.get(
                    "instructions", "You are a helpful assistant."
                ),
            )
            for agent_config in request.agent_configs
        ]

        # If no agents were provided, create default agents with different
        # perspectives.
        if not agents:
            default_personas = [
                (
                    "Researcher",
                    "You are a fact-based researcher who provides accurate and concise information. Always stick to verified facts and cite sources when possible. Keep your responses very concise, clear and straightforward.",
                ),
                (
                    "Innovator",
                    "You are a creative thinker who generates novel ideas and perspectives. Offer innovative approaches and unique ideas. Feel free to brainstorm and suggest creative solutions. Keep your responses very concise, imaginative and engaging.",
                ),
                (
                    "Critic",
                    "You are a thoughtful critic who evaluates ideas and identifies potential issues. Analyze the strengths and weaknesses of proposals and suggest improvements. Be constructive in your criticism. Keep your responses very concise, clear and straightforward.",
                ),
                (
                    "Synthesizer",
                    "You are a skilled synthesizer who integrates diverse perspectives into coherent conclusions. Identify common themes across different viewpoints and create a balanced, integrated perspective. Keep your responses very concise, clear and straightforward.",
                ),
            ]
            agents = [
                ChatCompletionAgent(kernel=kernel, name=name, instructions=instructions)
                for name, instructions in default_personas
            ]

        # Round-robin speaker selection; stop after max_iterations turns.
        group_chat = AgentGroupChat(
            agents=agents,
            selection_strategy=SequentialSelectionStrategy(),
            termination_strategy=DefaultTerminationStrategy(
                maximum_iterations=request.max_iterations
            ),
        )

        # Rebuild the conversation on a fresh history owned by the group chat.
        chat_history = ChatHistory()
        group_chat.history = chat_history
        _seed_chat_history(chat_history, request.chat_history)

        # Add the current user message
        await group_chat.add_chat_message(message=request.message)

        # Execution settings with automatic function calling enabled.
        execution_settings = AzureChatPromptExecutionSettings(
            service_id="chat",
            temperature=request.temperature,
            top_p=0.8,
            max_tokens=1000,
        )
        execution_settings.function_choice_behavior = FunctionChoiceBehavior.Auto()

        # Track agent responses; is_new marks a change of speaker.
        agent_responses = []
        current_agent = None

        try:
            async for response in group_chat.invoke():
                if response is not None and response.name:
                    agent_responses.append(
                        {
                            "agent_name": response.name,
                            "content": response.content,
                            "is_new": current_agent != response.name,
                        }
                    )
                    current_agent = response.name
        except Exception as e:
            logger.error(f"Error during group chat invocation: {str(e)}")
            raise HTTPException(
                status_code=500, detail=f"Error during group chat invocation: {str(e)}"
            )

        # Reset is_complete to allow for further conversations
        group_chat.is_complete = False

        # Return the agent responses along with the updated chat history and
        # plugin calls.
        return {
            "agent_responses": agent_responses,
            "chat_history": [{"role": "user", "content": request.message}]
            + [
                {
                    "role": "assistant",
                    "content": resp["content"],
                    "agent_name": resp["agent_name"],
                }
                for resp in agent_responses
            ],
            "plugin_calls": _extract_plugin_calls(chat_history),
        }
    except Exception as e:
        logger.error(f"Error in multi_agent_chat: {str(e)}")
        raise HTTPException(status_code=500, detail=str(e))
# ---- playground/backend/app/api/filters.py ----
import logging
from io import StringIO
from fastapi import APIRouter, HTTPException
from app.models.api_models import FilterRequest
from app.core.kernel import create_kernel
from app.filters.content_filters import ContentFilter, input_filter_fn, output_filter_fn
from semantic_kernel.functions import kernel_function

# Configure logging.
# NOTE(review): basicConfig at import time configures the root logger for the
# whole process; kept as-is because the endpoint's per-request log capture
# relies on DEBUG-level records being emitted.
logging.basicConfig(level=logging.DEBUG)
logger = logging.getLogger(__name__)

router = APIRouter(prefix="/filters", tags=["filters"])

# Run a test of our regex patterns to verify they work
logger.info("Initializing filters API and testing regex patterns...")
content_filter = ContentFilter()
test_results = content_filter.test_patterns()
logger.info(f"Pattern test results: {test_results}")


@router.post("/process")
async def process_with_filters(request: FilterRequest):
    """Echo the input through the kernel while scanning both input and output
    for sensitive information, returning the result plus detection
    diagnostics and the captured logs.

    Raises HTTPException(500) on any failure.
    """
    try:
        kernel, _ = create_kernel()

        # Per-request log capture: attach a handler to the root logger so all
        # records emitted while processing can be returned to the client.
        logs = ""
        log_capture = StringIO()
        log_handler = logging.StreamHandler(log_capture)
        log_handler.setFormatter(
            logging.Formatter("%(asctime)s - %(levelname)s - %(message)s")
        )
        log_handler.setLevel(logging.DEBUG)
        root_logger = logging.getLogger()
        root_logger.addHandler(log_handler)

        try:
            # Directly log input for debugging
            logger.warning(
                f"PROCESSING INPUT: {request.text[:50]}{'...' if len(request.text) > 50 else ''}"
            )

            # Process with the content filter
            content_filter = ContentFilter()

            # Scan the raw input for sensitive information. The redacted text
            # is computed but only the detection metadata is surfaced.
            logger.warning(f"Testing input directly with ContentFilter")
            filtered_input, input_detections = content_filter.redact_sensitive_info(
                request.text
            )

            if input_detections:
                logger.warning(
                    f"⚠️ DETECTION ALERT: Found {len(input_detections)} sensitive items in input"
                )
                for detection in input_detections:
                    logger.warning(f"🔍 Input Detection: {detection}")
                input_processing = f"Detected {len(input_detections)} instances of sensitive information in the input."
            else:
                logger.warning(
                    "✅ SECURITY CHECK: No sensitive information found in input"
                )
                input_processing = "No sensitive information detected in the input."
                input_detections = []

            # Create a simple semantic function with a prompt template
            prompt_template = "{{$input}}"  # Simple echo prompt

            echo_fn = kernel.add_function(
                prompt=prompt_template,
                function_name="echo",
                plugin_name="TestPlugin",
            )

            # Process with kernel
            result = await kernel.invoke(echo_fn, input=request.text)

            # Get result content (in v0.9.1b1 FunctionResult is just the string content)
            result_content = str(result)

            # Scan the model output the same way.
            # NOTE(review): the UNREDACTED result_content is what gets
            # returned below — presumably intentional for the demo so the UI
            # can highlight detections; confirm.
            filtered_output, output_detections = content_filter.redact_sensitive_info(
                result_content
            )

            if output_detections:
                logger.warning(
                    f"⚠️ DETECTION ALERT: Found {len(output_detections)} sensitive items in output"
                )
                for detection in output_detections:
                    logger.warning(f"🔍 Output Detection: {detection}")
                output_processing = f"Detected {len(output_detections)} instances of sensitive information in the output."
            else:
                logger.warning(
                    "✅ SECURITY CHECK: No sensitive information found in output"
                )
                output_processing = "No sensitive information detected in the output."
                output_detections = []

            # (Removed dead code: both detection lists are always bound on
            # every path by this point, so the 'in locals()' re-binding that
            # used to live here could never do anything.)

            # Everything succeeded, now capture logs
            logs = log_capture.getvalue()

            return {
                "result": result_content,
                "debug": {
                    "input_processing": input_processing,
                    "output_processing": output_processing,
                    "logs": logs,
                    "log_count": len(logs.split("\n")) if logs else 0,
                    "input_detections": input_detections,
                    "output_detections": output_detections,
                },
            }

        except Exception as e:
            logger.error(f"Error in process_with_filters: {str(e)}")
            raise HTTPException(status_code=500, detail=str(e))

        finally:
            # Always clean up the log handler
            if log_handler in root_logger.handlers:
                root_logger.removeHandler(log_handler)
            if log_capture and not log_capture.closed:
                log_capture.close()

    except Exception as e:
        logger.error(f"Error in process_with_filters: {str(e)}")
        raise HTTPException(status_code=500, detail=str(e))


# ---- playground/backend/app/api/functions.py ----
import logging
from fastapi import APIRouter, HTTPException
from app.models.api_models import FunctionInput, TranslationRequest, SummarizeRequest
from app.core.kernel import create_kernel

# Configure logging
logger = logging.getLogger(__name__)

router = APIRouter(tags=["functions"])
@router.post("/functions/semantic")
async def invoke_semantic_function(data: FunctionInput):
    """Register the supplied prompt as a one-off semantic function and
    invoke it with the given input text and template parameters.

    Raises HTTPException(500) on any failure.
    """
    kernel, _ = create_kernel()
    try:
        # Create a semantic function from the request's prompt template.
        function = kernel.add_function(
            prompt=data.prompt,
            function_name=data.function_name,
            plugin_name=data.plugin_name,
            max_tokens=500,
        )

        # Extra template variables, if any were supplied.
        parameters = data.parameters or {}

        # Invoke the function
        result = await kernel.invoke(function, input=data.input_text, **parameters)

        return {"result": str(result)}
    except Exception as e:
        logger.error(f"Error in invoke_semantic_function: {str(e)}")
        raise HTTPException(status_code=500, detail=str(e))


@router.post("/translate")
async def translate_text(request: TranslationRequest):
    """Translate request.text into request.target_language.

    Raises HTTPException(500) on any failure.
    """
    kernel, _ = create_kernel()
    try:
        # Define a translation function
        translate_prompt = """
        {{$input}}\n\nTranslate this into {{$target_language}}:"""

        translate_fn = kernel.add_function(
            prompt=translate_prompt,
            function_name="translator",
            plugin_name="Translator",
            max_tokens=500,
        )

        # Invoke the translation function
        result = await kernel.invoke(
            translate_fn, input=request.text, target_language=request.target_language
        )

        return {"translated_text": str(result)}
    except Exception as e:
        logger.error(f"Error in translate_text: {str(e)}")
        raise HTTPException(status_code=500, detail=str(e))


@router.post("/summarize")
async def summarize_text(request: SummarizeRequest):
    """Produce a one-sentence TL;DR summary of request.text.

    Raises HTTPException(500) on any failure.
    """
    kernel, _ = create_kernel()
    try:
        # Define a summarization function
        summarize_prompt = """
        {{$input}}\n\nTL;DR in one sentence:"""

        summarize_fn = kernel.add_function(
            prompt=summarize_prompt,
            function_name="tldr",
            plugin_name="Summarizer",
            max_tokens=100,
        )

        # Invoke the summarization function
        result = await kernel.invoke(summarize_fn, input=request.text)

        return {"summary": str(result)}
    except Exception as e:
        logger.error(f"Error in summarize_text: {str(e)}")
        raise HTTPException(status_code=500, detail=str(e))


# ---- playground/backend/app/api/kernel.py ----
import logging
from fastapi import APIRouter, HTTPException
from app.models.api_models import KernelResetRequest
from app.core.kernel import create_kernel, reset_memory

# Configure logging
logger = logging.getLogger(__name__)

router = APIRouter(prefix="/kernel", tags=["kernel"])


@router.post("/reset")
async def reset_kernel(request: KernelResetRequest):
    """Recreate the kernel and, when requested, clear and reinitialize
    semantic memory.

    Raises HTTPException(500) on any failure.
    """
    try:
        # Create a fresh kernel instance
        _, _ = create_kernel()

        # Clear memory if requested
        if request.clear_memory:
            await reset_memory()
            # Flip the lazy-init flag on the memory module so subsequent
            # requests see memory as already initialized. (The old by-name
            # import of memory_initialized was unused — only the module
            # attribute assignment has any effect.)
            try:
                import app.api.memory as memory_module

                memory_module.memory_initialized = True
                logger.info("Memory reset and reinitialized")
            except ImportError:
                logger.warning("Could not update memory_initialized flag")

        return {
            "status": "success",
            "message": "Kernel reset successfully",
            "memory_cleared": request.clear_memory,
        }
    except Exception as e:
        logger.error(f"Error in reset_kernel: {str(e)}")
        raise HTTPException(status_code=500, detail=str(e))


# ---- playground/backend/app/api/memory.py ----
import logging
from fastapi import APIRouter, HTTPException
from app.models.api_models import MemoryItem, SearchQuery
from app.core.kernel import (
    create_kernel,
    FINANCE_COLLECTION,
    PERSONAL_COLLECTION,
    WEATHER_COLLECTION,
    initialize_memory,
)

# Configure logging
logger = logging.getLogger(__name__)

router = APIRouter(prefix="/memory", tags=["memory"])

# Flag to track if the sample data has been seeded into the memory store.
memory_initialized = False


async def _ensure_memory_initialized() -> None:
    """Lazily seed the memory store with sample data on first use.

    /memory/search and /memory/collections both need the predefined
    sample collections to exist, so they share this guard; /memory/add
    does not, because it writes its own data.  The module-level flag
    prevents re-seeding on every request.
    """
    global memory_initialized
    if not memory_initialized:
        await initialize_memory()
        memory_initialized = True
        logger.info("Memory initialized on first access")


@router.post("/add")
async def add_to_memory(item: MemoryItem):
    """Save a single text item into the given memory collection.

    Returns a status payload; `synthesized_response` / `critique` are
    unused here but the frontend expects them on every memory response.
    """
    _, memory_instance = create_kernel()
    try:
        await memory_instance.save_information(
            collection=item.collection, id=item.id, text=item.text
        )
        return {
            "status": "success",
            "message": f"Added item {item.id} to collection {item.collection}",
            "synthesized_response": "",
            "critique": "",
        }
    except Exception as e:
        logger.error(f"Error in add_to_memory: {str(e)}")
        raise HTTPException(status_code=500, detail=str(e))


@router.post("/search")
async def search_memory(query: SearchQuery):
    """Semantic search over one collection; returns scored matches."""
    # Ensure sample data exists before searching.
    await _ensure_memory_initialized()

    _, memory_instance = create_kernel()
    try:
        results = await memory_instance.search(
            collection=query.collection, query=query.query, limit=query.limit
        )

        # Shape the results the way the frontend expects.
        formatted_results = [
            {"id": r.id, "text": r.text, "relevance": r.relevance} for r in results
        ]

        # Empty synthesized_response / critique fields keep the payload
        # shape consistent with the other memory endpoints.
        return {
            "results": formatted_results,
            "synthesized_response": "",
            "critique": "",
        }
    except Exception as e:
        logger.error(f"Error in search_memory: {str(e)}")
        raise HTTPException(status_code=500, detail=str(e))


@router.get("/collections")
async def get_collections():
    """List the predefined collections, seeding sample data if needed."""
    try:
        await _ensure_memory_initialized()

        # Return the predefined collections
        return {
            "collections": [
                FINANCE_COLLECTION,
                PERSONAL_COLLECTION,
                WEATHER_COLLECTION,
            ],
            "status": "success",
        }
    except Exception as e:
        logger.error(f"Error in get_collections: {str(e)}")
        raise HTTPException(status_code=500, detail=str(e))
# --- playground/backend/app/api/weather.py ---
import logging
import json
from fastapi import APIRouter, HTTPException
from app.models.api_models import WeatherRequest
from app.core.kernel import create_kernel
from semantic_kernel.connectors.ai.open_ai import AzureChatPromptExecutionSettings
from semantic_kernel.contents.chat_history import ChatHistory
from semantic_kernel.connectors.ai.function_choice_behavior import (
    FunctionChoiceBehavior,
)
from semantic_kernel.agents import ChatCompletionAgent
from semantic_kernel.contents import FunctionCallContent

# Configure logging
logger = logging.getLogger(__name__)

router = APIRouter(tags=["weather"])


def _coerce_args(raw) -> dict:
    """Normalize a FunctionCallContent.arguments value into a kwargs dict.

    The model may return arguments as a JSON string; if it is not valid
    JSON, fall back to treating the whole string as the location (the
    common single-argument case for the Weather plugin).
    """
    if isinstance(raw, str):
        try:
            return json.loads(raw)
        except json.JSONDecodeError:
            # Was a bare `except:` — narrowed so real errors propagate.
            return {"location": raw}
    return dict(raw) if raw else {}


@router.post("/weather")
async def get_weather(request: WeatherRequest):
    """Answer a free-text weather query with an agent + Weather plugin.

    Returns the assistant's text plus structured current-conditions,
    forecast, and alert sections derived from the function calls the
    agent made during the chat.
    """
    kernel, _ = create_kernel()
    try:
        # Register the Weather plugin on this request's kernel.
        from app.plugins.weather import WeatherPlugin

        weather_plugin = WeatherPlugin()
        kernel.add_plugin(weather_plugin, "Weather")

        # Create a system message for the chat
        system_message = """
        You are a helpful weather assistant. When asked about weather, use the Weather plugin to get accurate information.
        For weather queries, first determine the location, then call the appropriate weather functions to get the data.
        Always use get_current_weather for current conditions, get_forecast for future predictions, and get_weather_alert for any warnings."""

        # Create a chat completion agent
        agent = ChatCompletionAgent(
            kernel=kernel, name="WeatherAgent", instructions=system_message
        )

        # Create a chat history with the user query
        chat_history = ChatHistory()
        chat_history.add_user_message(request.query)

        # Set up execution settings for automatic function calling
        execution_settings = AzureChatPromptExecutionSettings()
        execution_settings.function_choice_behavior = FunctionChoiceBehavior.Auto()

        # Get response from the agent
        response = await agent.get_response(
            messages=chat_history, execution_settings=execution_settings
        )

        # Track function calls and re-run them so the endpoint can return
        # structured data alongside the chat answer.
        # NOTE(review): this re-invokes functions that auto function
        # calling already executed; with this simulated (random) plugin
        # the second call may return different values than the agent saw
        # — confirm whether that is acceptable for the demo.
        function_calls = []
        current_weather = None
        forecast = None
        alerts = None

        for message in chat_history:
            for item in message.items:
                if not isinstance(item, FunctionCallContent):
                    continue

                function_calls.append(
                    {
                        "plugin_name": item.plugin_name,
                        "function_name": item.function_name,
                        "parameters": item.arguments,
                    }
                )

                args = _coerce_args(item.arguments)

                if item.function_name == "get_current_weather":
                    current_weather = await kernel.invoke(
                        kernel.plugins["Weather"]["get_current_weather"], **args
                    )
                    # kernel.invoke returns a FunctionResult wrapper.
                    if not isinstance(current_weather, dict):
                        current_weather = current_weather.value

                elif item.function_name == "get_forecast":
                    # Default to 3 days if not specified
                    args.setdefault("days", 3)
                    forecast = await kernel.invoke(
                        kernel.plugins["Weather"]["get_forecast"], **args
                    )
                    if not isinstance(forecast, list):
                        forecast = forecast.value

                elif item.function_name == "get_weather_alert":
                    alerts = await kernel.invoke(
                        kernel.plugins["Weather"]["get_weather_alert"], **args
                    )
                    if not isinstance(alerts, dict):
                        alerts = alerts.value

        # Prepare response
        result = {"assistant_response": str(response), "function_calls": function_calls}

        # Add weather data if available
        if current_weather:
            result["current_weather"] = (
                f"Location: {current_weather['location']}\n"
                f"Temperature: {current_weather['temperature']}°F\n"
                f"Condition: {current_weather['condition']}\n"
                f"Humidity: {current_weather['humidity']}%\n"
                f"Wind Speed: {current_weather['wind_speed']} mph"
            )

        if forecast:
            # Format the multi-day forecast as a readable block.
            forecast_str = ""
            for day_forecast in forecast:
                forecast_str += f"Day {day_forecast['day']}:\n"
                forecast_str += f"  Temperature: {day_forecast['temperature']}°F\n"
                forecast_str += f"  Condition: {day_forecast['condition']}\n"
                forecast_str += f"  Humidity: {day_forecast['humidity']}%\n"
                forecast_str += f"  Wind Speed: {day_forecast['wind_speed']} mph\n\n"
            result["forecast"] = forecast_str.strip()

        if alerts:
            if alerts["has_alert"]:
                result["alerts"] = (
                    f"ALERT for {alerts['location']}: {alerts['alert_message']}"
                )
            else:
                result["alerts"] = f"No active weather alerts for {alerts['location']}."

        return result
    except Exception as e:
        logger.error(f"Error in weather endpoint: {str(e)}")
        raise HTTPException(status_code=500, detail=str(e))
# --- playground/backend/app/core/kernel.py ---
import os
import logging
from typing import Tuple, List, Optional
import semantic_kernel as sk
from semantic_kernel.connectors.ai.open_ai.services.azure_chat_completion import (
    AzureChatCompletion,
)
from semantic_kernel.connectors.ai.open_ai.services.azure_text_embedding import (
    AzureTextEmbedding,
)
from semantic_kernel.memory.semantic_text_memory import SemanticTextMemory
from semantic_kernel.memory.volatile_memory_store import VolatileMemoryStore
from semantic_kernel.core_plugins.text_memory_plugin import TextMemoryPlugin
from dotenv import load_dotenv
import time
from semantic_kernel.filters import FunctionInvocationContext
from typing import Callable, Awaitable

# Load environment variables
load_dotenv("../../.env", override=True)

# Configure logging
logging.basicConfig(
    level=logging.INFO, format="%(asctime)s - %(levelname)s - %(message)s"
)
logger = logging.getLogger(__name__)

# Get Azure OpenAI credentials
deployment_name = os.getenv("AZURE_OPENAI_CHAT_DEPLOYMENT_NAME")
api_key = os.getenv("AZURE_OPENAI_API_KEY")
base_url = os.getenv("AZURE_OPENAI_ENDPOINT")
embedding_deployment = os.getenv(
    "AZURE_OPENAI_EMBEDDING_DEPLOYMENT_NAME", "text-embedding-ada-002"
)

# Initialize memory store (module-level so every kernel shares it;
# reset_memory() rebinds it to wipe all data).
memory_store = VolatileMemoryStore()

# Sample collections
FINANCE_COLLECTION = "finance"
PERSONAL_COLLECTION = "personal"
WEATHER_COLLECTION = "weather"

# Sample data seeded by initialize_memory(): collection -> {id: text}.
_SAMPLE_MEMORIES = {
    FINANCE_COLLECTION: {
        "budget": "Your budget for 2024 is $100,000",
        "savings": "Your savings from 2023 are $50,000",
        "investments": "Your investments are $80,000",
    },
    PERSONAL_COLLECTION: {
        "fact1": "John was born in Seattle in 1980",
        "fact2": "John graduated from University of Washington in 2002",
        "fact3": "John has two children named Alex and Sam",
    },
    WEATHER_COLLECTION: {
        "fact1": "The weather in New York is typically hot and humid in summer",
        "fact2": "London often experiences rain throughout the year",
        "fact3": "Tokyo has a rainy season in June and July",
    },
}


# Add filter for function invocation logging
async def logger_filter(
    context: FunctionInvocationContext,
    next: Callable[[FunctionInvocationContext], Awaitable[None]],
) -> None:
    """
    Filter function that logs function invocations and their duration.
    """
    logger.info(
        f"FunctionInvoking - {context.function.plugin_name}.{context.function.name}"
    )

    start_time = time.time()
    await next(context)
    duration = time.time() - start_time

    logger.info(
        f"FunctionInvoked - {context.function.plugin_name}.{context.function.name} ({duration:.3f}s)"
    )


def create_kernel(
    plugins: Optional[List[str]] = None,
) -> Tuple[sk.Kernel, SemanticTextMemory]:
    """
    Create a fresh kernel instance with the necessary services and plugins.

    Args:
        plugins (list, optional): List of plugin names to add to the kernel. Defaults to None.

    Returns:
        Tuple[Kernel, SemanticTextMemory]: A new kernel instance and memory instance.
    """
    # Create a new kernel instance
    kernel = sk.Kernel()

    # Remove any existing services (just to be safe)
    kernel.remove_all_services()

    # Add chat completion service
    chat_completion = AzureChatCompletion(
        endpoint=base_url,
        deployment_name=deployment_name,
        api_key=api_key,
        service_id="chat",
    )
    kernel.add_service(chat_completion)

    # Add embedding service
    embedding_service = AzureTextEmbedding(
        endpoint=base_url,
        deployment_name=embedding_deployment,
        api_key=api_key,
        service_id="embeddings",
    )
    kernel.add_service(embedding_service)

    # Create memory instance backed by the shared module-level store.
    memory = SemanticTextMemory(
        storage=memory_store, embeddings_generator=embedding_service
    )

    # Add TextMemoryPlugin to the kernel
    kernel.add_plugin(TextMemoryPlugin(memory), "TextMemoryPlugin")

    # Add the logger filter
    kernel.add_filter("function_invocation", logger_filter)

    # Import plugins here to avoid circular imports
    if plugins:
        from app.plugins.weather import WeatherPlugin

        if "Weather" in plugins:
            weather_plugin = WeatherPlugin()
            kernel.add_plugin(weather_plugin, plugin_name="Weather")
        # Add more plugin options here as they become available

    return kernel, memory


async def initialize_memory():
    """
    Initialize memory with sample data.

    Seeds every collection listed in _SAMPLE_MEMORIES; replaces the
    previous nine copy-pasted save_information calls with one loop.
    """
    # Create a memory instance for initial data
    _, memory_instance = create_kernel()

    for collection, items in _SAMPLE_MEMORIES.items():
        for item_id, text in items.items():
            await memory_instance.save_information(
                collection=collection, id=item_id, text=text
            )


async def reset_memory() -> None:
    """
    Reset the memory store and reinitialize with sample data.
    """
    global memory_store
    memory_store = VolatileMemoryStore()
    await initialize_memory()


# --- playground/backend/app/filters/__init__.py ---
# This file is intentionally left empty to make the directory a Python package
# --- playground/backend/app/filters/content_filters.py ---
import re
import logging
from typing import List, Dict, Tuple, Callable, Awaitable, Any
from semantic_kernel.filters import FunctionInvocationContext
from semantic_kernel.functions import FunctionResult

# Configure logging
logger = logging.getLogger(__name__)

# Regular expressions for sensitive data patterns
PATTERNS = {
    # Credit card format: XXXX-XXXX-XXXX-XXXX
    'credit_card': r'\b(?:\d{4}[-\s]?){3}\d{4}\b',
    # Email addresses.  BUGFIX: the TLD class was [A-Z|a-z]{2,}, which
    # also matched a literal '|' inside the top-level domain.
    'email': r'\b[A-Za-z0-9._%+-]+@[A-Za-z0-9.-]+\.[A-Za-z]{2,}\b',
    # Phone numbers
    'phone': r'\b(?:\+\d{1,3}[-\s]?)?\(?\d{3}\)?[-\s]?\d{3}[-\s]?\d{4}\b',
    'ssn': r'\b\d{3}[-\s]?\d{2}[-\s]?\d{4}\b',  # Social Security Numbers (US)
}


class ContentFilter:
    """Detects and redacts sensitive data (PII) in text via regexes."""

    def __init__(self, patterns=PATTERNS):
        self.patterns = patterns

    def redact_sensitive_info(self, text: str) -> tuple[str, list[str]]:
        """Redact sensitive information from text and return detected items.

        Returns (redacted_text, ["pattern_name: matched_value", ...]).
        Empty/None input is returned unchanged with no detections.
        """
        if not text:
            return text, []

        result = text
        detected = []

        logger.debug(
            f"Checking text for sensitive info: {text[:50]}{'...' if len(text) > 50 else ''}")

        for pattern_name, pattern in self.patterns.items():
            matches = re.finditer(pattern, result)
            match_found = False

            for match in matches:
                match_found = True
                match_value = match.group()
                detected.append(f"{pattern_name}: {match_value}")
                # str.replace rebinds `result`; finditer keeps scanning
                # the string it was originally given, so this is safe.
                result = result.replace(
                    match_value, f"[REDACTED {pattern_name.upper()}]")

            if match_found:
                logger.debug(f"Found {pattern_name} pattern match in text")

        if detected:
            logger.debug(
                f"Detected {len(detected)} instances of sensitive information")

        return result, detected

    def test_patterns(self):
        """Test function to verify regex patterns work correctly."""
        logger.info("Testing regex patterns...")
        test_inputs = {
            "credit_card": "4111-1111-1111-1111",
            "email": "john.doe@example.com",
            "phone": "(555) 123-4567",
            "ssn": "123-45-6789"
        }

        for pattern_name, test_input in test_inputs.items():
            logger.info(
                f"Testing pattern '{pattern_name}' with input '{test_input}'")

            pattern = self.patterns[pattern_name]
            match = re.search(pattern, test_input)

            if match:
                logger.info(
                    f"✅ Pattern '{pattern_name}' matched input '{test_input}'")
            else:
                logger.error(
                    f"❌ Pattern '{pattern_name}' failed to match input '{test_input}'")

        # Also test a combined string
        combined = "Credit card: 4111-1111-1111-1111, email: john.doe@example.com"
        logger.info(f"Testing combined input: '{combined}'")
        result, detected = self.redact_sensitive_info(combined)
        logger.info(f"Combined detection result: {detected}")
        logger.info(f"Redacted text: {result}")

        return detected


# Input filter function for semantic kernel
async def input_filter_fn(
    context: FunctionInvocationContext,
    next: Callable[[FunctionInvocationContext], Awaitable[None]],
) -> None:
    """
    Filter function that detects and redacts sensitive information from function inputs.
    This demonstrates pre-processing in the Semantic Kernel pipeline.
    """
    content_filter = ContentFilter()

    # Check if there's an input parameter
    if "input" in context.arguments:
        original_input = context.arguments["input"]

        # Apply the filter
        filtered_input, detected = content_filter.redact_sensitive_info(
            original_input)

        if detected:
            # Make sure the log message is obvious
            logger.warning(
                f"SENSITIVE INFORMATION IN INPUT DETECTED: {', '.join(detected)}")
            # For compatibility with different log message formats
            logger.warning(
                f"Sensitive information detected in input: {', '.join(detected)}")
            logger.info(f"Input Filter - Detected: {', '.join(detected)}")

            # Replace the original input with the filtered version
            context.arguments["input"] = filtered_input

    # Continue to the next filter or function
    await next(context)


# Output filter function for semantic kernel
async def output_filter_fn(
    context: FunctionInvocationContext,
    next: Callable[[FunctionInvocationContext], Awaitable[None]],
) -> None:
    """
    Filter function that processes function outputs.
    This demonstrates post-processing in the Semantic Kernel pipeline.
    """
    # First, continue to the next filter or execute the function
    await next(context)

    # Process the output if it exists
    if context.result:
        content_filter = ContentFilter()
        original_output = str(context.result)

        # Apply the filter
        filtered_output, detected = content_filter.redact_sensitive_info(
            original_output)

        if detected:
            # Make sure the log message is obvious
            logger.warning(
                f"SENSITIVE INFORMATION IN OUTPUT DETECTED: {', '.join(detected)}")
            # For compatibility with different log message formats
            logger.warning(
                f"Sensitive information detected in output: {', '.join(detected)}")
            logger.info(f"Output Filter - Detected: {', '.join(detected)}")

            # Create a new FunctionResult with the filtered output
            context.result = FunctionResult(
                function=context.function.metadata,
                value=filtered_output,
                metadata=context.result.metadata if hasattr(
                    context.result, 'metadata') else {}
            )


# --- playground/backend/app/main.py ---
import logging as _logging
from fastapi import FastAPI
from fastapi.middleware.cors import CORSMiddleware
from app.api import memory, functions, weather, agents, filters, kernel, process

# Configure logging
_logging.basicConfig(
    level=_logging.INFO, format="%(asctime)s - %(levelname)s - %(message)s"
)
app_logger = _logging.getLogger("app.main")

app = FastAPI(title="Semantic Kernel Demo API")

# Configure CORS
app.add_middleware(
    CORSMiddleware,
    allow_origins=["*"],  # In production, replace with specific origins
    allow_credentials=True,
    allow_methods=["*"],
    allow_headers=["*"],
)

# Include routers
app.include_router(memory.router)
app.include_router(functions.router)
app.include_router(weather.router)
app.include_router(agents.router)
app.include_router(filters.router)
app.include_router(kernel.router)
app.include_router(process.router)


# Root endpoint
@app.get("/")
async def root():
    return {"message": "Semantic Kernel Demo API is running"}


# Note: Memory initialization is now done on-demand when accessing memory endpoints

if __name__ == "__main__":
    import uvicorn

    uvicorn.run("app.main:app", host="0.0.0.0", port=8000, reload=True)
# --- playground/backend/app/models/__init__.py ---
# This file is intentionally left empty to make the directory a Python package

# --- playground/backend/app/models/api_models.py ---
from typing import List, Dict, Optional
from pydantic import BaseModel


class MemoryItem(BaseModel):
    """A single item to store in semantic memory."""
    id: str
    text: str
    collection: str


class SearchQuery(BaseModel):
    """Parameters for a semantic search over one collection."""
    collection: str
    query: str
    limit: int = 5


class FunctionInput(BaseModel):
    """Request to run an ad-hoc semantic function from a prompt."""
    function_name: str
    plugin_name: str
    prompt: str
    input_text: str
    parameters: Optional[Dict[str, str]] = None


class AgentRequest(BaseModel):
    """Single-agent chat request with optional plugins and history."""
    message: str
    system_prompt: str = (
        "You are a helpful assistant that provides concise and accurate information."
    )
    temperature: float = 0.7
    available_plugins: List[str] = []
    chat_history: List[Dict[str, str]] = []


class MultiAgentRequest(BaseModel):
    """Multi-agent chat request; each agent_config describes one agent."""
    message: str
    system_prompt: str = (
        "You are a helpful assistant that provides concise and accurate information."
    )
    temperature: float = 0.7
    available_plugins: List[str] = []
    chat_history: List[Dict[str, str]] = []
    agent_configs: List[Dict[str, str]] = []
    max_iterations: int = 8


class TranslationRequest(BaseModel):
    """Text plus the language it should be translated into."""
    text: str
    target_language: str


class WeatherRequest(BaseModel):
    """Free-text weather question."""
    query: str  # Changed from city to query to handle free text


class SummarizeRequest(BaseModel):
    """Text to summarize."""
    text: str


class FilterRequest(BaseModel):
    """Text plus toggles for each content filter to apply."""
    text: str
    filters: Dict[str, bool] = {"pii": True, "profanity": True, "logging": True}


class KernelResetRequest(BaseModel):
    """Kernel reset options; optionally wipe the memory store too."""
    clear_memory: bool = False


# New models for the Process Framework


class ChatProcessRequest(BaseModel):
    """Message sent into an existing chat process."""
    message: str = ""  # Used for sending messages in an existing chat


class ChatResponse(BaseModel):
    """Response from a chat process, including its full history."""
    process_id: str
    response: str
    chat_history: List[Dict[str, str]] = []


# New models for the Content Creation Process


class ContentProcessRequest(BaseModel):
    """Kick off content creation for a topic."""
    topic: str  # Topic for content creation


class ContentResponse(BaseModel):
    """Progress/result of a content-creation process."""
    process_id: str
    status: str  # processing, generating, revising, completed
    topic: str
    content: str
    review: str


# --- playground/backend/app/plugins/__init__.py ---
# This file is intentionally left empty to make the directory a Python package
# --- playground/backend/app/plugins/weather.py ---
import random
from typing import Dict, List, Annotated
from semantic_kernel.functions.kernel_function_decorator import kernel_function


class WeatherPlugin:
    """Simulated weather data source exposed as Semantic Kernel functions."""

    def __init__(self):
        # Possible sky conditions returned by every reading.
        self.weather_conditions = [
            "Sunny",
            "Cloudy",
            "Rainy",
            "Snowy",
            "Windy",
            "Foggy",
            "Stormy",
        ]
        # Per-city (low, high) temperature bounds in °F; "Default"
        # covers any city not listed.
        self.temperature_ranges = {
            "New York": (50, 85),
            "London": (45, 75),
            "Tokyo": (55, 90),
            "Sydney": (60, 95),
            "Paris": (48, 80),
            "Default": (40, 100),
        }

        # Canned alert messages; None means no active alert.
        self.alerts = {
            "New York": "Heat advisory in effect",
            "Tokyo": "Typhoon warning for coastal areas",
            "Sydney": None,
            "London": None,
            "Paris": "Air quality warning",
        }

    @kernel_function
    async def get_current_weather(
        self, location: Annotated[str, "The city name to get weather for"]
    ) -> Dict:
        """Gets the current weather for a specified location."""
        low, high = self.temperature_ranges.get(
            location, self.temperature_ranges["Default"]
        )
        # Draws happen in the same order as before: temp, condition,
        # humidity, wind — keeps the RNG stream identical under a seed.
        reading_temp = random.randint(low, high)
        reading_condition = random.choice(self.weather_conditions)
        return {
            "location": location,
            "temperature": reading_temp,
            "condition": reading_condition,
            "humidity": random.randint(30, 95),
            "wind_speed": random.randint(0, 30),
        }

    @kernel_function
    async def get_forecast(
        self,
        location: Annotated[str, "The city name to get forecast for"],
        days: Annotated[int, "Number of days for the forecast"] = 3,
    ) -> List[Dict]:
        """Gets a weather forecast for a specified number of days."""
        low, high = self.temperature_ranges.get(
            location, self.temperature_ranges["Default"]
        )
        return [
            {
                "day": day_index + 1,
                "temperature": random.randint(low, high),
                "condition": random.choice(self.weather_conditions),
                "humidity": random.randint(30, 95),
                "wind_speed": random.randint(0, 30),
            }
            for day_index in range(days)
        ]

    @kernel_function
    async def get_weather_alert(
        self, location: Annotated[str, "The city name to check for weather alerts"]
    ) -> Dict:
        """Gets any active weather alerts for a location."""
        active_alert = self.alerts.get(location)
        return {
            "location": location,
            "has_alert": active_alert is not None,
            "alert_message": active_alert if active_alert else "No active alerts",
        }


# --- playground/backend/main.py ---
"""
This file is maintained for backward compatibility.
All functionality has been moved to the app/ directory.
"""

from app.main import app

# Re-export app for backward compatibility
# This ensures that any code importing from main.py continues to work

if __name__ == "__main__":
    import uvicorn
    import os

    # Read host and port from environment variables, with defaults
    host = os.environ.get("BACKEND_HOST", "0.0.0.0")
    port = int(os.environ.get("BACKEND_PORT", 8000))

    uvicorn.run("app.main:app", host=host, port=port, reload=True)
4 | """ 5 | 6 | from app.main import app 7 | 8 | # Re-export app for backward compatibility 9 | # This ensures that any code importing from main.py continues to work 10 | 11 | if __name__ == "__main__": 12 | import uvicorn 13 | import os 14 | 15 | # Read host and port from environment variables, with defaults 16 | host = os.environ.get("BACKEND_HOST", "0.0.0.0") 17 | port = int(os.environ.get("BACKEND_PORT", 8000)) 18 | 19 | uvicorn.run("app.main:app", host=host, port=port, reload=True) 20 | -------------------------------------------------------------------------------- /playground/backend/pyproject.toml: -------------------------------------------------------------------------------- 1 | [project] 2 | name = "backend" 3 | version = "0.1.0" 4 | description = "Add your description here" 5 | readme = "README.md" 6 | requires-python = ">=3.13" 7 | dependencies = [ 8 | "fastapi[standard]>=0.115.11", 9 | "ipykernel>=6.29.5", 10 | "mermaid-py>=0.7.1", 11 | "pydantic>=2.10.6", 12 | "python-dotenv>=1.0.1", 13 | "python-multipart>=0.0.20", 14 | "semantic-kernel>=1.27.2", 15 | "uvicorn>=0.34.0", 16 | ] 17 | -------------------------------------------------------------------------------- /playground/frontend/.gitignore: -------------------------------------------------------------------------------- 1 | # See https://help.github.com/articles/ignoring-files/ for more about ignoring files. 
2 | 3 | # dependencies 4 | /node_modules 5 | /.pnp 6 | .pnp.* 7 | .yarn/* 8 | !.yarn/patches 9 | !.yarn/plugins 10 | !.yarn/releases 11 | !.yarn/versions 12 | 13 | # testing 14 | /coverage 15 | 16 | # next.js 17 | /.next/ 18 | /out/ 19 | 20 | # production 21 | /build 22 | 23 | # misc 24 | .DS_Store 25 | *.pem 26 | 27 | # debug 28 | npm-debug.log* 29 | yarn-debug.log* 30 | yarn-error.log* 31 | .pnpm-debug.log* 32 | 33 | # env files (can opt-in for committing if needed) 34 | .env* 35 | 36 | # vercel 37 | .vercel 38 | 39 | # typescript 40 | *.tsbuildinfo 41 | next-env.d.ts 42 | -------------------------------------------------------------------------------- /playground/frontend/README.md: -------------------------------------------------------------------------------- 1 | # Semantic Kernel Playground - Shadcn Frontend 2 | 3 | This is a modern frontend for the Semantic Kernel Playground, built with Next.js and shadcn/ui components. 4 | 5 | ## Features 6 | 7 | - Modern, responsive UI using shadcn/ui components 8 | - TypeScript for type safety 9 | - Improved user experience with tailored components 10 | - Semantic Memory demo with vector embedding search 11 | - Functions & Plugins demo (coming soon) 12 | - Translation demo (coming soon) 13 | - Weather demo (coming soon) 14 | - Summarization demo (coming soon) 15 | - Filters & Security demo (coming soon) 16 | - Agent demo (coming soon) 17 | - Multi-agent demo (coming soon) 18 | 19 | ## Getting Started 20 | 21 | First, make sure the backend server is running. Then, run the development server: 22 | 23 | ```bash 24 | npm install --legacy-peer-deps 25 | npm run dev 26 | ``` 27 | 28 | Open [http://localhost:3000](http://localhost:3000) with your browser to see the result. 
29 | 30 | ## Technologies Used 31 | 32 | - [Next.js](https://nextjs.org/) - React framework 33 | - [shadcn/ui](https://ui.shadcn.com/) - Beautiful, accessible component library 34 | - [Tailwind CSS](https://tailwindcss.com/) - Utility-first CSS framework 35 | - [Axios](https://axios-http.com/) - Promise-based HTTP client 36 | 37 | This project uses [`next/font`](https://nextjs.org/docs/app/building-your-application/optimizing/fonts) to automatically optimize and load [Geist](https://vercel.com/font), a new font family for Vercel. 38 | 39 | ## Learn More 40 | 41 | To learn more about Next.js, take a look at the following resources: 42 | 43 | - [Next.js Documentation](https://nextjs.org/docs) - learn about Next.js features and API. 44 | - [Learn Next.js](https://nextjs.org/learn) - an interactive Next.js tutorial. 45 | 46 | You can check out [the Next.js GitHub repository](https://github.com/vercel/next.js) - your feedback and contributions are welcome! 47 | 48 | ## Deploy on Vercel 49 | 50 | The easiest way to deploy your Next.js app is to use the [Vercel Platform](https://vercel.com/new?utm_medium=default-template&filter=next.js&utm_source=create-next-app&utm_campaign=create-next-app-readme) from the creators of Next.js. 51 | 52 | Check out our [Next.js deployment documentation](https://nextjs.org/docs/app/building-your-application/deploying) for more details. 
53 | -------------------------------------------------------------------------------- /playground/frontend/components.json: -------------------------------------------------------------------------------- 1 | { 2 | "$schema": "https://ui.shadcn.com/schema.json", 3 | "style": "new-york", 4 | "rsc": true, 5 | "tsx": true, 6 | "tailwind": { 7 | "config": "tailwind.config.js", 8 | "css": "src/app/globals.css", 9 | "baseColor": "neutral", 10 | "cssVariables": true, 11 | "prefix": "" 12 | }, 13 | "aliases": { 14 | "components": "@/components", 15 | "utils": "@/lib/utils", 16 | "ui": "@/components/ui", 17 | "lib": "@/lib", 18 | "hooks": "@/hooks" 19 | }, 20 | "iconLibrary": "lucide" 21 | } -------------------------------------------------------------------------------- /playground/frontend/eslint.config.mjs: -------------------------------------------------------------------------------- 1 | import { dirname } from "path"; 2 | import { fileURLToPath } from "url"; 3 | import { FlatCompat } from "@eslint/eslintrc"; 4 | 5 | const __filename = fileURLToPath(import.meta.url); 6 | const __dirname = dirname(__filename); 7 | 8 | const compat = new FlatCompat({ 9 | baseDirectory: __dirname, 10 | }); 11 | 12 | const eslintConfig = [ 13 | ...compat.extends("next/core-web-vitals", "next/typescript"), 14 | ]; 15 | 16 | export default eslintConfig; 17 | -------------------------------------------------------------------------------- /playground/frontend/next.config.ts: -------------------------------------------------------------------------------- 1 | import type { NextConfig } from "next"; 2 | 3 | const nextConfig: NextConfig = { 4 | reactStrictMode: true, 5 | transpilePackages: ["tw-animate-css"], 6 | async rewrites() { 7 | return [ 8 | { 9 | source: '/api/:path*', 10 | destination: 'http://localhost:8000/:path*', 11 | }, 12 | ]; 13 | }, 14 | }; 15 | 16 | export default nextConfig; 17 | -------------------------------------------------------------------------------- 
/playground/frontend/package.json: -------------------------------------------------------------------------------- 1 | { 2 | "name": "frontend-shadcn", 3 | "version": "0.1.0", 4 | "private": true, 5 | "scripts": { 6 | "dev": "next dev", 7 | "build": "next build", 8 | "start": "next start", 9 | "lint": "next lint" 10 | }, 11 | "dependencies": { 12 | "@hookform/resolvers": "^4.1.3", 13 | "@radix-ui/react-avatar": "^1.1.3", 14 | "@radix-ui/react-dialog": "^1.1.6", 15 | "@radix-ui/react-label": "^2.1.2", 16 | "@radix-ui/react-progress": "^1.1.2", 17 | "@radix-ui/react-scroll-area": "^1.2.3", 18 | "@radix-ui/react-select": "^2.1.6", 19 | "@radix-ui/react-separator": "^1.1.2", 20 | "@radix-ui/react-slider": "^1.2.3", 21 | "@radix-ui/react-slot": "^1.1.2", 22 | "@radix-ui/react-switch": "^1.1.3", 23 | "@radix-ui/react-tabs": "^1.1.3", 24 | "@radix-ui/react-tooltip": "^1.1.8", 25 | "axios": "^1.8.4", 26 | "class-variance-authority": "^0.7.1", 27 | "clsx": "^2.1.1", 28 | "lucide-react": "^0.483.0", 29 | "next": "15.2.3", 30 | "next-themes": "^0.4.6", 31 | "react": "^19.0.0", 32 | "react-dom": "^19.0.0", 33 | "react-hook-form": "^7.54.2", 34 | "react-markdown": "^10.1.0", 35 | "react-router-dom": "^7.4.0", 36 | "rehype-pretty-code": "^0.14.1", 37 | "shiki": "^0.14.6", 38 | "sonner": "^2.0.1", 39 | "tailwind-merge": "^3.0.2", 40 | "tw-animate-css": "^1.2.4", 41 | "zod": "^3.24.2" 42 | }, 43 | "devDependencies": { 44 | "@eslint/eslintrc": "^3", 45 | "@tailwindcss/typography": "^0.5.16", 46 | "@types/node": "^20", 47 | "@types/react": "^19", 48 | "@types/react-dom": "^19", 49 | "autoprefixer": "^10.4.16", 50 | "eslint": "^9", 51 | "eslint-config-next": "15.2.3", 52 | "postcss": "^8.4.32", 53 | "tailwindcss": "^3.4.0", 54 | "typescript": "^5" 55 | } 56 | } 57 | -------------------------------------------------------------------------------- /playground/frontend/postcss.config.mjs: -------------------------------------------------------------------------------- 1 | const config 
= { 2 | plugins: { 3 | tailwindcss: {}, 4 | autoprefixer: {}, 5 | }, 6 | }; 7 | 8 | export default config; 9 | -------------------------------------------------------------------------------- /playground/frontend/public/file.svg: -------------------------------------------------------------------------------- 1 | -------------------------------------------------------------------------------- /playground/frontend/public/globe.svg: -------------------------------------------------------------------------------- 1 | -------------------------------------------------------------------------------- /playground/frontend/public/next.svg: -------------------------------------------------------------------------------- 1 | -------------------------------------------------------------------------------- /playground/frontend/public/vercel.svg: -------------------------------------------------------------------------------- 1 | -------------------------------------------------------------------------------- /playground/frontend/public/window.svg: -------------------------------------------------------------------------------- 1 | -------------------------------------------------------------------------------- /playground/frontend/run.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | # Check if Node.js is installed 4 | if ! command -v node &> /dev/null; then 5 | echo "Error: Node.js is required but not installed. Please install Node.js first." 6 | exit 1 7 | fi 8 | 9 | # Navigate to the directory containing this script 10 | cd "$(dirname "$0")" 11 | 12 | # Install dependencies if node_modules doesn't exist 13 | if [ ! -d "node_modules" ]; then 14 | echo "Installing dependencies..." 15 | npm install 16 | fi 17 | 18 | # Run the development server 19 | echo "Starting the development server..." 
20 | npm run dev -------------------------------------------------------------------------------- /playground/frontend/src/app/favicon.ico: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Azure-Samples/semantic-kernel-workshop/b65b791a04a79ef73a8cf8be94145fc58f5b33e8/playground/frontend/src/app/favicon.ico -------------------------------------------------------------------------------- /playground/frontend/src/app/functions/page.tsx: -------------------------------------------------------------------------------- 1 | 'use client'; 2 | 3 | import { useState } from 'react'; 4 | import axios from 'axios'; 5 | import Shell from '@/components/layout/shell'; 6 | import { Button } from '@/components/ui/button'; 7 | import { Card, CardContent } from '@/components/ui/card'; 8 | import { Textarea } from '@/components/ui/textarea'; 9 | import { Badge } from '@/components/ui/badge'; 10 | import { Alert, AlertDescription } from '@/components/ui/alert'; 11 | 12 | // Import icons individually 13 | import { FunctionSquare } from "lucide-react"; 14 | 15 | const API_URL = 'http://localhost:8000'; 16 | 17 | // Define the type for example prompts 18 | interface ExamplePrompt { 19 | title: string; 20 | prompt: string; 21 | inputExample: string; 22 | description: string; 23 | } 24 | 25 | // Example prompts for users to try 26 | const examplePrompts: ExamplePrompt[] = [ 27 | { 28 | title: 'Professional Rewriter', 29 | prompt: '{{$input}}\n\nRewrite this in a professional tone:', 30 | inputExample: 'Hey, I think we should meet up to talk about that project thing we were discussing last week. It\'s kind of important.', 31 | description: 'Convert casual text into professional business communication.' 
32 | }, 33 | { 34 | title: 'Summarizer', 35 | prompt: '{{$input}}\n\nTL;DR in one sentence:', 36 | inputExample: 'Semantic Kernel is a lightweight SDK that integrates Large Language Models (LLMs) with conventional programming languages. It combines natural language semantic functions, traditional code native functions, and embeddings-based memory to create AI-enabled experiences.', 37 | description: 'Create a one-sentence summary of longer text.' 38 | }, 39 | { 40 | title: 'Idea Generator', 41 | prompt: '{{$input}}\n\nGenerate 5 creative ideas related to this topic:', 42 | inputExample: 'Building a mobile app for personal finance management', 43 | description: 'Generate creative ideas around a specific topic.' 44 | } 45 | ]; 46 | 47 | export default function FunctionsDemo() { 48 | const [prompt, setPrompt] = useState('{{$input}}\n\nRewrite this in a professional tone:'); 49 | const [inputText, setInputText] = useState(''); 50 | const [result, setResult] = useState(''); 51 | const [loading, setLoading] = useState(false); 52 | const [error, setError] = useState(''); 53 | 54 | const handleInvokeFunction = async () => { 55 | if (!prompt.trim() || !inputText.trim()) { 56 | setError('Please provide both a prompt template and input text'); 57 | return; 58 | } 59 | 60 | try { 61 | setLoading(true); 62 | setError(''); 63 | 64 | console.log("Current prompt template:", prompt); 65 | 66 | const response = await axios.post(`${API_URL}/functions/semantic`, { 67 | function_name: "professional_rewriter", 68 | plugin_name: "TextFormatter", 69 | prompt: prompt, 70 | input_text: inputText, 71 | parameters: {} 72 | }); 73 | 74 | setResult(response.data.result); 75 | setLoading(false); 76 | } catch (error) { 77 | console.error('Error invoking function:', error); 78 | setError('Error invoking semantic function. 
Please ensure the backend server is running.'); 79 | setLoading(false); 80 | } 81 | }; 82 | 83 | const loadExample = (example: ExamplePrompt) => { 84 | setPrompt(example.prompt); 85 | setInputText(example.inputExample); 86 | setResult(''); 87 | }; 88 | 89 | // Function to safely display strings with curly braces in JSX 90 | const displayWithCurlyBraces = (text: string) => { 91 | return text.split('{{').join('{ "{" }').split('}}').join('{ "}" }'); 92 | }; 93 | 94 | return ( 95 | 96 |
97 | {/* Header */} 98 |
99 |

100 | 101 | Functions & Plugins 102 |

103 |

104 | Create AI-powered semantic functions with custom prompt templates. 105 | Define how the AI should process your input with simple, reusable templates. 106 |

107 |
108 | 109 | {/* Alert for errors */} 110 | {error && ( 111 | 114 | {error} 115 | 116 | )} 117 | 118 | {/* Example prompts */} 119 |
120 |

Example Functions

121 |
122 | {examplePrompts.map((example, index) => ( 123 | 127 | 128 |

{example.title}

129 |

{example.description}

130 |
131 | 132 |
133 | 140 |
141 |
142 | ))} 143 |
144 |
145 | 146 | {/* Function Components */} 147 |
148 | {/* Define Function */} 149 |
150 | 151 | 152 |

Define Your Function

153 | 154 |
155 | 156 |