├── .devcontainer ├── Dockerfile └── devcontainer.json ├── .env.example ├── .github ├── CODE_OF_CONDUCT.md ├── ISSUE_TEMPLATE.md └── PULL_REQUEST_TEMPLATE.md ├── .gitignore ├── .python-version ├── CHANGELOG.md ├── CONTRIBUTING.md ├── LICENSE ├── README.md ├── SECURITY.md ├── SUPPORT.md ├── clients ├── README.md ├── python │ └── pydantic-ai │ │ ├── .env.example │ │ ├── .python-version │ │ ├── README.md │ │ ├── main.py │ │ ├── pyproject.toml │ │ └── uv.lock ├── sample-dataset │ └── customers │ │ ├── all_customers.json │ │ ├── danielle.json │ │ ├── erica.json │ │ └── john.json └── vscode │ ├── README.md │ └── mcp-configs │ ├── mcp.sse.json │ ├── mcp.stdio.uvx.local.json │ └── mcp.stdio.uvx.remote.json ├── mcp.json ├── pyproject.toml ├── src ├── __init__.py └── mcp_foundry │ ├── __init__.py │ ├── __main__.py │ ├── mcp_foundry_evaluation │ └── tools.py │ ├── mcp_foundry_knowledge │ ├── __init__.py │ ├── data_access_objects │ │ ├── __init__.py │ │ ├── dao.py │ │ └── models.py │ ├── prompts.py │ ├── resources.py │ └── tools.py │ ├── mcp_foundry_model │ ├── __init__.py │ ├── models.py │ ├── tools.py │ └── utils.py │ └── mcp_server.py ├── test_evaluator_requirements.py ├── tests ├── __init__.py ├── conftest.py ├── pytest.ini ├── test_mcp.py ├── test_mcp_foundry_tool.py └── test_utils.py └── uv.lock /.devcontainer/Dockerfile: -------------------------------------------------------------------------------- 1 | FROM mcr.microsoft.com/devcontainers/python:dev-3.11 2 | 3 | # Install Azure CLI and clean up 4 | RUN curl -sL https://aka.ms/InstallAzureCLIDeb | bash && \ 5 | apt-get update && \ 6 | apt-get install -y azure-cli && \ 7 | apt-get clean && \ 8 | rm -rf /var/lib/apt/lists/* 9 | 10 | # Install uv using pipx 11 | RUN pipx install uv -------------------------------------------------------------------------------- /.devcontainer/devcontainer.json: -------------------------------------------------------------------------------- 1 | { 2 | "name": "Foundry MCP Development Environment", 3 | "build": { 4 | "dockerfile": "Dockerfile" 5 | }, 6 | "remoteUser": "vscode" 7 | } -------------------------------------------------------------------------------- /.env.example: -------------------------------------------------------------------------------- 1 | # Duplicate this file as `.env` and set values to pass to the server 2 | # Remove or comment any unused environment variables 3 | 4 | # GitHub authentication 5 | GITHUB_TOKEN= 6 | 7 | # Azure authentication 8 | AZURE_TENANT_ID= 9 | AZURE_CLIENT_ID= 10 | AZURE_CLIENT_SECRET= 11 | 12 | # Additional info for Azure AI Foundry (currently used for Evaluation only) 13 | AZURE_AI_PROJECT_ENDPOINT= 14 | AZURE_OPENAI_ENDPOINT= 15 | AZURE_OPENAI_API_KEY= 16 | AZURE_OPENAI_DEPLOYMENT=gpt-4o 17 | AZURE_OPENAI_API_VERSION= 18 | 19 | # Additional info for Knowledge 20 | AZURE_AI_SEARCH_ENDPOINT=https://.search.windows.net/ 21 | AZURE_AI_SEARCH_API_VERSION=2025-03-01-preview 22 | SEARCH_AUTHENTICATION_METHOD=api-search-key 23 | AZURE_AI_SEARCH_API_KEY=your_api_key 24 | 25 | # Additional info for Evaluation 26 | EVAL_DATA_DIR=/path/to/eval_jsonl/ 27 | -------------------------------------------------------------------------------- /.github/CODE_OF_CONDUCT.md: -------------------------------------------------------------------------------- 1 | # Microsoft Open Source Code of Conduct 2 | 3 | This project has adopted the [Microsoft Open Source Code of Conduct](https://opensource.microsoft.com/codeofconduct/). 
4 | 5 | Resources: 6 | 7 | - [Microsoft Open Source Code of Conduct](https://opensource.microsoft.com/codeofconduct/) 8 | - [Microsoft Code of Conduct FAQ](https://opensource.microsoft.com/codeofconduct/faq/) 9 | - Contact [opencode@microsoft.com](mailto:opencode@microsoft.com) with questions or concerns 10 | -------------------------------------------------------------------------------- /.github/ISSUE_TEMPLATE.md: -------------------------------------------------------------------------------- 1 | 4 | > Please provide us with the following information: 5 | > --------------------------------------------------------------- 6 | 7 | ### This issue is for a: (mark with an `x`) 8 | ``` 9 | - [ ] bug report -> please search issues before submitting 10 | - [ ] feature request 11 | - [ ] documentation issue or request 12 | - [ ] regression (a behavior that used to work and stopped in a new release) 13 | ``` 14 | 15 | ### Minimal steps to reproduce 16 | > 17 | 18 | ### Any log messages given by the failure 19 | > 20 | 21 | ### Expected/desired behavior 22 | > 23 | 24 | ### OS and Version? 25 | > Windows 7, 8 or 10. Linux (which distribution). macOS (Yosemite? El Capitan? Sierra?) 26 | 27 | ### Versions 28 | > 29 | 30 | ### Mention any other details that might be useful 31 | 32 | > --------------------------------------------------------------- 33 | > Thanks! We'll be in touch soon. 34 | -------------------------------------------------------------------------------- /.github/PULL_REQUEST_TEMPLATE.md: -------------------------------------------------------------------------------- 1 | ## Purpose 2 | 3 | * ... 4 | 5 | ## Does this introduce a breaking change? 6 | 7 | ``` 8 | [ ] Yes 9 | [ ] No 10 | ``` 11 | 12 | ## Pull Request Type 13 | What kind of change does this Pull Request introduce? 14 | 15 | 16 | ``` 17 | [ ] Bugfix 18 | [ ] Feature 19 | [ ] Code style update (formatting, local variables) 20 | [ ] Refactoring (no functional changes, no api changes) 21 | [ ] Documentation content changes 22 | [ ] Other... Please describe: 23 | ``` 24 | 25 | ## How to Test 26 | * Get the code 27 | 28 | ``` 29 | git clone [repo-address] 30 | cd [repo-name] 31 | git checkout [branch-name] 32 | npm install 33 | ``` 34 | 35 | * Test the code 36 | 37 | ``` 38 | ``` 39 | 40 | ## What to Check 41 | Verify that the following are valid 42 | * ... 
43 | 44 | ## Other Information 45 | -------------------------------------------------------------------------------- /.gitignore: -------------------------------------------------------------------------------- 1 | gitignore 2 | # Byte-compiled / optimized / DLL files 3 | __pycache__/ 4 | *.py[cod] 5 | *$py.class 6 | 7 | # C extensions 8 | *.so 9 | 10 | # Distribution / packaging 11 | .Python 12 | build/ 13 | develop-eggs/ 14 | dist/ 15 | downloads/ 16 | eggs/ 17 | .eggs/ 18 | lib/ 19 | lib64/ 20 | parts/ 21 | sdist/ 22 | var/ 23 | wheels/ 24 | *.egg-info/ 25 | .installed.cfg 26 | *.egg 27 | 28 | # Unit test / coverage reports 29 | htmlcov/ 30 | .tox/ 31 | .nox/ 32 | .coverage 33 | .coverage.* 34 | .cache 35 | nosetests.xml 36 | coverage.xml 37 | *.cover 38 | .hypothesis/ 39 | .pytest_cache/ 40 | 41 | # Environments 42 | .env 43 | .venv 44 | env/ 45 | venv/ 46 | ENV/ 47 | env.bak/ 48 | venv.bak/ 49 | 50 | # VS Code 51 | .vscode/ 52 | 53 | # Node.js / TypeScript 54 | node_modules/ 55 | npm-debug.log 56 | yarn-debug.log 57 | yarn-error.log 58 | .npm 59 | .yarn/cache 60 | .yarn/unplugged 61 | .yarn/build-state.yml 62 | .pnp.* 63 | 64 | # TypeScript specific 65 | *.tsbuildinfo 66 | .tscache/ 67 | *.js.map 68 | *.d.ts 69 | dist/ 70 | build/ 71 | out/ 72 | 73 | # Package manager lock files (uncomment if you want to ignore) 74 | # package-lock.json 75 | # yarn.lock 76 | 77 | # IDE - WebStorm/IntelliJ 78 | .idea/ 79 | 80 | # OS specific 81 | .DS_Store 82 | Thumbs.db -------------------------------------------------------------------------------- /.python-version: -------------------------------------------------------------------------------- 1 | 3.11 2 | -------------------------------------------------------------------------------- /CHANGELOG.md: -------------------------------------------------------------------------------- 1 | ## MCP Foundry Changelog 2 | 3 | 4 | # 1.0.0 (2025-03-21) 5 | 6 | *Features* 7 | * Initial release of MCP Foundry for Azure AI Agents integration 8 | * Seamless connection between Claude Desktop and Azure AI Agents via Model Context Protocol 9 | * Support for Azure AI Agent tool integrations (AI Search, Bing Web Grounding) 10 | * Thread isolation for client conversations 11 | * Customizable user agent identification 12 | * Automatic credential handling via DefaultAzureCredential 13 | 14 | *Bug Fixes* 15 | * N/A - Initial release 16 | 17 | *Breaking Changes* 18 | * N/A - Initial release -------------------------------------------------------------------------------- /CONTRIBUTING.md: -------------------------------------------------------------------------------- 1 | # Contributing to [project-title] 2 | 3 | This project welcomes contributions and suggestions. Most contributions require you to agree to a 4 | Contributor License Agreement (CLA) declaring that you have the right to, and actually do, grant us 5 | the rights to use your contribution. For details, visit https://cla.opensource.microsoft.com. 6 | 7 | When you submit a pull request, a CLA bot will automatically determine whether you need to provide 8 | a CLA and decorate the PR appropriately (e.g., status check, comment). Simply follow the instructions 9 | provided by the bot. You will only need to do this once across all repos using our CLA. 10 | 11 | This project has adopted the [Microsoft Open Source Code of Conduct](https://opensource.microsoft.com/codeofconduct/). 
12 | For more information see the [Code of Conduct FAQ](https://opensource.microsoft.com/codeofconduct/faq/) or 13 | contact [opencode@microsoft.com](mailto:opencode@microsoft.com) with any additional questions or comments. 14 | 15 | - [Code of Conduct](#coc) 16 | - [Issues and Bugs](#issue) 17 | - [Feature Requests](#feature) 18 | - [Submission Guidelines](#submit) 19 | 20 | ## Code of Conduct 21 | Help us keep this project open and inclusive. Please read and follow our [Code of Conduct](https://opensource.microsoft.com/codeofconduct/). 22 | 23 | ## Found an Issue? 24 | If you find a bug in the source code or a mistake in the documentation, you can help us by 25 | [submitting an issue](#submit-issue) to the GitHub Repository. Even better, you can 26 | [submit a Pull Request](#submit-pr) with a fix. 27 | 28 | ## Want a Feature? 29 | You can *request* a new feature by [submitting an issue](#submit-issue) to the GitHub 30 | Repository. If you would like to *implement* a new feature, please submit an issue with 31 | a proposal for your work first, to be sure that we can use it. 32 | 33 | * **Small Features** can be crafted and directly [submitted as a Pull Request](#submit-pr). 34 | 35 | ## Submission Guidelines 36 | 37 | ### Submitting an Issue 38 | Before you submit an issue, search the archive first; your question may already have been answered. 39 | 40 | If your issue appears to be a bug and hasn't been reported, open a new issue. 41 | Help us maximize the effort we can spend fixing issues and adding new 42 | features by not reporting duplicate issues. Providing the following information will increase the 43 | chances of your issue being dealt with quickly: 44 | 45 | * **Overview of the Issue** - if an error is being thrown, a non-minified stack trace helps 46 | * **Version** - what version is affected (e.g. 0.1.2) 47 | * **Motivation for or Use Case** - explain what you are trying to do and why the current behavior is a bug for you 48 | * **Browsers and Operating System** - is this a problem with all browsers? 49 | * **Reproduce the Error** - provide a live example or an unambiguous set of steps 50 | * **Related Issues** - has a similar issue been reported before? 51 | * **Suggest a Fix** - if you can't fix the bug yourself, perhaps you can point to what might be 52 | causing the problem (line of code or commit) 53 | 54 | You can file new issues by providing the above information at the corresponding repository's issues link: https://github.com/[organization-name]/[repository-name]/issues/new. 55 | 56 | ### Submitting a Pull Request (PR) 57 | Before you submit your Pull Request (PR), consider the following guidelines: 58 | 59 | * Search the repository (https://github.com/[organization-name]/[repository-name]/pulls) for an open or closed PR 60 | that relates to your submission. You don't want to duplicate effort. 61 | 62 | * Make your changes in a new git fork: 63 | 64 | * Commit your changes using a descriptive commit message 65 | * Push your fork to GitHub: 66 | * In GitHub, create a pull request 67 | * If we suggest changes then: 68 | * Make the required updates. 69 | * Rebase your fork and force push to your GitHub repository (this will update your Pull Request): 70 | 71 | ```shell 72 | git rebase master -i 73 | git push -f 74 | ``` 75 | 76 | That's it! Thank you for your contribution! 
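As a quick reference, the full flow described above might look like the following sketch on the command line (the account, repository, and branch names are placeholders):

```shell
# Clone your fork and create a topic branch for your change
git clone https://github.com/<your-account>/<repository-name>.git
cd <repository-name>
git checkout -b my-fix-branch

# Commit with a descriptive message and push the branch to your fork
git add .
git commit -m "Describe the change"
git push origin my-fix-branch
```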
77 | -------------------------------------------------------------------------------- /LICENSE: -------------------------------------------------------------------------------- 1 | MIT License 2 | 3 | Copyright (c) Microsoft Corporation. 4 | 5 | Permission is hereby granted, free of charge, to any person obtaining a copy 6 | of this software and associated documentation files (the "Software"), to deal 7 | in the Software without restriction, including without limitation the rights 8 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell 9 | copies of the Software, and to permit persons to whom the Software is 10 | furnished to do so, subject to the following conditions: 11 | 12 | The above copyright notice and this permission notice shall be included in all 13 | copies or substantial portions of the Software. 14 | 15 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 16 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 17 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE 18 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 19 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, 20 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE 21 | SOFTWARE 22 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | # MCP Server that interacts with Azure AI Foundry (experimental) 2 | 3 | A Model Context Protocol server for Azure AI Foundry, providing a unified set of tools for models, knowledge, evaluation, and more. 4 | 5 | [![GitHub watchers](https://img.shields.io/github/watchers/azure-ai-foundry/mcp-foundry.svg?style=social&label=Watch)](https://github.com/azure-ai-foundry/mcp-foundry/watchers) 6 | [![GitHub forks](https://img.shields.io/github/forks/azure-ai-foundry/mcp-foundry.svg?style=social&label=Fork)](https://github.com/azure-ai-foundry/mcp-foundry/fork) 7 | [![GitHub stars](https://img.shields.io/github/stars/azure-ai-foundry/mcp-foundry?style=social&label=Star)](https://github.com/azure-ai-foundry/mcp-foundry/stargazers) 8 | 9 | [![Azure AI Community Discord](https://dcbadge.vercel.app/api/server/ByRwuEEgH4)](https://discord.gg/REmjGvvFpW) 10 | 11 | ## Available Tools 12 | 13 | ### Capabilities: Models 14 | 15 | | Category | Tool | Description | 16 | |---|---|---| 17 | | **Explore** | `list_models_from_model_catalog` | Retrieves a list of supported models from the Azure AI Foundry catalog. | 18 | | | `list_azure_ai_foundry_labs_projects` | Retrieves a list of state-of-the-art AI models from Microsoft Research available in Azure AI Foundry Labs. | 19 | | | `get_model_details_and_code_samples` | Retrieves detailed information for a specific model from the Azure AI Foundry catalog. | 20 | | **Build** | `get_prototyping_instructions_for_github_and_labs` | Provides comprehensive instructions and setup guidance for starting to work with models from Azure AI Foundry and Azure AI Foundry Labs. | 21 | | **Deploy** | `get_model_quotas` | Get model quotas for a specific Azure location. | 22 | | | `create_azure_ai_services_account` | Creates an Azure AI Services account. | 23 | | | `list_deployments_from_azure_ai_services` | Retrieves a list of deployments from Azure AI Services. | 24 | | | `deploy_model_on_ai_services` | Deploys a model on Azure AI Services. 
| 25 | | | `create_foundry_project` | Creates a new Azure AI Foundry project. | 26 | 27 | ### Capabilities: Knowledge 28 | 29 | | Category | Tool | Description | 30 | |---|---|---| 31 | | **Index** | `list_index_names` | Retrieve all names of indexes from the AI Search Service | 32 | | | `list_index_schemas` | Retrieve all index schemas from the AI Search Service | 33 | | | `retrieve_index_schema` | Retrieve the schema for a specific index from the AI Search Service | 34 | | | `create_index` | Creates a new index | 35 | | | `modify_index` | Modifies the index definition of an existing index | 36 | | | `delete_index` | Removes an existing index | 37 | | **Document** | `add_document` | Adds a document to the index | 38 | | | `delete_document` | Removes a document from the index | 39 | | **Query** | `query_index` | Searches a specific index to retrieve matching documents | 40 | | | `get_document_count` | Returns the total number of documents in the index | 41 | | **Indexer** | `list_indexers` | Retrieve all names of indexers from the AI Search Service | 42 | | | `get_indexer` | Retrieve the full definition of a specific indexer from the AI Search Service | 43 | | | `create_indexer` | Create a new indexer in the Search Service with the skill, index and data source | 44 | | | `delete_indexer` | Delete an indexer from the AI Search Service by name | 45 | | **Data Source** | `list_data_sources` | Retrieve all names of data sources from the AI Search Service | 46 | | | `get_data_source` | Retrieve the full definition of a specific data source | 47 | | **Skill Set** | `list_skill_sets` | Retrieve all names of skill sets from the AI Search Service | 48 | | | `get_skill_set` | Retrieve the full definition of a specific skill set | 49 | | **Content** | `fk_fetch_local_file_contents` | Retrieves the contents of a local file path (sample JSON, document etc) | 50 | | | `fk_fetch_url_contents` | Retrieves the contents of a URL (sample JSON, document etc) | 51 | 52 | ### Capabilities: Evaluation 53 | 54 | | Category | Tool | Description | 55 | |---|---|---| 56 | | **Evaluator Utilities** | `list_text_evaluators` | List all available text evaluators. | 57 | | | `list_agent_evaluators` | List all available agent evaluators. | 58 | | | `get_text_evaluator_requirements` | Show input requirements for each text evaluator. | 59 | | | `get_agent_evaluator_requirements` | Show input requirements for each agent evaluator. | 60 | | **Text Evaluation** | `run_text_eval` | Run one or multiple text evaluators on a JSONL file or content. | 61 | | | `format_evaluation_report` | Convert evaluation output into a readable Markdown report. | 62 | | **Agent Evaluation** | `agent_query_and_evaluate` | Query an agent and evaluate its response using selected evaluators. End-to-End agent evaluation. | 63 | | | `run_agent_eval` | Evaluate a single agent interaction with specific data (query, response, tool calls, definitions). | 64 | | **Agent Service** | `list_agents` | List all Azure AI Agents available in the configured project. | 65 | | | `connect_agent` | Send a query to a specified agent. | 66 | | | `query_default_agent` | Query the default agent defined in environment variables. | 67 | 68 | ## Prompt Examples 69 | 70 | ### Models 71 | 72 | #### Explore models 73 | 74 | - How can you help me find the right model? 75 | - What models can I use from Azure AI Foundry? 76 | - What OpenAI models are available in Azure AI Foundry? 77 | - What are the most popular models in Azure AI Foundry? Pick me 10 models. 
78 | - What models are good for reasoning? Show me some examples in two buckets, one for large models and one for small models. 79 | - Can you compare Phi models and explain differences? 80 | - Show me the model card for Phi-4-reasoning. 81 | - Can you show me how to test a model? 82 | - What does free playground in Azure AI Foundry mean? 83 | - Can I use GitHub token to test models? 84 | - Show me latest models that support GitHub token. 85 | - Who are the model publishers for the models in Azure AI Foundry? 86 | - Show me models from Meta. 87 | - Show me models with MIT license. 88 | 89 | #### Build prototypes 90 | 91 | - Can you describe how you can help me build a prototype using the model? 92 | - Describe how you can build a prototype that uses an OpenAI model with my GitHub token. Don't try to create one yet. 93 | - Recommend me a few scenarios to build prototypes with models. 94 | - Tell me about Azure AI Foundry Labs. 95 | - Tell me more about Magentic One 96 | - What is Omniparser and what are potential use cases? 97 | - Can you help me build a prototype using Omniparser? 98 | 99 | #### Deploy OpenAI models 100 | 101 | - Can you help me deploy OpenAI models? 102 | - What steps do I need to take to deploy OpenAI models on Azure AI Foundry? 103 | - Can you help me understand how I can use OpenAI models on Azure AI Foundry using GitHub token? Can I use it for production? 104 | - I already have an Azure AI services resource. Can I deploy OpenAI models on it? 105 | - What does quota for OpenAI models mean on Azure AI Foundry? 106 | - Get me current quota for my AI services resource. 107 | 108 | ## Quick Start with GitHub Copilot 109 | 110 | [![Use The Template](https://img.shields.io/static/v1?style=for-the-badge&label=Use+The+Template&message=GitHub&color=181717&logo=github)](https://github.com/azure-ai-foundry/foundry-models-playground/generate) 111 | 112 | > This GitHub template has minimal setup with MCP server configuration and all required dependencies, making it easy to get started with your own projects. 113 | 114 | [![Install in VS Code](https://img.shields.io/static/v1?style=for-the-badge&label=Install+in+VS+Code&message=Open&color=007ACC&logo=visualstudiocode)](https://insiders.vscode.dev/redirect/mcp/install?name=Azure%20Foundry%20MCP%20Server&config=%7B%22type%22%3A%22stdio%22%2C%22command%22%3A%22uvx%22%2C%22args%22%3A%5B%22--prerelease%3Dallow%22%2C%22--from%22%2C%22git%2Bhttps%3A%2F%2Fgithub.com%2Fazure-ai-foundry%2Fmcp-foundry.git%22%2C%22run-azure-ai-foundry-mcp%22%5D%7D) 115 | 116 | > This helps you automatically set up the MCP server in your VS Code environment under user settings. 117 | > You will need `uvx` installed in your environment to run the server. 118 | 119 | ## Manual Setup 120 | 121 | 1. Install `uv` by following [Installing uv](https://docs.astral.sh/uv/getting-started/installation/). 122 | 1. Start a new workspace in VS Code. 123 | 1. (Optional) Create `.env` file in the root of your workspace to set environment variables. 124 | 1. Create `.vscode/mcp.json` in the root of your workspace. 125 | 126 | ```json 127 | { 128 | "servers": { 129 | "mcp_foundry_server": { 130 | "type": "stdio", 131 | "command": "uvx", 132 | "args": [ 133 | "--prerelease=allow", 134 | "--from", 135 | "git+https://github.com/azure-ai-foundry/mcp-foundry.git", 136 | "run-azure-ai-foundry-mcp", 137 | "--envFile", 138 | "${workspaceFolder}/.env" 139 | ] 140 | } 141 | } 142 | } 143 | ``` 144 | 145 | 1. Click `Start` button for the server in `.vscode/mcp.json` file. 146 | 1. 
Open GitHub Copilot chat in Agent mode and start asking questions. 147 | 148 | See [More examples for advanced setup](./clients/README.md) for more details on how to set up the MCP server. 149 | 150 | ## Setting the Environment Variables 151 | 152 | To securely pass information to the MCP server, such as API keys, endpoints, and other sensitive data, you can use environment variables. This is especially important for tools that require authentication or access to external services. 153 | 154 | You can set these environment variables in a `.env` file in the root of your project. You can pass the location of the `.env` file when setting up the MCP server, and the server will automatically load these variables when it starts. 155 | 156 | See [example .env file](./clients/python/pydantic-ai/.env.example) for a sample configuration. 157 | 158 | | Category | Variable | Required? | Description | 159 | | -------------- | ----------------------------- | ---------------------------------- | ------------------------------------------------ | 160 | | **Model** | `GITHUB_TOKEN` | No | GitHub token for testing models for free with rate limits. | 161 | | **Knowledge** | `AZURE_AI_SEARCH_ENDPOINT` | Always | The endpoint URL for your Azure AI Search service. It should look like this: `https://<your-service-name>.search.windows.net/`. | 162 | | | `AZURE_AI_SEARCH_API_VERSION` | No | API version to use. Defaults to `2025-03-01-preview`. | 163 | | | `SEARCH_AUTHENTICATION_METHOD`| Always | `service-principal` or `api-search-key`. | 164 | | | `AZURE_TENANT_ID` | Yes when using `service-principal` | The ID of your Azure Active Directory tenant. | 165 | | | `AZURE_CLIENT_ID` | Yes when using `service-principal` | The ID of your Service Principal (app registration). | 166 | | | `AZURE_CLIENT_SECRET` | Yes when using `service-principal` | The secret credential for the Service Principal. | 167 | | | `AZURE_AI_SEARCH_API_KEY` | Yes when using `api-search-key` | The API key for your Azure AI Search service. | 168 | | **Evaluation** | `EVAL_DATA_DIR` | Always | Path to the directory containing the JSONL evaluation data files. | 169 | | | `AZURE_OPENAI_ENDPOINT` | Text quality evaluators | Endpoint for Azure OpenAI | 170 | | | `AZURE_OPENAI_API_KEY` | Text quality evaluators | API key for Azure OpenAI | 171 | | | `AZURE_OPENAI_DEPLOYMENT` | Text quality evaluators | Deployment name (e.g., `gpt-4o`) | 172 | | | `AZURE_OPENAI_API_VERSION` | Text quality evaluators | Version of the OpenAI API | 173 | | | `AZURE_AI_PROJECT_ENDPOINT` | Agent services | Used for Azure AI Agent querying and evaluation | 174 | 175 | > [!NOTE] 176 | > **Model** 177 | > - `GITHUB_TOKEN` is used to authenticate with the GitHub API for testing models. It is not required if you are exploring models from the Foundry catalog. 178 | > 179 | > **Knowledge** 180 | > - See [Create a search service](https://learn.microsoft.com/en-us/azure/search/search-create-service-portal) to learn more about provisioning a search service. 181 | > - Azure AI Search supports multiple authentication methods. You can use either **Microsoft Entra authentication** or **key-based authentication** to authenticate your requests. The choice of authentication method depends on your security requirements and the Azure environment you are working in. 182 | > - See [Authentication](https://learn.microsoft.com/en-us/azure/search/search-security-overview#authentication) to learn more about authentication methods for a search service. 
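> - For example, a minimal `.env` for key-based authentication might look like the following sketch (all values are placeholders):
>
>   ```bash
>   AZURE_AI_SEARCH_ENDPOINT=https://<your-service-name>.search.windows.net/
>   SEARCH_AUTHENTICATION_METHOD=api-search-key
>   AZURE_AI_SEARCH_API_KEY=<your-search-api-key>
>   ```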
183 | > 184 | > **Evaluation** 185 | > - If you're using **agent tools or safety evaluators**, make sure the Azure project credentials are valid. 186 | > - If you're only doing **text quality evaluation**, the OpenAI endpoint and key are sufficient. 187 | 188 | ## License 189 | 190 | MIT License. See LICENSE for details. 191 | -------------------------------------------------------------------------------- /SECURITY.md: -------------------------------------------------------------------------------- 1 | 2 | 3 | ## Security 4 | 5 | Microsoft takes the security of our software products and services seriously, which includes all source code repositories managed through our GitHub organizations, which include [Microsoft](https://github.com/Microsoft), [Azure](https://github.com/Azure), [DotNet](https://github.com/dotnet), [AspNet](https://github.com/aspnet) and [Xamarin](https://github.com/xamarin). 6 | 7 | If you believe you have found a security vulnerability in any Microsoft-owned repository that meets [Microsoft's definition of a security vulnerability](https://aka.ms/security.md/definition), please report it to us as described below. 8 | 9 | ## Reporting Security Issues 10 | 11 | **Please do not report security vulnerabilities through public GitHub issues.** 12 | 13 | Instead, please report them to the Microsoft Security Response Center (MSRC) at [https://msrc.microsoft.com/create-report](https://aka.ms/security.md/msrc/create-report). 14 | 15 | If you prefer to submit without logging in, send email to [secure@microsoft.com](mailto:secure@microsoft.com). If possible, encrypt your message with our PGP key; please download it from the [Microsoft Security Response Center PGP Key page](https://aka.ms/security.md/msrc/pgp). 16 | 17 | You should receive a response within 24 hours. If for some reason you do not, please follow up via email to ensure we received your original message. Additional information can be found at [microsoft.com/msrc](https://www.microsoft.com/msrc). 18 | 19 | Please include the requested information listed below (as much as you can provide) to help us better understand the nature and scope of the possible issue: 20 | 21 | * Type of issue (e.g. buffer overflow, SQL injection, cross-site scripting, etc.) 22 | * Full paths of source file(s) related to the manifestation of the issue 23 | * The location of the affected source code (tag/branch/commit or direct URL) 24 | * Any special configuration required to reproduce the issue 25 | * Step-by-step instructions to reproduce the issue 26 | * Proof-of-concept or exploit code (if possible) 27 | * Impact of the issue, including how an attacker might exploit the issue 28 | 29 | This information will help us triage your report more quickly. 30 | 31 | If you are reporting for a bug bounty, more complete reports can contribute to a higher bounty award. Please visit our [Microsoft Bug Bounty Program](https://aka.ms/security.md/msrc/bounty) page for more details about our active programs. 32 | 33 | ## Preferred Languages 34 | 35 | We prefer all communications to be in English. 36 | 37 | ## Policy 38 | 39 | Microsoft follows the principle of [Coordinated Vulnerability Disclosure](https://aka.ms/security.md/cvd). 
40 | 41 | 42 | -------------------------------------------------------------------------------- /SUPPORT.md: -------------------------------------------------------------------------------- 1 | # TODO: The maintainer of this repo has not yet edited this file 2 | 3 | **REPO OWNER**: Do you want Customer Service & Support (CSS) support for this product/project? 4 | 5 | - **No CSS support:** Fill out this template with information about how to file issues and get help. 6 | - **Yes CSS support:** Fill out an intake form at [aka.ms/onboardsupport](https://aka.ms/onboardsupport). CSS will work with/help you to determine next steps. 7 | - **Not sure?** Fill out an intake as though the answer were "Yes". CSS will help you decide. 8 | 9 | *Then remove this first heading from this SUPPORT.MD file before publishing your repo.* 10 | 11 | # Support 12 | 13 | ## How to file issues and get help 14 | 15 | This project uses GitHub Issues to track bugs and feature requests. Please search the existing 16 | issues before filing new issues to avoid duplicates. For new issues, file your bug or 17 | feature request as a new Issue. 18 | 19 | For help and questions about using this project, please **REPO MAINTAINER: INSERT INSTRUCTIONS HERE 20 | FOR HOW TO ENGAGE REPO OWNERS OR COMMUNITY FOR HELP. COULD BE A STACK OVERFLOW TAG OR OTHER 21 | CHANNEL. WHERE WILL YOU HELP PEOPLE?**. 22 | 23 | ## Microsoft Support Policy 24 | 25 | Support for this **PROJECT or PRODUCT** is limited to the resources listed above. 26 | -------------------------------------------------------------------------------- /clients/README.md: -------------------------------------------------------------------------------- 1 | # Client examples 2 | 3 | ## Prepare the environment to run the server 4 | 5 | Recommended way to run this MCP server is to use `uv` / `uvx`. 6 | 7 | To install `uv` / `uvx`, refer to [Installing uv](https://docs.astral.sh/uv/getting-started/installation/). 8 | 9 | For example, in Linux/macOS, 10 | 11 | ```bash 12 | curl -LsSf https://astral.sh/uv/install.sh | sh 13 | ``` 14 | 15 | Or, in Windows, 16 | 17 | ```powershell 18 | powershell -ExecutionPolicy ByPass -c "irm https://astral.sh/uv/install.ps1 | iex" 19 | ``` 20 | 21 | If you have Python installed, you can also install `uv` / `uvx` using `pipx` or `pip`: 22 | 23 | ```bash 24 | # pipx recommended to install uv into an isolated environment 25 | pipx install uv 26 | # or `pip install uv` 27 | ``` 28 | 29 | > [!NOTE] 30 | > - `uvx` is a simple alias to `uv tool run` created for convenience. By installing `uv`, you will also have `uvx` available. 31 | 32 | ## Run the server to use with Visual Studio Code 33 | 34 | For Visual Studio Code, simply use the dedicated configuration file (`.vscode/mcp.json`) to run and/or connect to the MCP server. Visual Studio Code supports both Standard Input/Output (`stdio`) and Server-Sent Events (`sse`) modes. 35 | 36 | ### MCP configuration examples 37 | 38 | #### Quick start example 39 | 40 | The following `vscode/mcp.json` allows downloading and running the server from the remote URL. 
41 | 42 | ```json 43 | { 44 | "servers": { 45 | "mcp_foundry_server": { 46 | "type": "stdio", 47 | "command": "uvx", 48 | "args": [ 49 | "--prerelease=allow", 50 | "--from", 51 | "git+https://github.com/azure-ai-foundry/mcp-foundry.git", 52 | "run-azure-ai-foundry-mcp", 53 | "--envFile", 54 | "${workspaceFolder}/.env" 55 | ] 56 | } 57 | } 58 | } 59 | ``` 60 | 61 | > [!NOTE] 62 | > - The server can take a `.env` file as an argument to load environment variables from it. You can use the `--envFile` option to specify the path to the `.env` file. If the file is not found, the server will still start without loading it and will support all capabilities that do not require values passed via environment variables. 63 | 64 | #### Other scenarios 65 | 66 | - If you want to ensure it always downloads and runs the latest version of the MCP server, you can use the `--no-cache` option. 67 | 68 | 
69 | Use with --no-cache option 70 | 71 | ```json 72 | { 73 | "servers": { 74 | "mcp_foundry_server": { 75 | "type": "stdio", 76 | "command": "uvx", 77 | "args": [ 78 | "--no-cache", 79 | "--prerelease=allow", 80 | "--from", 81 | "git+https://github.com/azure-ai-foundry/mcp-foundry.git", 82 | "run-azure-ai-foundry-mcp" 83 | ] 84 | } 85 | } 86 | } 87 | ``` 88 | 89 |
90 | 91 | - You can run the server manually using the command line with SSE (server-sent events) mode, and configure `.vscode/mcp.json` to use the SSE transport. 92 | 93 |
94 | Use with SSE transport 95 | 96 | First run the server using the command line: 97 | 98 | ```bash 99 | uvx --prerelease=allow --from git+https://github.com/azure-ai-foundry/mcp-foundry.git run-azure-ai-foundry-mcp --transport sse 100 | ``` 101 | 102 | > [!NOTE] 103 | > - You can add `--no-cache` or `--envFile` option as you need. 104 | 105 | Then configure the `.vscode/mcp.json` to use the SSE transport: 106 | 107 | ```json 108 | { 109 | "servers": { 110 | "mcp_foundry_server": { 111 | "type": "sse", 112 | "url": "http://localhost:8000/sse" 113 | } 114 | } 115 | } 116 | ``` 117 | 118 |
119 | 120 | - You can run the server from your local file system, instead of a remote URL. 121 | 122 |
123 | Use with local clone 124 | 125 | First clone the repo to your local file system: 126 | 127 | ```bash 128 | git clone https://github.com/azure-ai-foundry/mcp-foundry.git 129 | ``` 130 | 131 | Then use the following `.vscode/mcp.json` to run the server: 132 | 133 | ```json 134 | { 135 | "servers": { 136 | "mcp_foundry_server": { 137 | "type": "stdio", 138 | "command": "uvx", 139 | "args": [ 140 | "--prerelease=allow", 141 | "--from", 142 | "./path/to/local/repo", 143 | "run-azure-ai-foundry-mcp" 144 | ] 145 | } 146 | } 147 | } 148 | ``` 149 | 150 |
151 | 152 | > [!NOTE] 153 | > - Role of `.vscode/mcp.json` is to configure the MCP server for Visual Studio Code. For `stdio` mode, it helps starting the server and connecting to the server. For `sse` mode, it helps connecting to the server that is already running. 154 | > - Above examples are provided for your reference. You can modify the command and arguments as per your requirements. 155 | > - To learn more about the transport modes supported by GitHub Copilot and its configuration format, refer to [Configuration format](https://code.visualstudio.com/docs/copilot/chat/mcp-servers#_configuration-format). 156 | 157 | ### Sample mcp.json files 158 | 159 | For convenience, we provide a few samples of MCP configuration files for VS Code in the `mcp-configs` folder. You can use them as a reference to create your own configuration file `.vscode/mcp.json`. 160 | 161 | - [mcp.stdio.uvx.local.json](./vscode/mcp-configs/mcp.stdio.uvx.local.json) 162 | - [mcp.stdio.uvx.remote.json](./vscode/mcp-configs/mcp.stdio.uvx.remote.json) 163 | - [mcp.sse.json](./vscode/mcp-configs/mcp.sse.json) 164 | 165 | ## Run the server in SSE mode for other MCP Clients 166 | 167 | You can run the server manually using the command line with SSE (server-sent events) mode, either from remote URL or a cloned repo. 168 | 169 | Below is an example command to run the server using the remote URL: 170 | 171 | ```bash 172 | uvx --prerelease=allow --from git+https://github.com/azure-ai-foundry/mcp-foundry.git run-azure-ai-foundry-mcp --transport sse 173 | ``` 174 | 175 | > [!NOTE] 176 | > - You can add `--no-cache` or `--envFile` option as you need, or even run it from a cloned repo. See [Other scenarios](#other-scenarios) for reference. 177 | 178 | Once the server is up and running, you can configure the MCP client to use the SSE transport by specifying the URL of the server `http://localhost:8000/sse`. 179 | 180 | ### Sample MCP Client app using SSE transport 181 | 182 | For your reference, we provide a [sample MCP client app based on PydanticAI](./python/pydantic-ai/README.md) that uses SSE transport. 183 | 184 | ## Troubleshooting 185 | 186 | - Server fails to start because uvx, uv are not available 187 | - Refer to [Installing uv](https://docs.astral.sh/uv/getting-started/installation/) to install and fix for your environment. 188 | -------------------------------------------------------------------------------- /clients/python/pydantic-ai/.env.example: -------------------------------------------------------------------------------- 1 | # Azure OpenAI credentials and endpoints 2 | AZURE_OPENAI_ENDPOINT="https://example.openai.azure.com/" 3 | OPENAI_API_VERSION="2025-01-01-preview" 4 | AZURE_OPENAI_API_KEY="example_key" 5 | 6 | # Service Principal - Azure Identity Default Credentials 7 | AZURE_TENANT_ID="example" 8 | AZURE_CLIENT_ID="example" 9 | AZURE_CLIENT_SECRET="example" 10 | 11 | SEARCH_AUTHENTICATION_METHOD="api-search-key" 12 | 13 | # Azure AI Search Configurations 14 | # AI Search Endpoint 15 | # This is the URL of the Azure AI Search service. It is used to send requests to the service. 16 | AZURE_AI_SEARCH_ENDPOINT="https://example.search.windows.net" 17 | 18 | # AI Search API Key for Read/Write permissions 19 | # This key is used to authenticate requests to the Azure AI Search service. 20 | # It is important to keep this key secure and not expose it in public repositories. 21 | # The key is used in the Authorization header of requests to the Azure AI Search service. 
22 | # It is recommended to use environment variables to store sensitive information like API keys. 23 | AZURE_AI_SEARCH_API_KEY="example" 24 | 25 | -------------------------------------------------------------------------------- /clients/python/pydantic-ai/.python-version: -------------------------------------------------------------------------------- 1 | 3.12 2 | -------------------------------------------------------------------------------- /clients/python/pydantic-ai/README.md: -------------------------------------------------------------------------------- 1 | # MCP Client using PydanticAI 2 | 3 | This is an MCP client, written with PydanticAI, intended for demoing the Foundry MCP Server. 4 | 5 | It comes with a helper MCP server that provides tools for fetching remote URL contents, which can be used in the service. 6 | 7 | For the demo, we are going to use the sample dataset to interact with the Azure AI Search tools from the MCP service. 8 | 9 | Before running the client, make sure the Foundry MCP server is running in SSE mode and reachable at `http://127.0.0.1:8000/sse` (see the [clients README](../../README.md) for how to start it). Then you can run the PydanticAI sample code as follows: 10 | 11 | ```bash 12 | git clone git@github.com:azure-ai-foundry/mcp-foundry.git 13 | 14 | cd mcp-foundry/clients/python/pydantic-ai 15 | 16 | uv run main.py 17 | ``` 18 | 19 | Once you run the command, you can use prompts to interact with the MCP server. 20 | -------------------------------------------------------------------------------- /clients/python/pydantic-ai/main.py: -------------------------------------------------------------------------------- 1 | import os 2 | from typing import Sequence 3 | 4 | from dotenv import load_dotenv 5 | from openai import AsyncAzureOpenAI 6 | from pydantic_ai import Agent 7 | from pydantic_ai.agent import AgentRunResult 8 | from pydantic_ai.mcp import MCPServerHTTP, MCPServer 9 | from pydantic_ai.messages import ModelRequest, ModelResponse 10 | from pydantic_ai.models.openai import OpenAIModel 11 | from pydantic_ai.providers.openai import OpenAIProvider 12 | from rich.prompt import Prompt 13 | 14 | mcp_env_file: str = '.env' 15 | 16 | if mcp_env_file and os.path.exists(mcp_env_file): 17 |     load_dotenv(dotenv_path=mcp_env_file) 18 |     print(f"Environment variables loaded from {mcp_env_file}") 19 | else: 20 |     print(f"Env file '{mcp_env_file}' not found. Skipping environment loading.") 21 | 22 | # Run the program as: 23 | # uv run main.py 24 | 25 | # These MCP servers have to be running in SSE mode before you start this Python program 26 | 27 | # Tools for Azure AI Search Operations 28 | ai_search_mcp_server = MCPServerHTTP(url='http://127.0.0.1:8000/sse') 29 | 30 | my_mcp_servers: Sequence[MCPServer] = [ai_search_mcp_server] 31 | 32 | # This is the OpenAI model name 33 | model_name = 'gpt-4o-mini' 34 | 35 | client = AsyncAzureOpenAI() 36 | model = OpenAIModel(model_name, provider=OpenAIProvider(openai_client=client)) 37 | agent = Agent(model, mcp_servers=my_mcp_servers) 38 | 39 | 40 | global_message = """ 41 | Hello AI Search Developer, 42 | I am a helpful assistant and I can answer questions about Contoso Groceries - an online grocery service where shopping is a pleasure. 43 | I have access to the AI Search Index for Contoso Groceries and can help developers interact with the data and resources in the AI Search service. 44 | Please let me know how I can help you. 45 | Ask me to show you what tools I have available to support your development efforts. If you forget the tools, please ask me again. 
46 | 47 | """ 48 | prompt = """ 49 | How can I help you?""" 50 | 51 | print(global_message) 52 | 53 | 54 | async def main(): 55 | async with agent.run_mcp_servers(): 56 | 57 | message_history: list[ModelRequest | ModelResponse] = [] 58 | 59 | while True: 60 | # Prompt the user for input 61 | # and send it to the agent for processing 62 | # Use rich prompt for better user experience 63 | question = Prompt.ask(prompt) 64 | result: AgentRunResult = await agent.run(question, message_history=message_history) 65 | 66 | latest_message_history: list[ModelRequest | ModelResponse] = result.all_messages() 67 | 68 | message_history = latest_message_history 69 | 70 | print(result.output) 71 | 72 | if __name__ == '__main__': 73 | import asyncio 74 | import sys 75 | asyncio.run(main()) -------------------------------------------------------------------------------- /clients/python/pydantic-ai/pyproject.toml: -------------------------------------------------------------------------------- 1 | [project] 2 | name = "ai-search-pydantic-mcp-client" 3 | version = "0.1.0" 4 | description = "An MCP Client for Azure AI Search Service" 5 | readme = "README.md" 6 | requires-python = ">=3.12" 7 | dependencies = [ 8 | "httpx>=0.28.1", 9 | "mcp==1.6.0", 10 | "openai>=1.77.0", 11 | "pydantic>=2.11.4", 12 | "pydantic-ai>=0.1.10", 13 | "python-dotenv>=1.1.0", 14 | "rich>=14.0.0", 15 | ] 16 | -------------------------------------------------------------------------------- /clients/sample-dataset/customers/all_customers.json: -------------------------------------------------------------------------------- 1 | [ 2 | { 3 | "id": "301", 4 | "first_name": "Danielle", 5 | "last_name": "Johnson", 6 | "email": "danielle.johnson@contogrocery.com", 7 | "phone_number": "332-181-9600", 8 | "date_of_birth": "1988-09-23", 9 | "gender": "Female", 10 | "signup_date": "2024-08-14", 11 | "preferred_language": "en", 12 | "active": true, 13 | "points": 740.81 14 | }, 15 | { 16 | "id": "302", 17 | "first_name": "Anthony", 18 | "last_name": "Gonzalez", 19 | "email": "anthony.gonzalez@contogrocery.com", 20 | "phone_number": "863-794-0265", 21 | "date_of_birth": "1961-04-18", 22 | "gender": "Male", 23 | "signup_date": "2023-07-16", 24 | "preferred_language": "en", 25 | "active": true, 26 | "points": 735.73 27 | }, 28 | { 29 | "id": "303", 30 | "first_name": "Noah", 31 | "last_name": "Howard", 32 | "email": "noah.howard@contogrocery.com", 33 | "phone_number": "(940)781-6184", 34 | "date_of_birth": "1967-06-05", 35 | "gender": "Male", 36 | "signup_date": "2024-11-02", 37 | "preferred_language": "fr", 38 | "active": true, 39 | "points": 589.9 40 | }, 41 | { 42 | "id": "304", 43 | "first_name": "Chad", 44 | "last_name": "Stanley", 45 | "email": "chad.stanley@contogrocery.com", 46 | "phone_number": "413-164-7525", 47 | "date_of_birth": "1997-07-20", 48 | "gender": "Male", 49 | "signup_date": "2023-09-14", 50 | "preferred_language": "en", 51 | "active": true, 52 | "points": 218.42 53 | }, 54 | { 55 | "id": "305", 56 | "first_name": "Amanda", 57 | "last_name": "Sanchez", 58 | "email": "amanda.sanchez@contogrocery.com", 59 | "phone_number": "648.350.3056", 60 | "date_of_birth": "1992-12-08", 61 | "gender": "Female", 62 | "signup_date": "2025-03-15", 63 | "preferred_language": "fr", 64 | "active": true, 65 | "points": 560.68 66 | }, 67 | { 68 | "id": "306", 69 | "first_name": "Jennifer", 70 | "last_name": "Lewis", 71 | "email": "jennifer.lewis@contogrocery.com", 72 | "phone_number": "672.423.8849", 73 | "date_of_birth": "1969-04-07", 74 | "gender": 
"Female", 75 | "signup_date": "2023-08-20", 76 | "preferred_language": "fr", 77 | "active": false, 78 | "points": 220.22 79 | }, 80 | { 81 | "id": "307", 82 | "first_name": "William", 83 | "last_name": "Davis", 84 | "email": "william.davis@contogrocery.com", 85 | "phone_number": "407.555.1212", 86 | "date_of_birth": "1990-05-07", 87 | "gender": "Male", 88 | "signup_date": "2023-11-16", 89 | "preferred_language": "es", 90 | "active": true, 91 | "points": 758.05 92 | }, 93 | { 94 | "id": "308", 95 | "first_name": "Tasha", 96 | "last_name": "Valencia", 97 | "email": "tasha.valencia@contogrocery.com", 98 | "phone_number": "184.514.6270", 99 | "date_of_birth": "2003-06-24", 100 | "gender": "Female", 101 | "signup_date": "2025-03-15", 102 | "preferred_language": "fr", 103 | "active": false, 104 | "points": 339.91 105 | }, 106 | { 107 | "id": "309", 108 | "first_name": "Jennifer", 109 | "last_name": "Stanton", 110 | "email": "jennifer.stanton@contogrocery.com", 111 | "phone_number": "281-489-3252", 112 | "date_of_birth": "1987-08-04", 113 | "gender": "Female", 114 | "signup_date": "2023-05-20", 115 | "preferred_language": "en", 116 | "active": false, 117 | "points": 102.11 118 | }, 119 | { 120 | "id": "310", 121 | "first_name": "Daniel", 122 | "last_name": "Cox", 123 | "email": "daniel.cox@contogrocery.com", 124 | "phone_number": "303-911-7182", 125 | "date_of_birth": "1972-05-18", 126 | "gender": "Female", 127 | "signup_date": "2024-06-17", 128 | "preferred_language": "en", 129 | "active": false, 130 | "points": 846.65 131 | }, 132 | { 133 | "id": "311", 134 | "first_name": "Monique", 135 | "last_name": "Henderson", 136 | "email": "monique.henderson@contogrocery.com", 137 | "phone_number": "834.657.8713", 138 | "date_of_birth": "1969-08-18", 139 | "gender": "Female", 140 | "signup_date": "2023-06-23", 141 | "preferred_language": "es", 142 | "active": true, 143 | "points": 729.0 144 | }, 145 | { 146 | "id": "312", 147 | "first_name": "Matthew", 148 | "last_name": "Chapman", 149 | "email": "matthew.chapman@contogrocery.com", 150 | "phone_number": "930.103.1051", 151 | "date_of_birth": "1989-04-30", 152 | "gender": "Male", 153 | "signup_date": "2023-11-05", 154 | "preferred_language": "en", 155 | "active": false, 156 | "points": 78.72 157 | }, 158 | { 159 | "id": "313", 160 | "first_name": "Joshua", 161 | "last_name": "Washington", 162 | "email": "joshua.washington@contogrocery.com", 163 | "phone_number": "299-737-6311", 164 | "date_of_birth": "1999-03-09", 165 | "gender": "Male", 166 | "signup_date": "2024-04-03", 167 | "preferred_language": "fr", 168 | "active": false, 169 | "points": 576.77 170 | }, 171 | { 172 | "id": "314", 173 | "first_name": "Michael", 174 | "last_name": "Jenkins", 175 | "email": "michael.jenkins@contogrocery.com", 176 | "phone_number": "301-651-3338", 177 | "date_of_birth": "1973-04-18", 178 | "gender": "Male", 179 | "signup_date": "2024-04-28", 180 | "preferred_language": "en", 181 | "active": true, 182 | "points": 660.6 183 | }, 184 | { 185 | "id": "315", 186 | "first_name": "John", 187 | "last_name": "Leblanc", 188 | "email": "john.leblanc@contogrocery.com", 189 | "phone_number": "810.801.3267x736", 190 | "date_of_birth": "1958-05-01", 191 | "gender": "Male", 192 | "signup_date": "2023-09-09", 193 | "preferred_language": "en", 194 | "active": true, 195 | "points": 865.62 196 | }, 197 | { 198 | "id": "316", 199 | "first_name": "Nicholas", 200 | "last_name": "Galloway", 201 | "email": "nicholas.galloway@contogrocery.com", 202 | "phone_number": "746-872-3430", 203 | 
"date_of_birth": "2005-03-29", 204 | "gender": "Male", 205 | "signup_date": "2024-01-03", 206 | "preferred_language": "es", 207 | "active": false, 208 | "points": 635.05 209 | }, 210 | { 211 | "id": "317", 212 | "first_name": "Brian", 213 | "last_name": "Silva", 214 | "email": "brian.silva@contogrocery.com", 215 | "phone_number": "820.812.1913", 216 | "date_of_birth": "1994-10-18", 217 | "gender": "Male", 218 | "signup_date": "2023-06-04", 219 | "preferred_language": "en", 220 | "active": false, 221 | "points": 354.92 222 | }, 223 | { 224 | "id": "318", 225 | "first_name": "Christine", 226 | "last_name": "Barnes", 227 | "email": "christine.barnes@contogrocery.com", 228 | "phone_number": "985-435-3462", 229 | "date_of_birth": "2000-01-18", 230 | "gender": "Female", 231 | "signup_date": "2024-09-17", 232 | "preferred_language": "es", 233 | "active": true, 234 | "points": 608.52 235 | }, 236 | { 237 | "id": "319", 238 | "first_name": "Kevin", 239 | "last_name": "Stewart", 240 | "email": "kevin.stewart@contogrocery.com", 241 | "phone_number": "107-991-1838", 242 | "date_of_birth": "1979-06-27", 243 | "gender": "Male", 244 | "signup_date": "2023-12-12", 245 | "preferred_language": "fr", 246 | "active": true, 247 | "points": 163.24 248 | }, 249 | { 250 | "id": "320", 251 | "first_name": "Erica", 252 | "last_name": "Bass", 253 | "email": "erica.bass@contogrocery.com", 254 | "phone_number": "498-084-1241", 255 | "date_of_birth": "1972-11-15", 256 | "gender": "Female", 257 | "signup_date": "2023-12-09", 258 | "preferred_language": "es", 259 | "active": true, 260 | "points": 683.93 261 | } 262 | ] -------------------------------------------------------------------------------- /clients/sample-dataset/customers/danielle.json: -------------------------------------------------------------------------------- 1 | { 2 | "id": "301", 3 | "first_name": "Danielle", 4 | "last_name": "Johnson", 5 | "email": "danielle.johnson@contogrocery.com", 6 | "phone_number": "332-181-9600", 7 | "date_of_birth": "1988-09-23", 8 | "gender": "Female", 9 | "signup_date": "2024-08-14", 10 | "preferred_language": "en", 11 | "active": true, 12 | "points": 740.81 13 | } -------------------------------------------------------------------------------- /clients/sample-dataset/customers/erica.json: -------------------------------------------------------------------------------- 1 | { 2 | "id": "320", 3 | "first_name": "Erica", 4 | "last_name": "Bass", 5 | "email": "erica.bass@contogrocery.com", 6 | "phone_number": "498-084-1241", 7 | "date_of_birth": "1972-11-15", 8 | "gender": "Female", 9 | "signup_date": "2023-12-09", 10 | "preferred_language": "es", 11 | "active": true, 12 | "points": 683.93 13 | } -------------------------------------------------------------------------------- /clients/sample-dataset/customers/john.json: -------------------------------------------------------------------------------- 1 | { 2 | "id": "315", 3 | "first_name": "John", 4 | "last_name": "Leblanc", 5 | "email": "john.leblanc@contogrocery.com", 6 | "phone_number": "810.801.3267x736", 7 | "date_of_birth": "1958-05-01", 8 | "gender": "Male", 9 | "signup_date": "2023-09-09", 10 | "preferred_language": "en", 11 | "active": true, 12 | "points": 865.62 13 | } -------------------------------------------------------------------------------- /clients/vscode/README.md: -------------------------------------------------------------------------------- 1 | # VSCode Setup for Foundry MCP Server 2 | 3 | Following the guidance from [Visual Studio 
Code](https://code.visualstudio.com/docs/copilot/chat/mcp-servers), you can use one of the files in the mcp config files from [the examples](./mcp-configs) to configure your Visual Studio Code in Agent Mode. 4 | 5 | You can take the content of the config files that meets your need, and put it in the file `.vscode/mcp.json` in your workspace, and you should be able to start up the MCP server. 6 | -------------------------------------------------------------------------------- /clients/vscode/mcp-configs/mcp.sse.json: -------------------------------------------------------------------------------- 1 | { 2 | "servers": { 3 | "mcp_foundry_server": { 4 | "type": "sse", 5 | "url": "http://localhost:8000/sse" 6 | } 7 | } 8 | } -------------------------------------------------------------------------------- /clients/vscode/mcp-configs/mcp.stdio.uvx.local.json: -------------------------------------------------------------------------------- 1 | { 2 | "servers": { 3 | "mcp_foundry_server": { 4 | "type": "stdio", 5 | "command": "uvx", 6 | "args": [ 7 | "--no-cache", 8 | "--prerelease=allow", 9 | "--directory", 10 | "${workspaceFolder}", 11 | "--from", 12 | ".", 13 | "run-azure-ai-foundry-mcp", 14 | "--envFile", 15 | ".env" 16 | ] 17 | } 18 | } 19 | } -------------------------------------------------------------------------------- /clients/vscode/mcp-configs/mcp.stdio.uvx.remote.json: -------------------------------------------------------------------------------- 1 | { 2 | "servers": { 3 | "mcp_foundry_server": { 4 | "type": "stdio", 5 | "command": "uvx", 6 | "args": [ 7 | "--prerelease=allow", 8 | "--from", 9 | "git+https://github.com/azure-ai-foundry/mcp-foundry.git", 10 | "run-azure-ai-foundry-mcp", 11 | "--envFile", 12 | "${workspaceFolder}/.env" 13 | ] 14 | } 15 | } 16 | } -------------------------------------------------------------------------------- /mcp.json: -------------------------------------------------------------------------------- 1 | { 2 | "servers": { 3 | "mcp_foundry_server": { 4 | "type": "stdio", 5 | "command": "uvx", 6 | "args": [ 7 | "--prerelease=allow", 8 | "--from", 9 | "git+https://github.com/azure-ai-foundry/mcp-foundry.git", 10 | "run-azure-ai-foundry-mcp", 11 | "--envFile", 12 | "${workspaceFolder}/.env" 13 | ] 14 | } 15 | } 16 | } -------------------------------------------------------------------------------- /pyproject.toml: -------------------------------------------------------------------------------- 1 | [project] 2 | name = "mcp-foundry" 3 | version = "0.1.0" 4 | description = "MCP Server for Azure AI Foundry (experimental)" 5 | readme = "README.md" 6 | requires-python = ">=3.10" 7 | dependencies = [ 8 | "mcp>=1.8.0", 9 | "requests>=2.32.3", 10 | "azure-mgmt-cognitiveservices>=13.0.0", 11 | "azure-identity>=1.0", 12 | "jinja2~=3.0", 13 | "azure-search-documents>=11.5.2", 14 | "azure-cli>=2.60.0", 15 | "azure-ai-evaluation>=1.7.0", 16 | "azure-ai-projects>=1.0.0b11" 17 | ] 18 | 19 | [dependency-groups] 20 | test = [ 21 | "pytest>=8.3.5", 22 | "pytest-asyncio>=0.26.0", 23 | ] 24 | [tool.pytest.ini_options] 25 | asyncio_default_fixture_loop_scope = "function" # or "module", "session" based on my use case 26 | pythonpath = ["src"] 27 | 28 | [project.scripts] 29 | run-azure-ai-foundry-mcp = "mcp_foundry.__main__:main" 30 | -------------------------------------------------------------------------------- /src/__init__.py: -------------------------------------------------------------------------------- 
https://raw.githubusercontent.com/azure-ai-foundry/mcp-foundry/af7ac18108edd8313c94812cf24b294c4e1bf4ea/src/__init__.py -------------------------------------------------------------------------------- /src/mcp_foundry/__init__.py: -------------------------------------------------------------------------------- 1 | from .mcp_foundry_knowledge import SearchIndexDao, SearchBaseDao, SearchClientDao, SearchIndexerDao, SearchIndexSchema, SearchFieldSchema 2 | from .mcp_foundry_knowledge import SuggesterSchema, CorsOptionsSchema, ScoringProfileSchema, FieldMappingModel, convert_pydantic_model_to_search_index 3 | from .mcp_foundry_knowledge import convert_to_field_mappings, OperationResult, SearchDocument 4 | 5 | from .mcp_server import mcp 6 | 7 | __all__ = ( 8 | 'mcp', 9 | 'SearchIndexDao', 10 | 'SearchBaseDao', 11 | 'SearchClientDao', 12 | 'SearchIndexerDao', 13 | 'SearchIndexSchema', 14 | 'SearchFieldSchema', 15 | 'SuggesterSchema', 16 | 'CorsOptionsSchema', 17 | 'ScoringProfileSchema', 18 | 'FieldMappingModel', 19 | 'convert_pydantic_model_to_search_index', 20 | 'convert_to_field_mappings', 21 | 'OperationResult', 22 | 'SearchDocument' 23 | ) 24 | 25 | 26 | -------------------------------------------------------------------------------- /src/mcp_foundry/__main__.py: -------------------------------------------------------------------------------- 1 | import logging 2 | import os 3 | import sys 4 | from argparse import ArgumentParser 5 | from typing import Literal 6 | from dotenv import load_dotenv 7 | 8 | from .mcp_server import mcp, auto_import_modules 9 | 10 | 11 | # Configure logging 12 | logging.basicConfig( 13 | level=logging.INFO, 14 | format="%(asctime)s - %(name)s - %(levelname)s - %(message)s", 15 | stream=sys.stderr, 16 | ) 17 | logger = logging.getLogger("__main__") 18 | 19 | def main() -> None: 20 | """Runs the MCP server""" 21 | 22 | parser = ArgumentParser(description="Start the MCP service with provided or default configuration.") 23 | 24 | parser.add_argument('--transport', required=False, default='stdio', 25 | help='Transport protocol (sse | stdio | streamable-http) (default: stdio)') 26 | parser.add_argument('--envFile', required=False, default='.env', 27 | help='Path to .env file (default: .env)') 28 | 29 | # Parse the application arguments 30 | args = parser.parse_args() 31 | 32 | # Retrieve the specified transport and environment file 33 | specified_transport: Literal["stdio", "sse", "streamable-http"] = args.transport 34 | mcp_env_file = args.envFile 35 | 36 | logger.info(f"Starting MCP server: Transport = {specified_transport}") 37 | 38 | # Check if envFile exists and load it 39 | if mcp_env_file and os.path.exists(mcp_env_file): 40 | load_dotenv(dotenv_path=mcp_env_file) 41 | logger.info(f"Environment variables loaded from {mcp_env_file}") 42 | else: 43 | logger.warning(f"Environment file '{mcp_env_file}' not found. 
Skipping environment loading.") 44 | 45 | # Run this on startup 46 | auto_import_modules("mcp_foundry", targets=["tools", "resources", "prompts"]) 47 | mcp.run(transport=specified_transport) 48 | 49 | 50 | if __name__ == "__main__": 51 | main() 52 | -------------------------------------------------------------------------------- /src/mcp_foundry/mcp_foundry_evaluation/tools.py: -------------------------------------------------------------------------------- 1 | import asyncio 2 | import contextlib 3 | import json 4 | import logging 5 | import os 6 | import subprocess 7 | import sys 8 | import tempfile 9 | from typing import Any, Dict, List, Optional, Union 10 | 11 | 12 | # Azure AI Evaluation Imports 13 | from azure.ai.evaluation import ( 14 | # Agent Converter 15 | BleuScoreEvaluator, 16 | CodeVulnerabilityEvaluator, 17 | CoherenceEvaluator, 18 | ContentSafetyEvaluator, 19 | F1ScoreEvaluator, 20 | FluencyEvaluator, 21 | # Text Evaluators 22 | GroundednessEvaluator, 23 | HateUnfairnessEvaluator, 24 | IndirectAttackEvaluator, 25 | # Agent Evaluators 26 | IntentResolutionEvaluator, 27 | MeteorScoreEvaluator, 28 | ProtectedMaterialEvaluator, 29 | QAEvaluator, 30 | RelevanceEvaluator, 31 | RetrievalEvaluator, 32 | RougeScoreEvaluator, 33 | SelfHarmEvaluator, 34 | SexualEvaluator, 35 | SimilarityEvaluator, 36 | TaskAdherenceEvaluator, 37 | ToolCallAccuracyEvaluator, 38 | UngroundedAttributesEvaluator, 39 | ViolenceEvaluator, 40 | evaluate, 41 | ) 42 | from azure.ai.projects.aio import AIProjectClient 43 | from azure.ai.agents.models import Agent, MessageRole, MessageTextContent 44 | 45 | # Azure Imports 46 | from azure.identity import DefaultAzureCredential 47 | from azure.identity.aio import DefaultAzureCredential as AsyncDefaultAzureCredential 48 | from dotenv import load_dotenv 49 | 50 | from mcp_foundry.mcp_server import mcp 51 | 52 | # Configure logging 53 | logging.basicConfig( 54 | level=logging.INFO, 55 | format="%(asctime)s - %(name)s - %(levelname)s - %(message)s", 56 | stream=sys.stderr, 57 | ) 58 | logger = logging.getLogger("mcp_foundry_evaluation") 59 | 60 | 61 | # Configure PromptFlow logging to go to stderr 62 | def configure_promptflow_logging(): 63 | import logging 64 | 65 | promptflow_logger = logging.getLogger("promptflow") 66 | for handler in promptflow_logger.handlers: 67 | promptflow_logger.removeHandler(handler) 68 | handler = logging.StreamHandler(sys.stderr) 69 | handler.setLevel(logging.INFO) 70 | formatter = logging.Formatter("%(asctime)s - %(name)s - %(levelname)s - %(message)s") 71 | handler.setFormatter(formatter) 72 | promptflow_logger.addHandler(handler) 73 | promptflow_logger.propagate = False # Don't propagate to root logger 74 | 75 | 76 | # Call this function early in your script's execution 77 | configure_promptflow_logging() 78 | 79 | # Load environment variables 80 | load_dotenv() 81 | 82 | ####################### 83 | # CONFIGURATION SETUP # 84 | ####################### 85 | 86 | # Initialize Azure AI project and Azure OpenAI connection with environment variables 87 | try: 88 | # Sync credential for evaluations 89 | CREDENTIAL = DefaultAzureCredential() 90 | 91 | # Azure OpenAI model configuration 92 | MODEL_CONFIG = { 93 | "azure_endpoint": os.environ.get("AZURE_OPENAI_ENDPOINT"), 94 | "api_key": os.environ.get("AZURE_OPENAI_API_KEY"), 95 | "azure_deployment": os.environ.get("AZURE_OPENAI_DEPLOYMENT"), 96 | "api_version": os.environ.get("AZURE_OPENAI_API_VERSION"), 97 | } 98 | 99 | # Directory for evaluation data files 100 | EVAL_DATA_DIR = 
os.environ.get("EVAL_DATA_DIR", ".") 101 | 102 | # Azure AI Agent configuration 103 | DEFAULT_AGENT_ID = os.environ.get("DEFAULT_AGENT_ID") 104 | AZURE_AI_PROJECT_ENDPOINT = os.environ.get("AZURE_AI_PROJECT_ENDPOINT") 105 | 106 | # Initialization flags 107 | EVALUATION_INITIALIZED = True 108 | if not all([AZURE_AI_PROJECT_ENDPOINT, MODEL_CONFIG["azure_endpoint"]]): 109 | EVALUATION_INITIALIZED = False 110 | logger.warning("Some evaluation credentials are missing, some evaluators may not work") 111 | 112 | AGENT_INITIALIZED = bool(AZURE_AI_PROJECT_ENDPOINT) 113 | if not AGENT_INITIALIZED: 114 | logger.warning("AZURE_AI_PROJECT_ENDPOINT is missing, agent features will not work") 115 | 116 | except Exception as e: 117 | logger.error(f"Initialization error: {str(e)}") 118 | CREDENTIAL = None 119 | AZURE_AI_PROJECT_ENDPOINT = None 120 | MODEL_CONFIG = None 121 | EVALUATION_INITIALIZED = False 122 | AGENT_INITIALIZED = False 123 | 124 | # Global variables for agent client and cache 125 | AI_CLIENT: Optional[AIProjectClient] = None 126 | AGENT_CACHE = {} 127 | 128 | 129 | async def initialize_agent_client(): 130 | """Initialize the Azure AI Agent client asynchronously.""" 131 | global AI_CLIENT 132 | 133 | if not AGENT_INITIALIZED: 134 | return False 135 | 136 | try: 137 | async_credential = AsyncDefaultAzureCredential() 138 | AI_CLIENT = AIProjectClient(endpoint=AZURE_AI_PROJECT_ENDPOINT, credential=async_credential) 139 | return True 140 | except Exception as e: 141 | logger.error(f"Failed to initialize AIProjectClient: {str(e)}") 142 | return False 143 | 144 | 145 | ####################### 146 | # EVALUATOR MAPPINGS # 147 | ####################### 148 | 149 | # Map evaluator names to classes for dynamic instantiation 150 | TEXT_EVALUATOR_MAP = { 151 | "groundedness": GroundednessEvaluator, 152 | "relevance": RelevanceEvaluator, 153 | "coherence": CoherenceEvaluator, 154 | "fluency": FluencyEvaluator, 155 | "similarity": SimilarityEvaluator, 156 | "retrieval": RetrievalEvaluator, 157 | "f1": F1ScoreEvaluator, 158 | "rouge": RougeScoreEvaluator, 159 | "bleu": BleuScoreEvaluator, 160 | "meteor": MeteorScoreEvaluator, 161 | "violence": ViolenceEvaluator, 162 | "sexual": SexualEvaluator, 163 | "self_harm": SelfHarmEvaluator, 164 | "hate_unfairness": HateUnfairnessEvaluator, 165 | "indirect_attack": IndirectAttackEvaluator, 166 | "protected_material": ProtectedMaterialEvaluator, 167 | "ungrounded_attributes": UngroundedAttributesEvaluator, 168 | "code_vulnerability": CodeVulnerabilityEvaluator, 169 | "qa": QAEvaluator, 170 | "content_safety": ContentSafetyEvaluator, 171 | } 172 | 173 | # Map agent evaluator names to classes 174 | AGENT_EVALUATOR_MAP = { 175 | "intent_resolution": IntentResolutionEvaluator, 176 | "tool_call_accuracy": ToolCallAccuracyEvaluator, 177 | "task_adherence": TaskAdherenceEvaluator, 178 | } 179 | 180 | # Required parameters for each text evaluator 181 | TEXT_EVALUATOR_REQUIREMENTS = { 182 | "groundedness": {"query": "Optional", "response": "Required", "context": "Required"}, 183 | "relevance": {"query": "Required", "response": "Required"}, 184 | "coherence": {"query": "Required", "response": "Required"}, 185 | "fluency": {"response": "Required"}, 186 | "similarity": {"query": "Required", "response": "Required", "ground_truth": "Required"}, 187 | "retrieval": {"query": "Required", "context": "Required"}, 188 | "f1": {"response": "Required", "ground_truth": "Required"}, 189 | "rouge": {"response": "Required", "ground_truth": "Required"}, 190 | "bleu": {"response": "Required", 
"ground_truth": "Required"}, 191 | "meteor": {"response": "Required", "ground_truth": "Required"}, 192 | "violence": {"query": "Required", "response": "Required"}, 193 | "sexual": {"query": "Required", "response": "Required"}, 194 | "self_harm": {"query": "Required", "response": "Required"}, 195 | "hate_unfairness": {"query": "Required", "response": "Required"}, 196 | "indirect_attack": {"query": "Required", "response": "Required", "context": "Required"}, 197 | "protected_material": {"query": "Required", "response": "Required"}, 198 | "ungrounded_attributes": {"query": "Required", "response": "Required", "context": "Required"}, 199 | "code_vulnerability": {"query": "Required", "response": "Required"}, 200 | "qa": {"query": "Required", "response": "Required", "context": "Required", "ground_truth": "Required"}, 201 | "content_safety": {"query": "Required", "response": "Required"}, 202 | } 203 | 204 | # Required parameters for each agent evaluator 205 | agent_evaluator_requirements = { 206 | "intent_resolution": { 207 | "query": "Required (Union[str, list[Message]])", 208 | "response": "Required (Union[str, list[Message]])", 209 | "tool_definitions": "Optional (list[ToolDefinition])", 210 | }, 211 | "tool_call_accuracy": { 212 | "query": "Required (Union[str, list[Message]])", 213 | "response": "Optional (Union[str, list[Message]])", 214 | "tool_calls": "Optional (Union[dict, list[ToolCall]])", 215 | "tool_definitions": "Required (list[ToolDefinition])", 216 | }, 217 | "task_adherence": { 218 | "query": "Required (Union[str, list[Message]])", 219 | "response": "Required (Union[str, list[Message]])", 220 | "tool_definitions": "Optional (list[ToolCall])", 221 | }, 222 | } 223 | 224 | ###################### 225 | # HELPER FUNCTIONS # 226 | ###################### 227 | 228 | 229 | def create_text_evaluator(evaluator_name: str) -> Any: 230 | """Create and configure an appropriate text evaluator instance.""" 231 | if evaluator_name not in TEXT_EVALUATOR_MAP: 232 | raise ValueError(f"Unknown text evaluator: {evaluator_name}") 233 | 234 | EvaluatorClass = TEXT_EVALUATOR_MAP[evaluator_name] 235 | 236 | # AI-assisted quality evaluators need a model 237 | if evaluator_name in ["groundedness", "relevance", "coherence", "fluency", "similarity"]: 238 | if not MODEL_CONFIG or not all([MODEL_CONFIG["azure_endpoint"], MODEL_CONFIG["api_key"]]): 239 | raise ValueError(f"Model configuration required for {evaluator_name} evaluator") 240 | return EvaluatorClass(MODEL_CONFIG) 241 | 242 | # AI-assisted risk and safety evaluators need Azure credentials 243 | elif evaluator_name in [ 244 | "violence", 245 | "sexual", 246 | "self_harm", 247 | "hate_unfairness", 248 | "indirect_attack", 249 | "protected_material", 250 | "ungrounded_attributes", 251 | "code_vulnerability", 252 | "content_safety", 253 | ]: 254 | if CREDENTIAL is None or AZURE_AI_PROJECT_ENDPOINT is None: 255 | raise ValueError(f"Azure credentials required for {evaluator_name} evaluator") 256 | return EvaluatorClass(credential=CREDENTIAL, azure_ai_project=AZURE_AI_PROJECT_ENDPOINT) 257 | 258 | # NLP evaluators don't need special configuration 259 | else: 260 | return EvaluatorClass() 261 | 262 | 263 | def create_agent_evaluator(evaluator_name: str) -> Any: 264 | """Create and configure an appropriate agent evaluator instance.""" 265 | if evaluator_name not in AGENT_EVALUATOR_MAP: 266 | raise ValueError(f"Unknown agent evaluator: {evaluator_name}") 267 | 268 | if not MODEL_CONFIG or not all([MODEL_CONFIG["azure_endpoint"], MODEL_CONFIG["api_key"]]): 269 | 
raise ValueError(f"Model configuration required for {evaluator_name} evaluator") 270 | 271 | EvaluatorClass = AGENT_EVALUATOR_MAP[evaluator_name] 272 | return EvaluatorClass(model_config=MODEL_CONFIG) 273 | 274 | 275 | async def get_agent(client: AIProjectClient, agent_id: str) -> Agent: 276 | """Get an agent by ID with simple caching.""" 277 | global AGENT_CACHE 278 | 279 | # Check cache first 280 | if agent_id in AGENT_CACHE: 281 | return AGENT_CACHE[agent_id] 282 | 283 | # Fetch agent if not in cache 284 | try: 285 | agent = await client.agents.get_agent(agent_id=agent_id) 286 | AGENT_CACHE[agent_id] = agent 287 | return agent 288 | except Exception as e: 289 | logger.error(f"Agent retrieval failed - ID: {agent_id}, Error: {str(e)}") 290 | raise ValueError(f"Agent not found or inaccessible: {agent_id}") 291 | 292 | 293 | async def query_agent(client: AIProjectClient, agent_id: str, query: str) -> Dict: 294 | """Query an Azure AI Agent and get the response with full thread/run data.""" 295 | try: 296 | # Get agent (from cache or fetch it) 297 | agent = await get_agent(client, agent_id) 298 | 299 | # Always create a new thread 300 | thread = await client.agents.threads.create() 301 | thread_id = thread.id 302 | 303 | # Add message to thread 304 | await client.agents.messages.create(thread_id=thread_id, role=MessageRole.USER, content=query) 305 | 306 | # Process the run 307 | run = await client.agents.runs.create(thread_id=thread_id, agent_id=agent_id) 308 | run_id = run.id 309 | 310 | # Poll until the run is complete 311 | while run.status in ["queued", "in_progress", "requires_action"]: 312 | await asyncio.sleep(1) # Non-blocking sleep 313 | run = await client.agents.runs.get(thread_id=thread_id, run_id=run.id) 314 | 315 | if run.status == "failed": 316 | error_msg = f"Agent run failed: {run.last_error}" 317 | logger.error(error_msg) 318 | return { 319 | "success": False, 320 | "error": error_msg, 321 | "thread_id": thread_id, 322 | "run_id": run_id, 323 | "result": f"Error: {error_msg}", 324 | } 325 | 326 | # Get the agent's response 327 | response_messages = client.agents.messages.list(thread_id=thread_id) 328 | response_message = None 329 | async for msg in response_messages: 330 | if msg.role == MessageRole.AGENT: 331 | response_message = msg 332 | 333 | result = "" 334 | citations = [] 335 | 336 | if response_message: 337 | # Collect text content 338 | for text_message in response_message.text_messages: 339 | result += text_message.text.value + "\n" 340 | 341 | # Collect citations 342 | for annotation in response_message.url_citation_annotations: 343 | citation = f"[{annotation.url_citation.title}]({annotation.url_citation.url})" 344 | if citation not in citations: 345 | citations.append(citation) 346 | 347 | # Add citations if any 348 | if citations: 349 | result += "\n\n## Sources\n" 350 | for citation in citations: 351 | result += f"- {citation}\n" 352 | 353 | return { 354 | "success": True, 355 | "thread_id": thread_id, 356 | "run_id": run_id, 357 | "result": result.strip(), 358 | "citations": citations, 359 | } 360 | 361 | except Exception as e: 362 | logger.error(f"Agent query failed - ID: {agent_id}, Error: {str(e)}") 363 | raise 364 | 365 | 366 | def az(*args: str) -> dict: 367 | """Run azure-cli and return output with improved error handling""" 368 | cmd = [sys.executable, "-m", "azure.cli", *args, "-o", "json"] 369 | 370 | # Log the command that's about to be executed 371 | logger.info(f"Attempting to run: {' '.join(cmd)}") 372 | 373 | try: 374 | # Run with full logging 
375 | result = subprocess.run( 376 | cmd, 377 | text=True, 378 | capture_output=True, 379 | check=False, # Don't raise exception to see all errors 380 | ) 381 | 382 | # Log the results 383 | logger.info(f"Command exit code: {result.returncode}") 384 | logger.info(f"Command stdout (first 100 chars): {result.stdout[:100] if result.stdout else 'Empty'}") 385 | logger.warning(f"Command stderr (first 100 chars): {result.stderr[:100] if result.stderr else 'Empty'}") 386 | 387 | if result.returncode != 0: 388 | # Command failed 389 | return {"error": "Command failed", "stderr": result.stderr, "returncode": result.returncode} 390 | 391 | try: 392 | # Try to parse JSON 393 | return json.loads(result.stdout.strip()) 394 | except json.JSONDecodeError as json_err: 395 | # JSON parsing failed 396 | return { 397 | "error": f"Failed to parse JSON: {str(json_err)}", 398 | "raw_output": result.stdout[:500], # First 500 chars for debugging 399 | } 400 | 401 | except Exception as e: 402 | # Catch all other exceptions 403 | logger.error(f"Exception executing command: {str(e)}") 404 | return {"error": f"Exception: {str(e)}", "type": type(e).__name__} 405 | 406 | 407 | ######################## 408 | # TEXT EVALUATION TOOLS # 409 | ######################## 410 | 411 | 412 | @mcp.tool() 413 | def list_text_evaluators() -> List[str]: 414 | """ 415 | Returns a list of available text evaluator names for evaluating text outputs. 416 | """ 417 | return list(TEXT_EVALUATOR_MAP.keys()) 418 | 419 | 420 | @mcp.tool() 421 | def list_agent_evaluators() -> List[str]: 422 | """ 423 | Returns a list of available agent evaluator names for evaluating agent behaviors. 424 | """ 425 | return list(AGENT_EVALUATOR_MAP.keys()) 426 | 427 | 428 | @mcp.tool() 429 | def get_text_evaluator_requirements(evaluator_name: str = None) -> Dict: 430 | """ 431 | Get the required input fields for a specific text evaluator or all text evaluators. 432 | 433 | Parameters: 434 | - evaluator_name: Optional name of evaluator. If None, returns requirements for all evaluators. 435 | """ 436 | if evaluator_name is not None: 437 | if evaluator_name not in TEXT_EVALUATOR_MAP: 438 | raise ValueError(f"Unknown evaluator {evaluator_name}") 439 | return {evaluator_name: TEXT_EVALUATOR_REQUIREMENTS[evaluator_name]} 440 | else: 441 | return TEXT_EVALUATOR_REQUIREMENTS 442 | 443 | 444 | @mcp.tool() 445 | def get_agent_evaluator_requirements(evaluator_name: str = None) -> Dict: 446 | """ 447 | Get the required input fields for a specific agent evaluator or all agent evaluators. 448 | 449 | Parameters: 450 | - evaluator_name: Optional name of evaluator. If None, returns requirements for all evaluators. 451 | """ 452 | if evaluator_name is not None: 453 | if evaluator_name not in AGENT_EVALUATOR_MAP: 454 | raise ValueError(f"Unknown evaluator {evaluator_name}") 455 | return {evaluator_name: agent_evaluator_requirements[evaluator_name]} 456 | else: 457 | return agent_evaluator_requirements 458 | 459 | 460 | @mcp.tool() 461 | def run_text_eval( 462 | evaluator_names: Union[str, List[str]], # Single evaluator name or list of evaluator names 463 | file_path: Optional[str] = None, # Path to JSONL file 464 | content: Optional[str] = None, # JSONL content as a string (optional) 465 | include_studio_url: bool = True, # Option to include studio URL in response 466 | return_row_results: bool = False, # Option to include detailed row results 467 | ) -> Dict: 468 | """ 469 | Run one or multiple evaluators on a JSONL file or content string. 
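    Each JSONL row must supply the fields required by the selected evaluators (see
    get_text_evaluator_requirements). Illustrative row for the "groundedness" evaluator:
        {"query": "...", "response": "...", "context": "..."}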
470 | 471 | Parameters: 472 | - evaluator_names: Either a single evaluator name (string) or a list of evaluator names 473 | - file_path: Path to a JSONL file to evaluate (preferred for efficiency) 474 | - content: JSONL content as a string (alternative if file_path not available) 475 | - include_studio_url: Whether to include the Azure AI studio URL in the response 476 | - return_row_results: Whether to include detailed row results (False by default for large datasets) 477 | """ 478 | # Save original stdout so we can restore it later 479 | original_stdout = sys.stdout 480 | # Redirect stdout to stderr to prevent PromptFlow output from breaking MCP 481 | sys.stdout = sys.stderr 482 | 483 | # Heartbeat mechanism 484 | import threading 485 | import time 486 | 487 | # Set up a heartbeat mechanism to keep the connection alive 488 | heartbeat_active = True 489 | 490 | def send_heartbeats(): 491 | count = 0 492 | while heartbeat_active: 493 | count += 1 494 | logger.info(f"Heartbeat {count} - Evaluation in progress...") 495 | # Print to stderr to keep connection alive 496 | print(f"Evaluation in progress... ({count * 15}s)", file=sys.stderr, flush=True) 497 | time.sleep(15) # Send heartbeat every 15 seconds 498 | 499 | # Start heartbeat thread 500 | heartbeat_thread = threading.Thread(target=send_heartbeats, daemon=True) 501 | heartbeat_thread.start() 502 | 503 | try: 504 | if not EVALUATION_INITIALIZED: 505 | heartbeat_active = False # Stop heartbeat 506 | return {"error": "Evaluation not initialized. Check environment variables."} 507 | 508 | # Validate inputs 509 | if content is None and file_path is None: 510 | heartbeat_active = False # Stop heartbeat 511 | return {"error": "Either file_path or content must be provided"} 512 | 513 | # Convert single evaluator to list for unified processing 514 | if isinstance(evaluator_names, str): 515 | evaluator_names = [evaluator_names] 516 | 517 | # Validate evaluator names 518 | for name in evaluator_names: 519 | if name not in TEXT_EVALUATOR_MAP: 520 | heartbeat_active = False # Stop heartbeat 521 | return {"error": f"Unknown evaluator: {name}"} 522 | 523 | # Variable to track if we need to clean up a temp file 524 | temp_file = None 525 | 526 | try: 527 | # Determine which input to use (prioritize file_path for efficiency) 528 | input_file = None 529 | if file_path: 530 | # Resolve file path 531 | if os.path.isfile(file_path): 532 | input_file = file_path 533 | else: 534 | # Check in data directory 535 | data_dir = os.environ.get("EVAL_DATA_DIR", ".") 536 | alternate_path = os.path.join(data_dir, file_path) 537 | if os.path.isfile(alternate_path): 538 | input_file = alternate_path 539 | else: 540 | heartbeat_active = False # Stop heartbeat 541 | return {"error": f"File not found: {file_path} (also checked in {data_dir})"} 542 | 543 | # Count rows quickly using file iteration 544 | with open(input_file, "r", encoding="utf-8") as f: 545 | row_count = sum(1 for line in f if line.strip()) 546 | 547 | elif content: 548 | # Create temporary file for content string 549 | fd, temp_file = tempfile.mkstemp(suffix=".jsonl") 550 | os.close(fd) 551 | 552 | # Write content to temp file 553 | with open(temp_file, "w", encoding="utf-8") as f: 554 | f.write(content) 555 | 556 | input_file = temp_file 557 | row_count = content.count("\n") + (0 if content.endswith("\n") else 1) 558 | 559 | logger.info(f"Processing {row_count} rows for {len(evaluator_names)} evaluator(s)") 560 | 561 | # Prepare evaluators 562 | evaluators = {} 563 | eval_config = {} 564 | 565 | for name 
in evaluator_names: 566 | # Create evaluator instance 567 | evaluators[name] = create_text_evaluator(name) 568 | 569 | # Set up column mapping for this evaluator 570 | requirements = TEXT_EVALUATOR_REQUIREMENTS[name] 571 | column_mapping = {} 572 | for field, requirement in requirements.items(): 573 | if requirement == "Required": 574 | column_mapping[field] = f"${{data.{field}}}" 575 | eval_config[name] = {"column_mapping": column_mapping} 576 | 577 | # Prepare evaluation args 578 | eval_args = {"data": input_file, "evaluators": evaluators, "evaluator_config": eval_config} 579 | 580 | # Add Azure AI project info if initialized 581 | if AZURE_AI_PROJECT_ENDPOINT and include_studio_url: 582 | eval_args["azure_ai_project"] = AZURE_AI_PROJECT_ENDPOINT 583 | 584 | # Run evaluation with additional stdout redirection for extra safety 585 | with contextlib.redirect_stdout(sys.stderr): 586 | result = evaluate(**eval_args) 587 | 588 | # Prepare response 589 | response = {"evaluators": evaluator_names, "row_count": row_count, "metrics": result.get("metrics", {})} 590 | 591 | # Only include detailed row results if explicitly requested 592 | if return_row_results: 593 | response["row_results"] = result.get("rows", []) 594 | 595 | # Include studio URL if available 596 | if include_studio_url and "studio_url" in result: 597 | response["studio_url"] = result.get("studio_url") 598 | heartbeat_active = False # Stop heartbeat 599 | return response 600 | 601 | except Exception as e: 602 | logger.error(f"Evaluation error: {str(e)}") 603 | heartbeat_active = False # Stop heartbeat 604 | return {"error": str(e)} 605 | 606 | finally: 607 | # Clean up temp file if we created one 608 | if temp_file and os.path.exists(temp_file): 609 | try: 610 | os.remove(temp_file) 611 | except Exception: 612 | pass 613 | 614 | # Make sure heartbeat is stopped 615 | heartbeat_active = False 616 | 617 | finally: 618 | # Always restore stdout, even if an exception occurs 619 | sys.stdout = original_stdout 620 | heartbeat_active = False 621 | 622 | 623 | @mcp.tool() 624 | async def agent_query_and_evaluate( 625 | agent_id: str, 626 | query: str, 627 | evaluator_names: List[str] = None, 628 | include_studio_url: bool = True, # Option to include studio URL 629 | ) -> Dict: 630 | """ 631 | Query an agent and evaluate its response in a single operation. 632 | 633 | Parameters: 634 | - agent_id: ID of the agent to query 635 | - query: Text query to send to the agent 636 | - evaluator_names: Optional list of agent evaluator names to use (defaults to all) 637 | - include_studio_url: Whether to include the Azure AI studio URL in the response 638 | 639 | Returns both the agent response and evaluation results 640 | """ 641 | # Save original stdout so we can restore it later 642 | original_stdout = sys.stdout 643 | # Redirect stdout to stderr to prevent PromptFlow output from breaking MCP 644 | sys.stdout = sys.stderr 645 | 646 | # Heartbeat mechanism to keep connection alive during long operations 647 | import threading 648 | import time 649 | 650 | # Set up a heartbeat mechanism to keep the connection alive 651 | heartbeat_active = True 652 | 653 | def send_heartbeats(): 654 | count = 0 655 | while heartbeat_active: 656 | count += 1 657 | logger.info(f"Heartbeat {count} - Evaluation in progress...") 658 | # Print to stderr to keep connection alive 659 | print(f"Evaluation in progress... 
({count * 15}s)", file=sys.stderr, flush=True) 660 | time.sleep(15) # Send heartbeat every 15 seconds 661 | 662 | # Start heartbeat thread 663 | heartbeat_thread = threading.Thread(target=send_heartbeats, daemon=True) 664 | heartbeat_thread.start() 665 | 666 | try: 667 | if not AGENT_INITIALIZED or not EVALUATION_INITIALIZED: 668 | heartbeat_active = False # Stop heartbeat 669 | return {"error": "Services not fully initialized. Check environment variables."} 670 | 671 | if AI_CLIENT is None: 672 | success = await initialize_agent_client() 673 | if not success or AI_CLIENT is None: 674 | heartbeat_active = False # Stop heartbeat 675 | return {"error": "Failed to initialize Azure AI Agent client."} 676 | 677 | try: 678 | # Query the agent (this part remains async) 679 | query_response = await query_agent(AI_CLIENT, agent_id, query) 680 | 681 | if not query_response.get("success", False): 682 | heartbeat_active = False # Stop heartbeat 683 | return query_response 684 | 685 | # Get the thread and run IDs 686 | thread_id = query_response["thread_id"] 687 | run_id = query_response["run_id"] 688 | 689 | # Now we'll switch to synchronous mode, exactly like the GitHub example 690 | 691 | # Step 1: Create a synchronous client (this is what GitHub example uses) 692 | from azure.ai.projects import AIProjectClient # This is the sync version 693 | from azure.identity import DefaultAzureCredential 694 | 695 | sync_client = AIProjectClient(endpoint=AZURE_AI_PROJECT_ENDPOINT, credential=DefaultAzureCredential()) 696 | 697 | # Step 2: Create converter with the sync client, exactly like example 698 | from azure.ai.evaluation import AIAgentConverter 699 | 700 | converter = AIAgentConverter(sync_client) 701 | 702 | # Step 3: Create a temp file name 703 | temp_filename = "temp_evaluation_data.jsonl" 704 | 705 | try: 706 | # Step 4: Convert data synchronously, exactly as in their example 707 | evaluation_data = converter.convert(thread_id=thread_id, run_id=run_id) 708 | 709 | # Step 5: Write to file 710 | with open(temp_filename, "w") as f: 711 | json.dump(evaluation_data, f) 712 | 713 | # Step 6: Default to all agent evaluators if none specified 714 | if not evaluator_names: 715 | evaluator_names = list(AGENT_EVALUATOR_MAP.keys()) 716 | 717 | # Step 7: Create evaluators 718 | evaluators = {} 719 | for name in evaluator_names: 720 | evaluators[name] = create_agent_evaluator(name) 721 | 722 | # Step 8: Run evaluation, exactly as in their example 723 | # Use contextlib to ensure all stdout is redirected 724 | with contextlib.redirect_stdout(sys.stderr): 725 | from azure.ai.evaluation import evaluate 726 | 727 | evaluation_result = evaluate( 728 | data=temp_filename, 729 | evaluators=evaluators, 730 | azure_ai_project=AZURE_AI_PROJECT_ENDPOINT if include_studio_url else None, 731 | ) 732 | 733 | # Step 9: Prepare response 734 | response = { 735 | "success": True, 736 | "agent_id": agent_id, 737 | "thread_id": thread_id, 738 | "run_id": run_id, 739 | "query": query, 740 | "response": query_response["result"], 741 | "citations": query_response.get("citations", []), 742 | "evaluation_metrics": evaluation_result.get("metrics", {}), 743 | } 744 | 745 | # Include studio URL if available 746 | if include_studio_url and "studio_url" in evaluation_result: 747 | response["studio_url"] = evaluation_result.get("studio_url") 748 | 749 | heartbeat_active = False # Stop heartbeat 750 | return response 751 | 752 | except Exception as e: 753 | logger.error(f"Evaluation error: {str(e)}") 754 | import traceback 755 | 756 | 
logger.error(traceback.format_exc()) 757 | heartbeat_active = False # Stop heartbeat 758 | return {"error": f"Evaluation error: {str(e)}"} 759 | 760 | finally: 761 | # Clean up temp file 762 | if os.path.exists(temp_filename): 763 | try: 764 | os.remove(temp_filename) 765 | except Exception: 766 | pass 767 | 768 | except Exception as e: 769 | logger.error(f"Error in query and evaluate: {str(e)}") 770 | heartbeat_active = False # Stop heartbeat 771 | return {"error": f"Error in query and evaluate: {str(e)}"} 772 | 773 | finally: 774 | # Always restore stdout, even if an exception occurs 775 | sys.stdout = original_stdout 776 | heartbeat_active = False # Stop heartbeat 777 | 778 | 779 | # Add this new helper function to format evaluation outputs 780 | @mcp.tool() 781 | def format_evaluation_report(evaluation_result: Dict) -> str: 782 | """ 783 | Format evaluation results into a readable report with metrics and Studio URL. 784 | 785 | Parameters: 786 | - evaluation_result: The evaluation result dictionary from run_text_eval or agent_query_and_evaluate 787 | 788 | Returns a formatted report with metrics and Azure AI Studio URL if available 789 | """ 790 | if "error" in evaluation_result: 791 | return f"❌ Evaluation Error: {evaluation_result['error']}" 792 | 793 | # Start the report 794 | report = ["# Evaluation Report\n"] 795 | 796 | # Add evaluator info 797 | evaluator = evaluation_result.get("evaluator") 798 | if evaluator: 799 | report.append(f"## Evaluator: {evaluator}\n") 800 | 801 | # Add metrics 802 | metrics = evaluation_result.get("metrics", {}) 803 | if metrics: 804 | report.append("## Metrics\n") 805 | for metric_name, metric_value in metrics.items(): 806 | # Format metric value based on type 807 | if isinstance(metric_value, (int, float)): 808 | formatted_value = f"{metric_value:.4f}" if isinstance(metric_value, float) else str(metric_value) 809 | else: 810 | formatted_value = str(metric_value) 811 | 812 | report.append(f"- **{metric_name}**: {formatted_value}") 813 | report.append("\n") 814 | 815 | # Add studio URL if available 816 | studio_url = evaluation_result.get("studio_url") 817 | if studio_url: 818 | report.append("## Azure AI Studio\n") 819 | report.append(f"📊 [View detailed evaluation results in Azure AI Studio]({studio_url})\n") 820 | 821 | # Return the formatted report 822 | return "\n".join(report) 823 | 824 | 825 | @mcp.tool() 826 | def run_agent_eval( 827 | evaluator_name: str, 828 | query: str, 829 | response: Optional[str] = None, 830 | tool_calls: Optional[str] = None, 831 | tool_definitions: Optional[str] = None, 832 | ) -> Dict: 833 | """ 834 | Run agent evaluation on agent data. Accepts both plain text and JSON strings. 835 | 836 | Parameters: 837 | - evaluator_name: Name of the agent evaluator to use (intent_resolution, tool_call_accuracy, task_adherence) 838 | - query: User query (plain text or JSON string) 839 | - response: Agent response (plain text or JSON string) 840 | - tool_calls: Optional tool calls data (JSON string) 841 | - tool_definitions: Optional tool definitions (JSON string) 842 | """ 843 | if not EVALUATION_INITIALIZED: 844 | return {"error": "Evaluation not initialized. 
Check environment variables."} 845 | 846 | if evaluator_name not in AGENT_EVALUATOR_MAP: 847 | raise ValueError(f"Unknown agent evaluator: {evaluator_name}") 848 | 849 | try: 850 | # Helper function to process inputs 851 | def process_input(input_str): 852 | if not input_str: 853 | return None 854 | 855 | # Check if it's already a valid JSON string 856 | try: 857 | # Try to parse as JSON 858 | return json.loads(input_str) 859 | except json.JSONDecodeError: 860 | # If not a JSON string, treat as plain text 861 | return input_str 862 | 863 | # Process inputs - handle both direct text and JSON strings 864 | query_data = process_input(query) 865 | response_data = process_input(response) if response else None 866 | tool_calls_data = process_input(tool_calls) if tool_calls else None 867 | tool_definitions_data = process_input(tool_definitions) if tool_definitions else None 868 | 869 | # If query/response are plain text, wrap them in the expected format 870 | if isinstance(query_data, str): 871 | query_data = {"content": query_data} 872 | 873 | if isinstance(response_data, str): 874 | response_data = {"content": response_data} 875 | 876 | # Create evaluator instance 877 | evaluator = create_agent_evaluator(evaluator_name) 878 | 879 | # Prepare kwargs for the evaluator 880 | kwargs = {"query": query_data} 881 | if response_data: 882 | kwargs["response"] = response_data 883 | if tool_calls_data: 884 | kwargs["tool_calls"] = tool_calls_data 885 | if tool_definitions_data: 886 | kwargs["tool_definitions"] = tool_definitions_data 887 | 888 | # Run evaluation 889 | result = evaluator(**kwargs) 890 | 891 | return {"evaluator": evaluator_name, "result": result} 892 | 893 | except Exception as e: 894 | logger.error(f"Agent evaluation error: {str(e)}") 895 | return {"error": str(e)} 896 | 897 | 898 | ######################## 899 | # AGENT SERVICE TOOLS # 900 | ######################## 901 | 902 | 903 | @mcp.tool() 904 | async def list_agents() -> str: 905 | """List available agents in the Azure AI Agent Service.""" 906 | if not AGENT_INITIALIZED: 907 | return "Error: Azure AI Agent service is not initialized. Check environment variables." 908 | 909 | if AI_CLIENT is None: 910 | await initialize_agent_client() 911 | if AI_CLIENT is None: 912 | return "Error: Failed to initialize Azure AI Agent client." 913 | 914 | try: 915 | agents = AI_CLIENT.agents.list_agents() 916 | if not agents: 917 | return "No agents found in the Azure AI Agent Service." 918 | 919 | result = "## Available Azure AI Agents\n\n" 920 | async for agent in agents: 921 | result += f"- **{agent.name}**: `{agent.id}`\n" 922 | 923 | if DEFAULT_AGENT_ID: 924 | result += f"\n**Default Agent ID**: `{DEFAULT_AGENT_ID}`" 925 | 926 | return result 927 | except Exception as e: 928 | logger.error(f"Error listing agents: {str(e)}") 929 | return f"Error listing agents: {str(e)}" 930 | 931 | 932 | @mcp.tool() 933 | async def connect_agent(agent_id: str, query: str) -> Dict: 934 | """ 935 | Connect to a specific Azure AI Agent and run a query. 936 | 937 | Parameters: 938 | - agent_id: ID of the agent to connect to 939 | - query: Text query to send to the agent 940 | 941 | Returns a dict with the agent's response and thread/run IDs for potential evaluation 942 | """ 943 | if not AGENT_INITIALIZED: 944 | return {"error": "Azure AI Agent service is not initialized. 
Check environment variables."} 945 | 946 | if AI_CLIENT is None: 947 | await initialize_agent_client() 948 | if AI_CLIENT is None: 949 | return {"error": "Failed to initialize Azure AI Agent client."} 950 | 951 | try: 952 | response = await query_agent(AI_CLIENT, agent_id, query) 953 | return response 954 | except Exception as e: 955 | logger.error(f"Error connecting to agent: {str(e)}") 956 | return {"error": f"Error connecting to agent: {str(e)}"} 957 | 958 | 959 | @mcp.tool() 960 | async def query_default_agent(query: str) -> Dict: 961 | """ 962 | Send a query to the default configured Azure AI Agent. 963 | 964 | Parameters: 965 | - query: Text query to send to the default agent 966 | 967 | Returns a dict with the agent's response and thread/run IDs for potential evaluation 968 | """ 969 | if not AGENT_INITIALIZED: 970 | return {"error": "Azure AI Agent service is not initialized. Check environment variables."} 971 | 972 | if not DEFAULT_AGENT_ID: 973 | return { 974 | "error": "No default agent configured. Set DEFAULT_AGENT_ID environment variable or use connect_agent tool." 975 | } 976 | 977 | if AI_CLIENT is None: 978 | await initialize_agent_client() 979 | if AI_CLIENT is None: 980 | return {"error": "Failed to initialize Azure AI Agent client."} 981 | 982 | try: 983 | response = await query_agent(AI_CLIENT, DEFAULT_AGENT_ID, query) 984 | return response 985 | except Exception as e: 986 | logger.error(f"Error querying default agent: {str(e)}") 987 | return {"error": f"Error querying default agent: {str(e)}"} 988 | -------------------------------------------------------------------------------- /src/mcp_foundry/mcp_foundry_knowledge/__init__.py: -------------------------------------------------------------------------------- 1 | from .data_access_objects import ( 2 | SearchIndexDao, 3 | SearchBaseDao, 4 | SearchClientDao, 5 | SearchIndexerDao, 6 | SearchIndexSchema, 7 | SearchFieldSchema, 8 | SuggesterSchema, 9 | CorsOptionsSchema, 10 | ScoringProfileSchema, 11 | convert_pydantic_model_to_search_index, 12 | convert_to_field_mappings, 13 | FieldMappingModel, OperationResult, SearchDocument, 14 | ) 15 | 16 | __all__ = ( 17 | 'SearchIndexDao', 18 | 'SearchBaseDao', 19 | 'SearchClientDao', 20 | 'SearchIndexerDao', 21 | 'SearchIndexSchema', 22 | 'SearchFieldSchema', 23 | 'SuggesterSchema', 24 | 'CorsOptionsSchema', 25 | 'ScoringProfileSchema', 26 | 'FieldMappingModel', 27 | 'convert_pydantic_model_to_search_index', 28 | 'convert_to_field_mappings', 29 | 'OperationResult', 30 | 'SearchDocument' 31 | ) 32 | 33 | 34 | -------------------------------------------------------------------------------- /src/mcp_foundry/mcp_foundry_knowledge/data_access_objects/__init__.py: -------------------------------------------------------------------------------- 1 | 2 | from .dao import SearchIndexDao, SearchBaseDao, SearchClientDao, SearchIndexerDao 3 | from .models import SearchIndexSchema, \ 4 | convert_pydantic_model_to_search_index, SearchFieldSchema, SuggesterSchema, CorsOptionsSchema, ScoringProfileSchema, \ 5 | FieldMappingModel, convert_to_field_mappings, OperationResult, SearchDocument 6 | 7 | __all__ = ( 8 | 'SearchBaseDao', 9 | 'SearchIndexDao', 10 | 'SearchClientDao', 11 | 'SearchIndexerDao', 12 | 'SearchIndexSchema', 13 | 'SearchFieldSchema', 14 | 'SuggesterSchema', 15 | 'CorsOptionsSchema', 16 | 'ScoringProfileSchema', 17 | 'FieldMappingModel', 18 | 'convert_pydantic_model_to_search_index', 19 | 'convert_to_field_mappings', 20 | 'OperationResult', 21 | 'SearchDocument' 22 | ) 23 | 24 | 
-------------------------------------------------------------------------------- /src/mcp_foundry/mcp_foundry_knowledge/data_access_objects/dao.py: -------------------------------------------------------------------------------- 1 | import os 2 | from datetime import timedelta 3 | from typing import MutableMapping, Any, Optional, List, Union 4 | from mcp.server.fastmcp.server import logger 5 | from azure.core.credentials import AzureKeyCredential 6 | from azure.core.paging import ItemPaged 7 | from azure.identity import DefaultAzureCredential 8 | from azure.search.documents import SearchClient, SearchItemPaged 9 | from azure.search.documents.indexes import SearchIndexClient, SearchIndexerClient 10 | from azure.search.documents.indexes._generated.models import FieldMapping, IndexingSchedule, IndexingParameters, \ 11 | IndexingParametersConfiguration 12 | from azure.search.documents.indexes.models import SearchIndex, SearchIndexer, SearchIndexerDataSourceConnection 13 | 14 | 15 | class SearchBaseDao: 16 | """ 17 | Base class for Azure Cognitive Search data access operations. 18 | 19 | Handles environment configuration and authentication setup 20 | for interacting with Azure AI Search services. 21 | """ 22 | 23 | def __init__(self): 24 | """ 25 | Initializes the SearchBaseDao by reading configuration from environment variables. 26 | """ 27 | self.authentication_method = self._get_env_variable("SEARCH_AUTHENTICATION_METHOD", "api-search-key") 28 | self.service_endpoint = self._get_env_variable("AZURE_AI_SEARCH_ENDPOINT") 29 | self.api_version = self._get_env_variable('AZURE_AI_SEARCH_API_VERSION', '2025-03-01-preview') 30 | 31 | @staticmethod 32 | def _environment_var_message(key): 33 | return f"{key} must be set in the environment variable or defined in the environment file" 34 | 35 | def check_environment_sanity(self): 36 | assert self.service_endpoint is not None, self._environment_var_message("AZURE_AI_SEARCH_ENDPOINT") 37 | if self.authentication_method == "api-search-key": 38 | api_key = self._get_env_variable('AZURE_AI_SEARCH_API_KEY') 39 | assert api_key is not None, self._environment_var_message("AZURE_AI_SEARCH_API_KEY") 40 | elif self.authentication_method == 'service-principal': 41 | tenant_id = self._get_env_variable('AZURE_TENANT_ID') 42 | client_id = self._get_env_variable('AZURE_CLIENT_ID') 43 | client_secret = self._get_env_variable('AZURE_CLIENT_SECRET') 44 | assert tenant_id is not None, self._environment_var_message("AZURE_TENANT_ID") 45 | assert client_id is not None, self._environment_var_message("AZURE_CLIENT_ID") 46 | assert client_secret is not None, self._environment_var_message("AZURE_CLIENT_SECRET") 47 | 48 | @staticmethod 49 | def _get_env_variable(key: str, default_value: str | None = None) -> str: 50 | """ 51 | Retrieves an environment variable value or returns a default if not set. 52 | 53 | Args: 54 | key (str): The name of the environment variable. 55 | default_value (str | None): Optional fallback value. 56 | 57 | Returns: 58 | str: The value of the environment variable or the default. 59 | """ 60 | return os.environ.get(key, default_value) 61 | 62 | def _fetch_credentials(self) -> AzureKeyCredential | DefaultAzureCredential: 63 | """ 64 | Fetches the appropriate credentials for Azure Search based on the configured authentication method. 65 | 66 | Returns: 67 | AzureKeyCredential | DefaultAzureCredential: A credential object for authenticating requests. 68 | 69 | Raises: 70 | Exception: If the authentication method is missing or invalid. 
71 | """ 72 | self.check_environment_sanity() 73 | 74 | if self.authentication_method == 'api-search-key': 75 | api_key = self._get_env_variable('AZURE_AI_SEARCH_API_KEY') 76 | credential = AzureKeyCredential(api_key) 77 | return credential 78 | elif self.authentication_method == 'service-principal': 79 | credential = DefaultAzureCredential() 80 | return credential 81 | 82 | error_message = ( 83 | "SEARCH_AUTHENTICATION_METHOD was not specified or is invalid. " 84 | "Must be one of api-search-key or service-principal" 85 | ) 86 | raise Exception(error_message) 87 | 88 | 89 | class SearchIndexDao(SearchBaseDao): 90 | """ 91 | Data Access Object (DAO) for interacting with Azure AI Search Indexes. 92 | 93 | Inherits configuration and authentication from SearchBaseDao. 94 | """ 95 | 96 | def __init__(self): 97 | """ 98 | Initializes the SearchIndexDao with a SearchIndexClient instance. 99 | """ 100 | super().__init__() 101 | credentials = self._fetch_credentials() 102 | self.client = SearchIndexClient(self.service_endpoint, credentials, api_version=self.api_version) 103 | 104 | def close(self): 105 | """Shuts down the Data Access Object instance and associated resources 106 | 107 | :rtype: None 108 | """ 109 | self.client.close() 110 | 111 | def retrieve_index_names(self) -> list[str]: 112 | """ 113 | Retrieves a list of all search index names from the Azure Search service. 114 | 115 | Returns: 116 | list[str]: A list of index names. 117 | """ 118 | search_results = self.client.list_index_names() 119 | results: list[str] = [] 120 | 121 | for search_result in search_results: 122 | results.append(search_result) 123 | 124 | return results 125 | 126 | def retrieve_index_schemas(self) -> list[MutableMapping[str, Any]]: 127 | """ 128 | Retrieves the full schema definition for each search index. 129 | 130 | Returns: 131 | list[SearchIndex]: A list of serialized index schema definitions. 132 | """ 133 | search_results: ItemPaged[SearchIndex] = self.client.list_indexes() 134 | results = [] 135 | 136 | for search_result in search_results: 137 | results.append(search_result.serialize(keep_readonly=True)) 138 | 139 | return results 140 | 141 | def retrieve_index_schema(self, index_name: str) -> MutableMapping[str, Any]: 142 | """ 143 | Retrieves the full schema definition for a search index. 144 | 145 | Returns: 146 | SearchIndex: A serialized index schema definition. 147 | """ 148 | search_results = self.client.get_index(index_name) 149 | 150 | return search_results.serialize(keep_readonly=True) 151 | 152 | def modify_index(self, index_name: str, updated_index_definition: SearchIndex) -> MutableMapping[str, Any]: 153 | """ 154 | Updates an existing index in the Azure AI Search service. 155 | 156 | Args: 157 | index_name (SearchIndex): The name of the index to be updated 158 | updated_index_definition (SearchIndex): The full definition of the index. 159 | 160 | Returns: 161 | MutableMapping[str, Any]: The serialized response of the created index. 162 | """ 163 | 164 | logger.debug(f"Updating Index {index_name} with new definition", updated_index_definition) 165 | 166 | updated_index_definition.name = index_name 167 | operation_results = self.client.create_or_update_index(updated_index_definition) 168 | return operation_results.serialize(keep_readonly=True) 169 | 170 | def create_index(self, index_definition: SearchIndex) -> MutableMapping[str, Any]: 171 | """ 172 | Creates a new index in the Azure AI Search service. 
173 | 174 | Args: 175 | index_definition (SearchIndex): The full definition of the index to be created. 176 | 177 | Returns: 178 | MutableMapping[str, Any]: The serialized response of the created index. 179 | """ 180 | logger.debug("Creating Index ", index_definition) 181 | operation_results = self.client.create_index(index_definition) 182 | return operation_results.serialize(keep_readonly=True) 183 | 184 | def delete_index(self, index_name: str): 185 | """ 186 | Deletes an existing index from the Azure AI Search service. 187 | 188 | Args: 189 | index_name (str): The name of the index to be deleted. 190 | 191 | Returns: 192 | None 193 | """ 194 | logger.debug(f"Deleting Index {index_name}") 195 | self.client.delete_index(index_name) 196 | 197 | class SearchClientDao(SearchBaseDao): 198 | 199 | def __init__(self, index_name: str): 200 | """ 201 | Initializes the SearchIndexDao with a SearchIndexClient instance. 202 | :param index_name: The name of the index to connect to 203 | """ 204 | super().__init__() 205 | credentials = self._fetch_credentials() 206 | self.index_name = index_name 207 | self.client = SearchClient(self.service_endpoint, index_name, credentials, api_version=self.api_version) 208 | 209 | def close(self): 210 | """Shuts down the Data Access Object instance and associated resources 211 | 212 | :rtype: None 213 | """ 214 | self.client.close() 215 | 216 | def get_document_count(self) -> int: 217 | """ 218 | Return the total number of documents in the index 219 | 220 | Returns: 221 | int: The total number of documents in the index. 222 | """ 223 | search_text: str | None = None 224 | search_results: SearchItemPaged[dict] = self.client.search(search_text=search_text, include_total_count=True) 225 | 226 | return search_results.get_count() 227 | 228 | def add_document(self, document: dict): 229 | """ 230 | Uploads a single document to the Azure AI Search index. 231 | 232 | Args: 233 | document (dict): The document to be added to the index. 234 | 235 | Returns: 236 | MutableMapping[str, Any]: The serialized result of the add operation for the single document. 237 | """ 238 | documents_to_add = [document] 239 | operation_results = self.add_documents(documents_to_add) 240 | return operation_results[0] 241 | 242 | def add_documents(self, documents: list[dict]) -> list[MutableMapping[str, Any]]: 243 | """ 244 | Uploads a batch of documents to the Azure AI Search index. 245 | 246 | Args: 247 | documents (list[dict]): A list of documents to upload. 248 | 249 | Returns: 250 | list[MutableMapping[str, Any]]: A list of serialized results for each document upload operation. 251 | """ 252 | 253 | logger.debug(f"Adding documents to index {self.index_name}", documents) 254 | operation_results = self.client.upload_documents(documents) 255 | 256 | results: list[MutableMapping[str, Any]] = [] 257 | 258 | for operation_result in operation_results: 259 | results.append(operation_result.serialize(keep_readonly=True)) 260 | 261 | return results 262 | 263 | def delete_document(self, key_field_name: str, key_value: str): 264 | """ 265 | Deletes a single document from the Azure AI Search index. 266 | 267 | Args: 268 | key_field_name (str): The name of the key field in the index 269 | key_value (str): The value of the key field 270 | 271 | Returns: 272 | list[MutableMapping[str, Any]]: A list of serialized results for each document deletion operation. 
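        Example (illustrative; `dao` is a SearchClientDao instance):
            dao.delete_document(key_field_name="id", key_value="42")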
273 | """ 274 | document_lookup = {key_field_name: key_value} 275 | documents = [document_lookup] 276 | document_keys: list[str] = [key_value] 277 | 278 | logger.debug(f"Removing document from index {self.index_name}", documents) 279 | results = self.delete_documents(key_field_name=key_field_name, document_keys=document_keys) 280 | 281 | return results 282 | 283 | # @TODO: expose this to the MCP clients only after thorough testing with a lot of models 284 | # For now it is ok to delete just one document at a time 285 | def delete_documents(self, key_field_name: str, document_keys: list[str]) -> list[MutableMapping[str, Any]]: 286 | """ 287 | Deletes a batch of documents from the Azure AI Search index. 288 | 289 | Args: 290 | key_field_name (str): The name of the key field in the index 291 | document_keys (list[str]): A list of document keys to delete. 292 | 293 | Returns: 294 | list[MutableMapping[str, Any]]: A list of serialized results for each document deletion operation. 295 | """ 296 | documents_to_delete = [] 297 | for document_key in document_keys: 298 | documents_to_delete.append({key_field_name: document_key}) 299 | 300 | logger.debug(f"Removing document from index {self.index_name}", documents_to_delete) 301 | operation_results = self.client.delete_documents(documents_to_delete) 302 | 303 | results: list[MutableMapping[str, Any]] = [] 304 | 305 | 306 | for operation_result in operation_results: 307 | results.append(operation_result.serialize(keep_readonly=True)) 308 | 309 | return results 310 | 311 | def query_index(self, 312 | search_text: Optional[str] = None, 313 | *, 314 | query_filter: Optional[str] = None, 315 | order_by: Optional[List[str]] = None, 316 | select: Optional[List[str]] = None, 317 | skip: Optional[int] = None, 318 | top: Optional[int] = None, 319 | include_total_count: Optional[bool] = None, 320 | ) -> list[dict]: 321 | """Search the Azure search index for documents. 322 | 323 | :param str search_text: A full-text search query expression; Use "*" or omit this parameter to 324 | match all documents. 325 | :param str query_filter: The OData $filter expression to apply to the search query. 326 | :param list[str] order_by: The list of OData $orderby expressions by which to sort the results. Each 327 | expression can be either a field name or a call to either the geo.distance() or the 328 | search.score() functions. Each expression can be followed by asc to indicate ascending, and 329 | desc to indicate descending. The default is ascending order. Ties will be broken by the match 330 | scores of documents. If no OrderBy is specified, the default sort order is descending by 331 | document match score. There can be at most 32 $orderby clauses. 332 | :param list[str] select: The list of fields to retrieve. If unspecified, all fields marked as retrievable 333 | in the schema are included. 334 | :param int skip: The number of search results to skip. This value cannot be greater than 100,000. 335 | If you need to scan documents in sequence, but cannot use $skip due to this limitation, 336 | consider using $orderby on a totally-ordered key and $filter with a range query instead. 337 | :param int top: The number of search results to retrieve. This can be used in conjunction with 338 | $skip to implement client-side paging of search results. If results are truncated due to 339 | server-side paging, the response will include a continuation token that can be used to issue 340 | another Search request for the next page of results. 
341 | :param bool include_total_count: A value that specifies whether to fetch the total count of 342 | results. Default is false. Setting this value to true may have a performance impact. Note that 343 | the count returned is an approximation. 344 | :rtype: list[dict] 345 | """ 346 | search_results: SearchItemPaged[dict] = self.client.search( 347 | search_text=search_text, 348 | include_total_count=include_total_count, 349 | filter=query_filter, 350 | order_by=order_by, 351 | select=select, 352 | skip=skip, 353 | top=top 354 | ) 355 | 356 | query_results: list[dict] = [] 357 | 358 | for search_result_item in search_results: 359 | query_results.append(search_result_item) 360 | 361 | return query_results 362 | 363 | 364 | 365 | 366 | 367 | class SearchIndexerDao(SearchBaseDao): 368 | """ 369 | A data access object (DAO) for managing Azure AI Search indexers, data sources, and skillsets. 370 | 371 | This class provides methods for listing, retrieving, creating, and deleting indexers, 372 | as well as accessing data source connections and skillsets configured in the Azure AI Search service. 373 | """ 374 | 375 | def __init__(self): 376 | """ 377 | Initializes the SearchIndexerDao by creating a SearchIndexerClient using credentials 378 | and service configuration from the base class. 379 | """ 380 | super().__init__() 381 | credentials = self._fetch_credentials() 382 | self.client = SearchIndexerClient(self.service_endpoint, credentials, api_version=self.api_version) 383 | 384 | def close(self): 385 | """Shuts down the Data Access Object instance and associated resources 386 | 387 | :rtype: None 388 | """ 389 | self.client.close() 390 | 391 | def list_indexers(self) -> list[str]: 392 | """ 393 | Retrieves the names of all indexers registered in the Azure AI Search service. 394 | 395 | Returns: 396 | list[str]: A list of indexer names. 397 | """ 398 | search_results = self.client.get_indexer_names() 399 | indexer_names: list[str] = [] 400 | 401 | for search_result in search_results: 402 | indexer_names.append(search_result) 403 | return indexer_names 404 | 405 | def get_indexer(self, name: str) -> MutableMapping[str, Any]: 406 | """ 407 | Retrieves the full definition of a specific indexer. 408 | 409 | Args: 410 | name (str): The name of the indexer to retrieve. 411 | 412 | Returns: 413 | MutableMapping[str, Any]: A dictionary representing the serialized indexer definition. 414 | """ 415 | indexer_details = self.client.get_indexer(name) 416 | indexer_result = indexer_details.serialize(keep_readonly=True) 417 | return indexer_result 418 | 419 | def create_indexer(self, name: str, 420 | data_source_name: str, 421 | target_index_name: str, 422 | description: str, 423 | field_mappings: list[FieldMapping], 424 | output_field_mappings: list[FieldMapping], 425 | skill_set_name: str = None, 426 | ) -> MutableMapping[str, Any]: 427 | """ 428 | Creates a new indexer in the Azure AI Search service. 429 | 430 | Args: 431 | name (str): The name of the indexer to be created. 432 | data_source_name (str): The name of the indexer to be created. 433 | target_index_name (str): The name of the indexer to be created. 434 | description (str): The name of the indexer to be created. 435 | field_mappings (list[FieldMapping]): The name of the indexer to be created. 436 | output_field_mappings (list[FieldMapping]): The name of the indexer to be created. 437 | skill_set_name (str): The name of the indexer to be created. 438 | 439 | Returns: 440 | MutableMapping[str, Any]: A dictionary representing the created indexer. 
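        Note: `data_source_name` names an existing data source connection to read from,
        `target_index_name` is the index to populate, `description` is free-form text,
        `field_mappings`/`output_field_mappings` map source fields to index fields, and
        `skill_set_name` optionally names a skillset to apply during indexing.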
441 | """ 442 | 443 | interval: timedelta = timedelta(minutes=5) 444 | schedule: IndexingSchedule = IndexingSchedule(interval=interval) 445 | 446 | parameters = self._prepare_indexer_parameters(data_source_name) 447 | 448 | indexer_definition = SearchIndexer( 449 | name=name, 450 | data_source_name=data_source_name, 451 | target_index_name=target_index_name, 452 | description=description, 453 | skillset_name=skill_set_name, 454 | field_mappings=field_mappings, 455 | output_field_mappings=output_field_mappings, 456 | schedule=schedule, 457 | parameters=parameters 458 | ) 459 | indexer_result = self.client.create_indexer(indexer_definition) 460 | return indexer_result.serialize(keep_readonly=True) 461 | 462 | def _prepare_indexer_parameters(self, data_source_name) -> IndexingParameters | None: 463 | 464 | data_source_detail: SearchIndexerDataSourceConnection = self.client.get_data_source_connection(name=data_source_name) 465 | data_source_type = data_source_detail.type 466 | 467 | # Possible values include: "azuresql", "cosmosdb", "azureblob", "azuretable", "mysql", "adlsgen2". 468 | if data_source_type == "azureblob": 469 | indexing_configuration = IndexingParametersConfiguration(data_to_extract='contentAndMetadata', 470 | parsing_mode='json', query_timeout=None) 471 | parameters = IndexingParameters(configuration=indexing_configuration) 472 | return parameters 473 | return None 474 | 475 | 476 | def delete_indexer(self, name: str) -> None: 477 | """ 478 | Deletes an indexer by name from the Azure AI Search service. 479 | 480 | Args: 481 | name (str): The name of the indexer to delete. 482 | """ 483 | self.client.delete_indexer(name) 484 | 485 | def list_data_sources(self) -> list[str]: 486 | """ 487 | Lists the names of all data source connections configured in the AI Search service. 488 | 489 | Returns: 490 | list[str]: A list of data source connection names. 491 | """ 492 | data_source_names = self.client.get_data_source_connection_names() 493 | search_results: list[str] = [] 494 | for data_source_name in data_source_names: 495 | search_results.append(data_source_name) 496 | return search_results 497 | 498 | def get_data_source(self, name: str) -> MutableMapping[str, Any]: 499 | """ 500 | Retrieves the full definition of a specific data source connection. 501 | 502 | Args: 503 | name (str): The name of the data source connection to retrieve. 504 | 505 | Returns: 506 | MutableMapping[str, Any]: A dictionary representing the serialized data source definition. 507 | """ 508 | data_source_detail: SearchIndexerDataSourceConnection = self.client.get_data_source_connection(name=name) 509 | data_source_result = data_source_detail.serialize(keep_readonly=True) 510 | return data_source_result 511 | 512 | def list_skill_sets(self) -> list[str]: 513 | """ 514 | Lists the names of all skillsets configured in the Azure AI Search service. 515 | 516 | Returns: 517 | list[str]: A list of skillset names. 518 | """ 519 | skill_set_names = self.client.get_skillset_names() 520 | search_results: list[str] = [] 521 | for skill_set_name in skill_set_names: 522 | search_results.append(skill_set_name) 523 | return search_results 524 | 525 | def get_skill_set(self, skill_set_name: str) -> MutableMapping[str, Any]: 526 | """ 527 | Retrieves the full definition of a specific skillset. 528 | 529 | Args: 530 | skill_set_name (str): The name of the skillset to retrieve. 531 | 532 | Returns: 533 | MutableMapping[str, Any]: A dictionary representing the serialized skillset definition. 
534 | """ 535 | skill_set_result = self.client.get_skillset(skill_set_name) 536 | return skill_set_result.serialize(keep_readonly=True) 537 | -------------------------------------------------------------------------------- /src/mcp_foundry/mcp_foundry_knowledge/data_access_objects/models.py: -------------------------------------------------------------------------------- 1 | from typing import List, Optional, AnyStr, Any 2 | 3 | from azure.search.documents.indexes._generated.models import FieldMapping 4 | from pydantic import BaseModel, ConfigDict 5 | from azure.search.documents.indexes.models import SearchIndex, SimpleField, SearchSuggester 6 | 7 | OperationResult = dict[str, Any] 8 | 9 | from pydantic import BaseModel, Extra 10 | 11 | class SearchDocument(BaseModel): 12 | id: str 13 | model_config = ConfigDict(extra="allow") 14 | 15 | class SearchFieldSchema(BaseModel): 16 | name: str 17 | type: str 18 | key: Optional[bool] = False 19 | searchable: Optional[bool] = False 20 | filterable: Optional[bool] = False 21 | sortable: Optional[bool] = False 22 | facetable: Optional[bool] = False 23 | retrievable: Optional[bool] = True 24 | analyzer: Optional[str] = None 25 | search_analyzer: Optional[str] = None 26 | index_analyzer: Optional[str] = None 27 | synonym_maps: Optional[List[str]] = None 28 | 29 | 30 | class SuggesterSchema(BaseModel): 31 | name: str 32 | source_fields: List[str] 33 | 34 | 35 | class CorsOptionsSchema(BaseModel): 36 | allowed_origins: List[str] 37 | max_age_in_seconds: Optional[int] = 300 38 | 39 | 40 | class ScoringProfileSchema(BaseModel): 41 | name: str 42 | # @TODO Add specific scoring profile fields as needed 43 | 44 | 45 | class SearchIndexSchema(BaseModel): 46 | name: str 47 | fields: List[SearchFieldSchema] 48 | suggesters: Optional[List[SuggesterSchema]] = None 49 | scoring_profiles: Optional[List[ScoringProfileSchema]] = None 50 | default_scoring_profile: Optional[str] = None 51 | cors_options: Optional[CorsOptionsSchema] = None 52 | semantic_settings: Optional[dict] = None # @TODO expand this to a model if needed 53 | encryption_key: Optional[dict] = None # @TODO expand this to a model if needed 54 | 55 | 56 | class FieldMappingModel(BaseModel): 57 | source_field_name: str 58 | target_field_name: str 59 | mapping_function: str | None = None 60 | 61 | def convert_pydantic_model_to_search_index(schema: SearchIndexSchema) -> SearchIndex: 62 | fields = [SimpleField(**field.model_dump()) for field in schema.fields] 63 | suggesters = [SearchSuggester(name=s.name, source_fields=s.source_fields) for s in (schema.suggesters or [])] 64 | 65 | return SearchIndex( 66 | name=schema.name, 67 | fields=fields, 68 | suggesters=suggesters or None, 69 | scoring_profiles=schema.scoring_profiles, 70 | default_scoring_profile=schema.default_scoring_profile, 71 | cors_options=schema.cors_options, 72 | semantic_settings=schema.semantic_settings, 73 | encryption_key=schema.encryption_key 74 | ) 75 | 76 | 77 | def convert_to_field_mappings(models: List[FieldMappingModel]) -> List[FieldMapping]: 78 | """ 79 | Converts a list of FieldMappingModel instances to Azure FieldMapping objects. 80 | 81 | Args: 82 | models (List[FieldMappingModel]): List of Pydantic models representing field mappings. 83 | 84 | Returns: 85 | List[FieldMapping]: List of Azure SDK FieldMapping instances. 
86 |     """
87 |     return [
88 |         FieldMapping(
89 |             source_field_name=model.source_field_name,
90 |             target_field_name=model.target_field_name,
91 |             mapping_function=model.mapping_function
92 |         )
93 |         for model in models
94 |     ]
95 | 
96 | 
--------------------------------------------------------------------------------
/src/mcp_foundry/mcp_foundry_knowledge/prompts.py:
--------------------------------------------------------------------------------
1 | from mcp_foundry.mcp_server import mcp
2 | 
3 | @mcp.prompt(description="A prompt to list the names of all the indices")
4 | async def list_all_indices_prompt() -> str:
5 |     return "List all the indices by name"
6 | 
7 | @mcp.prompt(description="A prompt to retrieve the schema details of all the indices")
8 | async def list_all_indices_details_prompt() -> str:
9 |     return "Show the schema details of all the indexes"
10 | 
11 | @mcp.prompt(description="Get the schema details for a specific index")
12 | async def retrieve_index_schema_prompt(index_name: str) -> str:
13 |     return f"Show the schema for the {index_name} index"
14 | 
15 | @mcp.prompt(description="Display the contents of a local file")
16 | async def fetch_local_file_contents_prompt(file_path: str) -> str:
17 |     return f"Display the contents of the local file {file_path}"
18 | 
19 | @mcp.prompt(description="Display the contents of a URL")
20 | async def fetch_url_contents_prompt(url: str) -> str:
21 |     return f"Display the contents of the file {url}"
22 | 
23 | @mcp.prompt(description="Creates an index matching the schema of a JSON file (local file or URL)")
24 | async def create_index_from_file_analysis_prompt(index_name: str, url: str) -> str:
25 |     return f"Create an index called '{index_name}' that is compatible with the JSON file contents in the file {url}"
26 | 
27 | @mcp.prompt(description="Updates the index definition for a specific field")
28 | async def modify_index_field_definition_prompt(index_name: str, field_name: str) -> str:
29 |     return f"Modify the index '{index_name}' and make the {field_name} retrievable, searchable and filterable"
30 | 
31 | @mcp.prompt(description="Removes a specific index")
32 | async def remove_index_definition_prompt(index_name: str) -> str:
33 |     return f"Remove the '{index_name}' index"
34 | 
35 | @mcp.prompt(description="Adds the contents of a JSON file (local file or URL) to the specified index")
36 | async def add_document_from_file_analysis_prompt(index_name: str, url: str) -> str:
37 |     return f"Add a document or documents to the '{index_name}' index using the contents of the file {url}"
38 | 
39 | @mcp.prompt(description="Remove a document from the index")
40 | async def remove_document_prompt(index_name: str, id: str) -> str:
41 |     return f"""
42 |     Remove a document from the '{index_name}' index matching id '{id}'
43 |     Remove all documents from the '{index_name}' where the preferred language is French
44 |     Remove all documents from the '{index_name}' where the sign up date is March 30th 2025
45 |     """
46 | 
47 | @mcp.prompt(description="Queries the index")
48 | async def search_index_prompt(index_name: str, id: str) -> str:
49 |     return f"""
50 | 
51 |     - Show all documents from the '{index_name}' index
52 |     - Show all documents from the '{index_name}' where the preferred language is French
53 |     - Show all documents from the '{index_name}' where the sign up date is March 30th 2025
54 |     """
55 | 
56 | @mcp.prompt(description="How many documents are in a specific index")
57 | async def get_document_count_prompt(index_name: str, id: str) -> str:
58 |     return f"How many documents are in the '{index_name}' index"
59 | 
60 | @mcp.prompt(description="List the names of the indexers in AI Search")
61 | async def list_indexers_prompt() -> str:
62 |     return "List the names of the indexers in AI Search"
63 | 
64 | @mcp.prompt(description="Get details about a specific indexer")
65 | async def get_indexer_detail_prompt(name: str) -> str:
66 |     return f"Show the details for the '{name}' indexer"
67 | 
68 | @mcp.prompt(description="Creates an indexer with a data source")
69 | async def create_indexer_datasource_prompt(indexer_name: str, data_source_name: str) -> str:
70 |     return f"Create an indexer named '{indexer_name}' with field mappings using the data source '{data_source_name}'"
71 | 
72 | @mcp.prompt(description="Creates an indexer with a data source and skill set")
73 | async def create_indexer_datasource_skill_set_prompt(indexer_name: str, data_source_name: str,
74 |                                                      skill_set_name: str) -> str:
75 |     return f"Create an indexer named '{indexer_name}' with field mappings using the data source '{data_source_name}' and skillset '{skill_set_name}'"
76 | 
77 | @mcp.prompt(description="List all the data sources and skill sets")
78 | async def list_skills_and_data_sources_prompt() -> str:
79 |     return "List all the skill sets and data sources"
80 | 
81 | @mcp.prompt(description="Show details for a specific data source")
82 | async def get_data_source_details_prompt(name: str) -> str:
83 |     return f"Show details for the '{name}' data source"
84 | 
85 | @mcp.prompt(description="Show details for a specific skill set")
86 | async def get_skillset_details_prompt(name: str) -> str:
87 |     return f"Show details for the '{name}' skillset"
--------------------------------------------------------------------------------
/src/mcp_foundry/mcp_foundry_knowledge/resources.py:
--------------------------------------------------------------------------------
1 | from mcp_foundry.mcp_server import mcp
2 | 
3 | @mcp.resource("examples://python-mcp-client-pydantic-ai",
4 |               description="A resource showing how to communicate with the MCP server using Pydantic AI",
5 |               mime_type="text/markdown")
6 | async def sample_python_mcp_client_resource() -> str:
7 |     return """
8 | ## MCP Client Written with Pydantic AI
9 | 
10 | This is an MCP client written with Pydantic AI that is intended to be used for checking out all the capabilities of the MCP service
11 | 
12 | https://github.com/azure-ai-foundry/mcp-foundry/sample-python-clients
13 | """
--------------------------------------------------------------------------------
/src/mcp_foundry/mcp_foundry_knowledge/tools.py:
--------------------------------------------------------------------------------
1 | 
2 | from pathlib import Path
3 | from typing import Optional, List, cast
4 | import logging
5 | import sys
6 | 
7 | import httpx
8 | from azure.search.documents.indexes._generated.models import FieldMapping
9 | from mcp_foundry.mcp_server import mcp
10 | 
11 | from .data_access_objects import SearchIndexDao, SearchClientDao, SearchIndexerDao, SearchIndexSchema, \
12 |     convert_pydantic_model_to_search_index, FieldMappingModel, convert_to_field_mappings, \
13 |     OperationResult, \
14 |     SearchDocument
15 | 
16 | # Configure logging
17 | logging.basicConfig(
18 |     level=logging.INFO,
19 |     format="%(asctime)s - %(name)s - %(levelname)s - %(message)s",
20 |     stream=sys.stderr,
21 | )
22 | logger = logging.getLogger("mcp_foundry_knowledge")
23 | 
24 | @mcp.tool(description="Reads the content of a local file and returns it as a string")
25 | def fk_fetch_local_file_contents(file_path:
str, encoding: str = "utf-8") -> str: 26 | """ 27 | Reads the content of a local file and returns it as a string. 28 | 29 | Args: 30 | file_path (str): The path to the local file. 31 | encoding (str): The character encoding to use (default is 'utf-8'). 32 | 33 | Returns: 34 | str: The contents of the file as a string. 35 | 36 | Raises: 37 | FileNotFoundError: If the file does not exist. 38 | IOError: If the file cannot be read. 39 | """ 40 | path = Path(file_path) 41 | if not path.is_file(): 42 | raise FileNotFoundError(f"No such file: '{file_path}'") 43 | 44 | return path.read_text(encoding=encoding) 45 | 46 | @mcp.tool(description="Fetches the contents of the given HTTP URL") 47 | async def fk_fetch_url_contents(url: str) -> str: 48 | """ 49 | Fetches the contents of the given HTTP URL 50 | 51 | Args: 52 | url (str): The URL to fetch content from. 53 | 54 | Returns: 55 | str: The content retrieved from the URL. 56 | 57 | Raises: 58 | httpx.RequestError: If the request fails due to a network problem. 59 | httpx.HTTPStatusError: If the response status code is not 2xx. 60 | """ 61 | async with httpx.AsyncClient() as client: 62 | response = await client.get(url) 63 | response.raise_for_status() 64 | return response.text 65 | 66 | @mcp.tool(description="Retrieves the names of all indexes ") 67 | async def list_index_names() -> list[str]: 68 | """ 69 | Retrieves the names of all indexes 70 | 71 | Returns: 72 | list[str]: A list containing the names of all available search indexes. 73 | """ 74 | dao = SearchIndexDao() 75 | return dao.retrieve_index_names() 76 | 77 | @mcp.tool(description="Retrieves the schemas for all indexes ") 78 | async def list_index_schemas() -> list[OperationResult]: 79 | """ 80 | Retrieves the schemas for all indexes. 81 | 82 | Returns: 83 | list[OperationResult]: A list of dictionaries, each representing the schema of an index. 84 | """ 85 | dao = SearchIndexDao() 86 | return cast(list[OperationResult], dao.retrieve_index_schemas()) 87 | 88 | @mcp.tool(description="Retrieves the schema for a specific index") 89 | async def retrieve_index_schema(index_name: str) -> OperationResult: 90 | """ 91 | Retrieves the schema for a specific index 92 | 93 | Args: 94 | index_name (str): The name of the index for which the schema should be retrieved. 95 | 96 | Returns: 97 | OperationResult: A dictionary representing the schema of the specified index. 98 | """ 99 | dao = SearchIndexDao() 100 | return cast(OperationResult, dao.retrieve_index_schema(index_name)) 101 | 102 | @mcp.tool(description="Creates an AI Search index") 103 | async def create_index(index_definition: SearchIndexSchema) -> OperationResult: 104 | """ 105 | Creates a new index. 106 | 107 | Args: 108 | index_definition (SearchIndexSchema): The full definition of the index to be created. 109 | 110 | Returns: 111 | OperationResult: The serialized response of the created index. 
112 | """ 113 | dao = SearchIndexDao() 114 | compatible_index_definition = convert_pydantic_model_to_search_index(index_definition) 115 | return cast(OperationResult, dao.create_index(compatible_index_definition)) 116 | 117 | @mcp.tool(description="Updates an AI Search index with a new index definition") 118 | async def modify_index(index_name: str, updated_index_definition: SearchIndexSchema) -> OperationResult: 119 | """ 120 | Updates an AI Search index with the modified index definition 121 | 122 | Args: 123 | index_name (str): The name of the index to be updated 124 | updated_index_definition (SearchIndexSchema): The full updated definition of the index. 125 | 126 | Returns: 127 | OperationResult: The serialized response of the modified index. 128 | """ 129 | dao = SearchIndexDao() 130 | compatible_index_definition = convert_pydantic_model_to_search_index(updated_index_definition) 131 | return cast(OperationResult, dao.modify_index(index_name, compatible_index_definition)) 132 | 133 | @mcp.tool(description="Deletes the specified index") 134 | async def delete_index(index_name: str) -> str: 135 | """ 136 | Deletes an existing index . 137 | 138 | Args: 139 | index_name (str): The name of the index to be deleted. 140 | 141 | Returns: 142 | str: The result of the operation 143 | """ 144 | dao = SearchIndexDao() 145 | dao.delete_index(index_name) 146 | return "Successful" 147 | 148 | @mcp.tool(description="Return the total number of documents in the index") 149 | def get_document_count(index_name: str) -> int: 150 | """ 151 | Returns the total number of documents in the index 152 | 153 | Args: 154 | index_name (str): the name of the index 155 | 156 | Returns: 157 | int: The total number of documents in the index 158 | """ 159 | search_client_dao = SearchClientDao(index_name) 160 | result = search_client_dao.get_document_count() 161 | return result 162 | 163 | @mcp.tool(description="Adds a document to the index") 164 | def add_document(index_name: str, document: SearchDocument) -> OperationResult: 165 | """ 166 | Add a document to the specified Azure AI Search index 167 | 168 | Args: 169 | index_name (str): the name of the index we are adding the document to 170 | document (SearchDocument): The contents of the document to be added to the index. 171 | 172 | Returns: 173 | OperationResult: The serialized result of the add operation for the single document. 174 | """ 175 | search_client_dao = SearchClientDao(index_name) 176 | result = search_client_dao.add_document(document.model_dump()) 177 | return cast(OperationResult, result) 178 | 179 | @mcp.tool(description="Removes a document from the index") 180 | async def delete_document(index_name: str, key_field_name: str, key_value: str) -> OperationResult: 181 | """ 182 | Removes a document from the index. 183 | 184 | Args: 185 | index_name (str): the name of the index from which to delete the document 186 | key_field_name (str): The name of the key field in the index we are removing the document from 187 | key_value (str): The value of the key field for the document we are deleting 188 | 189 | Returns: 190 | OperationResult: A list of serialized results for each document deletion operation. 
191 | """ 192 | search_client_dao = SearchClientDao(index_name) 193 | return cast(OperationResult, search_client_dao.delete_document(key_field_name, key_value)) 194 | 195 | @mcp.tool(description="Search a specific index for documents in that index") 196 | async def query_index( 197 | index_name: str, 198 | search_text: Optional[str] = None, 199 | *, 200 | query_filter: Optional[str] = None, 201 | order_by: Optional[List[str]] = None, 202 | select: Optional[List[str]] = None, 203 | skip: Optional[int] = None, 204 | top: Optional[int] = None, 205 | include_total_count: Optional[bool] = None, 206 | ) -> list[dict]: 207 | """Searches the Azure search index for documents matching the query criteria 208 | 209 | :param str index_name: The name of the index to query. This parameter is required 210 | :param str search_text: A full-text search query expression; Use "*" or omit this parameter to 211 | match all documents. 212 | :param str query_filter: The OData $filter expression to apply to the search query. 213 | :param list[str] order_by: The list of OData $orderby expressions by which to sort the results. Each 214 | expression can be either a field name or a call to either the geo.distance() or the 215 | search.score() functions. Each expression can be followed by asc to indicate ascending, and 216 | desc to indicate descending. The default is ascending order. Ties will be broken by the match 217 | scores of documents. If no OrderBy is specified, the default sort order is descending by 218 | document match score. There can be at most 32 $orderby clauses. 219 | :param list[str] select: The list of fields to retrieve. If unspecified, all fields marked as retrievable 220 | in the schema are included. 221 | :param int skip: The number of search results to skip. This value cannot be greater than 100,000. 222 | If you need to scan documents in sequence, but cannot use $skip due to this limitation, 223 | consider using $orderby on a totally-ordered key and $filter with a range query instead. 224 | :param int top: The number of search results to retrieve. This can be used in conjunction with 225 | $skip to implement client-side paging of search results. If results are truncated due to 226 | server-side paging, the response will include a continuation token that can be used to issue 227 | another Search request for the next page of results. 228 | :param bool include_total_count: A value that specifies whether to fetch the total count of 229 | results. Default is false. Setting this value to true may have a performance impact. Note that 230 | the count returned is an approximation. 231 | :rtype: list[dict] 232 | """ 233 | search_client_dao = SearchClientDao(index_name) 234 | 235 | search_results: list[dict] = search_client_dao.query_index( 236 | search_text=search_text, 237 | include_total_count=include_total_count, 238 | query_filter=query_filter, 239 | order_by=order_by, 240 | select=select, 241 | skip=skip, 242 | top=top 243 | ) 244 | 245 | return search_results 246 | 247 | @mcp.tool( 248 | description="Retrieves the list of all the names of the indexers") 249 | async def list_indexers() -> list[str]: 250 | """ 251 | Retrieves the list of all indexers registered . 252 | 253 | Returns: 254 | list[str]: A list of indexer names. 
255 |     """
256 |     search_indexer_dao = SearchIndexerDao()
257 |     return search_indexer_dao.list_indexers()
258 | 
259 | @mcp.tool(description="Retrieves the details of a specific indexer by name.")
260 | async def get_indexer(name: str) -> OperationResult:
261 |     """
262 |     Retrieves the details of a specific indexer by name.
263 | 
264 |     Args:
265 |         name (str): The name of the indexer to retrieve.
266 | 
267 |     Returns:
268 |         OperationResult: A dictionary containing the indexer details.
269 |     """
270 |     search_indexer_dao = SearchIndexerDao()
271 |     return cast(OperationResult, search_indexer_dao.get_indexer(name))
272 | 
273 | @mcp.tool(description="Creates a new indexer")
274 | async def create_indexer(
275 |     name: str,
276 |     data_source_name: str,
277 |     target_index_name: str,
278 |     description: str,
279 |     field_mappings: list[FieldMappingModel],
280 |     output_field_mappings: list[FieldMappingModel],
281 |     skill_set_name: str = None
282 | ) -> OperationResult:
283 |     """
284 |     Creates a new indexer.
285 | 
286 |     Args:
287 |         name (str): The name of the indexer to be created.
288 |         data_source_name (str): The name of the data source connection the indexer reads from.
289 |         target_index_name (str): The name of the target index the indexer writes to.
290 |         description (str): A human-readable description of the indexer.
291 |         field_mappings (list[FieldMappingModel]): Mappings from source document fields to index fields.
292 |         output_field_mappings (list[FieldMappingModel]): Mappings from skill outputs to index fields.
293 |         skill_set_name (str): Optional name of the skillset to attach to the indexer.
294 | 
295 |     Returns:
296 |         OperationResult: A dictionary representing the created indexer.
297 |     """
298 |     search_indexer_dao = SearchIndexerDao()
299 | 
300 |     compat_field_mappings = convert_to_field_mappings(field_mappings)
301 |     compat_output_field_mappings = convert_to_field_mappings(output_field_mappings)
302 | 
303 |     result = search_indexer_dao.create_indexer(
304 |         name=name,
305 |         data_source_name=data_source_name,
306 |         target_index_name=target_index_name,
307 |         description=description,
308 |         field_mappings=compat_field_mappings,
309 |         output_field_mappings=compat_output_field_mappings,
310 |         skill_set_name=skill_set_name
311 |     )
312 | 
313 |     return cast(OperationResult, result)
314 | 
315 | @mcp.tool(description="Deletes the indexer")
316 | async def delete_indexer(name: str) -> str:
317 |     """
318 |     Deletes an indexer by name.
319 | 
320 |     Args:
321 |         name (str): The name of the indexer to delete.
322 | 
323 |     Returns:
324 |         str: "Successful" once the indexer has been deleted.
325 |     """
326 |     search_indexer_dao = SearchIndexerDao()
327 |     search_indexer_dao.delete_indexer(name)
328 |     return "Successful"
329 | 
330 | @mcp.tool(description="Retrieves the list of all data source names")
331 | async def list_data_sources() -> list[str]:
332 |     """
333 |     Retrieves the list of all data source names
334 | 
335 |     Returns:
336 |         list[str]: A list of data source names.
337 |     """
338 |     search_indexer_dao = SearchIndexerDao()
339 |     return search_indexer_dao.list_data_sources()
340 | 
341 | @mcp.tool(description="Retrieves the details of a specific data source by name")
342 | async def get_data_source(name: str) -> OperationResult:
343 |     """
344 |     Retrieves the details of a specific data source by name.
345 | 
346 |     Args:
347 |         name (str): The name of the data source to retrieve.
348 | 
349 |     Returns:
350 |         OperationResult: A dictionary containing the data source details.
351 | """ 352 | search_indexer_dao = SearchIndexerDao() 353 | return cast(OperationResult, search_indexer_dao.get_data_source(name)) 354 | 355 | @mcp.tool(description="Retrieves the list of the names of all skill sets") 356 | async def list_skill_sets() -> list[str]: 357 | """ 358 | Retrieves the list of all skill sets 359 | 360 | Returns: 361 | list[str]: A list of skill set names. 362 | """ 363 | search_indexer_dao = SearchIndexerDao() 364 | return search_indexer_dao.list_skill_sets() 365 | 366 | @mcp.tool(description="Retrieves the details of a specific skill set by name") 367 | async def get_skill_set(skill_set_name: str) -> OperationResult: 368 | """ 369 | Retrieves the details of a specific skill set by name. 370 | 371 | Args: 372 | skill_set_name (str): The name of the skill set to retrieve. 373 | 374 | Returns: 375 | OperationResult: A dictionary containing the skill set details. 376 | """ 377 | search_indexer_dao = SearchIndexerDao() 378 | return cast(OperationResult, search_indexer_dao.get_skill_set(skill_set_name)) 379 | -------------------------------------------------------------------------------- /src/mcp_foundry/mcp_foundry_model/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/azure-ai-foundry/mcp-foundry/af7ac18108edd8313c94812cf24b294c4e1bf4ea/src/mcp_foundry/mcp_foundry_model/__init__.py -------------------------------------------------------------------------------- /src/mcp_foundry/mcp_foundry_model/models.py: -------------------------------------------------------------------------------- 1 | from enum import Enum 2 | from pydantic import BaseModel 3 | 4 | class DeploymentOption(Enum): 5 | """ 6 | Enum to represent the deployment options for a model in the MCP Foundry. 7 | """ 8 | FREE_PLAYGROUND = "Free Playground" 9 | SERVERLESS_ENDPOINT = "Serverless Endpoint" 10 | OPENAI = "OpenAI" 11 | MANAGED_COMPUTE = "Managed Compute" 12 | LABS = "Labs" 13 | 14 | class ModelsList(BaseModel): 15 | """ 16 | Model to store the list of models in the MCP Foundry. 17 | """ 18 | total_models_count: int 19 | fetched_models_count: int 20 | summaries: list[dict] 21 | 22 | class ModelDetails(BaseModel): 23 | """ 24 | Model to store the details of a model in the MCP Foundry. 
25 |     """
26 |     details: dict
27 |     code_sample_azure: str | dict | None
28 |     code_sample_github: str | dict | None
29 |     type: DeploymentOption
30 |     link: str
--------------------------------------------------------------------------------
/src/mcp_foundry/mcp_foundry_model/tools.py:
--------------------------------------------------------------------------------
1 | from mcp.server.fastmcp import Context
2 | from mcp_foundry.mcp_server import mcp
3 | import requests
4 | import os
5 | import sys
6 | import logging
7 | from typing import Optional
8 | 
9 | from azure.mgmt.cognitiveservices.models import (
10 |     Deployment,
11 |     DeploymentModel,
12 |     DeploymentProperties,
13 |     DeploymentScaleSettings,
14 |     Sku,
15 | )
16 | 
17 | from .models import ModelDetails
18 | from .utils import (
19 |     deploy_inline_bicep_template,
20 |     get_client_headers_info,
21 |     get_code_sample_for_deployment_under_ai_services,
22 |     get_code_sample_for_github_model,
23 |     get_code_sample_for_labs_model,
24 |     get_cognitiveservices_client,
25 |     get_models_list,
26 | )
27 | 
28 | labs_api_url = os.environ.get("LABS_API_URL", "https://foundry-labs-mcp-api.azurewebsites.net/api/v1")
29 | 
30 | # Configure logging
31 | logging.basicConfig(
32 |     level=logging.INFO,
33 |     format="%(asctime)s - %(name)s - %(levelname)s - %(message)s",
34 |     stream=sys.stderr,
35 | )
36 | logger = logging.getLogger("mcp_foundry_model")
37 | 
38 | @mcp.tool()
39 | async def list_models_from_model_catalog(ctx: Context, search_for_free_playground: bool = False, publisher_name = "", license_name = "") -> str:
40 |     """
41 |     Retrieves a list of supported models from the Azure AI Foundry catalog.
42 | 
43 |     This function is useful when a user requests a list of available Foundry models or Foundry Labs projects.
44 |     It fetches models based on optional filters like whether the model supports free playground usage,
45 |     the publisher name, and the license type. The function will return the list of models with useful fields.
46 | 
47 |     Parameters:
48 |         ctx (Context): The context of the current session. Contains metadata about the request and session.
49 |         search_for_free_playground (bool, optional): If `True`, filters models to include only those that
50 |             can be used for free by users for prototyping. If `False`, all models will be included regardless of free playground support.
51 |             Defaults to `False`.
52 |         publisher_name (str, optional): A filter to specify the publisher of the models to retrieve. If provided,
53 |             only models from this publisher will be returned. Defaults to an empty string, meaning no filter is applied.
54 |         license_name (str, optional): A filter to specify the license type of the models to retrieve. If provided,
55 |             only models with this license will be returned. Defaults to an empty string, meaning no filter is applied.
56 | 
57 |     Returns:
58 |         str: A JSON-encoded string containing the list of models and their metadata. The list will include
59 |             model names, inference model names, summaries, and the total count of models retrieved.
60 | 
61 |     Usage:
62 |         Use this function when users inquire about available models from the Azure AI Foundry catalog.
63 |         It can also be used when filtering models by free playground usage, publisher name, or license type.
64 |         If the user didn't specify free playground or ask for models that support a GitHub token, explain that by default all models are shown, though only some of them support the free playground.
65 |         Explain to the user that if they want models suitable for prototyping that are free to use, they can look for models that support the free playground, or for models that they can use with a GitHub token.
66 |     """
67 |     max_pages = 3
68 |     # Note: if max_pages becomes larger, the agent will find it more difficult to "summarize" the result, which may not be desired.
69 |     # max_pages = 10
70 | 
71 |     logger.debug("Calling get_models_list with parameters:")
72 |     logger.debug(f"search_for_free_playground: {search_for_free_playground}")
73 |     logger.debug(f"publisher_name: {publisher_name}")
74 |     logger.debug(f"license_name: {license_name}")
75 |     logger.debug(f"max_pages: {max_pages}")
76 | 
77 |     models_list = get_models_list(ctx, search_for_free_playground, publisher_name, license_name, max_pages)
78 | 
79 |     return models_list.json()
80 | 
81 | @mcp.tool()
82 | async def list_azure_ai_foundry_labs_projects(ctx: Context):
83 |     """
84 |     Retrieves a list of state-of-the-art AI models from Microsoft Research available in Azure AI Foundry Labs.
85 | 
86 |     This function is used when a user requests information about the cutting-edge models and projects developed by Microsoft Research within the Azure AI Foundry Labs. These models represent the latest advancements in AI research and are often experimental or in early development stages.
87 | 
88 |     Parameters:
89 |         ctx (Context): The context of the current session, which includes metadata and session-specific information.
90 | 
91 |     Returns:
92 |         list: A list of the available AI models and projects in Azure AI Foundry Labs. The list will include information such as project names, descriptions, and possibly other metadata relevant to the state-of-the-art models.
93 | 
94 |     Usage:
95 |         Use this function when a user wants to explore the latest models and research projects available in the Azure AI Foundry Labs. These projects are typically cutting-edge and may involve new or experimental features not yet widely available.
96 | 
97 |     Notes:
98 |         - The models and projects in Azure AI Foundry Labs are generally from the forefront of AI research and may have specific requirements or experimental capabilities.
99 |         - The list returned may change frequently as new models and projects are developed and made available for exploration.
100 |     """
101 | 
102 |     headers = get_client_headers_info(ctx)
103 | 
104 |     response = requests.get(f"{labs_api_url}/projects?source=afl", headers=headers)
105 |     if response.status_code != 200:
106 |         return f"Error fetching projects from API: {response.status_code}"
107 | 
108 |     project_response = response.json()
109 | 
110 |     return project_response["projects"]
111 | 
112 | @mcp.tool()
113 | async def get_model_details_and_code_samples(model_name: str, ctx: Context):
114 |     """
115 |     Retrieves detailed information for a specific model from the Azure AI Foundry catalog.
116 | 
117 |     This function is used when a user requests detailed information about a particular model in the Foundry catalog.
118 |     It fetches the model's metadata, capabilities, descriptions, and other relevant details associated with the given model name.
119 | 
120 |     It is important that you provide the user a link to more information for compliance reasons. Use the link provided.
121 | 
122 |     Parameters:
123 |         model_name (str): The name of the model whose details are to be retrieved. This is a required parameter.
124 |         ctx (Context): The context of the current session, containing metadata about the request and session.
125 | 126 | Returns: 127 | dict: A dictionary containing the model's detailed information, including: 128 | - model name, version, framework, tags, datasets 129 | - model URL and storage location 130 | - model capabilities (e.g., agents, assistants, reasoning, tool-calling) 131 | - description, summary, and key capabilities 132 | - publisher information, licensing details, and terms of use 133 | - model creation and modification times 134 | - variant information, model metadata, and system requirements 135 | - link to more information about the model 136 | 137 | Usage: 138 | Call this function when you need to retrieve detailed information about a model using its asset ID. 139 | This is useful when users inquire about a model's features, or when specific metadata about a model is required. 140 | """ 141 | headers = get_client_headers_info(ctx) 142 | 143 | #TODO: Have link go to actual model card not just generic site 144 | model_details = { 145 | "details": {}, 146 | "code_sample_azure": None, 147 | "code_sample_github": None, 148 | "type": None, 149 | "link": "https://ai.azure.com/explore/models" 150 | } 151 | 152 | response = requests.get(f"{labs_api_url}/projects?source=afl", headers=headers) 153 | if response.status_code != 200: 154 | return f"Error fetching projects from API: {response.status_code}" 155 | 156 | project_response = response.json() 157 | 158 | project_names = [project["name"] for project in project_response["projects"]] 159 | 160 | if model_name in project_names: 161 | model_details["details"] = project_response["projects"][project_names.index(model_name)] 162 | model_details["code_sample_github"] = await get_code_sample_for_labs_model(model_name, ctx) 163 | model_details["type"] = "Labs" 164 | model_details["link"] = "https://ai.azure.com/labs" 165 | return ModelDetails(**model_details) 166 | 167 | model_list_details = get_models_list(ctx, model_name=model_name) 168 | if model_list_details.fetched_models_count == 0: 169 | return f"Model '{model_name}' not found in the catalog." 
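    # Illustrative sketch only (assumed shape, not verbatim catalog output): the summary entry
    # consumed below (model_list_details.summaries[0]) is expected to expose at least the keys
    # this function reads, e.g.
    #   {"assetId": "...", "name": "...", "publisher": "...",
    #    "inferenceTasks": ["chat-completion"],
    #    "deployment_options": {"openai": False, "serverless_endpoint": True,
    #                           "managed_compute": False, "free_playground": False}}
    # The deployment_options block is attached by get_models_list(); the remaining keys come
    # from the catalog response itself.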
170 | 171 | model_list_details = model_list_details.summaries[0] 172 | 173 | response = requests.get(f"https://ai.azure.com/api/westus2/modelregistry/v1.0/registry/models?assetIdOrReference={model_list_details['assetId']}", headers=headers) 174 | if response.status_code != 200: 175 | return f"Error fetching model details from API: {response.status_code}" 176 | 177 | model_details["details"] = response.json() 178 | 179 | # Free playground model add GH guidance to model details 180 | if "freePlayground" in model_details['details']['kvTags'] and model_details['details']['kvTags']["freePlayground"] == "true": 181 | model_details["type"] = "Free Playground" 182 | model_details["code_sample_github"] = await get_code_sample_for_github_model(model_list_details["publisher"], model_list_details["name"], ctx) 183 | 184 | # OpenAI model add OpenAI guidance to model details 185 | if model_list_details["deployment_options"]["openai"]: 186 | if not model_details["type"] == "Free Playground": 187 | model_details["type"] = "OpenAI" 188 | model_details["code_sample_azure"] = get_code_sample_for_deployment_under_ai_services(model_list_details["name"], model_list_details['inferenceTasks'][0], "", "") 189 | 190 | # PayGo model add PayGo guidance to model details 191 | elif model_list_details["deployment_options"]["serverless_endpoint"]: 192 | if not model_details["type"] == "Free Playground": 193 | model_details["type"] = "Serverless Endpoint" 194 | model_details["code_sample_azure"] = get_code_sample_for_deployment_under_ai_services(model_list_details["name"],model_list_details['inferenceTasks'][0], "", "") 195 | 196 | # Managed compute model add managed compute guidance to model details 197 | elif model_list_details["deployment_options"]["managed_compute"]: 198 | model_details["type"] = "Managed Compute" 199 | pass 200 | 201 | return ModelDetails(**model_details) 202 | 203 | @mcp.tool() 204 | def get_prototyping_instructions_for_github_and_labs(ctx: Context) -> str: 205 | """ 206 | Provides comprehensive instructions and setup guidance for starting to work with models from Azure AI Foundry and Azure AI Foundry Labs. 207 | 208 | This function is crucial to call whenever a user begins talking about or expressing an interest in working with Foundry models. It provides the essential prototyping instructions that include setup, configuration, and the first steps in querying and utilizing the models. It should always be invoked before any other interactions with the models to ensure that the user has the necessary context and knowledge to proceed effectively. 209 | 210 | The instructions include: 211 | - Required setup for working with Foundry models. 212 | - Details about how to configure the environment. 213 | - How to query the models. 214 | - Best practices for using Foundry models in prototyping. 215 | 216 | Parameters: 217 | ctx (Context): The context of the current session, which may include session-specific information and metadata that can be used to customize the returned instructions. 218 | 219 | Returns: 220 | str: A detailed set of instructions to guide the user in setting up and using Foundry models, including steps on how to get started with queries and the prototyping process. 221 | 222 | Usage: 223 | Call this function at the beginning of any interaction involving Foundry models to provide the user with the necessary setup information and best practices. This ensures that the user can begin their work with all the foundational knowledge and tools needed. 
224 | 
225 |     Notes:
226 |         - This function should be the first step before any interaction with the Foundry models to ensure proper setup and understanding.
227 |         - It is essential to invoke this function as it provides the groundwork for a successful prototyping experience with Foundry models.
228 | 
229 |     Importance:
230 |         The function is critical for preparing the user to effectively use the Azure AI Foundry models, ensuring they have the proper guidance on how to interact with them from the very beginning.
231 |     """
232 | 
233 |     headers = get_client_headers_info(ctx)
234 |     response = requests.get(f"{labs_api_url}/resources/resource/copilot-instructions.md", headers=headers)
235 |     if response.status_code != 200:
236 |         return f"Error fetching instructions from API: {response.status_code}"
237 | 
238 |     copilot_instructions = response.json()
239 |     return copilot_instructions["resource"]
240 | 
241 | @mcp.tool()
242 | def get_model_quotas(subscription_id: str, location: str) -> list[dict]:
243 |     """Get model quotas for a specific Azure location.
244 | 
245 |     Args:
246 |         subscription_id: The ID of the Azure subscription. This is a string
247 |             with the format `xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx`
248 |         location: The Azure location to retrieve quotas for.
249 | 
250 |     Returns:
251 |         list: Returns a list of quota usages.
252 | 
253 |     Usage:
254 |         Call this when you need to get information about available quota.
255 |         You should ensure that you use a valid subscription id.
256 |     """
257 | 
258 |     client = get_cognitiveservices_client(subscription_id)
259 |     return [usage.as_dict() for usage in client.usages.list(location)]
260 | 
261 | @mcp.tool()
262 | def create_azure_ai_services_account(
263 |     subscription_id: str,
264 |     resource_group: str,
265 |     azure_ai_services_name: str,
266 |     location: str,
267 | ) -> dict:
268 |     """Create an Azure AI services account.
269 | 
270 |     The created Azure AI services account can be used to create a Foundry Project.
271 | 
272 |     Args:
273 |         subscription_id: The ID of the Azure subscription to create the account in.
274 |         resource_group: The name of the resource group to create the account in.
275 |         azure_ai_services_name: The name of the Azure AI services account to create.
276 |         location: The Azure region to create the account in.
277 |             The account is created with the "S0" SKU and a system-assigned identity.
278 | 
279 |     Returns:
280 |         dict: The created Azure AI services account.
281 |     """
282 | 
283 |     bicep_template = f"""
284 | param ai_services_name string = '{azure_ai_services_name}'
285 | param location string = '{location}'
286 | 
287 | resource account 'Microsoft.CognitiveServices/accounts@2025-04-01-preview' = {{
288 |     name: ai_services_name
289 |     location: location
290 |     identity: {{
291 |         type: 'SystemAssigned'
292 |     }}
293 |     kind: 'AIServices'
294 |     sku: {{
295 |         name: 'S0'
296 |     }}
297 |     properties: {{
298 |         // Networking
299 |         publicNetworkAccess: 'Enabled'
300 | 
301 |         // Specifies whether this resource supports project management as child resources, used as containers for access management, data isolation, and cost in AI Foundry.
302 |         allowProjectManagement: true
303 | 
304 |         // Defines developer API endpoint subdomain
305 |         customSubDomainName: ai_services_name
306 | 
307 |         // Auth
308 |         disableLocalAuth: false
309 |     }}
310 | }}
311 | """
312 | 
313 |     # TODO: Use the Python SDK once the update is released
314 |     deploy_inline_bicep_template(subscription_id, resource_group, bicep_template)
315 | 
316 |     client = get_cognitiveservices_client(subscription_id)
317 | 
318 |     return client.accounts.get(resource_group, azure_ai_services_name)
319 | 
320 | @mcp.tool()
321 | def list_deployments_from_azure_ai_services(subscription_id: str, resource_group: str, azure_ai_services_name: str) -> list[dict]:
322 |     """
323 |     Retrieves a list of deployments from Azure AI Services.
324 | 
325 |     This function is used when a user requests information about the available deployments in Azure AI Services. It provides an overview of the models and services that are currently deployed and available for use.
326 | 
327 |     Parameters:
328 |         subscription_id (str), resource_group (str), azure_ai_services_name (str): Identify the subscription, resource group, and Azure AI Services account whose deployments should be listed.
329 | 
330 |     Returns:
331 |         list: A list containing the details of the deployments in Azure AI Services. The list will include information such as deployment names, descriptions, and possibly other metadata relevant to the deployed services.
332 | 
333 |     Usage:
334 |         Use this function when a user wants to explore the available deployments in Azure AI Services. This can help users understand what models and services are currently operational and how they can be utilized.
335 | 
336 |     Notes:
337 |         - The deployments listed may include various models and services that are part of Azure AI Services.
338 |         - The list may change frequently as new deployments are added or existing ones are updated.
339 |     """
340 | 
341 |     client = get_cognitiveservices_client(subscription_id)
342 | 
343 |     return [deployment.as_dict() for deployment in client.deployments.list(resource_group,account_name=azure_ai_services_name)]
344 | 
345 | @mcp.tool()
346 | async def deploy_model_on_ai_services(
347 |     deployment_name: str,
348 |     model_name: str,
349 |     model_format: str,
350 |     azure_ai_services_name: str,
351 |     resource_group: str,
352 |     subscription_id: str,
353 |     model_version: Optional[str] = None,
354 |     model_source: Optional[str] = None,
355 |     sku_name: Optional[str] = None,
356 |     sku_capacity: Optional[int] = None,
357 |     scale_type: Optional[str] = None,
358 |     scale_capacity: Optional[int] = None,
359 | ) -> Deployment:
360 |     """Deploy a model to Azure AI.
361 | 
362 |     This function is used to deploy a model on Azure AI Services, allowing users to integrate the model into their applications and utilize its capabilities.
363 | 
364 |     Args:
365 |         deployment_name: The name of the deployment.
366 |         model_name: The name of the model to deploy.
367 |         model_format: The format of the model (e.g. "OpenAI", "Meta", "Microsoft").
368 |         azure_ai_services_name: The name of the Azure AI services account to deploy to.
369 |         resource_group: The name of the resource group containing the Azure AI services account.
370 |         subscription_id: The ID of the Azure subscription. This is a string
371 |             with the format `xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx`.
372 |         model_version: (Optional) The version of the model to deploy. If not provided, the default version
373 |             will be used.
374 |         model_source: (Optional) The source of the model.
375 |         sku_name: (Optional) The SKU name for the deployment.
376 |         sku_capacity: (Optional) The SKU capacity for the deployment.
377 |         scale_type: (Optional) The scale type for the deployment.
378 |         scale_capacity: (Optional) The scale capacity for the deployment.
379 | 
380 |     Returns:
381 |         Deployment: The deployment object created or updated.
382 |     """
383 | 
384 |     model = DeploymentModel(
385 |         format=model_format,
386 |         name=model_name,
387 |         version=model_version,
388 |     )
389 | 
390 |     sku: Optional[Sku] = None
391 |     scale_settings: Optional[DeploymentScaleSettings] = None
392 |     if model_source is not None:
393 |         model.source = model_source
394 | 
395 |     if sku_name is not None:
396 |         sku = Sku(name=sku_name, capacity=sku_capacity)
397 | 
398 |     if scale_type is not None:
399 |         scale_settings = DeploymentScaleSettings(
400 |             scale_type=scale_type, capacity=scale_capacity
401 |         )
402 | 
403 |     properties = DeploymentProperties(
404 |         model=model,
405 |         scale_settings=scale_settings,
406 |     )
407 | 
408 |     client = get_cognitiveservices_client(subscription_id)
409 | 
410 |     return client.deployments.begin_create_or_update(
411 |         resource_group,
412 |         azure_ai_services_name,
413 |         deployment_name,
414 |         deployment=Deployment(properties=properties, sku=sku),
415 |         polling=False,
416 |     )
417 | 
418 | @mcp.tool()
419 | def create_foundry_project(
420 |     subscription_id: str,
421 |     resource_group: str,
422 |     azure_ai_services_name: str,
423 |     project_name: str,
424 |     location: str = "eastus",
425 | ) -> dict:
426 |     """Create an Azure AI Foundry Project.
427 | 
428 |     Args:
429 |         subscription_id: The ID of the subscription to create the project in.
430 |         resource_group: The name of the resource group to create the project in.
431 |         azure_ai_services_name: The name of the Azure AI services account to link the project to.
432 |             The account must have been created with allowProjectManagement set to "true".
433 |         project_name: The name of the project to create.
434 |         location: The Azure region to create the project in.
435 | 
436 |     Returns:
437 |         dict: The result of the Bicep deployment that creates the Foundry project.
438 | """ 439 | 440 | bicep_template = f""" 441 | param ai_services_name string = '{azure_ai_services_name}' 442 | param location string = '{location}' 443 | param defaultProjectName string = '{project_name}' 444 | 445 | 446 | resource account 'Microsoft.CognitiveServices/accounts@2025-04-01-preview' existing = {{ 447 | name: ai_services_name 448 | }} 449 | 450 | resource project 'Microsoft.CognitiveServices/accounts/projects@2025-04-01-preview' = {{ 451 | name: defaultProjectName 452 | parent: account 453 | location: location 454 | 455 | identity: {{ 456 | type: 'SystemAssigned' 457 | }} 458 | properties: {{ }} 459 | }} 460 | """ 461 | 462 | # TODO: Use the Python SDK once the update is released 463 | return deploy_inline_bicep_template(subscription_id, resource_group, bicep_template) 464 | -------------------------------------------------------------------------------- /src/mcp_foundry/mcp_foundry_model/utils.py: -------------------------------------------------------------------------------- 1 | import json 2 | import logging 3 | import os 4 | import re 5 | import subprocess 6 | import sys 7 | import tempfile 8 | from pathlib import Path 9 | from typing import Optional 10 | 11 | import dotenv 12 | import json 13 | import logging 14 | import os 15 | import re 16 | import subprocess 17 | import sys 18 | import tempfile 19 | from pathlib import Path 20 | from typing import Optional 21 | 22 | import dotenv 23 | import requests 24 | from azure.identity import DefaultAzureCredential 25 | from azure.mgmt.cognitiveservices import CognitiveServicesManagementClient 26 | from jinja2.sandbox import SandboxedEnvironment 27 | from markupsafe import Markup 28 | from azure.identity import DefaultAzureCredential 29 | from azure.mgmt.cognitiveservices import CognitiveServicesManagementClient 30 | from jinja2.sandbox import SandboxedEnvironment 31 | from markupsafe import Markup 32 | from mcp.server.fastmcp import Context 33 | from mcp_foundry.mcp_foundry_model.models import ModelsList 34 | 35 | dotenv.load_dotenv() 36 | 37 | logger = logging.getLogger("mcp_foundry") 38 | logging.basicConfig(level=logging.DEBUG) 39 | 40 | labs_api_url = os.environ.get("LABS_API_URL", "https://foundry-labs-mcp-api.azurewebsites.net/api/v1") 41 | 42 | def get_client_headers_info(ctx): 43 | """Get client headers info.""" 44 | client_info = getattr( 45 | getattr(ctx.session._client_params, "clientInfo", None), "__dict__", {}) or {} 46 | client_name = client_info.get("name", "UnknownClient").replace(" ", "-") 47 | client_version = client_info.get("version", "0.0.0") 48 | 49 | headers = { 50 | "User-Agent": f"MCP-Client/{client_name} - {client_version}" 51 | } 52 | return headers 53 | 54 | def get_models_list(ctx: Context, search_for_free_playground: bool = False, publisher_name: str = "", license_name: str = "", 55 | max_pages: int = 10, model_name: str = "") -> ModelsList: 56 | """Get a list of all supported models from Azure AI Foundry with optional filters.""" 57 | url = "https://api.catalog.azureml.ms/asset-gallery/v1.0/models" 58 | headers = get_client_headers_info(ctx) 59 | 60 | filters = [] 61 | 62 | # Always include 'latest' label 63 | filters.append({"field": "labels", "values": ["latest"], "operator": "eq"}) 64 | 65 | # Only add the freePlayground filter if the value is exactly True 66 | # If search_for_free_playground is False or None, do not add any filter for it 67 | if search_for_free_playground is True: 68 | filters.append({ 69 | "field": "freePlayground", 70 | "values": ["true"], 71 | "operator": "eq" 72 | }) 
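    # For example (illustrative), with search_for_free_playground=True and no other filters,
    # the request body assembled below would look like:
    #   {"filters": [{"field": "labels", "values": ["latest"], "operator": "eq"},
    #                {"field": "freePlayground", "values": ["true"], "operator": "eq"}]}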
73 | 74 | if publisher_name is not None and publisher_name != "": 75 | filters.append({ 76 | "field": "publisher", 77 | "values": [publisher_name], 78 | "operator": "contains" 79 | }) 80 | 81 | if license_name is not None and license_name != "": 82 | filters.append({ 83 | "field": "license", 84 | "values": [license_name], 85 | "operator": "contains" 86 | }) 87 | 88 | if model_name is not None and model_name != "": 89 | filters.append({ 90 | "field": "name", 91 | "values": [model_name], 92 | "operator": "eq" 93 | }) 94 | 95 | body = {"filters": filters} 96 | logger.info(f"Request body: {body}") 97 | 98 | models_list = {"total_models_count": 0, 99 | "fetched_models_count": 0, "summaries": []} 100 | 101 | page_count = 0 102 | 103 | try: 104 | while True and page_count < max_pages: 105 | page_count += 1 106 | try: 107 | response = requests.post(url, json=body, headers=headers) 108 | response.raise_for_status() 109 | except Exception as e: 110 | logger.error(f"Exception during POST request on page {page_count}: {e}") 111 | break 112 | try: 113 | res_json = response.json() 114 | except Exception as e: 115 | logger.error(f"Exception parsing JSON response on page {page_count}: {e}") 116 | break 117 | 118 | logger.info(f"### Page: {page_count}") 119 | # logger.debug(f"Response body: {res_json}") 120 | 121 | if "summaries" not in res_json: 122 | logger.warning(f"No 'summaries' in response on page {page_count}") 123 | break 124 | 125 | for summary in res_json["summaries"]: 126 | try: 127 | summary["deployment_options"] = { 128 | "openai": False, 129 | "serverless_endpoint": False, 130 | "managed_compute": False, 131 | "free_playground": False, 132 | } 133 | 134 | if "playgroundLimits" in summary: 135 | summary["deployment_options"]['free_playground'] = True 136 | 137 | publisher = summary.get("publisher", "") 138 | azureOffers = summary.get("azureOffers", []) 139 | # Even if publisher and azureOffers are present, they can be None 140 | # or empty, so we need to fall back to default values 141 | if not publisher: 142 | publisher = "" 143 | if not azureOffers: 144 | azureOffers = [] 145 | 146 | if publisher and publisher.lower() == "openai": 147 | summary["deployment_options"]['openai'] = True 148 | else: 149 | if "standard-paygo" in azureOffers: 150 | summary["deployment_options"]['serverless_endpoint'] = True 151 | if "VM" in azureOffers or "VM-withSurcharge" in azureOffers: 152 | summary["deployment_options"]['managed_compute'] = True 153 | except Exception as e: 154 | logger.error(f"Exception processing summary on page {page_count}: {e}") 155 | logger.error(f"publisher: {publisher}") 156 | logger.error(f"azureOffers: {azureOffers}") 157 | logger.error(f"Summary: {summary}") 158 | 159 | models_list["total_models_count"] = res_json.get("totalCount", 0) 160 | models_list["summaries"].extend(res_json["summaries"]) 161 | 162 | # If there are no more pages, break the loop 163 | if not res_json.get("continuationToken", False): 164 | break 165 | 166 | # Update the body for the next request 167 | body["continuationToken"] = res_json.get("continuationToken") 168 | except Exception as e: 169 | logger.error(f"Exception in get_models_list main loop: {e}") 170 | 171 | models_list["fetched_models_count"] = len(models_list["summaries"]) 172 | 173 | # logging the total models count and fetched models count 174 | logger.info(f"Total models count: {models_list['total_models_count']}") 175 | logger.info(f"Fetched models count: {models_list['fetched_models_count']}") 176 | 177 | try: 178 | return 
ModelsList(**models_list) 179 | except Exception as e: 180 | logger.error(f"Exception constructing ModelsList: {e}") 181 | return None 182 | 183 | async def get_code_sample_for_github_model(publisher_name: str, model_name: str, ctx: Context) -> str: 184 | headers = get_client_headers_info(ctx) 185 | try: 186 | response = requests.get(f"{labs_api_url}/resources/resource/gh_guidance.md", headers=headers) 187 | if response.status_code != 200: 188 | return f"Error fetching projects from API: {response.status_code}" 189 | guidance = response.json() 190 | GH_GUIDANCE = guidance["resource"]["content"] 191 | guidance = GH_GUIDANCE.replace("{{inference_model_name}}", f"{publisher_name}/{model_name}") 192 | return guidance 193 | except Exception as e: 194 | logger.error(f"Exception in get_code_sample_for_github_model: {e}") 195 | return f"Exception: {e}" 196 | 197 | async def get_code_sample_for_labs_model(model_name: str, ctx: Context) -> str: 198 | headers = get_client_headers_info(ctx) 199 | try: 200 | response = requests.get(f"{labs_api_url}/projects/{model_name}/implementation", headers=headers) 201 | if response.status_code != 200: 202 | return f"Error fetching projects from API: {response.status_code}" 203 | project_response = response.json() 204 | return project_response['project'] 205 | except Exception as e: 206 | logger.error(f"Exception in get_code_sample_for_labs_model: {e}") 207 | return f"Exception: {e}" 208 | 209 | def get_cognitiveservices_client( 210 | subscription_id: str, 211 | ) -> CognitiveServicesManagementClient: 212 | return CognitiveServicesManagementClient( 213 | credential=DefaultAzureCredential(), subscription_id=subscription_id 214 | ) 215 | 216 | def get_code_sample_for_deployment_under_ai_services(model_name:str, inference_task: str, endpoint: str, deployment_name: str) -> Optional[str]: 217 | """Get a code snippet for a specific deployment. 218 | 219 | This function is used to get code examples and implementation instructions for deploying models in Azure AI Services, helping users understand how to integrate and use the models effectively in their applications. 220 | 221 | Args: 222 | deployment_name: The name of the deployment. 223 | model_name: The name of the model. 224 | endpoint_name: The Azure OpenAI endpoint. 225 | 226 | Returns: 227 | str: A rendered code snippet demonstrating usage of the deployment. 
228 | """ 229 | 230 | template_response = requests.get( 231 | f"https://ai.azure.com/modelcache/code2/oai-sdk-key-auth/en/{inference_task}-python-template.md", 232 | ) 233 | 234 | if not template_response.ok: 235 | logger.error(f"Error fetching template: {template_response.status_code}") 236 | return None 237 | 238 | ejs_template = template_response.text 239 | try: 240 | model_template_config = ( 241 | requests.get( 242 | f"https://ai.azure.com/modelcache/widgets/en/Serverless/azure-openai/{model_name}.json" 243 | ) 244 | ).json() 245 | except Exception as e: 246 | logger.error(f"Error fetching model template config: {e}") 247 | return None 248 | 249 | naive_jinja2_template = re.sub(r"<%=\s+([\w\.]+)\s%>", r"{{ \1|e }}", ejs_template) 250 | 251 | env = SandboxedEnvironment() 252 | 253 | template = env.from_string(naive_jinja2_template) 254 | 255 | additionalParameters = {} 256 | 257 | if inference_task == "chat-completion": 258 | example_content = [ 259 | history["content"] 260 | for history in model_template_config[0]["config"]["examples"][0]["chatHistory"] 261 | ] 262 | additionalParameters = { 263 | "example": { 264 | "example_1": example_content[0], 265 | "example_2": example_content[1], 266 | "example_3": example_content[2], 267 | }, 268 | } 269 | elif inference_task == "embeddings": 270 | additionalParameters = { 271 | "example": { 272 | "input": Markup(', '.join(map(repr,model_template_config[0]["config"]["examples"][0]["jsonInput"]["input"]))) 273 | } 274 | } 275 | 276 | return template.render( 277 | **{ 278 | "endpointUrl": endpoint, 279 | "deploymentName": deployment_name, 280 | "modelName": model_name, 281 | **additionalParameters 282 | } 283 | ) 284 | 285 | 286 | async def get_ai_services_usage_list(ctx: Context) -> str: 287 | """ 288 | Retrieves a list of usage examples for Azure AI Services. 289 | 290 | This function is used to get examples of how to use Azure AI Services, helping users understand the various applications and use cases for the services. 291 | 292 | Returns: 293 | str: A string containing the usage examples for Azure AI Services. 294 | """ 295 | 296 | headers = get_client_headers_info(ctx) 297 | 298 | pass 299 | 300 | def az(*args: str) -> dict: 301 | """Run azure-cli and return output 302 | 303 | :param str *args: The command line arguments to provide to git 304 | :returns: The standard output of the git command. 
305 |     :rtype: dict
306 |     """
307 |     output = subprocess.run(
308 |         [sys.executable, "-m", "azure.cli", *args, "-o", "json"],  # run the Azure CLI via the current interpreter and request JSON output
309 |         text=True,
310 |         capture_output=True,
311 |         check=True,
312 |     ).stdout.strip()
313 | 
314 |     return json.loads(output)
315 | 
316 | def deploy_inline_bicep_template(
317 |     subscription_id: str, resource_group: str, bicep_template: str
318 | ):
319 |     """Deploy a Bicep template provided as a string."""
320 |     with tempfile.NamedTemporaryFile(suffix=".bicep") as tmp:
321 |         Path(tmp.name).write_text(bicep_template, encoding="utf-8")
322 |         try:
323 |             return az(
324 |                 "deployment",
325 |                 "group",
326 |                 "create",
327 |                 "--subscription",
328 |                 subscription_id,
329 |                 "--resource-group",
330 |                 resource_group,
331 |                 "--template-file",
332 |                 tmp.name,
333 |             )
334 |         except subprocess.CalledProcessError as e:
335 |             logger.exception(e.stderr + e.stdout)
336 |             raise
--------------------------------------------------------------------------------
/src/mcp_foundry/mcp_server.py:
--------------------------------------------------------------------------------
1 | import importlib
2 | import os
3 | import logging
4 | import sys
5 | from mcp.server.fastmcp import FastMCP
6 | 
7 | mcp = FastMCP("azure-ai-foundry-mcp-server")
8 | 
9 | # Configure logging
10 | logging.basicConfig(
11 |     level=logging.INFO,
12 |     format="%(asctime)s - %(name)s - %(levelname)s - %(message)s",
13 |     stream=sys.stderr,
14 | )
15 | logger = logging.getLogger("mcp_server")
16 | 
17 | def auto_import_modules(base_package: str, targets: list[str]):
18 |     """
19 |     Automatically imports specified Python modules (e.g., tools.py, resources.py, prompts.py)
20 |     from each subpackage of base_package.
21 |     """
22 |     package = importlib.import_module(base_package)
23 |     package_path = package.__path__[0]
24 | 
25 |     for submodule in os.listdir(package_path):
26 |         sub_path = os.path.join(package_path, submodule)
27 | 
28 |         if not os.path.isdir(sub_path) or submodule.startswith("__"):
29 |             continue
30 | 
31 |         for target in targets:
32 |             module_name = f"{base_package}.{submodule}.{target}"
33 |             try:
34 |                 importlib.import_module(module_name)
35 |                 logger.info(f"✅ Imported: {module_name}")
36 |             except ModuleNotFoundError:
37 |                 logger.warning(f"⚠️ Skipping {module_name} (not found)")
38 |             except Exception as e:
39 |                 logger.error(f"❌ Error importing {module_name}: {e}")
40 | 
41 | 
--------------------------------------------------------------------------------
/test_evaluator_requirements.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/azure-ai-foundry/mcp-foundry/af7ac18108edd8313c94812cf24b294c4e1bf4ea/test_evaluator_requirements.py
--------------------------------------------------------------------------------
/tests/__init__.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/azure-ai-foundry/mcp-foundry/af7ac18108edd8313c94812cf24b294c4e1bf4ea/tests/__init__.py
--------------------------------------------------------------------------------
/tests/conftest.py:
--------------------------------------------------------------------------------
1 | import pytest
2 | 
3 | def pytest_configure(config):
4 |     config.addinivalue_line("markers", "integration: mark test as integration")
5 | 
6 | def pytest_collection_modifyitems(config, items):
7 |     run_integration = config.getoption("--runintegration")
8 | 
9 |     if run_integration:
10 |         return  # allow all tests
11 | 
12 |     skip_marker = pytest.mark.skip(reason="Skipped integration test (use --runintegration to enable)")
13 |     for item in items:
14 |         if "integration" in item.keywords:
15 |             item.add_marker(skip_marker)
16 | 
17 | def pytest_addoption(parser):
18 |     parser.addoption(
19 |         "--runintegration", action="store_true", default=False, help="Run integration tests"
20 |     )
21 | 
--------------------------------------------------------------------------------
/tests/pytest.ini:
--------------------------------------------------------------------------------
1 | [pytest]
2 | markers =
3 |     integration: marks tests as slow or requiring external services (deselect with '-m "not integration"')
--------------------------------------------------------------------------------
/tests/test_mcp.py:
--------------------------------------------------------------------------------
1 | import pytest
2 | from mcp import ClientSession, StdioServerParameters
3 | from mcp.client.stdio import stdio_client
4 | 
5 | @pytest.mark.integration
6 | @pytest.mark.asyncio
7 | async def test_mcp_client_lists_tools():
8 |     server_params = StdioServerParameters(
9 |         command="pipx",
10 |         args=["run", "--no-cache", "--spec", "..", "run-azure-foundry-mcp"],
11 |     )
12 | 
13 |     async with stdio_client(server_params) as (stdio, write):
14 |         async with ClientSession(stdio, write) as session:
15 |             await session.initialize()
16 |             response = await session.list_tools()
17 |             tools = response.tools
18 |             assert tools, "Expected at least one tool from the MCP server"
19 | 
20 | 
21 | # TODO: Add tools that take prompts and test that the correct tool(s) are selected
22 | # TODO: Find way to only create client once per test module or make it faster
23 | # TODO: Add LLM to client
24 | ## TODO: Make LLM easily configurable
25 | ## TODO: Make it so we can test against multiple LLMs
26 | 
--------------------------------------------------------------------------------
/tests/test_mcp_foundry_tool.py:
--------------------------------------------------------------------------------
1 | import pytest
2 | from mcp_foundry.mcp_foundry_model.models import ModelDetails, DeploymentOption
3 | from mcp_foundry.mcp_foundry_model.tools import get_model_details_and_code_samples
4 | 
5 | def _mock_ctx():
6 |     """Mock context for testing."""
7 |     class MockContext:
8 |         def __init__(self):
9 |             self.session = MockSession()
10 |     class MockSession:
11 |         def __init__(self):
12 |             self._client_params = MockClientParams()
13 |     class MockClientParams:
14 |         def __init__(self):
15 |             self.clientInfo = MockClientInfo()
16 |     class MockClientInfo:
17 |         def __init__(self):
18 |             self.name = "TestClient"
19 |             self.version = "1.0.0"
20 |     return MockContext()
21 | 
22 | @pytest.mark.asyncio
23 | async def test_get_model_details_from_model_catalog_gh_model():
24 |     mock_ctx = _mock_ctx()
25 |     models = await get_model_details_and_code_samples('o3', mock_ctx)
26 |     assert isinstance(models, ModelDetails)
27 |     assert models.type == DeploymentOption.FREE_PLAYGROUND
28 | 
29 | 
30 | @pytest.mark.asyncio
31 | async def test_get_model_details_from_model_catalog_gh_model_2():
32 |     mock_ctx = _mock_ctx()
33 |     models = await get_model_details_and_code_samples('Phi-4-reasoning', mock_ctx)
34 |     assert isinstance(models, ModelDetails)
35 |     assert models.type == DeploymentOption.FREE_PLAYGROUND
36 | 
37 | @pytest.mark.asyncio
38 | async def test_get_model_details_from_model_catalog_labs_model():
39 |     mock_ctx = _mock_ctx()
40 |     models = await get_model_details_and_code_samples('omniparserv2', mock_ctx)
41 |     assert isinstance(models, ModelDetails)
42 |     assert models.type == DeploymentOption.LABS
43 | 
44 | @pytest.mark.asyncio
45 | async def test_get_model_details_from_paid_openai_model():
46 |     mock_ctx = _mock_ctx()
47 |     models = await get_model_details_and_code_samples('gpt-image-1', mock_ctx)
48 |     assert isinstance(models, ModelDetails)
49 |     assert models.type == DeploymentOption.OPENAI
50 | 
51 | @pytest.mark.asyncio
52 | async def test_get_model_details_from_standard_paygo_model():
53 |     mock_ctx = _mock_ctx()
54 |     models = await get_model_details_and_code_samples('BioEmu', mock_ctx)
55 |     assert isinstance(models, ModelDetails)
56 |     assert models.type == DeploymentOption.SERVERLESS_ENDPOINT
57 | 
58 | @pytest.mark.asyncio
59 | async def test_get_model_details_from_model_catalog_unsupported_model():
60 |     mock_ctx = _mock_ctx()
61 |     models = await get_model_details_and_code_samples('unsupported_model', mock_ctx)
62 |     assert models == "Model 'unsupported_model' not found in the catalog."
--------------------------------------------------------------------------------
/tests/test_utils.py:
--------------------------------------------------------------------------------
1 | import pytest
2 | from mcp_foundry.mcp_foundry_model.models import ModelsList
3 | from mcp_foundry.mcp_foundry_model.utils import get_models_list
4 | 
5 | def _mock_ctx():
6 |     """Mock context for testing."""
7 |     class MockContext:
8 |         def __init__(self):
9 |             self.session = MockSession()
10 |     class MockSession:
11 |         def __init__(self):
12 |             self._client_params = MockClientParams()
13 |     class MockClientParams:
14 |         def __init__(self):
15 |             self.clientInfo = MockClientInfo()
16 |     class MockClientInfo:
17 |         def __init__(self):
18 |             self.name = "TestClient"
19 |             self.version = "1.0.0"
20 |     return MockContext()
21 | 
22 | def test_get_models_list_no_filters():
23 |     mock_ctx = _mock_ctx()
24 |     models = get_models_list(mock_ctx)
25 |     assert isinstance(models, ModelsList)
26 | 
27 | def test_get_models_list_free_playground():
28 |     mock_ctx = _mock_ctx()
29 |     models = get_models_list(mock_ctx, search_for_free_playground=True)
30 |     assert isinstance(models, ModelsList)
--------------------------------------------------------------------------------
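
The EJS-to-Jinja2 rewrite performed by get_code_sample_for_deployment_under_ai_services can be seen in isolation with a minimal sketch. The template string, endpoint, and deployment name below are made-up placeholders (the real templates come from ai.azure.com/modelcache); only the jinja2 package, which the helper already relies on, is assumed.

import re
from jinja2.sandbox import SandboxedEnvironment

# A made-up EJS snippet standing in for the templates fetched from ai.azure.com/modelcache.
ejs_template = (
    'client = AzureOpenAI(azure_endpoint="<%= endpointUrl %>")\n'
    'response = client.chat.completions.create(model="<%= deploymentName %>", messages=[...])'
)

# Same rewrite as in the helper: EJS "<%= name %>" tags become escaped Jinja2 expressions.
naive_jinja2_template = re.sub(r"<%=\s+([\w\.]+)\s%>", r"{{ \1|e }}", ejs_template)

# Render in a sandboxed environment, as the helper does; the values here are placeholders.
env = SandboxedEnvironment()
snippet = env.from_string(naive_jinja2_template).render(
    endpointUrl="https://example-resource.openai.azure.com/",
    deploymentName="my-gpt-4o-deployment",
)
print(snippet)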