├── tests
│   ├── setup.ts
│   ├── basic.test.ts
│   ├── config.test.ts
│   ├── server.test.ts
│   ├── integration.test.ts
│   ├── fabric-client-simple.test.ts
│   ├── server.test.ts.disabled
│   ├── package.json
│   ├── basic.test.js
│   ├── basic.test.ts.disabled
│   ├── integration.test.ts.disabled
│   ├── config.test.ts.disabled
│   ├── fabric-client.test.ts
│   └── fabric-client.test.ts.disabled
├── find-deployment.mjs
├── .github
│   └── copilot-instructions.md
├── github-copilot-msal-config.json
├── python-wrapper
│   ├── fabric_analytics_mcp.egg-info
│   │   ├── not-zip-safe
│   │   ├── dependency_links.txt
│   │   ├── top_level.txt
│   │   ├── entry_points.txt
│   │   ├── requires.txt
│   │   └── SOURCES.txt
│   ├── MANIFEST.in
│   ├── fabric_analytics_mcp
│   │   ├── __init__.py
│   │   └── server_manager.py
│   ├── fabric_analytics_mcp-1.0.0
│   │   └── fabric_analytics_mcp
│   │       └── __init__.py
│   ├── setup.py
│   ├── build_package.py
│   └── README.md
├── commitlint.config.json
├── babel.config.json
├── monitoring
│   ├── grafana
│   │   ├── dashboards
│   │   │   └── dashboard.yml
│   │   └── datasources
│   │       └── prometheus.yml
│   └── prometheus.yml
├── .vscode
│   ├── tasks.json
│   ├── mcp.json
│   └── launch.json
├── claude-desktop-config.json
├── claude-desktop-config.template.json
├── azure-openai-config.json
├── requirements.txt
├── vs-code-settings.json
├── tsconfig.json
├── jest.config.json
├── eslint.config.json
├── k8s
│   ├── configmap.yaml
│   ├── namespace.yaml
│   ├── service.yaml
│   ├── secret.yaml
│   ├── hpa.yaml
│   ├── ingress.yaml
│   ├── rbac.yaml
│   └── deployment.yaml
├── github-copilot-mcp-config.json
├── LICENSE
├── SETUP_COMPLETE.md
├── count-tools.mjs
├── Dockerfile
├── sample-notebook.ipynb
├── eslint.config.js
├── package.json
├── validate-config.js
├── SECURITY.md
├── .gitignore
├── config.template.json
├── final-check.js
├── scripts
│   ├── DOCKER_INSTALL.md
│   ├── test-azure-cli-auth.js
│   ├── build-and-push.sh
│   └── install-unix.sh
├── src
│   ├── migration
│   │   ├── types.ts
│   │   ├── capacity-tools.ts
│   │   └── spark-pool-tools.ts
│   ├── capacity-tools.ts
│   └── azure-openai-analyzer.ts
├── docker-compose.yml
├── docs
│   ├── WORKSPACE_MANAGEMENT.md
│   └── AZURE_CLI_AUTH.md
├── CONTRIBUTING.md
├── test-before-push.ps1
├── AUTHENTICATION_SETUP.md
└── MCP_TOOLS_USER_GUIDE.md

/tests/setup.ts:
--------------------------------------------------------------------------------

--------------------------------------------------------------------------------
/find-deployment.mjs:
--------------------------------------------------------------------------------

--------------------------------------------------------------------------------
/tests/basic.test.ts:
--------------------------------------------------------------------------------

--------------------------------------------------------------------------------
/tests/config.test.ts:
--------------------------------------------------------------------------------

--------------------------------------------------------------------------------
/tests/server.test.ts:
--------------------------------------------------------------------------------

--------------------------------------------------------------------------------
/tests/integration.test.ts:
--------------------------------------------------------------------------------

--------------------------------------------------------------------------------
/.github/copilot-instructions.md:
--------------------------------------------------------------------------------

--------------------------------------------------------------------------------
/github-copilot-msal-config.json:
--------------------------------------------------------------------------------

--------------------------------------------------------------------------------
/tests/fabric-client-simple.test.ts:
--------------------------------------------------------------------------------

--------------------------------------------------------------------------------
/python-wrapper/fabric_analytics_mcp.egg-info/not-zip-safe:
--------------------------------------------------------------------------------

--------------------------------------------------------------------------------
/python-wrapper/fabric_analytics_mcp.egg-info/dependency_links.txt:
--------------------------------------------------------------------------------

--------------------------------------------------------------------------------
/python-wrapper/fabric_analytics_mcp.egg-info/top_level.txt:
--------------------------------------------------------------------------------
fabric_analytics_mcp
--------------------------------------------------------------------------------
/commitlint.config.json:
--------------------------------------------------------------------------------
{
  "extends": ["@commitlint/config-conventional"]
}
--------------------------------------------------------------------------------
/babel.config.json:
--------------------------------------------------------------------------------
{
  "presets": [
    ["@babel/preset-env", {
      "targets": {
        "node": "18"
      }
    }]
  ]
}
--------------------------------------------------------------------------------
/python-wrapper/fabric_analytics_mcp.egg-info/entry_points.txt:
--------------------------------------------------------------------------------
[console_scripts]
fabric-analytics-mcp = fabric_analytics_mcp.cli:main
fabric-mcp = fabric_analytics_mcp.cli:main
--------------------------------------------------------------------------------
/python-wrapper/fabric_analytics_mcp.egg-info/requires.txt:
--------------------------------------------------------------------------------
click>=8.0.0
requests>=2.25.0
packaging>=20.0
setuptools>=40.0

[async]
aiohttp>=3.8.0
asyncio>=3.4.3

[dev]
pytest>=6.0
pytest-cov>=2.0
black>=21.0
flake8>=3.8
mypy>=0.800
--------------------------------------------------------------------------------
/monitoring/grafana/dashboards/dashboard.yml:
--------------------------------------------------------------------------------
apiVersion: 1

providers:
  - name: 'default'
    orgId: 1
    folder: ''
    type: file
    disableDeletion: false
    updateIntervalSeconds: 10
    allowUiUpdates: true
    options:
      path: /etc/grafana/provisioning/dashboards
--------------------------------------------------------------------------------
/.vscode/tasks.json:
--------------------------------------------------------------------------------
{
  "version": "2.0.0",
  "tasks": [
    {
      "label": "Build and Run MCP Server",
      "type": "shell",
      "command": "npm",
      "args": [
        "run",
        "build"
      ],
      "group": "build",
      "problemMatcher": [
        "$tsc"
      ],
      "isBackground": false
    }
  ]
}
--------------------------------------------------------------------------------
["C:\\Users\\saravi\\Fabric-Analytics-MCP\\build\\index.js"], 7 | "cwd": "C:\\Users\\saravi\\Fabric-Analytics-MCP", 8 | "env": { 9 | "FABRIC_AUTH_METHOD": "azure_cli", 10 | "PATH": "${env:PATH}" 11 | } 12 | } 13 | } 14 | } 15 | -------------------------------------------------------------------------------- /monitoring/grafana/datasources/prometheus.yml: -------------------------------------------------------------------------------- 1 | apiVersion: 1 2 | 3 | datasources: 4 | - name: Prometheus 5 | type: prometheus 6 | access: proxy 7 | orgId: 1 8 | url: http://prometheus:9090 9 | basicAuth: false 10 | isDefault: true 11 | version: 1 12 | editable: true 13 | jsonData: 14 | httpMethod: POST 15 | queryTimeout: 60s 16 | timeInterval: 15s 17 | -------------------------------------------------------------------------------- /claude-desktop-config.json: -------------------------------------------------------------------------------- 1 | { 2 | "mcpServers": { 3 | "fabric-analytics": { 4 | "command": "node", 5 | "args": [ 6 | "C:\\Users\\saravi\\Fabric-Analytics-MCP\\build\\index.js" 7 | ], 8 | "cwd": "C:\\Users\\saravi\\Fabric-Analytics-MCP", 9 | "env": { 10 | "NODE_ENV": "production", 11 | "FABRIC_AUTH_METHOD": "azure-cli" 12 | } 13 | } 14 | } 15 | } -------------------------------------------------------------------------------- /claude-desktop-config.template.json: -------------------------------------------------------------------------------- 1 | { 2 | "mcpServers": { 3 | "fabric-analytics": { 4 | "command": "node", 5 | "args": [ 6 | "/absolute/path/to/Fabric-Analytics-MCP/build/index.js" 7 | ], 8 | "cwd": "/absolute/path/to/Fabric-Analytics-MCP", 9 | "env": { 10 | "NODE_ENV": "production", 11 | "FABRIC_AUTH_METHOD": "azure-cli" 12 | } 13 | } 14 | } 15 | } 16 | -------------------------------------------------------------------------------- /python-wrapper/MANIFEST.in: -------------------------------------------------------------------------------- 1 | # Manifest file for including additional files in the Python package 2 | include README.md 3 | include LICENSE 4 | include CONTRIBUTING.md 5 | include SECURITY.md 6 | recursive-include fabric_analytics_mcp *.py 7 | recursive-include fabric_analytics_mcp/server *.js *.json *.md 8 | recursive-exclude fabric_analytics_mcp/server/node_modules * 9 | global-exclude __pycache__ 10 | global-exclude *.py[co] 11 | global-exclude .DS_Store 12 | global-exclude *.so 13 | global-exclude .git* 14 | -------------------------------------------------------------------------------- /azure-openai-config.json: -------------------------------------------------------------------------------- 1 | { 2 | "azureOpenAI": { 3 | "apiKey": "YOUR_AZURE_OPENAI_API_KEY", 4 | "endpoint": "https://your-azure-openai-endpoint.cognitiveservices.azure.com/", 5 | "apiVersion": "2025-01-01-preview", 6 | "deploymentName": "YOUR_DEPLOYMENT_NAME", 7 | "model": "gpt-4", 8 | "maxTokens": 2000, 9 | "temperature": 0.3 10 | }, 11 | "analysis": { 12 | "enableLLMAnalysis": true, 13 | "analysisTypes": ["performance", "optimization", "recommendations", "error_patterns"] 14 | } 15 | } 16 | -------------------------------------------------------------------------------- /python-wrapper/fabric_analytics_mcp.egg-info/SOURCES.txt: -------------------------------------------------------------------------------- 1 | MANIFEST.in 2 | README.md 3 | setup.py 4 | fabric_analytics_mcp/__init__.py 5 | fabric_analytics_mcp/cli.py 6 | fabric_analytics_mcp/server_manager.py 7 | 
/requirements.txt:
--------------------------------------------------------------------------------
# Microsoft Fabric Analytics MCP Server - Python Dependencies
# Install with: pip install -r requirements.txt

# Core dependencies
requests>=2.31.0
msal>=1.25.0

# Optional dependencies for enhanced functionality
jupyter>=1.0.0
pandas>=2.0.0
numpy>=1.24.0

# Development and testing
pytest>=7.4.0
black>=23.0.0
flake8>=6.0.0
mypy>=1.5.0

# Additional utilities
python-dotenv>=1.0.0
pyyaml>=6.0.0
tabulate>=0.9.0
colorama>=0.4.6
rich>=13.0.0
--------------------------------------------------------------------------------
/vs-code-settings.json:
--------------------------------------------------------------------------------
{
  "github.copilot.mcp.servers": {
    "fabric-analytics": {
      "command": "node",
      "args": [
        "C:\\Users\\saravi\\Fabric-Analytics-MCP\\build\\index.js"
      ],
      "cwd": "C:\\Users\\saravi\\Fabric-Analytics-MCP",
      "env": {
        "FABRIC_AUTH_METHOD": "azure-cli",
        "NODE_ENV": "production"
      }
    }
  },
  "github.copilot.enable": {
    "*": true,
    "plaintext": true,
    "markdown": true,
    "json": true,
    "typescript": true,
    "javascript": true
  }
}
--------------------------------------------------------------------------------
/tsconfig.json:
--------------------------------------------------------------------------------
{
  "compilerOptions": {
    "target": "ES2020",
    "lib": ["ES2020", "DOM"],
    "module": "ESNext",
    "moduleResolution": "Node",
    "outDir": "./build",
    "rootDir": "./src",
    "strict": true,
    "esModuleInterop": true,
    "allowSyntheticDefaultImports": true,
    "skipLibCheck": true,
    "forceConsistentCasingInFileNames": true,
    "resolveJsonModule": true,
    "declaration": true,
    "sourceMap": true,
    "isolatedModules": true,
    "types": ["node", "jest"]
  },
  "include": ["src/**/*.ts"],
  "exclude": ["node_modules", "build", "src/index_*.ts"]
}
--------------------------------------------------------------------------------
/jest.config.json:
--------------------------------------------------------------------------------
{
  "preset": "ts-jest/presets/default-esm",
  "testEnvironment": "node",
  "roots": ["<rootDir>/tests"],
  "testMatch": [
    "**/tests/**/*.test.ts",
    "**/tests/**/*.test.js"
  ],
  "transform": {
    "^.+\\.ts$": ["ts-jest", {
      "useESM": true,
      "tsconfig": {
        "module": "ESNext",
        "moduleResolution": "node"
      }
    }]
  },
  "moduleFileExtensions": ["ts", "js", "json"],
  "moduleNameMapper": {
    "^(\\.{1,2}/.*)\\.js$": "$1"
  },
  "extensionsToTreatAsEsm": [".ts"],
  "collectCoverageFrom": [
    "src/**/*.ts",
    "!src/**/*.d.ts"
  ],
  "coverageDirectory": "coverage",
  "testTimeout": 10000
}
--------------------------------------------------------------------------------
/eslint.config.json:
--------------------------------------------------------------------------------
{
  "parser": "@typescript-eslint/parser",
  "extends": [
    "eslint:recommended",
    "@typescript-eslint/recommended"
  ],
  "parserOptions": {
    "ecmaVersion": 2020,
    "sourceType": "module",
    "project": "./tsconfig.json"
  },
  "rules": {
    "@typescript-eslint/no-unused-vars": ["error", { "argsIgnorePattern": "^_" }],
    "@typescript-eslint/no-explicit-any": "warn",
    "@typescript-eslint/explicit-function-return-type": "off",
    "@typescript-eslint/explicit-module-boundary-types": "off",
    "@typescript-eslint/no-inferrable-types": "off",
    "prefer-const": "error",
    "no-var": "error"
  },
  "env": {
    "node": true,
    "es6": true
  }
}
--------------------------------------------------------------------------------
/python-wrapper/fabric_analytics_mcp/__init__.py:
--------------------------------------------------------------------------------
"""
Microsoft Fabric Analytics MCP Server Python Package

This package provides a Python wrapper around the Node.js-based Microsoft Fabric Analytics
MCP Server, enabling easy installation and use with any MCP-compatible client like Claude,
GitHub Copilot, or other AI assistants.

Features:
- 41+ Microsoft Fabric analytics tools
- Workspace management and discovery
- Spark job monitoring and analysis
- Notebook execution and management
- Livy session management
- Comprehensive authentication support
- Easy PyPI installation: pip install fabric-analytics-mcp
"""

from .cli import main
from .server_manager import FabricMCPServer

__version__ = "1.0.0"
__author__ = "Microsoft Fabric Analytics Community"

__all__ = ["main", "FabricMCPServer"]
--------------------------------------------------------------------------------
/python-wrapper/fabric_analytics_mcp-1.0.0/fabric_analytics_mcp/__init__.py:
--------------------------------------------------------------------------------
"""
Microsoft Fabric Analytics MCP Server Python Package

This package provides a Python wrapper around the Node.js-based Microsoft Fabric Analytics
MCP Server, enabling easy installation and use with any MCP-compatible client like Claude,
GitHub Copilot, or other AI assistants.

Features:
- 41+ Microsoft Fabric analytics tools
- Workspace management and discovery
- Spark job monitoring and analysis
- Notebook execution and management
- Livy session management
- Comprehensive authentication support
- Easy PyPI installation: pip install fabric-analytics-mcp
"""

from .cli import main
from .server_manager import FabricMCPServer

__version__ = "1.0.0"
__author__ = "Microsoft Fabric Analytics Community"

__all__ = ["main", "FabricMCPServer"]
--------------------------------------------------------------------------------
/.vscode/launch.json:
--------------------------------------------------------------------------------
{
  "version": "0.2.0",
  "configurations": [
    {
      "name": "Debug MCP Server",
      "type": "node",
      "request": "launch",
      "program": "${workspaceFolder}/build/index.js",
      "console": "integratedTerminal",
      "outFiles": ["${workspaceFolder}/build/**/*.js"],
      "sourceMaps": true,
      "skipFiles": ["<node_internals>/**"],
      "env": {
        "NODE_ENV": "development"
      }
    },
    {
      "name": "Build and Debug",
      "type": "node",
      "request": "launch",
      "program": "${workspaceFolder}/build/index.js",
      "console": "integratedTerminal",
      "outFiles": ["${workspaceFolder}/build/**/*.js"],
      "sourceMaps": true,
      "skipFiles": ["<node_internals>/**"],
      "preLaunchTask": "Build and Run MCP Server"
    }
  ]
}
--------------------------------------------------------------------------------
/tests/server.test.ts.disabled:
--------------------------------------------------------------------------------
import { describe, it, expect } from '@jest/globals';
import * as fs from 'fs';
import * as path from 'path';

describe('Server Configuration', () => {
  it('should have required configuration files', () => {
    const configFiles = [
      'tsconfig.json',
      'package.json'
    ];

    configFiles.forEach(file => {
      const filePath = path.resolve(__dirname, '..', file);
      expect(fs.existsSync(filePath)).toBe(true);
    });
  });

  it('should validate package.json structure', () => {
    const packagePath = path.resolve(__dirname, '..', 'package.json');
    const packageJson = JSON.parse(fs.readFileSync(packagePath, 'utf8'));

    expect(packageJson.name).toBe('mcp-for-microsoft-fabric-analytics');
    expect(packageJson.dependencies).toBeDefined();
    expect(packageJson.scripts).toBeDefined();
  });
});
--------------------------------------------------------------------------------
/tests/package.json:
--------------------------------------------------------------------------------
{
  "name": "fabric-mcp-tests",
  "version": "1.0.0",
  "description": "Test suite for Microsoft Fabric Analytics MCP Server",
  "type": "module",
  "scripts": {
    "test": "jest",
    "test:watch": "jest --watch",
    "test:coverage": "jest --coverage",
    "lint": "eslint . --ext .ts,.js",
    "lint:fix": "eslint . --ext .ts,.js --fix"
  },
  "devDependencies": {
    "@types/jest": "^29.5.0",
    "@typescript-eslint/eslint-plugin": "^6.0.0",
    "@typescript-eslint/parser": "^6.0.0",
    "eslint": "^8.40.0",
    "jest": "^29.5.0",
    "ts-jest": "^29.1.0",
    "typescript": "^5.0.0"
  },
  "jest": {
    "preset": "ts-jest",
    "testEnvironment": "node",
    "extensionsToTreatAsEsm": [".ts"],
    "moduleNameMapper": {
      "^(\\.{1,2}/.*)\\.js$": "$1"
    },
    "transform": {
      "^.+\\.tsx?$": ["ts-jest", {
        "useESM": true
      }]
    }
  }
}
--------------------------------------------------------------------------------
/k8s/configmap.yaml:
--------------------------------------------------------------------------------
# ConfigMap for Microsoft Fabric Analytics MCP Server Configuration
apiVersion: v1
kind: ConfigMap
metadata:
  name: fabric-mcp-config
  namespace: fabric-mcp
  labels:
    app: fabric-analytics-mcp
data:
  # Application configuration
  NODE_ENV: "production"
  PORT: "3000"
  LOG_LEVEL: "info"

  # MCP Server configuration
  MCP_SERVER_NAME: "fabric-analytics"
  MCP_SERVER_VERSION: "1.0.0"

  # Microsoft Fabric API configuration
  FABRIC_API_BASE_URL: "https://api.fabric.microsoft.com"
  FABRIC_API_VERSION: "v1"

  # Performance and scaling configuration
  MAX_CONCURRENT_REQUESTS: "100"
  REQUEST_TIMEOUT: "30000"
  ENABLE_REQUEST_LOGGING: "true"

  # Health check configuration
  HEALTH_CHECK_ENDPOINT: "/health"
  READINESS_CHECK_ENDPOINT: "/ready"

  # Monitoring configuration
  ENABLE_METRICS: "true"
  METRICS_ENDPOINT: "/metrics"

  # Security configuration
  ENABLE_CORS: "true"
  CORS_ORIGIN: "*"
  ENABLE_HELMET: "true"
--------------------------------------------------------------------------------
/github-copilot-mcp-config.json:
--------------------------------------------------------------------------------
{
  "description": "GitHub Copilot MCP configuration for Fabric Analytics Server",
  "vscode_settings": {
    "github.copilot.mcp.servers": {
      "fabric-analytics": {
        "command": "node",
        "args": [
          "C:\\Users\\saravi\\Fabric-Analytics-MCP\\build\\index.js"
        ],
        "cwd": "C:\\Users\\saravi\\Fabric-Analytics-MCP",
        "env": {
          "NODE_ENV": "production",
          "FABRIC_AUTH_METHOD": "azure-cli"
        }
      }
    }
  },
  "usage_instructions": {
    "setup": "Add the 'github.copilot.mcp.servers' section to your VS Code settings.json",
    "authentication": "Ensure you're logged into Azure CLI with 'az login'",
    "test_workspace": "c22f6805-d84a-4143-80b2-0c9e9832e5a2",
    "example_prompts": [
      "List all items in my Fabric workspace c22f6805-d84a-4143-80b2-0c9e9832e5a2",
      "Show me the latest job runs from my workspace",
      "Create a new notebook in the workspace",
      "Start a Spark session for data analysis"
    ]
  }
}
--------------------------------------------------------------------------------
/LICENSE:
--------------------------------------------------------------------------------
MIT License

Copyright (c) 2025 Microsoft Fabric Analytics MCP Server

Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:

The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.

THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
--------------------------------------------------------------------------------
/SETUP_COMPLETE.md:
--------------------------------------------------------------------------------
## 🔧 **Configuration Instructions**

To complete the setup, you need to:

1. **Get your Fabric Workspace ID:**
   - Go to your Microsoft Fabric workspace
   - Copy the workspace ID from the URL or workspace settings

2. **Update the configuration:**
   - Edit: `%APPDATA%\Claude\claude_desktop_config.json`
   - Replace the empty `FABRIC_DEFAULT_WORKSPACE_ID` with your actual workspace ID

3. **Example configuration:**
   ```json
   {
     "mcpServers": {
       "fabric-analytics": {
         "command": "node",
         "args": ["C:\\Users\\saravi\\Fabric-Analytics-MCP\\build\\index.js"],
         "env": {
           "FABRIC_AUTH_METHOD": "azure_cli",
           "FABRIC_DEFAULT_WORKSPACE_ID": "your-workspace-id-here"
         }
       }
     }
   }
   ```

## ✅ **Ready to Test!**
Your MCP server is now configured with:
- ✅ Azure CLI authentication
- ✅ 17 comprehensive tools
- ✅ Notebook management capabilities
- ✅ Spark monitoring features
- ✅ Full API integration

**Next:** Restart Claude Desktop and start asking questions about your Microsoft Fabric workspace!
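
For orientation, here is a hypothetical TypeScript sketch of how the server side might consume these two variables; the actual logic lives in `src/index.ts` and may differ:

```typescript
// Hypothetical sketch only -- variable names match this config, not the server internals.
const authMethod = process.env.FABRIC_AUTH_METHOD ?? "azure_cli";
const workspaceId = process.env.FABRIC_DEFAULT_WORKSPACE_ID;

if (!workspaceId) {
  // Without a default workspace, each tool call must supply its own workspace ID.
  console.warn("FABRIC_DEFAULT_WORKSPACE_ID is empty - tools will need an explicit workspace ID");
}
console.log(`Authenticating via: ${authMethod}`);
```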
--------------------------------------------------------------------------------
/k8s/namespace.yaml:
--------------------------------------------------------------------------------
# Kubernetes Namespace for Microsoft Fabric Analytics MCP Server
apiVersion: v1
kind: Namespace
metadata:
  name: fabric-mcp
  labels:
    name: fabric-mcp
    app: fabric-analytics-mcp
    version: v1
  annotations:
    description: "Microsoft Fabric Analytics MCP Server namespace"
---
# Resource Quota to control resource usage
apiVersion: v1
kind: ResourceQuota
metadata:
  name: fabric-mcp-quota
  namespace: fabric-mcp
spec:
  hard:
    requests.cpu: "4"
    requests.memory: 8Gi
    limits.cpu: "8"
    limits.memory: 16Gi
    pods: "20"
    services: "5"
    persistentvolumeclaims: "2"
---
# Network Policy for security isolation
apiVersion: networking.k8s.io/v1
kind: NetworkPolicy
metadata:
  name: fabric-mcp-network-policy
  namespace: fabric-mcp
spec:
  podSelector:
    matchLabels:
      app: fabric-analytics-mcp
  policyTypes:
    - Ingress
    - Egress
  ingress:
    - from:
        - namespaceSelector:
            matchLabels:
              name: ingress-nginx
        - namespaceSelector:
            matchLabels:
              name: kube-system
      ports:
        - protocol: TCP
          port: 3000
  egress:
    - {} # Allow all egress traffic (needed for Microsoft Fabric API calls)
--------------------------------------------------------------------------------
/count-tools.mjs:
--------------------------------------------------------------------------------
#!/usr/bin/env node

// Count tools in the MCP server
import { readFileSync } from 'fs';

try {
  const indexContent = readFileSync('src/index.ts', 'utf-8');

  // Count server.tool( occurrences
  const toolMatches = indexContent.match(/server\.tool\(/g) || [];
  const toolCount = toolMatches.length;

  console.log('🔧 Tool Count Analysis');
  console.log('====================');
  console.log(`📊 Total tools found: ${toolCount}`);

  // Extract tool names
  const toolNameRegex = /server\.tool\(\s*["']([^"']+)["']/g;
  const toolNames = [];
  let match;

  while ((match = toolNameRegex.exec(indexContent)) !== null) {
    toolNames.push(match[1]);
  }

  console.log('\n📋 Tool categories:');
  const categories = {};
  toolNames.forEach(name => {
    const category = name.split('-')[0];
    categories[category] = (categories[category] || 0) + 1;
  });

  Object.entries(categories).sort().forEach(([cat, count]) => {
    console.log(`  ${cat}: ${count} tools`);
  });

  console.log('\n🎯 New Spark monitoring tools added:');
  const sparkTools = toolNames.filter(name =>
    name.includes('spark') ||
    name.includes('mcp_fabric-analyt2')
  );

  sparkTools.forEach(tool => {
    console.log(`  - ${tool}`);
  });

  console.log(`\n✅ Successfully added ${sparkTools.length} Spark monitoring tools!`);
  console.log(`🎉 Total comprehensive tools: ${toolCount}`);

} catch (error) {
  console.error('❌ Error analyzing tools:', error.message);
}
--------------------------------------------------------------------------------
/k8s/service.yaml:
--------------------------------------------------------------------------------
# Service for Microsoft Fabric Analytics MCP Server
apiVersion: v1
kind: Service
metadata:
  name: fabric-analytics-mcp-service
  namespace: fabric-mcp
  labels:
    app: fabric-analytics-mcp
  annotations:
    service.beta.kubernetes.io/azure-load-balancer-resource-group: "your-node-resource-group"
    service.beta.kubernetes.io/azure-dns-label-name: "fabric-mcp"
spec:
  type: LoadBalancer
  selector:
    app: fabric-analytics-mcp
  ports:
    - name: http
      port: 80
      targetPort: 3000
      protocol: TCP
    - name: https
      port: 443
      targetPort: 3000
      protocol: TCP
  sessionAffinity: ClientIP
  sessionAffinityConfig:
    clientIP:
      timeoutSeconds: 10800
---
# ClusterIP Service for internal communication
apiVersion: v1
kind: Service
metadata:
  name: fabric-analytics-mcp-internal
  namespace: fabric-mcp
  labels:
    app: fabric-analytics-mcp
    service-type: internal
spec:
  type: ClusterIP
  selector:
    app: fabric-analytics-mcp
  ports:
    - name: http
      port: 3000
      targetPort: 3000
      protocol: TCP
---
# Headless Service for StatefulSet support (if needed)
apiVersion: v1
kind: Service
metadata:
  name: fabric-analytics-mcp-headless
  namespace: fabric-mcp
  labels:
    app: fabric-analytics-mcp
    service-type: headless
spec:
  clusterIP: None
  selector:
    app: fabric-analytics-mcp
  ports:
    - name: http
      port: 3000
      targetPort: 3000
      protocol: TCP
--------------------------------------------------------------------------------
/Dockerfile:
--------------------------------------------------------------------------------
# Multi-stage Docker build for Microsoft Fabric Analytics MCP Server
# Stage 1: Build stage
FROM node:18-alpine AS builder

# Set working directory
WORKDIR /app

# Copy package files
COPY package*.json ./

# Install dependencies (skip prepare script to avoid premature build)
RUN npm ci --ignore-scripts

# Copy source code
COPY . .

# Build the TypeScript application
RUN npm run build

# Remove dev dependencies to reduce image size
RUN npm ci --production && npm cache clean --force

# Stage 2: Production stage
FROM node:18-alpine AS production

# Install dumb-init for proper signal handling
RUN apk add --no-cache dumb-init

# Create non-root user for security
RUN addgroup -g 1001 -S nodejs && \
    adduser -S mcp -u 1001 -G nodejs

# Set working directory
WORKDIR /app

# Copy built application from builder stage
COPY --from=builder --chown=mcp:nodejs /app/build ./build
COPY --from=builder --chown=mcp:nodejs /app/node_modules ./node_modules
COPY --from=builder --chown=mcp:nodejs /app/package*.json ./

# Create necessary directories with proper permissions
RUN mkdir -p /app/logs && \
    chown -R mcp:nodejs /app

# Switch to non-root user
USER mcp

# Set environment variables
ENV NODE_ENV=production
ENV PORT=3000

# Expose the port
EXPOSE 3000

# Health check
HEALTHCHECK --interval=30s --timeout=3s --start-period=5s --retries=3 \
    CMD node -e "require('http').get('http://localhost:3000/health', (res) => { process.exit(res.statusCode === 200 ? 0 : 1) })"

# Use dumb-init to handle signals properly
ENTRYPOINT ["dumb-init", "--"]

# Start the application
CMD ["node", "build/index.js"]
--------------------------------------------------------------------------------
0 : 1) })" 58 | 59 | # Use dumb-init to handle signals properly 60 | ENTRYPOINT ["dumb-init", "--"] 61 | 62 | # Start the application 63 | CMD ["node", "build/index.js"] 64 | -------------------------------------------------------------------------------- /sample-notebook.ipynb: -------------------------------------------------------------------------------- 1 | { 2 | "cells": [ 3 | { 4 | "cell_type": "markdown", 5 | "id": "43333c11", 6 | "metadata": {}, 7 | "source": [ 8 | "# Sample Notebook for Microsoft Fabric\n", 9 | "\n", 10 | "This is a sample notebook that demonstrates the structure supported by the Fabric Analytics MCP Server." 11 | ] 12 | }, 13 | { 14 | "cell_type": "code", 15 | "execution_count": null, 16 | "id": "342f31ff", 17 | "metadata": {}, 18 | "outputs": [], 19 | "source": [ 20 | "# Import required libraries\n", 21 | "import pandas as pd\n", 22 | "import numpy as np\n", 23 | "import matplotlib.pyplot as plt\n", 24 | "\n", 25 | "print('Welcome to Microsoft Fabric Analytics!')" 26 | ] 27 | }, 28 | { 29 | "cell_type": "code", 30 | "execution_count": null, 31 | "id": "dee21013", 32 | "metadata": {}, 33 | "outputs": [], 34 | "source": [ 35 | "# Create sample data\n", 36 | "data = {\n", 37 | " 'date': pd.date_range('2024-01-01', periods=100, freq='D'),\n", 38 | " 'value': np.random.randn(100).cumsum()\n", 39 | "}\n", 40 | "df = pd.DataFrame(data)\n", 41 | "\n", 42 | "print(f'Generated {len(df)} rows of sample data')\n", 43 | "print(df.head())" 44 | ] 45 | }, 46 | { 47 | "cell_type": "code", 48 | "execution_count": null, 49 | "id": "044115c9", 50 | "metadata": {}, 51 | "outputs": [], 52 | "source": [ 53 | "# Create visualization\n", 54 | "plt.figure(figsize=(12, 6))\n", 55 | "plt.plot(df['date'], df['value'])\n", 56 | "plt.title('Sample Time Series Data')\n", 57 | "plt.xlabel('Date')\n", 58 | "plt.ylabel('Value')\n", 59 | "plt.xticks(rotation=45)\n", 60 | "plt.tight_layout()\n", 61 | "plt.show()" 62 | ] 63 | } 64 | ], 65 | "metadata": { 66 | "language_info": { 67 | "name": "python" 68 | } 69 | }, 70 | "nbformat": 4, 71 | "nbformat_minor": 5 72 | } 73 | -------------------------------------------------------------------------------- /tests/basic.test.js: -------------------------------------------------------------------------------- 1 | describe('MCP Server', () => { 2 | test('should have working test environment', () => { 3 | expect(true).toBe(true); 4 | }); 5 | 6 | test('should validate core dependencies', () => { 7 | const fs = require('fs'); 8 | const path = require('path'); 9 | const packagePath = path.join(__dirname, '..', 'package.json'); 10 | const packageJson = JSON.parse(fs.readFileSync(packagePath, 'utf8')); 11 | 12 | expect(packageJson.name).toBe('mcp-for-microsoft-fabric-analytics'); 13 | expect(packageJson.dependencies).toBeDefined(); 14 | expect(packageJson.dependencies['zod']).toBeDefined(); 15 | expect(packageJson.dependencies['@modelcontextprotocol/sdk']).toBeDefined(); 16 | expect(packageJson.dependencies['@azure/msal-node']).toBeDefined(); 17 | }); 18 | 19 | test('should validate TypeScript source files exist', () => { 20 | const fs = require('fs'); 21 | const path = require('path'); 22 | 23 | const sourceIndexPath = path.join(__dirname, '..', 'src', 'index.ts'); 24 | expect(fs.existsSync(sourceIndexPath)).toBe(true); 25 | }); 26 | 27 | test('should validate configuration files', () => { 28 | const fs = require('fs'); 29 | const path = require('path'); 30 | 31 | const configFiles = [ 32 | '../tsconfig.json', 33 | '../package.json', 34 | '../jest.config.json' 
/eslint.config.js:
--------------------------------------------------------------------------------
import js from '@eslint/js';
import tseslint from '@typescript-eslint/eslint-plugin';
import tsparser from '@typescript-eslint/parser';

export default [
  js.configs.recommended,
  {
    files: ['**/*.ts'],
    languageOptions: {
      parser: tsparser,
      parserOptions: {
        ecmaVersion: 2022,
        sourceType: 'module'
      },
      globals: {
        // Node.js globals
        console: 'readonly',
        process: 'readonly',
        Buffer: 'readonly',
        setTimeout: 'readonly',
        clearTimeout: 'readonly',
        setInterval: 'readonly',
        clearInterval: 'readonly',
        URL: 'readonly',
        URLSearchParams: 'readonly',
        fetch: 'readonly',
        AbortController: 'readonly',
        // Additional Node.js globals
        __dirname: 'readonly',
        __filename: 'readonly',
        module: 'readonly',
        require: 'readonly',
        exports: 'readonly',
        global: 'readonly'
      }
    },
    plugins: {
      '@typescript-eslint': tseslint
    },
    rules: {
      '@typescript-eslint/no-unused-vars': ['error', {
        'argsIgnorePattern': '^_',
        'varsIgnorePattern': '^_',
        'ignoreRestSiblings': true
      }],
      'no-unused-vars': ['error', {
        'argsIgnorePattern': '^_',
        'varsIgnorePattern': '^_',
        'ignoreRestSiblings': true
      }],
      '@typescript-eslint/no-explicit-any': 'warn',
      '@typescript-eslint/explicit-function-return-type': 'off',
      '@typescript-eslint/explicit-module-boundary-types': 'off',
      '@typescript-eslint/no-inferrable-types': 'off',
      'prefer-const': 'error',
      'no-var': 'error',
      'no-case-declarations': 'error'
    }
  },
  {
    ignores: ['build/**', 'node_modules/**', 'coverage/**', '*.js']
  }
];
--------------------------------------------------------------------------------
/tests/basic.test.ts.disabled:
--------------------------------------------------------------------------------
describe('MCP Server', () => {
  test('should have working test environment', () => {
    expect(true).toBe(true);
  });

  test('should validate core dependencies', () => {
    const fs = require('fs');
    const path = require('path');
    const packagePath = path.join(__dirname, '..', 'package.json');
    const packageJson = JSON.parse(fs.readFileSync(packagePath, 'utf8'));

    expect(packageJson.name).toBe('mcp-for-microsoft-fabric-analytics');
    expect(packageJson.dependencies).toBeDefined();
    expect(packageJson.dependencies['zod']).toBeDefined();
    expect(packageJson.dependencies['@modelcontextprotocol/sdk']).toBeDefined();
    expect(packageJson.dependencies['@azure/msal-node']).toBeDefined();
  });

  test('should validate TypeScript source files exist', () => {
    const fs = require('fs');
    const path = require('path');

    const sourceIndexPath = path.join(__dirname, '..', 'src', 'index.ts');
    expect(fs.existsSync(sourceIndexPath)).toBe(true);
  });

  test('should validate configuration files', () => {
    const fs = require('fs');
    const path = require('path');

    const configFiles = [
      '../tsconfig.json',
      '../package.json',
      '../jest.config.json'
    ];

    configFiles.forEach(file => {
      const filePath = path.join(__dirname, file);
      expect(fs.existsSync(filePath)).toBe(true);
    });
  });

  test('should validate source files exist', () => {
    const fs = require('fs');
    const path = require('path');

    const sourceFiles = [
      '../src/index.ts',
      '../src/fabric-client.ts',
      '../src/auth-client.ts'
    ];

    sourceFiles.forEach(file => {
      const filePath = path.join(__dirname, file);
      expect(fs.existsSync(filePath)).toBe(true);
    });
  });
});
--------------------------------------------------------------------------------
/tests/integration.test.ts.disabled:
--------------------------------------------------------------------------------
/**
 * Simple Integration Test for MCP Server
 * This test validates core functionality without complex dependencies
 */

describe('MCP Server Integration', () => {
  test('should have working test environment', () => {
    expect(true).toBe(true);
  });

  test('should validate core dependencies', () => {
    // Test if we can require the core modules
    const packageJson = require('../package.json');

    expect(packageJson.name).toBe('mcp-for-microsoft-fabric-analytics');
    expect(packageJson.dependencies).toBeDefined();
    expect(packageJson.dependencies['zod']).toBeDefined();
    expect(packageJson.dependencies['@modelcontextprotocol/sdk']).toBeDefined();
    expect(packageJson.dependencies['@azure/msal-node']).toBeDefined();
  });

  test('should validate TypeScript source files exist', () => {
    const fs = require('fs');
    const path = require('path');

    const sourceIndexPath = path.join(__dirname, '..', 'src', 'index.ts');
    expect(fs.existsSync(sourceIndexPath)).toBe(true);
  });

  test('should validate configuration files', () => {
    const fs = require('fs');
    const path = require('path');

    const configFiles = [
      '../tsconfig.json',
      '../jest.config.json',
      '../package.json'
    ];

    configFiles.forEach(configFile => {
      const filePath = path.join(__dirname, configFile);
      expect(fs.existsSync(filePath)).toBe(true);
    });
  });

  test('should validate source files exist', () => {
    const fs = require('fs');
    const path = require('path');

    const sourceFiles = [
      '../src/index.ts',
      '../src/fabric-client.ts',
      '../src/auth-client.ts'
    ];

    sourceFiles.forEach(sourceFile => {
      const filePath = path.join(__dirname, sourceFile);
      expect(fs.existsSync(filePath)).toBe(true);
    });
  });
});
--------------------------------------------------------------------------------
/k8s/secret.yaml:
--------------------------------------------------------------------------------
# Secret for Microsoft Fabric Analytics MCP Server sensitive data
apiVersion: v1
kind: Secret
metadata:
  name: fabric-mcp-secrets
  namespace: fabric-mcp
  labels:
    app: fabric-analytics-mcp
type: Opaque
stringData:
  # Microsoft Fabric Service Principal Authentication
  # These values should be replaced with actual credentials during deployment
  FABRIC_CLIENT_ID: "your-client-id-here"
  FABRIC_CLIENT_SECRET: "your-client-secret-here"
  FABRIC_TENANT_ID: "your-tenant-id-here"

  # Optional: Default workspace for simplified operations
  FABRIC_DEFAULT_WORKSPACE_ID: "your-default-workspace-id"

  # Optional: Bearer token for alternative authentication
  FABRIC_BEARER_TOKEN: ""

  # Application secrets
  JWT_SECRET: "your-jwt-secret-for-internal-auth"
  API_KEY: "your-api-key-for-internal-services"
---
# Azure Key Vault SecretProviderClass (optional - for Azure Key Vault integration)
apiVersion: secrets-store.csi.x-k8s.io/v1
kind: SecretProviderClass
metadata:
  name: fabric-mcp-secrets-provider
  namespace: fabric-mcp
spec:
  provider: azure
  parameters:
    usePodIdentity: "false"
    useVMManagedIdentity: "true"
    userAssignedIdentityID: "your-managed-identity-client-id"
    keyvaultName: "your-keyvault-name"
    tenantId: "your-tenant-id"
    objects: |
      array:
        - |
          objectName: fabric-client-id
          objectType: secret
          objectVersion: ""
        - |
          objectName: fabric-client-secret
          objectType: secret
          objectVersion: ""
        - |
          objectName: fabric-tenant-id
          objectType: secret
          objectVersion: ""
  secretObjects:
    - secretName: fabric-mcp-secrets-from-keyvault
      type: Opaque
      data:
        - objectName: fabric-client-id
          key: FABRIC_CLIENT_ID
        - objectName: fabric-client-secret
          key: FABRIC_CLIENT_SECRET
        - objectName: fabric-tenant-id
          key: FABRIC_TENANT_ID
--------------------------------------------------------------------------------
/k8s/hpa.yaml:
--------------------------------------------------------------------------------
# Horizontal Pod Autoscaler for Microsoft Fabric Analytics MCP Server
apiVersion: autoscaling/v2
kind: HorizontalPodAutoscaler
metadata:
  name: fabric-analytics-mcp-hpa
  namespace: fabric-mcp
  labels:
    app: fabric-analytics-mcp
spec:
  scaleTargetRef:
    apiVersion: apps/v1
    kind: Deployment
    name: fabric-analytics-mcp

  minReplicas: 3
  maxReplicas: 10

  metrics:
    # CPU utilization target
    - type: Resource
      resource:
        name: cpu
        target:
          type: Utilization
          averageUtilization: 70

    # Memory utilization target
    - type: Resource
      resource:
        name: memory
        target:
          type: Utilization
          averageUtilization: 80

    # Custom metrics (if Prometheus is available)
    - type: Pods
      pods:
        metric:
          name: http_requests_per_second
        target:
          type: AverageValue
          averageValue: "50"

  # Scale-down behavior configuration
  behavior:
    scaleDown:
      stabilizationWindowSeconds: 300 # 5 minutes
      policies:
        - type: Percent
          value: 10
          periodSeconds: 60
        - type: Pods
          value: 1
          periodSeconds: 60
      selectPolicy: Min

    # Scale-up behavior configuration
    scaleUp:
      stabilizationWindowSeconds: 60 # 1 minute
      policies:
        - type: Percent
          value: 50
          periodSeconds: 60
        - type: Pods
          value: 2
          periodSeconds: 60
      selectPolicy: Max
---
# Vertical Pod Autoscaler (optional - requires VPA to be installed)
apiVersion: autoscaling.k8s.io/v1
kind: VerticalPodAutoscaler
metadata:
  name: fabric-analytics-mcp-vpa
  namespace: fabric-mcp
  labels:
    app: fabric-analytics-mcp
spec:
  targetRef:
    apiVersion: apps/v1
    kind: Deployment
    name: fabric-analytics-mcp

  updatePolicy:
    updateMode: "Auto"

  resourcePolicy:
    containerPolicies:
      - containerName: fabric-analytics-mcp
        minAllowed:
          cpu: 100m
          memory: 128Mi
        maxAllowed:
          cpu: 1000m
          memory: 1Gi
        controlledResources: ["cpu", "memory"]
--------------------------------------------------------------------------------
/package.json:
--------------------------------------------------------------------------------
{
  "name": "mcp-for-microsoft-fabric-analytics",
  "version": "2.0.0",
  "description": "MCP server for Microsoft Fabric Analytics with Synapse-to-Fabric migration - enables LLMs to access, analyze, and migrate workloads to Microsoft Fabric",
  "type": "module",
  "main": "./build/index.js",
  "bin": {
    "fabric-analytics": "./build/index.js",
    "fabric-analytics-mcp": "./build/index.js"
  },
  "scripts": {
    "build": "tsc",
    "dev": "tsc --watch",
    "start": "node build/index.js",
    "test": "jest --forceExit",
    "test:watch": "jest --watch",
    "test:coverage": "jest --coverage",
    "lint": "eslint src/**/*.ts",
    "lint:fix": "eslint src/**/*.ts --fix",
    "docker:build": "docker build -t fabric-analytics-mcp .",
    "docker:run": "docker run -p 3000:3000 --env-file .env fabric-analytics-mcp",
    "docker:push": "./scripts/build-and-push.sh",
    "k8s:deploy": "./scripts/deploy-to-aks.sh",
    "azure:setup": "./scripts/setup-azure-resources.sh",
    "health:check": "curl -f http://localhost:3000/health || exit 1"
  },
  "keywords": [
    "mcp",
    "model-context-protocol",
    "microsoft-fabric",
    "analytics",
    "data-analysis",
    "llm",
    "ai",
    "claude",
    "spark",
    "livy",
    "msal",
    "authentication"
  ],
  "author": "Microsoft Fabric Analytics Community",
  "license": "MIT",
  "repository": {
    "type": "git",
    "url": "git+https://github.com/santhoshravindran7/Fabric-Analytics-MCP.git"
  },
  "homepage": "https://github.com/santhoshravindran7/Fabric-Analytics-MCP#readme",
  "bugs": {
    "url": "https://github.com/santhoshravindran7/Fabric-Analytics-MCP/issues"
  },
  "engines": {
    "node": ">=18.0.0"
  },
  "files": [
    "build",
    "README.md",
    "LICENSE",
    "CONTRIBUTING.md",
    "SECURITY.md"
  ],
  "devDependencies": {
    "@babel/core": "^7.28.0",
    "@babel/preset-env": "^7.28.0",
    "@types/jest": "^30.0.0",
    "@types/node": "^24.0.3",
    "@typescript-eslint/eslint-plugin": "^8.35.0",
    "@typescript-eslint/parser": "^8.35.0",
    "babel-jest": "^30.0.4",
    "eslint": "^9.29.0",
    "jest": "^30.0.3",
    "ts-jest": "^29.4.0",
    "typescript": "^5.8.3"
  },
  "dependencies": {
    "@azure/msal-node": "^2.16.2",
    "@modelcontextprotocol/sdk": "^1.13.0",
    "@types/node-fetch": "^2.6.13",
    "child_process": "^1.0.2",
    "node-fetch": "^3.3.2",
    "zod": "^3.25.67"
  }
}
--------------------------------------------------------------------------------
"C:\\Users\\saravi\\AppData\\Roaming\\Claude\\claude_desktop_config.json"; 6 | const buildPath = "C:\\Users\\saravi\\OneDrive - Microsoft\\MCP for Microsoft Fabric Analytics\\build\\index.js"; 7 | 8 | console.log("🔧 Validating MCP Configuration...\n"); 9 | 10 | // Check if config file exists 11 | if (!fs.existsSync(configPath)) { 12 | console.log("❌ Claude Desktop config file not found at:", configPath); 13 | process.exit(1); 14 | } 15 | 16 | // Check if build file exists 17 | if (!fs.existsSync(buildPath)) { 18 | console.log("❌ MCP server build file not found at:", buildPath); 19 | process.exit(1); 20 | } 21 | 22 | // Read and validate config 23 | try { 24 | const config = JSON.parse(fs.readFileSync(configPath, 'utf8')); 25 | 26 | console.log("✅ Configuration file found and valid JSON"); 27 | 28 | if (config.mcpServers && config.mcpServers['fabric-analytics']) { 29 | console.log("✅ fabric-analytics server configured"); 30 | 31 | const server = config.mcpServers['fabric-analytics']; 32 | 33 | if (server.env && server.env.FABRIC_TOKEN && server.env.FABRIC_WORKSPACE_ID) { 34 | console.log("✅ Environment variables configured"); 35 | console.log(` - Token: ${server.env.FABRIC_TOKEN.substring(0, 20)}...`); 36 | console.log(` - Workspace ID: ${server.env.FABRIC_WORKSPACE_ID}`); 37 | } else { 38 | console.log("❌ Missing environment variables"); 39 | } 40 | 41 | if (server.command === "node" && server.args && server.args[0]) { 42 | console.log("✅ Node command configured"); 43 | console.log(` - Command: ${server.command}`); 44 | console.log(` - Script: ${server.args[0]}`); 45 | } else { 46 | console.log("❌ Invalid command configuration"); 47 | } 48 | } else { 49 | console.log("❌ fabric-analytics server not found in config"); 50 | } 51 | 52 | console.log("\n🎯 Configuration Summary:"); 53 | console.log("- Config file: VALID"); 54 | console.log("- Build file: EXISTS"); 55 | console.log("- Server name: fabric-analytics"); 56 | console.log("- Ready for Claude Desktop startup"); 57 | 58 | } catch (error) { 59 | console.log("❌ Error reading config:", error.message); 60 | process.exit(1); 61 | } 62 | 63 | console.log("\n🚀 Next steps:"); 64 | console.log("1. Start Claude Desktop"); 65 | console.log("2. Wait for initialization"); 66 | console.log("3. Check Settings > Features > Model Context Protocol"); 67 | console.log("4. 
/tests/config.test.ts.disabled:
--------------------------------------------------------------------------------
import { describe, it, expect, beforeEach, jest } from '@jest/globals';
import * as fs from 'fs';
import * as path from 'path';

describe('MCP Server Configuration', () => {
  beforeEach(() => {
    jest.clearAllMocks();
  });

  describe('Environment Check', () => {
    it('should have Node.js version 18 or higher', () => {
      const nodeVersion = process.version;
      const majorVersion = parseInt(nodeVersion.substring(1).split('.')[0]);
      expect(majorVersion).toBeGreaterThanOrEqual(18);
    });

    it('should run in test environment', () => {
      expect(process.env.NODE_ENV).toBe('test');
    });
  });

  describe('TypeScript Compilation', () => {
    it('should compile without errors', () => {
      // This test passes if the file compiles successfully
      expect(true).toBe(true);
    });
  });

  describe('Module Imports', () => {
    it('should import zod successfully', async () => {
      const { z } = await import('zod');
      expect(z).toBeDefined();
      expect(typeof z.string).toBe('function');
    });

    it('should import MCP SDK successfully', async () => {
      try {
        const mcp = await import('@modelcontextprotocol/sdk/server/mcp.js');
        expect(mcp).toBeDefined();
      } catch (error) {
        // Skip if module not found during testing
        console.warn('MCP SDK import skipped in test environment');
      }
    });
  });

  describe('Configuration Validation', () => {
    it('should validate required configuration files exist', () => {
      const configFiles = [
        'package.json',
        'tsconfig.json',
        'jest.config.json'
      ];

      configFiles.forEach(file => {
        const filePath = path.resolve(process.cwd(), file);
        expect(fs.existsSync(filePath)).toBe(true);
      });
    });

    it('should validate package.json structure', () => {
      const packagePath = path.resolve(process.cwd(), 'package.json');
      const packageJson = JSON.parse(fs.readFileSync(packagePath, 'utf8'));

      expect(packageJson.name).toBeDefined();
      expect(packageJson.version).toBeDefined();
      expect(packageJson.dependencies).toBeDefined();
      expect(packageJson.dependencies['@modelcontextprotocol/sdk']).toBeDefined();
      expect(packageJson.dependencies['zod']).toBeDefined();
    });

    it('should validate source files exist', () => {
      const sourcePath = path.resolve(process.cwd(), 'src', 'index.ts');
      expect(fs.existsSync(sourcePath)).toBe(true);
    });
  });
});
--------------------------------------------------------------------------------
/SECURITY.md:
--------------------------------------------------------------------------------
# Security Policy

## Reporting Security Vulnerabilities

We take security seriously. If you discover a security vulnerability, please report it responsibly.

### How to Report

**Please do NOT create a public GitHub issue for security vulnerabilities.**

Instead, please report security issues by:

1. **GitHub Security Advisory** (Preferred)
   - Go to the repository's Security tab
   - Click "Report a vulnerability"
   - Provide detailed information about the vulnerability

2. **Email** (Alternative)
   - Send details to: [santhosh.ravindran@microsoft.com]
   - Include steps to reproduce the vulnerability
   - Provide any proof-of-concept code if applicable

### What to Include

When reporting a security vulnerability, please include:

- **Description** of the vulnerability
- **Steps to reproduce** the issue
- **Potential impact** assessment
- **Suggested fix** (if you have one)
- **Your contact information** for follow-up

### Response Timeline

- **Acknowledgment**: Within 48 hours
- **Initial Assessment**: Within 1 week
- **Fix Development**: Timeline depends on severity
- **Public Disclosure**: After fix is available

## Security Best Practices

### For Users

- **Never commit authentication tokens** to version control
- **Use environment variables** for sensitive configuration (see the sketch after this list)
- **Regularly rotate** your Microsoft Fabric tokens
- **Follow principle of least privilege** for API access
- **Keep dependencies updated** to latest secure versions
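
As a concrete illustration of the environment-variable practice, here is a minimal TypeScript sketch. The variable name `FABRIC_BEARER_TOKEN` matches this repo's configuration templates; the fail-fast and redacted-logging style is a suggestion, not the server's actual implementation:

```typescript
// Sketch: read the token from the environment and fail fast if it is absent.
const token = process.env.FABRIC_BEARER_TOKEN;
if (!token) {
  throw new Error("FABRIC_BEARER_TOKEN is not set");
}

// Never log the token itself; confirm presence only.
console.log("Fabric token loaded (value redacted)");
```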
### For Contributors

- **Review code** for potential security issues
- **Validate all inputs** and sanitize outputs
- **Use secure coding practices** for authentication
- **Never log sensitive information** like tokens
- **Test security-related changes** thoroughly

## Supported Versions

| Version | Supported          |
| ------- | ------------------ |
| Latest  | ✅ Yes             |
| < 1.0   | ❌ No              |

## Security Features

This project includes several security features:

- **Input validation** using Zod schemas (see the sketch after this list)
- **Token sanitization** in logs and error messages
- **Secure authentication** patterns
- **No hardcoded credentials** in source code
- **Comprehensive .gitignore** for sensitive files
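
For example, a minimal sketch of the Zod validation pattern (Zod is a project dependency; the field names and enum values here are illustrative, not the server's actual schema, and the workspace ID is the test value used elsewhere in this repo):

```typescript
import { z } from "zod";

// Declare a schema, then parse untrusted input before it reaches any Fabric API call.
const WorkspaceRequest = z.object({
  workspaceId: z.string().uuid(),
  itemType: z.enum(["Notebook", "Lakehouse", "Report"]).optional(),
});

// parse() throws on malformed input, so invalid requests never reach the API client.
const safeInput = WorkspaceRequest.parse({
  workspaceId: "c22f6805-d84a-4143-80b2-0c9e9832e5a2",
});
console.log(`Validated workspace: ${safeInput.workspaceId}`);
```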
27 | ._* 28 | .Spotlight-V100 29 | .Trashes 30 | ehthumbs.db 31 | Thumbs.db 32 | 33 | # Logs 34 | logs 35 | *.log 36 | 37 | # Runtime data 38 | pids 39 | *.pid 40 | *.seed 41 | *.pid.lock 42 | 43 | # Coverage directory used by tools like istanbul 44 | coverage/ 45 | 46 | # nyc test coverage 47 | .nyc_output 48 | 49 | # Dependency directories 50 | jspm_packages/ 51 | 52 | # Optional npm cache directory 53 | .npm 54 | 55 | # Optional REPL history 56 | .node_repl_history 57 | 58 | # Output of 'npm pack' 59 | *.tgz 60 | 61 | # Yarn Integrity file 62 | .yarn-integrity 63 | 64 | # dotenv environment variables file 65 | .env 66 | 67 | # parcel-bundler cache (https://parceljs.org/) 68 | .cache 69 | .parcel-cache 70 | 71 | # next.js build output 72 | .next 73 | 74 | # nuxt.js build output 75 | .nuxt 76 | 77 | # vuepress build output 78 | .vuepress/dist 79 | 80 | # Serverless directories 81 | .serverless 82 | 83 | # FuseBox cache 84 | .fusebox/ 85 | 86 | # DynamoDB Local files 87 | .dynamodb/ 88 | 89 | # TernJS port file 90 | .tern-port 91 | 92 | # Jupyter Notebook checkpoints 93 | .ipynb_checkpoints 94 | 95 | # Microsoft Fabric credentials and sensitive data (security) 96 | *bearer-token* 97 | *fabric-token* 98 | *credentials* 99 | *secrets* 100 | *.env 101 | .env.* 102 | 103 | # Specific sensitive config files (not build configs) 104 | workspace_details_*.json 105 | fabric-config.json 106 | auth-config.json 107 | user-config.json 108 | 109 | # Claude Desktop config (contains local paths) 110 | claude_desktop_config.json 111 | development-archive/ 112 | 113 | development-archive/ 114 | 115 | 116 | # Test files and validation scripts (generated during development) 117 | *.cjs 118 | test*.js 119 | test*.mjs 120 | test-*.mjs 121 | *test*.py 122 | *demo*.py 123 | *validation*.py 124 | quick-*.js 125 | quick-*.cjs 126 | setup-*.cjs 127 | validate-*.cjs 128 | validate-*.js 129 | generate-*.cjs 130 | test-*.ps1 131 | final-check.js 132 | debug-*.mjs 133 | debug-*.js 134 | check-*.mjs 135 | assign-*.mjs 136 | count-*.mjs 137 | find-*.mjs 138 | get-*.cjs 139 | list-*.cjs 140 | run-*.mjs 141 | 142 | # Temporary files and reports from testing 143 | migration-*.json 144 | comprehensive-*.py 145 | *-report.json 146 | *-analysis.py 147 | fabric-workspace-id.txt 148 | 149 | # Temporary markdown documentation (keep only production docs) 150 | SETUP_COMPLETE.md 151 | MIGRATION_*.md 152 | migration-*.md 153 | -------------------------------------------------------------------------------- /config.template.json: -------------------------------------------------------------------------------- 1 | { 2 | "description": "Configuration template for Microsoft Fabric Analytics MCP Server testing", 3 | "fabric_credentials": { 4 | "bearer_token": "YOUR_BEARER_TOKEN_HERE", 5 | "workspace_id": "YOUR_WORKSPACE_ID_HERE", 6 | "lakehouse_id": "YOUR_LAKEHOUSE_ID_HERE", 7 | "notebook_id": "YOUR_NOTEBOOK_ID_HERE" 8 | }, 9 | "testing_options": { 10 | "include_performance_tests": true, 11 | "timeout_seconds": 300, 12 | "retry_attempts": 3, 13 | "verbose_logging": true 14 | }, 15 | "spark_cluster_configs": { 16 | "small": { 17 | "driverCores": 2, 18 | "driverMemory": "4g", 19 | "executorCores": 1, 20 | "executorMemory": "2g", 21 | "numExecutors": 2 22 | }, 23 | "medium": { 24 | "driverCores": 4, 25 | "driverMemory": "8g", 26 | "executorCores": 2, 27 | "executorMemory": "4g", 28 | "numExecutors": 3 29 | }, 30 | "large": { 31 | "driverCores": 8, 32 | "driverMemory": "16g", 33 | "executorCores": 4, 34 | "executorMemory": "8g", 35 | 
"numExecutors": 5 36 | } 37 | }, 38 | "sample_data_scenarios": { 39 | "sales_analysis": { 40 | "description": "Comprehensive sales data analysis with forecasting", 41 | "estimated_runtime": "5-10 minutes", 42 | "required_permissions": ["lakehouse_read", "spark_compute"] 43 | }, 44 | "customer_segmentation": { 45 | "description": "Customer lifetime value and segmentation analysis", 46 | "estimated_runtime": "3-7 minutes", 47 | "required_permissions": ["lakehouse_read", "spark_compute"] 48 | }, 49 | "inventory_optimization": { 50 | "description": "Inventory forecasting and optimization", 51 | "estimated_runtime": "8-15 minutes", 52 | "required_permissions": ["lakehouse_read", "spark_compute", "ml_workspace"] 53 | } 54 | }, 55 | "notebook_test_scenarios": [ 56 | { 57 | "name": "Financial Reporting Automation", 58 | "parameters": { 59 | "report_period": "Q4-2024", 60 | "include_forecasts": true, 61 | "departments": ["Sales", "Marketing", "Finance"], 62 | "export_formats": ["xlsx", "pdf"], 63 | "email_distribution": true 64 | } 65 | }, 66 | { 67 | "name": "Real-time Dashboard Updates", 68 | "parameters": { 69 | "refresh_interval_minutes": 15, 70 | "data_sources": ["sales_db", "marketing_api", "finance_warehouse"], 71 | "alert_thresholds": { 72 | "revenue_variance": 0.1, 73 | "conversion_rate_drop": 0.05 74 | } 75 | } 76 | } 77 | ], 78 | "environment_variables": { 79 | "FABRIC_BEARER_TOKEN": "Set this to your actual bearer token", 80 | "FABRIC_WORKSPACE_ID": "Set this to your workspace ID", 81 | "FABRIC_LAKEHOUSE_ID": "Set this to your lakehouse ID", 82 | "FABRIC_NOTEBOOK_ID": "Set this to your notebook ID", 83 | "MCP_SERVER_HOST": "localhost", 84 | "MCP_SERVER_PORT": "3000", 85 | "LOG_LEVEL": "INFO" 86 | }, 87 | "usage_instructions": { 88 | "quick_start": "Copy this file to config.json and replace YOUR_*_HERE with actual values", 89 | "command_line": "python real_fabric_test.py --config config.json", 90 | "interactive": "python real_fabric_test.py --interactive", 91 | "performance": "python real_fabric_test.py --config config.json --performance" 92 | } 93 | } 94 | -------------------------------------------------------------------------------- /tests/fabric-client.test.ts: -------------------------------------------------------------------------------- 1 | import { describe, it, expect, beforeEach, jest } from '@jest/globals'; 2 | import { FabricApiClient } from '../src/fabric-client'; 3 | 4 | // Mock global fetch 5 | global.fetch = jest.fn() as jest.Mock; 6 | 7 | describe('FabricApiClient', () => { 8 | let client: FabricApiClient; 9 | const mockToken = 'test-bearer-token'; 10 | const mockWorkspaceId = 'test-workspace-123'; 11 | const mockFetch = global.fetch as jest.Mock; 12 | 13 | beforeEach(() => { 14 | client = new FabricApiClient(mockToken, mockWorkspaceId); 15 | mockFetch.mockClear(); 16 | }); 17 | 18 | describe('constructor', () => { 19 | it('should initialize with provided token and workspace', () => { 20 | expect(client).toBeDefined(); 21 | expect((client as any)._bearerToken).toBe(mockToken); 22 | expect((client as any)._workspaceId).toBe(mockWorkspaceId); 23 | }); 24 | 25 | it('should use default config when none provided', () => { 26 | expect((client as any)._config.apiBaseUrl).toBe('https://api.fabric.microsoft.com/v1'); 27 | expect((client as any)._config.timeout).toBe(30000); 28 | }); 29 | }); 30 | 31 | describe('makeRequest', () => { 32 | it('should make successful GET request', async () => { 33 | const mockResponse = { items: ['item1', 'item2'] }; 34 | 
mockFetch.mockResolvedValueOnce({ 35 | ok: true, 36 | json: () => Promise.resolve(mockResponse) 37 | } as any); 38 | 39 | const result = await client.makeRequest('items'); 40 | 41 | expect(mockFetch).toHaveBeenCalledWith( 42 | `https://api.fabric.microsoft.com/v1/workspaces/${mockWorkspaceId}/items`, 43 | expect.objectContaining({ 44 | method: 'GET', 45 | headers: expect.objectContaining({ 46 | 'Authorization': `Bearer ${mockToken}`, 47 | 'Content-Type': 'application/json' 48 | }) 49 | }) 50 | ); 51 | 52 | expect(result.status).toBe('success'); 53 | expect(result.data).toEqual(mockResponse); 54 | }); 55 | 56 | it('should handle error response', async () => { 57 | mockFetch.mockResolvedValueOnce({ 58 | ok: false, 59 | status: 401, 60 | text: () => Promise.resolve('Unauthorized') 61 | } as any); 62 | 63 | const result = await client.makeRequest('items'); 64 | 65 | expect(result.status).toBe('error'); 66 | expect(result.error).toContain('HTTP 401'); 67 | }); 68 | 69 | it('should handle network error', async () => { 70 | mockFetch.mockRejectedValueOnce(new Error('Network error')); 71 | 72 | const result = await client.makeRequest('items'); 73 | 74 | expect(result.status).toBe('error'); 75 | expect(result.error).toContain('Request failed'); 76 | }); 77 | }); 78 | 79 | describe('Basic API operations', () => { 80 | it('should list items', async () => { 81 | const mockItems = { value: [{ id: '123', displayName: 'Test Item' }] }; 82 | mockFetch.mockResolvedValueOnce({ 83 | ok: true, 84 | json: () => Promise.resolve(mockItems) 85 | } as any); 86 | 87 | const result = await client.listItems(); 88 | 89 | expect(mockFetch).toHaveBeenCalledWith( 90 | expect.stringContaining('/items'), 91 | expect.any(Object) 92 | ); 93 | expect(result.status).toBe('success'); 94 | }); 95 | }); 96 | }); 97 | -------------------------------------------------------------------------------- /tests/fabric-client.test.ts.disabled: -------------------------------------------------------------------------------- 1 | import { describe, it, expect, beforeEach, jest } from '@jest/globals'; 2 | import { FabricApiClient } from '../src/fabric-client'; 3 | 4 | // Mock global fetch 5 | global.fetch = jest.fn() as jest.Mock; 6 | 7 | describe('FabricApiClient', () => { 8 | let client: FabricApiClient; 9 | const mockToken = 'test-bearer-token'; 10 | const mockWorkspaceId = 'test-workspace-123'; 11 | const mockFetch = global.fetch as jest.Mock; 12 | 13 | beforeEach(() => { 14 | client = new FabricApiClient(mockToken, mockWorkspaceId); 15 | mockFetch.mockClear(); 16 | }); 17 | 18 | describe('constructor', () => { 19 | it('should initialize with provided token and workspace', () => { 20 | expect(client).toBeDefined(); 21 | expect((client as any)._bearerToken).toBe(mockToken); 22 | expect((client as any)._workspaceId).toBe(mockWorkspaceId); 23 | }); 24 | 25 | it('should use default config when none provided', () => { 26 | expect((client as any)._config.apiBaseUrl).toBe('https://api.fabric.microsoft.com/v1'); 27 | expect((client as any)._config.timeout).toBe(30000); 28 | }); 29 | }); 30 | 31 | describe('makeRequest', () => { 32 | it('should make successful GET request', async () => { 33 | const mockResponse = { items: ['item1', 'item2'] }; 34 | mockFetch.mockResolvedValueOnce({ 35 | ok: true, 36 | json: () => Promise.resolve(mockResponse) 37 | } as any); 38 | 39 | const result = await client.makeRequest('items'); 40 | 41 | expect(mockFetch).toHaveBeenCalledWith( 42 | `https://api.fabric.microsoft.com/v1/workspaces/${mockWorkspaceId}/items`, 43 
| expect.objectContaining({ 44 | method: 'GET', 45 | headers: expect.objectContaining({ 46 | 'Authorization': `Bearer ${mockToken}`, 47 | 'Content-Type': 'application/json' 48 | }) 49 | }) 50 | ); 51 | 52 | expect(result.status).toBe('success'); 53 | expect(result.data).toEqual(mockResponse); 54 | }); 55 | 56 | it('should handle error response', async () => { 57 | mockFetch.mockResolvedValueOnce({ 58 | ok: false, 59 | status: 401, 60 | text: () => Promise.resolve('Unauthorized') 61 | } as any); 62 | 63 | const result = await client.makeRequest('items'); 64 | 65 | expect(result.status).toBe('error'); 66 | expect(result.error).toContain('HTTP 401'); 67 | }); 68 | 69 | it('should handle network error', async () => { 70 | mockFetch.mockRejectedValueOnce(new Error('Network error')); 71 | 72 | const result = await client.makeRequest('items'); 73 | 74 | expect(result.status).toBe('error'); 75 | expect(result.error).toContain('Request failed'); 76 | }); 77 | }); 78 | 79 | describe('Basic API operations', () => { 80 | it('should list items', async () => { 81 | const mockItems = { value: [{ id: '123', displayName: 'Test Item' }] }; 82 | mockFetch.mockResolvedValueOnce({ 83 | ok: true, 84 | json: () => Promise.resolve(mockItems) 85 | } as any); 86 | 87 | const result = await client.listItems(); 88 | 89 | expect(mockFetch).toHaveBeenCalledWith( 90 | expect.stringContaining('/items'), 91 | expect.any(Object) 92 | ); 93 | expect(result.status).toBe('success'); 94 | }); 95 | }); 96 | }); 97 | -------------------------------------------------------------------------------- /final-check.js: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env node 2 | 3 | /** 4 | * Final Status Check - Ready to Push Assessment 5 | * Quick status check to confirm everything is ready for Git push 6 | */ 7 | 8 | import fs from 'fs'; 9 | import path from 'path'; 10 | import { execSync } from 'child_process'; 11 | 12 | console.log('🔍 Final Pre-Push Status Check'); 13 | console.log('==============================\n'); 14 | 15 | const checks = [ 16 | { 17 | name: 'TypeScript Build', 18 | check: () => fs.existsSync('build/index.js'), 19 | fix: 'Run: npm run build' 20 | }, 21 | { 22 | name: 'Core Source Files', 23 | check: () => fs.existsSync('src/index.ts') && fs.existsSync('src/fabric-client.ts'), 24 | fix: 'Ensure src files exist' 25 | }, 26 | { 27 | name: 'Package Configuration', 28 | check: () => { 29 | const pkg = JSON.parse(fs.readFileSync('package.json', 'utf8')); 30 | return pkg.dependencies && pkg.dependencies['@modelcontextprotocol/sdk']; 31 | }, 32 | fix: 'Check package.json dependencies' 33 | }, 34 | { 35 | name: 'Notebook Management Features', 36 | check: () => { 37 | const indexContent = fs.readFileSync('src/index.ts', 'utf8'); 38 | return indexContent.includes('create-fabric-notebook') && 39 | indexContent.includes('get-fabric-notebook-definition') && 40 | indexContent.includes('update-fabric-notebook-definition') && 41 | indexContent.includes('run-fabric-notebook'); 42 | }, 43 | fix: 'Ensure all notebook management tools are implemented' 44 | }, 45 | { 46 | name: 'API Client Methods', 47 | check: () => { 48 | const clientContent = fs.readFileSync('src/fabric-client.ts', 'utf8'); 49 | return clientContent.includes('createNotebook') && 50 | clientContent.includes('getItemDefinition') && 51 | clientContent.includes('updateItemDefinition') && 52 | clientContent.includes('runNotebook'); 53 | }, 54 | fix: 'Ensure all API client methods are implemented' 55 | }, 56 | { 57 | 
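// Note (editorial aside): every entry in the checks array pairs a boolean predicate with a
// remediation hint; the forEach loop below prints one ✅/❌ line per entry and folds the
// outcomes into allPassed, so adding a new pre-push gate is just another object here.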
name: 'Documentation', 58 | check: () => fs.existsSync('README.md') && fs.existsSync('NOTEBOOK_MANAGEMENT_GUIDE.md'), 59 | fix: 'Ensure documentation files exist' 60 | } 61 | ]; 62 | 63 | let allPassed = true; 64 | 65 | checks.forEach((check, index) => { 66 | try { 67 | if (check.check()) { 68 | console.log(`✅ ${check.name}`); 69 | } else { 70 | console.log(`❌ ${check.name} - ${check.fix}`); 71 | allPassed = false; 72 | } 73 | } catch (error) { 74 | console.log(`❌ ${check.name} - Error: ${error.message}`); 75 | allPassed = false; 76 | } 77 | }); 78 | 79 | console.log('\n' + '='.repeat(40)); 80 | 81 | if (allPassed) { 82 | console.log('🎉 ALL CHECKS PASSED!'); 83 | console.log('\n✅ Ready to push to Git'); 84 | console.log('\nCommands to run:'); 85 | console.log(' git add .'); 86 | console.log(' git commit -m "Add comprehensive notebook management features with templates and API integration"'); 87 | console.log(' git push origin master'); 88 | 89 | // Show what will be committed 90 | try { 91 | console.log('\n📝 Files to be committed:'); 92 | const gitStatus = execSync('git status --porcelain', { encoding: 'utf8' }); 93 | if (gitStatus.trim()) { 94 | console.log(gitStatus); 95 | } else { 96 | console.log('No changes detected'); 97 | } 98 | } catch (error) { 99 | console.log('Git status check failed'); 100 | } 101 | 102 | } else { 103 | console.log('❌ Some checks failed - please fix before pushing'); 104 | } 105 | 106 | console.log('\n🏁 Status check complete!'); 107 | -------------------------------------------------------------------------------- /scripts/DOCKER_INSTALL.md: -------------------------------------------------------------------------------- 1 | # Microsoft Fabric Analytics MCP Server - Docker Installation 2 | 3 | ## Quick Start with Docker 4 | 5 | ### Using Pre-built Image (Coming Soon) 6 | 7 | ```bash 8 | # Pull and run the official image 9 | docker run -d \ 10 | --name fabric-mcp \ 11 | -p 3000:3000 \ 12 | -e FABRIC_AUTH_METHOD=bearer_token \ 13 | -e FABRIC_CLIENT_ID=your-client-id \ 14 | -e FABRIC_CLIENT_SECRET=your-secret \ 15 | -e FABRIC_TENANT_ID=your-tenant-id \ 16 | santhoshravindran7/fabric-analytics-mcp:latest 17 | ``` 18 | 19 | ### Building from Source 20 | 21 | ```bash 22 | # Clone repository 23 | git clone https://github.com/santhoshravindran7/Fabric-Analytics-MCP.git 24 | cd Fabric-Analytics-MCP 25 | 26 | # Build Docker image 27 | docker build -t fabric-analytics-mcp . 28 | 29 | # Run container 30 | docker run -d \ 31 | --name fabric-mcp \ 32 | -p 3000:3000 \ 33 | --env-file .env \ 34 | fabric-analytics-mcp 35 | ``` 36 | 37 | ### Docker Compose 38 | 39 | ```yaml 40 | version: '3.8' 41 | services: 42 | fabric-mcp: 43 | build: . 
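# Builds from the local Dockerfile at the repo root; switch this to the pre-built
# image shown above once it is published.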
44 | ports: 45 | - "3000:3000" 46 | environment: 47 | - FABRIC_AUTH_METHOD=bearer_token 48 | - FABRIC_CLIENT_ID=${FABRIC_CLIENT_ID} 49 | - FABRIC_CLIENT_SECRET=${FABRIC_CLIENT_SECRET} 50 | - FABRIC_TENANT_ID=${FABRIC_TENANT_ID} 51 | volumes: 52 | - ./config:/app/config 53 | restart: unless-stopped 54 | healthcheck: 55 | test: ["CMD", "curl", "-f", "http://localhost:3000/health"] 56 | interval: 30s 57 | timeout: 10s 58 | retries: 3 59 | ``` 60 | 61 | ### Environment Configuration 62 | 63 | Create a `.env` file: 64 | 65 | ```env 66 | FABRIC_AUTH_METHOD=bearer_token 67 | FABRIC_CLIENT_ID=your-client-id 68 | FABRIC_CLIENT_SECRET=your-client-secret 69 | FABRIC_TENANT_ID=your-tenant-id 70 | FABRIC_DEFAULT_WORKSPACE_ID=your-workspace-id 71 | ``` 72 | 73 | ### Health Check 74 | 75 | ```bash 76 | # Check container health 77 | docker exec fabric-mcp curl -f http://localhost:3000/health 78 | 79 | # View logs 80 | docker logs fabric-mcp 81 | 82 | # Access container shell 83 | docker exec -it fabric-mcp bash 84 | ``` 85 | 86 | ## Kubernetes Deployment 87 | 88 | ### Helm Chart (Recommended) 89 | 90 | ```bash 91 | # Add Helm repository (coming soon) 92 | helm repo add fabric-mcp https://santhoshravindran7.github.io/fabric-analytics-mcp-helm 93 | 94 | # Install with custom values 95 | helm install fabric-mcp fabric-mcp/fabric-analytics-mcp \ 96 | --set auth.method=service_principal \ 97 | --set auth.clientId=your-client-id \ 98 | --set auth.clientSecret=your-secret \ 99 | --set auth.tenantId=your-tenant-id 100 | ``` 101 | 102 | ### Manual Kubernetes Deployment 103 | 104 | ```bash 105 | # Apply all Kubernetes manifests 106 | kubectl apply -f k8s/ 107 | 108 | # Check deployment status 109 | kubectl get pods -l app=fabric-analytics-mcp 110 | 111 | # Port forward for local access 112 | kubectl port-forward service/fabric-analytics-mcp 3000:3000 113 | ``` 114 | 115 | ### Configuration via ConfigMap 116 | 117 | ```yaml 118 | apiVersion: v1 119 | kind: ConfigMap 120 | metadata: 121 | name: fabric-mcp-config 122 | data: 123 | FABRIC_AUTH_METHOD: "service_principal" 124 | FABRIC_DEFAULT_WORKSPACE_ID: "your-workspace-id" 125 | ``` 126 | 127 | ## Production Considerations 128 | 129 | ### Security 130 | 131 | - Use Kubernetes secrets for sensitive data 132 | - Enable TLS/SSL for external access 133 | - Configure network policies 134 | - Use service accounts with minimal permissions 135 | 136 | ### Scaling 137 | 138 | - Configure horizontal pod autoscaling (HPA) 139 | - Set appropriate resource limits and requests 140 | - Consider using node affinity for performance 141 | 142 | ### Monitoring 143 | 144 | - Prometheus metrics enabled by default 145 | - Grafana dashboards included 146 | - Health check endpoints available 147 | - Structured logging with correlation IDs 148 | 149 | ### Backup & Recovery 150 | 151 | - Configuration stored in ConfigMaps 152 | - No persistent storage required for stateless operation 153 | - Consider backing up logs for audit purposes 154 | -------------------------------------------------------------------------------- /python-wrapper/setup.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python3 2 | """ 3 | Simplified setup script for Microsoft Fabric Analytics MCP Server Python Package 4 | 5 | This creates a Python wrapper that installs and manages the npm package. 
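Typical flow (a sketch, assuming Node.js and npm are on PATH): pip installs this
wrapper, and the console scripts declared below delegate to the bundled npm server package.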
6 | """ 7 | 8 | from setuptools import setup, find_packages 9 | import json 10 | from pathlib import Path 11 | 12 | 13 | def get_version(): 14 | """Extract version from package.json""" 15 | package_json_path = Path(__file__).parent.parent / "package.json" 16 | if package_json_path.exists(): 17 | with open(package_json_path, 'r') as f: 18 | package_data = json.load(f) 19 | return package_data.get("version", "1.0.0") 20 | return "1.0.0" 21 | 22 | 23 | def get_long_description(): 24 | """Get long description from README.md""" 25 | readme_path = Path(__file__).parent / "README.md" 26 | if readme_path.exists(): 27 | with open(readme_path, 'r', encoding='utf-8') as f: 28 | return f.read() 29 | return "Microsoft Fabric Analytics MCP Server" 30 | 31 | setup( 32 | name="fabric-analytics-mcp", 33 | version=get_version(), 34 | author="Santhosh Ravindran", 35 | author_email="santhoshravindran7@gmail.com", 36 | description="Microsoft Fabric Analytics MCP Server - Enable AI assistants to access and analyze Microsoft Fabric data", 37 | long_description=get_long_description(), 38 | long_description_content_type="text/markdown", 39 | url="https://github.com/santhoshravindran7/Fabric-Analytics-MCP", 40 | project_urls={ 41 | "Bug Tracker": "https://github.com/santhoshravindran7/Fabric-Analytics-MCP/issues", 42 | "Documentation": "https://github.com/santhoshravindran7/Fabric-Analytics-MCP#readme", 43 | "Source Code": "https://github.com/santhoshravindran7/Fabric-Analytics-MCP", 44 | }, 45 | packages=find_packages(), 46 | classifiers=[ 47 | "Development Status :: 5 - Production/Stable", 48 | "Intended Audience :: Developers", 49 | "Intended Audience :: Science/Research", 50 | "Topic :: Software Development :: Libraries :: Python Modules", 51 | "Topic :: Scientific/Engineering :: Artificial Intelligence", 52 | "Topic :: Database :: Database Engines/Servers", 53 | "License :: OSI Approved :: MIT License", 54 | "Programming Language :: Python :: 3", 55 | "Programming Language :: Python :: 3.8", 56 | "Programming Language :: Python :: 3.9", 57 | "Programming Language :: Python :: 3.10", 58 | "Programming Language :: Python :: 3.11", 59 | "Programming Language :: Python :: 3.12", 60 | "Operating System :: OS Independent", 61 | "Environment :: Console", 62 | ], 63 | keywords=[ 64 | "mcp", "model-context-protocol", "microsoft-fabric", "analytics", 65 | "data-analysis", "llm", "ai", "claude", "copilot", "spark", "livy", 66 | "authentication", "azure", "powerbi", "data-science" 67 | ], 68 | python_requires=">=3.8", 69 | install_requires=[ 70 | "click>=8.0.0", 71 | "requests>=2.25.0", 72 | "packaging>=20.0", 73 | "setuptools>=40.0", 74 | ], 75 | extras_require={ 76 | "dev": [ 77 | "pytest>=6.0", 78 | "pytest-cov>=2.0", 79 | "black>=21.0", 80 | "flake8>=3.8", 81 | "mypy>=0.800", 82 | ], 83 | "async": [ 84 | "aiohttp>=3.8.0", 85 | "asyncio>=3.4.3", 86 | ] 87 | }, 88 | entry_points={ 89 | "console_scripts": [ 90 | "fabric-analytics-mcp=fabric_analytics_mcp.cli:main", 91 | "fabric-mcp=fabric_analytics_mcp.cli:main", 92 | ], 93 | }, 94 | include_package_data=True, 95 | package_data={ 96 | "fabric_analytics_mcp": [ 97 | "server/*", 98 | "server/build/*", 99 | "server/node_modules/**/*", 100 | "*.md", 101 | "*.json", 102 | ], 103 | }, 104 | zip_safe=False, 105 | # Post-install hook to ensure Node.js dependencies 106 | cmdclass={}, 107 | ) 108 | -------------------------------------------------------------------------------- /src/migration/types.ts: 
-------------------------------------------------------------------------------- 1 | /** 2 | * Migration Module Types 3 | * Shared types for Synapse and HDInsight migration 4 | */ 5 | 6 | export interface MigrationSource { 7 | type: 'synapse' | 'hdinsight'; 8 | resourceGroup: string; 9 | subscriptionId: string; 10 | } 11 | 12 | export interface SynapseSource extends MigrationSource { 13 | type: 'synapse'; 14 | workspaceName: string; 15 | sparkPoolName?: string; 16 | } 17 | 18 | export interface HDInsightSource extends MigrationSource { 19 | type: 'hdinsight'; 20 | clusterName: string; 21 | } 22 | 23 | export interface NotebookAsset { 24 | id: string; 25 | name: string; 26 | path: string; 27 | content: any; // ipynb JSON 28 | properties?: Record<string, any>; 29 | } 30 | 31 | export interface PipelineAsset { 32 | id: string; 33 | name: string; 34 | definition: any; // JSON definition 35 | properties?: Record<string, any>; 36 | } 37 | 38 | export interface LinkedServiceAsset { 39 | id: string; 40 | name: string; 41 | type: string; 42 | properties: Record<string, any>; 43 | } 44 | 45 | export interface SparkJobAsset { 46 | id: string; 47 | name: string; 48 | scriptPath?: string; 49 | mainClass?: string; 50 | arguments?: string[]; 51 | libraries?: string[]; 52 | configuration?: Record<string, any>; 53 | } 54 | 55 | export interface MigrationInventory { 56 | source: MigrationSource; 57 | notebooks: NotebookAsset[]; 58 | pipelines: PipelineAsset[]; 59 | linkedServices: LinkedServiceAsset[]; 60 | sparkJobs: SparkJobAsset[]; 61 | discoveredAt: Date; 62 | } 63 | 64 | export interface TransformationResult { 65 | notebookId?: string; 66 | notebookName: string; 67 | original?: string; 68 | originalContent?: any; 69 | transformed?: string; 70 | transformedContent: any; 71 | changes: CodeChange[]; 72 | warnings?: string[]; 73 | errors?: string[]; 74 | success: boolean; 75 | requiresManualReview?: boolean; 76 | } 77 | 78 | export interface CodeChange { 79 | type: 'replacement' | 'addition' | 'removal' | 'code' | 'metadata'; 80 | line?: number; 81 | location?: string; 82 | original: string; 83 | transformed: string; 84 | reason?: string; 85 | rule?: string; 86 | } 87 | 88 | export interface ProvisioningPlan { 89 | targetWorkspaceId: string; 90 | items: ProvisioningItem[]; 91 | dryRun: boolean; 92 | } 93 | 94 | export interface ProvisioningItem { 95 | type: 'notebook' | 'lakehouse' | 'pipeline' | 'shortcut'; 96 | name: string; 97 | sourceAsset: NotebookAsset | PipelineAsset | any; 98 | transformedContent?: any; 99 | dependencies?: string[]; 100 | } 101 | 102 | export interface MigrationReport { 103 | summary: { 104 | totalAssets: number; 105 | successful: number; 106 | failed: number; 107 | requiresManualReview: number; 108 | duration: number; // milliseconds 109 | }; 110 | details: MigrationItemResult[]; 111 | recommendations: string[]; 112 | generatedAt: Date; 113 | } 114 | 115 | export interface MigrationItemResult { 116 | assetName: string; 117 | assetType: string; 118 | status: 'success' | 'failed' | 'manual_review'; 119 | fabricItemId?: string; 120 | changes?: CodeChange[]; 121 | warnings?: string[]; 122 | error?: string; 123 | } 124 | 125 | export interface MigrationConfig { 126 | sourceType?: 'synapse' | 'hdinsight'; 127 | targetWorkspaceName?: string; 128 | targetLakehouseName?: string; 129 | targetWorkspace?: string; 130 | targetLakehouse?: string; 131 | migrateData?: boolean; 132 | useShortcuts?: boolean; 133 | createShortcuts?: boolean; 134 | validateAfterMigration?: boolean; 135 | validateTransformation?: boolean; 136 | dryRun?: boolean; 137 |
backupOriginal?: boolean; 138 | customTransformRules?: TransformRule[]; 139 | transformRules?: TransformRule[]; 140 | targetEnvironment?: { 141 | lakehouseId?: string; 142 | sparkPoolSize?: 'small' | 'medium' | 'large'; 143 | }; 144 | } 145 | 146 | export interface TransformRule { 147 | name: string; 148 | pattern: string | RegExp; 149 | replacement: string | ((match: string) => string); 150 | description?: string; 151 | scope?: 'notebooks' | 'pipelines' | 'all'; 152 | } 153 | -------------------------------------------------------------------------------- /k8s/ingress.yaml: -------------------------------------------------------------------------------- 1 | # Ingress for Microsoft Fabric Analytics MCP Server 2 | apiVersion: networking.k8s.io/v1 3 | kind: Ingress 4 | metadata: 5 | name: fabric-analytics-mcp-ingress 6 | namespace: fabric-mcp 7 | labels: 8 | app: fabric-analytics-mcp 9 | annotations: 10 | # NGINX Ingress Controller annotations 11 | kubernetes.io/ingress.class: "nginx" 12 | nginx.ingress.kubernetes.io/rewrite-target: / 13 | nginx.ingress.kubernetes.io/ssl-redirect: "true" 14 | nginx.ingress.kubernetes.io/force-ssl-redirect: "true" 15 | 16 | # SSL/TLS configuration 17 | cert-manager.io/cluster-issuer: "letsencrypt-prod" 18 | nginx.ingress.kubernetes.io/ssl-protocols: "TLSv1.2 TLSv1.3" 19 | nginx.ingress.kubernetes.io/ssl-ciphers: "ECDHE-RSA-AES128-GCM-SHA256,ECDHE-RSA-AES256-GCM-SHA384" 20 | 21 | # Security headers 22 | nginx.ingress.kubernetes.io/server-snippet: | 23 | add_header X-Frame-Options "SAMEORIGIN" always; 24 | add_header X-Content-Type-Options "nosniff" always; 25 | add_header X-XSS-Protection "1; mode=block" always; 26 | add_header Referrer-Policy "strict-origin-when-cross-origin" always; 27 | add_header Content-Security-Policy "default-src 'self'; script-src 'self'; style-src 'self' 'unsafe-inline'; img-src 'self' data:; font-src 'self';" always; 28 | 29 | # Rate limiting 30 | nginx.ingress.kubernetes.io/rate-limit: "100" 31 | nginx.ingress.kubernetes.io/rate-limit-window: "1m" 32 | 33 | # CORS configuration 34 | nginx.ingress.kubernetes.io/enable-cors: "true" 35 | nginx.ingress.kubernetes.io/cors-allow-origin: "*" 36 | nginx.ingress.kubernetes.io/cors-allow-methods: "GET, POST, PUT, DELETE, OPTIONS" 37 | nginx.ingress.kubernetes.io/cors-allow-headers: "DNT,User-Agent,X-Requested-With,If-Modified-Since,Cache-Control,Content-Type,Range,Authorization" 38 | 39 | # Connection and timeout settings 40 | nginx.ingress.kubernetes.io/proxy-connect-timeout: "60" 41 | nginx.ingress.kubernetes.io/proxy-send-timeout: "60" 42 | nginx.ingress.kubernetes.io/proxy-read-timeout: "60" 43 | nginx.ingress.kubernetes.io/proxy-body-size: "10m" 44 | 45 | # Load balancing 46 | nginx.ingress.kubernetes.io/upstream-hash-by: "$remote_addr" 47 | nginx.ingress.kubernetes.io/affinity: "cookie" 48 | nginx.ingress.kubernetes.io/affinity-mode: "balanced" 49 | 50 | spec: 51 | tls: 52 | - hosts: 53 | - fabric-mcp.your-domain.com 54 | - api.fabric-mcp.your-domain.com 55 | secretName: fabric-mcp-tls-secret 56 | 57 | rules: 58 | - host: fabric-mcp.your-domain.com 59 | http: 60 | paths: 61 | - path: / 62 | pathType: Prefix 63 | backend: 64 | service: 65 | name: fabric-analytics-mcp-service 66 | port: 67 | number: 80 68 | 69 | - host: api.fabric-mcp.your-domain.com 70 | http: 71 | paths: 72 | - path: / 73 | pathType: Prefix 74 | backend: 75 | service: 76 | name: fabric-analytics-mcp-service 77 | port: 78 | number: 80 79 | --- 80 | # Azure Application Gateway Ingress (alternative to NGINX) 81 | 
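# Editorial note on this variant: apply it instead of the NGINX Ingress above, not alongside
# it; it assumes the AKS Application Gateway Ingress Controller (AGIC) add-on is installed,
# and the host names here, like those above, are placeholders to replace with your domain.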
apiVersion: networking.k8s.io/v1 82 | kind: Ingress 83 | metadata: 84 | name: fabric-analytics-mcp-appgw-ingress 85 | namespace: fabric-mcp 86 | labels: 87 | app: fabric-analytics-mcp 88 | annotations: 89 | kubernetes.io/ingress.class: azure/application-gateway 90 | appgw.ingress.kubernetes.io/ssl-redirect: "true" 91 | appgw.ingress.kubernetes.io/connection-draining: "true" 92 | appgw.ingress.kubernetes.io/connection-draining-timeout: "30" 93 | appgw.ingress.kubernetes.io/cookie-based-affinity: "true" 94 | appgw.ingress.kubernetes.io/request-timeout: "30" 95 | appgw.ingress.kubernetes.io/backend-path-prefix: "/" 96 | spec: 97 | tls: 98 | - hosts: 99 | - fabric-mcp-appgw.your-domain.com 100 | secretName: fabric-mcp-appgw-tls-secret 101 | 102 | rules: 103 | - host: fabric-mcp-appgw.your-domain.com 104 | http: 105 | paths: 106 | - path: / 107 | pathType: Prefix 108 | backend: 109 | service: 110 | name: fabric-analytics-mcp-service 111 | port: 112 | number: 80 113 | -------------------------------------------------------------------------------- /k8s/rbac.yaml: -------------------------------------------------------------------------------- 1 | # RBAC Configuration for Microsoft Fabric Analytics MCP Server 2 | # Service Account 3 | apiVersion: v1 4 | kind: ServiceAccount 5 | metadata: 6 | name: fabric-mcp-service-account 7 | namespace: fabric-mcp 8 | labels: 9 | app: fabric-analytics-mcp 10 | annotations: 11 | # Azure Workload Identity (if using Azure AD Pod Identity) 12 | azure.workload.identity/client-id: "your-managed-identity-client-id" 13 | azure.workload.identity/tenant-id: "your-tenant-id" 14 | automountServiceAccountToken: true 15 | --- 16 | # Cluster Role for minimal required permissions 17 | apiVersion: rbac.authorization.k8s.io/v1 18 | kind: ClusterRole 19 | metadata: 20 | name: fabric-mcp-cluster-role 21 | labels: 22 | app: fabric-analytics-mcp 23 | rules: 24 | # Minimal permissions for health checks and metrics 25 | - apiGroups: [""] 26 | resources: ["pods"] 27 | verbs: ["get", "list"] 28 | - apiGroups: ["apps"] 29 | resources: ["deployments"] 30 | verbs: ["get", "list"] 31 | # Permissions for service mesh integration (if using Istio/Linkerd) 32 | - apiGroups: ["networking.istio.io"] 33 | resources: ["virtualservices", "destinationrules"] 34 | verbs: ["get", "list"] 35 | --- 36 | # Role for namespace-specific permissions 37 | apiVersion: rbac.authorization.k8s.io/v1 38 | kind: Role 39 | metadata: 40 | namespace: fabric-mcp 41 | name: fabric-mcp-role 42 | labels: 43 | app: fabric-analytics-mcp 44 | rules: 45 | # ConfigMap access for dynamic configuration 46 | - apiGroups: [""] 47 | resources: ["configmaps"] 48 | verbs: ["get", "list", "watch"] 49 | # Secret access for credential management 50 | - apiGroups: [""] 51 | resources: ["secrets"] 52 | verbs: ["get", "list"] 53 | # Pod access for self-monitoring 54 | - apiGroups: [""] 55 | resources: ["pods"] 56 | verbs: ["get", "list", "watch"] 57 | # Service access for service discovery 58 | - apiGroups: [""] 59 | resources: ["services"] 60 | verbs: ["get", "list"] 61 | # Events for troubleshooting 62 | - apiGroups: [""] 63 | resources: ["events"] 64 | verbs: ["create", "patch"] 65 | --- 66 | # Cluster Role Binding 67 | apiVersion: rbac.authorization.k8s.io/v1 68 | kind: ClusterRoleBinding 69 | metadata: 70 | name: fabric-mcp-cluster-role-binding 71 | labels: 72 | app: fabric-analytics-mcp 73 | subjects: 74 | - kind: ServiceAccount 75 | name: fabric-mcp-service-account 76 | namespace: fabric-mcp 77 | roleRef: 78 | kind: ClusterRole 79 | 
name: fabric-mcp-cluster-role 80 | apiGroup: rbac.authorization.k8s.io 81 | --- 82 | # Role Binding for namespace permissions 83 | apiVersion: rbac.authorization.k8s.io/v1 84 | kind: RoleBinding 85 | metadata: 86 | name: fabric-mcp-role-binding 87 | namespace: fabric-mcp 88 | labels: 89 | app: fabric-analytics-mcp 90 | subjects: 91 | - kind: ServiceAccount 92 | name: fabric-mcp-service-account 93 | namespace: fabric-mcp 94 | roleRef: 95 | kind: Role 96 | name: fabric-mcp-role 97 | apiGroup: rbac.authorization.k8s.io 98 | --- 99 | # Pod Security Policy (if PSP is enabled) 100 | apiVersion: policy/v1beta1 101 | kind: PodSecurityPolicy 102 | metadata: 103 | name: fabric-mcp-psp 104 | labels: 105 | app: fabric-analytics-mcp 106 | spec: 107 | privileged: false 108 | allowPrivilegeEscalation: false 109 | requiredDropCapabilities: 110 | - ALL 111 | volumes: 112 | - 'configMap' 113 | - 'emptyDir' 114 | - 'projected' 115 | - 'secret' 116 | - 'downwardAPI' 117 | - 'persistentVolumeClaim' 118 | runAsUser: 119 | rule: 'MustRunAsNonRoot' 120 | runAsGroup: 121 | rule: 'MustRunAs' 122 | ranges: 123 | - min: 1001 124 | max: 1001 125 | seLinux: 126 | rule: 'RunAsAny' 127 | fsGroup: 128 | rule: 'RunAsAny' 129 | readOnlyRootFilesystem: true 130 | --- 131 | # Security Context Constraints (for OpenShift) 132 | apiVersion: security.openshift.io/v1 133 | kind: SecurityContextConstraints 134 | metadata: 135 | name: fabric-mcp-scc 136 | labels: 137 | app: fabric-analytics-mcp 138 | allowHostDirVolumePlugin: false 139 | allowHostIPC: false 140 | allowHostNetwork: false 141 | allowHostPID: false 142 | allowHostPorts: false 143 | allowPrivilegedContainer: false 144 | allowedCapabilities: [] 145 | defaultAddCapabilities: [] 146 | fsGroup: 147 | type: RunAsAny 148 | readOnlyRootFilesystem: true 149 | requiredDropCapabilities: 150 | - ALL 151 | runAsUser: 152 | type: MustRunAsNonRoot 153 | seLinuxContext: 154 | type: RunAsAny 155 | users: 156 | - system:serviceaccount:fabric-mcp:fabric-mcp-service-account 157 | -------------------------------------------------------------------------------- /monitoring/prometheus.yml: -------------------------------------------------------------------------------- 1 | # Prometheus configuration for Microsoft Fabric Analytics MCP Server 2 | global: 3 | scrape_interval: 15s 4 | evaluation_interval: 15s 5 | 6 | # Alertmanager configuration 7 | alerting: 8 | alertmanagers: 9 | - static_configs: 10 | - targets: 11 | # - alertmanager:9093 12 | 13 | # Load rules once and periodically evaluate them according to the global 'evaluation_interval'. 14 | rule_files: 15 | # - "first_rules.yml" 16 | # - "second_rules.yml" 17 | 18 | # A scrape configuration containing exactly one endpoint to scrape: 19 | scrape_configs: 20 | # The job name is added as a label `job=<job_name>` to any timeseries scraped from this config.
21 | - job_name: "prometheus" 22 | static_configs: 23 | - targets: ["localhost:9090"] 24 | 25 | # Microsoft Fabric Analytics MCP Server 26 | - job_name: "fabric-mcp" 27 | static_configs: 28 | - targets: ["fabric-mcp:3000"] 29 | metrics_path: "/metrics" 30 | scrape_interval: 30s 31 | scrape_timeout: 10s 32 | honor_labels: true 33 | 34 | # Node Exporter (system metrics) 35 | - job_name: "node-exporter" 36 | static_configs: 37 | - targets: ["node-exporter:9100"] 38 | scrape_interval: 30s 39 | 40 | # Redis metrics (if using Redis) 41 | - job_name: "redis" 42 | static_configs: 43 | - targets: ["redis:6379"] 44 | scrape_interval: 30s 45 | 46 | # Kubernetes cluster metrics (when running in K8s) 47 | - job_name: 'kubernetes-apiservers' 48 | kubernetes_sd_configs: 49 | - role: endpoints 50 | scheme: https 51 | tls_config: 52 | ca_file: /var/run/secrets/kubernetes.io/serviceaccount/ca.crt 53 | bearer_token_file: /var/run/secrets/kubernetes.io/serviceaccount/token 54 | relabel_configs: 55 | - source_labels: [__meta_kubernetes_namespace, __meta_kubernetes_service_name, __meta_kubernetes_endpoint_port_name] 56 | action: keep 57 | regex: default;kubernetes;https 58 | 59 | - job_name: 'kubernetes-nodes' 60 | kubernetes_sd_configs: 61 | - role: node 62 | scheme: https 63 | tls_config: 64 | ca_file: /var/run/secrets/kubernetes.io/serviceaccount/ca.crt 65 | bearer_token_file: /var/run/secrets/kubernetes.io/serviceaccount/token 66 | relabel_configs: 67 | - action: labelmap 68 | regex: __meta_kubernetes_node_label_(.+) 69 | - target_label: __address__ 70 | replacement: kubernetes.default.svc:443 71 | - source_labels: [__meta_kubernetes_node_name] 72 | regex: (.+) 73 | target_label: __metrics_path__ 74 | replacement: /api/v1/nodes/${1}/proxy/metrics 75 | 76 | - job_name: 'kubernetes-pods' 77 | kubernetes_sd_configs: 78 | - role: pod 79 | relabel_configs: 80 | - source_labels: [__meta_kubernetes_pod_annotation_prometheus_io_scrape] 81 | action: keep 82 | regex: true 83 | - source_labels: [__meta_kubernetes_pod_annotation_prometheus_io_path] 84 | action: replace 85 | target_label: __metrics_path__ 86 | regex: (.+) 87 | - source_labels: [__address__, __meta_kubernetes_pod_annotation_prometheus_io_port] 88 | action: replace 89 | regex: ([^:]+)(?::\d+)?;(\d+) 90 | replacement: $1:$2 91 | target_label: __address__ 92 | - action: labelmap 93 | regex: __meta_kubernetes_pod_label_(.+) 94 | - source_labels: [__meta_kubernetes_namespace] 95 | action: replace 96 | target_label: kubernetes_namespace 97 | - source_labels: [__meta_kubernetes_pod_name] 98 | action: replace 99 | target_label: kubernetes_pod_name 100 | 101 | # MCP Server specific metrics for Kubernetes 102 | - job_name: 'fabric-mcp-k8s' 103 | kubernetes_sd_configs: 104 | - role: pod 105 | namespaces: 106 | names: 107 | - fabric-mcp 108 | relabel_configs: 109 | - source_labels: [__meta_kubernetes_pod_label_app] 110 | action: keep 111 | regex: fabric-analytics-mcp 112 | - source_labels: [__meta_kubernetes_pod_annotation_prometheus_io_scrape] 113 | action: keep 114 | regex: true 115 | - source_labels: [__meta_kubernetes_pod_annotation_prometheus_io_path] 116 | action: replace 117 | target_label: __metrics_path__ 118 | regex: (.+) 119 | - source_labels: [__address__, __meta_kubernetes_pod_annotation_prometheus_io_port] 120 | action: replace 121 | regex: ([^:]+)(?::\d+)?;(\d+) 122 | replacement: $1:$2 123 | target_label: __address__ 124 | - action: labelmap 125 | regex: __meta_kubernetes_pod_label_(.+) 126 | - source_labels: [__meta_kubernetes_namespace] 127 | 
action: replace 128 | target_label: kubernetes_namespace 129 | - source_labels: [__meta_kubernetes_pod_name] 130 | action: replace 131 | target_label: kubernetes_pod_name 132 | -------------------------------------------------------------------------------- /scripts/test-azure-cli-auth.js: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env node 2 | 3 | /** 4 | * Test script for Azure CLI authentication 5 | * This script helps users verify their Azure CLI setup and test authentication 6 | */ 7 | 8 | const { exec } = require('child_process'); 9 | const { promisify } = require('util'); 10 | 11 | const execAsync = promisify(exec); 12 | 13 | async function testAzureCliAuth() { 14 | console.log('🔍 Testing Azure CLI Authentication Setup\n'); 15 | 16 | try { 17 | // Step 1: Check Azure CLI installation 18 | console.log('1️⃣ Checking Azure CLI installation...'); 19 | await execAsync('az --version'); 20 | console.log('✅ Azure CLI is installed\n'); 21 | 22 | // Step 2: Check login status 23 | console.log('2️⃣ Checking Azure login status...'); 24 | const { stdout: accountInfo } = await execAsync('az account show --output json'); 25 | const account = JSON.parse(accountInfo); 26 | 27 | console.log('✅ Successfully logged in to Azure'); 28 | console.log(` Account: ${account.user?.name || account.user?.type || 'Unknown'}`); 29 | console.log(` Subscription: ${account.name} (${account.id})`); 30 | console.log(` Tenant: ${account.tenantId}\n`); 31 | 32 | // Step 3: Test getting Fabric token 33 | console.log('3️⃣ Testing Microsoft Fabric token acquisition...'); 34 | const fabricScope = 'https://api.fabric.microsoft.com/.default'; 35 | const { stdout: tokenInfo } = await execAsync(`az account get-access-token --scope "${fabricScope}" --output json`); 36 | const token = JSON.parse(tokenInfo); 37 | 38 | if (token.accessToken) { 39 | console.log('✅ Successfully obtained Microsoft Fabric access token'); 40 | console.log(` Token type: ${token.tokenType || 'Bearer'}`); 41 | console.log(` Expires: ${new Date(token.expiresOn).toISOString()}\n`); 42 | } else { 43 | throw new Error('Failed to get access token'); 44 | } 45 | 46 | // Step 4: Test Power BI token (alternative scope) 47 | console.log('4️⃣ Testing Power BI API token acquisition...'); 48 | const powerBiScope = 'https://analysis.windows.net/powerbi/api/.default'; 49 | const { stdout: pbiTokenInfo } = await execAsync(`az account get-access-token --scope "${powerBiScope}" --output json`); 50 | const pbiToken = JSON.parse(pbiTokenInfo); 51 | 52 | if (pbiToken.accessToken) { 53 | console.log('✅ Successfully obtained Power BI access token'); 54 | console.log(` Token type: ${pbiToken.tokenType || 'Bearer'}`); 55 | console.log(` Expires: ${new Date(pbiToken.expiresOn).toISOString()}\n`); 56 | } else { 57 | throw new Error('Failed to get Power BI access token'); 58 | } 59 | 60 | console.log('🎉 All tests passed! You can now use Azure CLI authentication with the MCP server.\n'); 61 | 62 | console.log('📋 To use Azure CLI authentication, set the following environment variable:'); 63 | console.log(' FABRIC_AUTH_METHOD=azure_cli\n'); 64 | 65 | console.log('💡 Example usage:'); 66 | console.log(' export FABRIC_AUTH_METHOD=azure_cli'); 67 | console.log(' npm run start\n'); 68 | 69 | } catch (error) { 70 | const errorMessage = error instanceof Error ? 
error.message : String(error); 71 | 72 | if (errorMessage.includes('az: command not found') || errorMessage.includes("'az' is not recognized")) { 73 | console.error('❌ Azure CLI is not installed'); 74 | console.error('\n📥 Install Azure CLI:'); 75 | console.error(' Windows: https://docs.microsoft.com/en-us/cli/azure/install-azure-cli-windows'); 76 | console.error(' macOS: brew install azure-cli'); 77 | console.error(' Linux: https://docs.microsoft.com/en-us/cli/azure/install-azure-cli-linux'); 78 | 79 | } else if (errorMessage.includes('Please run')) { 80 | console.error('❌ Azure CLI is installed but you are not logged in'); 81 | console.error('\n🔐 Login to Azure:'); 82 | console.error(' az login'); 83 | console.error(' # Follow the browser prompts to complete authentication'); 84 | 85 | } else if (errorMessage.includes('AADSTS')) { 86 | console.error('❌ Authentication error - your login may have expired'); 87 | console.error('\n🔄 Try logging in again:'); 88 | console.error(' az logout'); 89 | console.error(' az login'); 90 | 91 | } else { 92 | console.error(`❌ Test failed: ${errorMessage}`); 93 | console.error('\n🔧 Troubleshooting:'); 94 | console.error(' 1. Ensure you have the correct permissions for Microsoft Fabric'); 95 | console.error(' 2. Try: az login --tenant <your-tenant-id>'); 96 | console.error(' 3. Verify your account has access to Fabric workspaces'); 97 | } 98 | 99 | process.exit(1); 100 | } 101 | } 102 | 103 | testAzureCliAuth().catch(console.error); 104 | -------------------------------------------------------------------------------- /docker-compose.yml: -------------------------------------------------------------------------------- 1 | # Docker Compose for Microsoft Fabric Analytics MCP Server 2 | # Provides local development and testing environment 3 | 4 | services: 5 | # Main MCP Server 6 | fabric-mcp: 7 | build: 8 | context: .
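# Build context is the repository root; the dockerfile key below is resolved relative to it.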
9 | dockerfile: Dockerfile 10 | ports: 11 | - "3000:3000" 12 | environment: 13 | - NODE_ENV=production 14 | - PORT=3000 15 | - LOG_LEVEL=info 16 | - ENABLE_HEALTH_SERVER=true 17 | 18 | # Authentication configuration 19 | - FABRIC_AUTH_METHOD=${FABRIC_AUTH_METHOD:-service_principal} 20 | - FABRIC_CLIENT_ID=${FABRIC_CLIENT_ID} 21 | - FABRIC_CLIENT_SECRET=${FABRIC_CLIENT_SECRET} 22 | - FABRIC_TENANT_ID=${FABRIC_TENANT_ID} 23 | - FABRIC_DEFAULT_WORKSPACE_ID=${FABRIC_DEFAULT_WORKSPACE_ID} 24 | 25 | # API configuration 26 | - FABRIC_API_BASE_URL=https://api.fabric.microsoft.com 27 | - FABRIC_API_VERSION=v1 28 | 29 | # Performance settings 30 | - MAX_CONCURRENT_REQUESTS=100 31 | - REQUEST_TIMEOUT=30000 32 | - ENABLE_REQUEST_LOGGING=true 33 | 34 | # Health check configuration 35 | - HEALTH_CHECK_ENDPOINT=/health 36 | - READINESS_CHECK_ENDPOINT=/ready 37 | - METRICS_ENDPOINT=/metrics 38 | 39 | # Security settings 40 | - ENABLE_CORS=true 41 | - CORS_ORIGIN=* 42 | - ENABLE_HELMET=true 43 | 44 | healthcheck: 45 | test: ["CMD", "curl", "-f", "http://localhost:3000/health"] 46 | interval: 30s 47 | timeout: 10s 48 | retries: 3 49 | start_period: 40s 50 | 51 | restart: unless-stopped 52 | 53 | volumes: 54 | # Mount logs directory for persistence 55 | - ./logs:/app/logs 56 | 57 | labels: 58 | - "traefik.enable=true" 59 | - "traefik.http.routers.fabric-mcp.rule=Host(`mcp.localhost`)" 60 | - "traefik.http.routers.fabric-mcp.entrypoints=web" 61 | - "traefik.http.services.fabric-mcp.loadbalancer.server.port=3000" 62 | 63 | # Redis for caching (optional) 64 | redis: 65 | image: redis:7-alpine 66 | ports: 67 | - "6379:6379" 68 | environment: 69 | - REDIS_PASSWORD=${REDIS_PASSWORD:-} 70 | command: redis-server --appendonly yes 71 | volumes: 72 | - redis_data:/data 73 | healthcheck: 74 | test: ["CMD", "redis-cli", "ping"] 75 | interval: 30s 76 | timeout: 3s 77 | retries: 5 78 | restart: unless-stopped 79 | 80 | # Traefik reverse proxy (optional) 81 | traefik: 82 | image: traefik:v3.0 83 | ports: 84 | - "80:80" 85 | - "8080:8080" # Traefik dashboard 86 | command: 87 | - "--api.insecure=true" 88 | - "--providers.docker=true" 89 | - "--providers.docker.exposedbydefault=false" 90 | - "--entrypoints.web.address=:80" 91 | - "--accesslog=true" 92 | - "--log.level=INFO" 93 | volumes: 94 | - /var/run/docker.sock:/var/run/docker.sock:ro 95 | restart: unless-stopped 96 | profiles: 97 | - "reverse-proxy" 98 | 99 | # Prometheus for metrics collection (optional) 100 | prometheus: 101 | image: prom/prometheus:latest 102 | ports: 103 | - "9090:9090" 104 | command: 105 | - '--config.file=/etc/prometheus/prometheus.yml' 106 | - '--storage.tsdb.path=/prometheus' 107 | - '--web.console.libraries=/etc/prometheus/console_libraries' 108 | - '--web.console.templates=/etc/prometheus/consoles' 109 | - '--storage.tsdb.retention.time=200h' 110 | - '--web.enable-lifecycle' 111 | volumes: 112 | - ./monitoring/prometheus.yml:/etc/prometheus/prometheus.yml 113 | - prometheus_data:/prometheus 114 | restart: unless-stopped 115 | profiles: 116 | - "monitoring" 117 | 118 | # Grafana for metrics visualization (optional) 119 | grafana: 120 | image: grafana/grafana:latest 121 | ports: 122 | - "3001:3000" 123 | environment: 124 | - GF_SECURITY_ADMIN_PASSWORD=${GRAFANA_PASSWORD:-admin} 125 | - GF_USERS_ALLOW_SIGN_UP=false 126 | volumes: 127 | - grafana_data:/var/lib/grafana 128 | - ./monitoring/grafana/dashboards:/etc/grafana/provisioning/dashboards 129 | - ./monitoring/grafana/datasources:/etc/grafana/provisioning/datasources 130 | restart: 
unless-stopped 131 | profiles: 132 | - "monitoring" 133 | depends_on: 134 | - prometheus 135 | 136 | # Node Exporter for system metrics (optional) 137 | node-exporter: 138 | image: prom/node-exporter:latest 139 | ports: 140 | - "9100:9100" 141 | volumes: 142 | - /proc:/host/proc:ro 143 | - /sys:/host/sys:ro 144 | - /:/rootfs:ro 145 | command: 146 | - '--path.procfs=/host/proc' 147 | - '--path.rootfs=/rootfs' 148 | - '--path.sysfs=/host/sys' 149 | - '--collector.filesystem.mount-points-exclude=^/(sys|proc|dev|host|etc)($$|/)' 150 | restart: unless-stopped 151 | profiles: 152 | - "monitoring" 153 | 154 | volumes: 155 | redis_data: 156 | driver: local 157 | prometheus_data: 158 | driver: local 159 | grafana_data: 160 | driver: local 161 | 162 | networks: 163 | default: 164 | name: fabric-mcp-network 165 | driver: bridge 166 | -------------------------------------------------------------------------------- /k8s/deployment.yaml: -------------------------------------------------------------------------------- 1 | # Deployment for Microsoft Fabric Analytics MCP Server 2 | apiVersion: apps/v1 3 | kind: Deployment 4 | metadata: 5 | name: fabric-analytics-mcp 6 | namespace: fabric-mcp 7 | labels: 8 | app: fabric-analytics-mcp 9 | version: v1 10 | annotations: 11 | deployment.kubernetes.io/revision: "1" 12 | spec: 13 | replicas: 3 14 | strategy: 15 | type: RollingUpdate 16 | rollingUpdate: 17 | maxSurge: 2 18 | maxUnavailable: 1 19 | selector: 20 | matchLabels: 21 | app: fabric-analytics-mcp 22 | template: 23 | metadata: 24 | labels: 25 | app: fabric-analytics-mcp 26 | version: v1 27 | annotations: 28 | prometheus.io/scrape: "true" 29 | prometheus.io/port: "3000" 30 | prometheus.io/path: "/metrics" 31 | spec: 32 | serviceAccountName: fabric-mcp-service-account 33 | securityContext: 34 | runAsNonRoot: true 35 | runAsUser: 1001 36 | runAsGroup: 1001 37 | fsGroup: 1001 38 | seccompProfile: 39 | type: RuntimeDefault 40 | containers: 41 | - name: fabric-analytics-mcp 42 | image: your-acr-registry.azurecr.io/fabric-analytics-mcp:latest 43 | imagePullPolicy: Always 44 | ports: 45 | - containerPort: 3000 46 | name: http 47 | protocol: TCP 48 | 49 | # Environment variables from ConfigMap 50 | envFrom: 51 | - configMapRef: 52 | name: fabric-mcp-config 53 | 54 | # Sensitive environment variables from Secret 55 | env: 56 | - name: FABRIC_AUTH_METHOD 57 | value: "service_principal" 58 | - name: FABRIC_CLIENT_ID 59 | valueFrom: 60 | secretKeyRef: 61 | name: fabric-mcp-secrets 62 | key: FABRIC_CLIENT_ID 63 | - name: FABRIC_CLIENT_SECRET 64 | valueFrom: 65 | secretKeyRef: 66 | name: fabric-mcp-secrets 67 | key: FABRIC_CLIENT_SECRET 68 | - name: FABRIC_TENANT_ID 69 | valueFrom: 70 | secretKeyRef: 71 | name: fabric-mcp-secrets 72 | key: FABRIC_TENANT_ID 73 | - name: FABRIC_DEFAULT_WORKSPACE_ID 74 | valueFrom: 75 | secretKeyRef: 76 | name: fabric-mcp-secrets 77 | key: FABRIC_DEFAULT_WORKSPACE_ID 78 | optional: true 79 | 80 | # Resource limits and requests 81 | resources: 82 | requests: 83 | memory: "256Mi" 84 | cpu: "100m" 85 | limits: 86 | memory: "512Mi" 87 | cpu: "500m" 88 | 89 | # Security context 90 | securityContext: 91 | allowPrivilegeEscalation: false 92 | readOnlyRootFilesystem: true 93 | runAsNonRoot: true 94 | runAsUser: 1001 95 | capabilities: 96 | drop: 97 | - ALL 98 | 99 | # Liveness probe 100 | livenessProbe: 101 | httpGet: 102 | path: /health 103 | port: 3000 104 | scheme: HTTP 105 | initialDelaySeconds: 30 106 | periodSeconds: 10 107 | timeoutSeconds: 5 108 | successThreshold: 1 109 | failureThreshold: 
3 110 | 111 | # Readiness probe 112 | readinessProbe: 113 | httpGet: 114 | path: /ready 115 | port: 3000 116 | scheme: HTTP 117 | initialDelaySeconds: 10 118 | periodSeconds: 5 119 | timeoutSeconds: 3 120 | successThreshold: 1 121 | failureThreshold: 3 122 | 123 | # Startup probe 124 | startupProbe: 125 | httpGet: 126 | path: /health 127 | port: 3000 128 | scheme: HTTP 129 | initialDelaySeconds: 10 130 | periodSeconds: 10 131 | timeoutSeconds: 3 132 | successThreshold: 1 133 | failureThreshold: 30 134 | 135 | # Volume mounts for temporary files 136 | volumeMounts: 137 | - name: tmp-volume 138 | mountPath: /tmp 139 | - name: cache-volume 140 | mountPath: /app/.cache 141 | 142 | # Volumes 143 | volumes: 144 | - name: tmp-volume 145 | emptyDir: {} 146 | - name: cache-volume 147 | emptyDir: {} 148 | 149 | # Pod affinity for better distribution 150 | affinity: 151 | podAntiAffinity: 152 | preferredDuringSchedulingIgnoredDuringExecution: 153 | - weight: 100 154 | podAffinityTerm: 155 | labelSelector: 156 | matchLabels: 157 | app: fabric-analytics-mcp 158 | topologyKey: kubernetes.io/hostname 159 | 160 | # Tolerations for node taints 161 | tolerations: 162 | - key: "node.kubernetes.io/not-ready" 163 | operator: "Exists" 164 | effect: "NoExecute" 165 | tolerationSeconds: 300 166 | - key: "node.kubernetes.io/unreachable" 167 | operator: "Exists" 168 | effect: "NoExecute" 169 | tolerationSeconds: 300 170 | 171 | # Termination grace period 172 | terminationGracePeriodSeconds: 30 173 | -------------------------------------------------------------------------------- /docs/WORKSPACE_MANAGEMENT.md: -------------------------------------------------------------------------------- 1 | # Microsoft Fabric Workspace Management 2 | 3 | ## Overview 4 | Enhanced workspace management tools to make it easier for users to work with Microsoft Fabric workspaces without needing to remember complex GUIDs. 5 | 6 | ## New Tools Added 7 | 8 | ### 1. `fabric_list_workspaces` 9 | **Enhanced to support the official Microsoft Fabric Admin API** 10 | 11 | **Description**: List all workspaces accessible to the user using the admin API endpoint. 12 | 13 | **API Endpoint**: `GET https://api.fabric.microsoft.com/v1/admin/workspaces` 14 | 15 | **Parameters**: 16 | - `bearerToken` (optional): Bearer token for authentication 17 | - `type` (optional): Filter by workspace type 18 | - `capacityId` (optional): Filter by capacity ID 19 | - `name` (optional): Filter by workspace name 20 | - `state` (optional): Filter by state (Active, Deleted, etc.) 21 | - `continuationToken` (optional): For pagination 22 | 23 | **Example Usage**: 24 | ``` 25 | fabric_list_workspaces 26 | fabric_list_workspaces name="Sales" state="Active" 27 | fabric_list_workspaces type="Workspace" 28 | ``` 29 | 30 | **Sample Response**: 31 | ``` 32 | Workspaces (5 found): 33 | 34 | 1. Sales Analytics Workspace (Workspace) 35 | ID: 41ce06d1-d81b-4ea0-bc6d-2ce3dd2f8e87 36 | State: Active 37 | Capacity ID: 41ce06d1-d81b-4ea0-bc6d-2ce3dd2f8e84 38 | 39 | 2. Marketing Data Hub (Workspace) 40 | ID: 52df17e2-e92c-5fb1-cd7e-3df4ee3f9f98 41 | State: Active 42 | Capacity ID: 52df17e2-e92c-5fb1-cd7e-3df4ee3f9f95 43 | ``` 44 | 45 | ### 2. `fabric_find_workspace` 46 | **New tool for easy workspace discovery** 47 | 48 | **Description**: Find workspace by name and get its ID for use in other operations. This is especially useful when you know the workspace name but need the GUID for other API calls. 
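A minimal sketch of the name lookup this tool performs (hypothetical helper, assuming case-insensitive substring matching; `WorkspaceInfo` is the shape shown under Response Format below):

```typescript
// Hypothetical illustration of fabric_find_workspace's partial matching.
interface WorkspaceInfo { id: string; name: string; type: string; state: string; capacityId?: string; }

function findWorkspacesByName(workspaces: WorkspaceInfo[], searchName: string): WorkspaceInfo[] {
  const needle = searchName.toLowerCase();
  // Partial match: any workspace whose name contains the search term qualifies.
  return workspaces.filter(ws => ws.name.toLowerCase().includes(needle));
}
```

A single match produces the detailed response shown below; multiple matches list every candidate so you can pick the ID you need.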
49 | 50 | **Parameters**: 51 | - `bearerToken` (optional): Bearer token for authentication 52 | - `searchName` (required): Workspace name to search for (supports partial matching) 53 | 54 | **Example Usage**: 55 | ``` 56 | fabric_find_workspace searchName="Sales" 57 | fabric_find_workspace searchName="Analytics" 58 | ``` 59 | 60 | **Sample Response** (single match): 61 | ``` 62 | ✅ Found workspace: "Sales Analytics Workspace" 63 | 64 | 📋 Details: 65 | • ID: 41ce06d1-d81b-4ea0-bc6d-2ce3dd2f8e87 66 | • Type: Workspace 67 | • State: Active 68 | • Capacity ID: 41ce06d1-d81b-4ea0-bc6d-2ce3dd2f8e84 69 | 70 | 💡 You can now use this workspace ID (41ce06d1-d81b-4ea0-bc6d-2ce3dd2f8e87) in other operations! 71 | ``` 72 | 73 | **Sample Response** (multiple matches): 74 | ``` 75 | Found 3 workspaces matching "Analytics": 76 | 77 | 1. "Sales Analytics Workspace" 78 | ID: 41ce06d1-d81b-4ea0-bc6d-2ce3dd2f8e87 79 | Type: Workspace 80 | State: Active 81 | Capacity ID: 41ce06d1-d81b-4ea0-bc6d-2ce3dd2f8e84 82 | 83 | 2. "HR Analytics" 84 | ID: 63e028f3-fa3d-6gc2-de8f-4ef5ff4a0a09 85 | Type: Workspace 86 | State: Active 87 | Capacity ID: 63e028f3-fa3d-6gc2-de8f-4ef5ff4a0a06 88 | 89 | 💡 Copy the ID of the workspace you want to use for other operations. 90 | ``` 91 | 92 | ## Typical User Workflow 93 | 94 | ### Before (Cumbersome): 95 | 1. User needs to manually provide workspace GUID 96 | 2. Hard to remember or find the correct GUID 97 | 3. Error-prone copy/paste operations 98 | 99 | ### After (User-Friendly): 100 | 1. **Discover workspaces**: Use `fabric_list_workspaces` to see all available workspaces 101 | 2. **Find specific workspace**: Use `fabric_find_workspace searchName="MyWorkspace"` to get the GUID 102 | 3. **Use in other operations**: Copy the GUID and use it in other fabric tools 103 | 104 | ## API Implementation Details 105 | 106 | ### Enhanced FabricApiClient Methods 107 | 108 | **`listWorkspaces(type?, capacityId?, name?, state?, continuationToken?)`** 109 | - Uses admin API endpoint: `/admin/workspaces` 110 | - Supports all official API parameters 111 | - Returns properly typed `WorkspacesResponse` 112 | 113 | **`simulateWorkspaces(type?, capacityId?, name?, state?)`** 114 | - Provides realistic test data for development/testing 115 | - Supports all filtering parameters 116 | - Returns same structure as real API 117 | 118 | ### Response Format 119 | Follows the official Microsoft Fabric API response structure: 120 | ```typescript 121 | interface WorkspacesResponse { 122 | workspaces: WorkspaceInfo[]; 123 | continuationUri?: string; 124 | continuationToken?: string; 125 | } 126 | 127 | interface WorkspaceInfo { 128 | id: string; 129 | name: string; 130 | type: string; 131 | state: string; 132 | capacityId?: string; 133 | } 134 | ``` 135 | 136 | ## Benefits 137 | 138 | 1. **User Experience**: No more need to remember or look up workspace GUIDs 139 | 2. **Discoverability**: Easy to find available workspaces 140 | 3. **Filtering**: Support for all official API filters 141 | 4. **Pagination**: Built-in support for large workspace lists 142 | 5. **Error Handling**: Helpful error messages and suggestions 143 | 6. 
**Standards Compliance**: Uses official Microsoft Fabric Admin API 144 | 145 | ## Security Considerations 146 | 147 | - Uses admin API endpoints which require appropriate permissions 148 | - Respects bearer token authentication 149 | - Follows Microsoft Fabric API security guidelines 150 | - Filters sensitive information appropriately 151 | 152 | ## Testing 153 | 154 | The implementation includes simulation data for testing without requiring live API access: 155 | - 6 sample workspaces with various states and types 156 | - Realistic GUIDs and capacity IDs 157 | - Support for all filtering scenarios 158 | - Pagination testing support 159 | -------------------------------------------------------------------------------- /src/migration/capacity-tools.ts: -------------------------------------------------------------------------------- 1 | /** 2 | * Fabric Capacity Management Tools 3 | * Handles capacity listing, assignment, and SKU calculations 4 | */ 5 | 6 | import { execSync } from 'child_process'; 7 | 8 | export interface FabricCapacity { 9 | id: string; 10 | displayName: string; 11 | sku: string; 12 | state: string; 13 | region: string; 14 | sparkVCores: number; 15 | maxSparkVCoresWithBurst: number; 16 | queueLimit?: number; 17 | } 18 | 19 | // Capacity SKU specifications based on Microsoft documentation 20 | const CAPACITY_SPECS: Record<string, { sparkVCores: number; burstFactor: number; queueLimit: number }> = { 21 | 'F2': { sparkVCores: 4, burstFactor: 5, queueLimit: 4 }, 22 | 'F4': { sparkVCores: 8, burstFactor: 3, queueLimit: 4 }, 23 | 'F8': { sparkVCores: 16, burstFactor: 3, queueLimit: 8 }, 24 | 'F16': { sparkVCores: 32, burstFactor: 3, queueLimit: 16 }, 25 | 'F32': { sparkVCores: 64, burstFactor: 3, queueLimit: 32 }, 26 | 'F64': { sparkVCores: 128, burstFactor: 3, queueLimit: 64 }, 27 | 'F128': { sparkVCores: 256, burstFactor: 3, queueLimit: 128 }, 28 | 'F256': { sparkVCores: 512, burstFactor: 3, queueLimit: 256 }, 29 | 'F512': { sparkVCores: 1024, burstFactor: 3, queueLimit: 512 }, 30 | 'F1024': { sparkVCores: 2048, burstFactor: 3, queueLimit: 1024 }, 31 | 'F2048': { sparkVCores: 4096, burstFactor: 3, queueLimit: 2048 }, 32 | 'FT1': { sparkVCores: 128, burstFactor: 1, queueLimit: 0 }, // Trial 33 | 'P1': { sparkVCores: 128, burstFactor: 3, queueLimit: 64 }, 34 | 'P2': { sparkVCores: 256, burstFactor: 3, queueLimit: 128 }, 35 | 'P3': { sparkVCores: 512, burstFactor: 3, queueLimit: 256 }, 36 | 'P4': { sparkVCores: 1024, burstFactor: 3, queueLimit: 512 }, 37 | 'P5': { sparkVCores: 2048, burstFactor: 3, queueLimit: 1024 } 38 | }; 39 | 40 | export class CapacityManager { 41 | private async getAccessToken(): Promise<string> { 42 | const tokenOutput = execSync( 43 | 'az account get-access-token --resource https://api.fabric.microsoft.com --query accessToken -o tsv', 44 | { encoding: 'utf-8' } 45 | ); 46 | return tokenOutput.trim(); 47 | } 48 | 49 | /** 50 | * Get Spark VCore specifications for a capacity SKU 51 | */ 52 | public getCapacitySpecs(sku: string): { sparkVCores: number; maxSparkVCoresWithBurst: number; queueLimit: number } { 53 | const specs = CAPACITY_SPECS[sku]; 54 | if (!specs) { 55 | throw new Error(`Unknown capacity SKU: ${sku}`); 56 | } 57 | 58 | return { 59 | sparkVCores: specs.sparkVCores, 60 | maxSparkVCoresWithBurst: specs.sparkVCores * specs.burstFactor, 61 | queueLimit: specs.queueLimit 62 | }; 63 | } 64 | 65 | /** 66 | * List all available Fabric capacities 67 | */ 68 | public async listCapacities(): Promise<FabricCapacity[]> { 69 | const token = await this.getAccessToken(); 70 | 71 | const response = await fetch('https://api.fabric.microsoft.com/v1/capacities',
{ 72 | headers: { 'Authorization': `Bearer ${token}` } 73 | }); 74 | 75 | if (!response.ok) { 76 | throw new Error(`Failed to list capacities: ${response.status} ${await response.text()}`); 77 | } 78 | 79 | const data = await response.json(); 80 | const capacities = data.value || []; 81 | 82 | return capacities.map((cap: any) => { 83 | const specs = this.getCapacitySpecs(cap.sku); 84 | return { 85 | id: cap.id, 86 | displayName: cap.displayName || cap.sku, 87 | sku: cap.sku, 88 | state: cap.state, 89 | region: cap.region, 90 | sparkVCores: specs.sparkVCores, 91 | maxSparkVCoresWithBurst: specs.maxSparkVCoresWithBurst, 92 | queueLimit: specs.queueLimit 93 | }; 94 | }); 95 | } 96 | 97 | /** 98 | * Select the best (largest) capacity from available list 99 | */ 100 | public selectBestCapacity(capacities: FabricCapacity[]): FabricCapacity { 101 | const active = capacities.filter(c => c.state === 'Active'); 102 | 103 | if (active.length === 0) { 104 | throw new Error('No active capacities available'); 105 | } 106 | 107 | // Sort by Spark VCores descending 108 | active.sort((a, b) => b.sparkVCores - a.sparkVCores); 109 | return active[0]; 110 | } 111 | 112 | /** 113 | * Assign capacity to a workspace 114 | */ 115 | public async assignCapacity(workspaceId: string, capacityId: string): Promise<void> { 116 | const token = await this.getAccessToken(); 117 | 118 | const response = await fetch( 119 | `https://api.fabric.microsoft.com/v1/workspaces/${workspaceId}/assignToCapacity`, 120 | { 121 | method: 'POST', 122 | headers: { 123 | 'Authorization': `Bearer ${token}`, 124 | 'Content-Type': 'application/json' 125 | }, 126 | body: JSON.stringify({ capacityId }) 127 | } 128 | ); 129 | 130 | if (!response.ok) { 131 | throw new Error(`Failed to assign capacity: ${response.status} ${await response.text()}`); 132 | } 133 | 134 | console.error(`✅ Successfully assigned capacity ${capacityId} to workspace ${workspaceId}`); 135 | } 136 | 137 | /** 138 | * Get workspace capacity assignment 139 | */ 140 | public async getWorkspaceCapacity(workspaceId: string): Promise<string | null> { 141 | const token = await this.getAccessToken(); 142 | 143 | const response = await fetch(`https://api.fabric.microsoft.com/v1/workspaces/${workspaceId}`, { 144 | headers: { 'Authorization': `Bearer ${token}` } 145 | }); 146 | 147 | if (!response.ok) { 148 | throw new Error(`Failed to get workspace: ${response.status} ${await response.text()}`); 149 | } 150 | 151 | const workspace = await response.json(); 152 | return workspace.capacityId || null; 153 | } 154 | } 155 | -------------------------------------------------------------------------------- /CONTRIBUTING.md: -------------------------------------------------------------------------------- 1 | # Contributing to Microsoft Fabric Analytics MCP Server 2 | 3 | First off, thank you for considering contributing to this project! 🎉 4 | 5 | ## Code of Conduct 6 | 7 | This project adheres to a Code of Conduct. By participating, you are expected to uphold this code. Please report unacceptable behavior to the project maintainers. 8 | 9 | ## How Can I Contribute? 10 | 11 | ### 🐛 Reporting Bugs 12 | 13 | Before creating bug reports, please check the issue list as you might find out that you don't need to create one.
When you are creating a bug report, please include as many details as possible: 14 | 15 | - **Use a clear and descriptive title** 16 | - **Describe the exact steps to reproduce the problem** 17 | - **Provide specific examples to demonstrate the steps** 18 | - **Describe the behavior you observed and what behavior you expected** 19 | - **Include screenshots or logs if helpful** 20 | - **Specify your environment** (OS, Node.js version, etc.) 21 | 22 | ### 💡 Suggesting Enhancements 23 | 24 | Enhancement suggestions are tracked as GitHub issues. When creating an enhancement suggestion, please include: 25 | 26 | - **Use a clear and descriptive title** 27 | - **Provide a step-by-step description of the suggested enhancement** 28 | - **Provide specific examples to demonstrate the steps** 29 | - **Describe the current behavior and expected behavior** 30 | - **Explain why this enhancement would be useful** 31 | 32 | ### 🚀 Pull Requests 33 | 34 | Good pull requests (patches, improvements, new features) are a fantastic help. They should remain focused in scope and avoid containing unrelated commits. 35 | 36 | **Please ask first** before embarking on any significant pull request (e.g., implementing features, refactoring code), otherwise you risk spending a lot of time working on something that the project's developers might not want to merge into the project. 37 | 38 | ## Development Setup 39 | 40 | 1. **Fork and clone the repo** 41 | ```bash 42 | git clone https://github.com/santhoshravindran7/Fabric-Analytics-MCP.git 43 | cd Fabric-Analytics-MCP 44 | ``` 45 | 46 | 2. **Install dependencies** 47 | ```bash 48 | npm install 49 | ``` 50 | 51 | 3. **Build the project** 52 | ```bash 53 | npm run build 54 | ``` 55 | 56 | 4. **Run tests** (if available) 57 | ```bash 58 | npm test 59 | ``` 60 | 61 | ## Development Guidelines 62 | 63 | ### Code Style 64 | 65 | - **TypeScript**: Use TypeScript for all new code 66 | - **Formatting**: Run `npm run format` before committing 67 | - **Linting**: Ensure `npm run lint` passes 68 | - **Comments**: Add JSDoc comments for public APIs 69 | 70 | ### Commit Messages 71 | 72 | Use clear and meaningful commit messages: 73 | 74 | ``` 75 | feat: add new Spark monitoring dashboard 76 | fix: resolve session timeout issue 77 | docs: update API documentation 78 | refactor: improve error handling 79 | test: add unit tests for Livy API 80 | ``` 81 | 82 | ### Branch Naming 83 | 84 | Use descriptive branch names: 85 | - `feature/add-spark-monitoring` 86 | - `fix/session-timeout` 87 | - `docs/update-readme` 88 | - `refactor/improve-error-handling` 89 | 90 | ## Project Structure 91 | 92 | ``` 93 | ├── src/ 94 | │ ├── index.ts # Main MCP server 95 | │ └── fabric-client.ts # Microsoft Fabric API client 96 | ├── tests/ # Test files 97 | ├── build/ # Compiled JavaScript 98 | ├── docs/ # Documentation 99 | └── examples/ # Example scripts 100 | ``` 101 | 102 | ## Testing 103 | 104 | ### Running Tests 105 | 106 | ```bash 107 | # Run all tests 108 | npm test 109 | 110 | # Run specific test file 111 | npm test -- tests/specific-test.ts 112 | 113 | # Run tests in watch mode 114 | npm run test:watch 115 | ``` 116 | 117 | ### Writing Tests 118 | 119 | - Add tests for new functionality 120 | - Update existing tests when modifying behavior 121 | - Ensure tests pass before submitting PR 122 | - Mock external API calls in tests 123 | 124 | ### Integration Testing 125 | 126 | For testing with real Microsoft Fabric APIs: 127 | 128 | 1. **Never commit real tokens** to the repository 129 | 2. 
**Use environment variables** for test configuration 130 | 3. **Document test setup** in your PR description 131 | 4. **Include test results** in your PR if applicable 132 | 133 | ## Documentation 134 | 135 | - **Update README.md** for new features 136 | - **Add JSDoc comments** for new functions/classes 137 | - **Update API documentation** for new endpoints 138 | - **Include examples** for new functionality 139 | 140 | ## Security Considerations 141 | 142 | - **Never commit secrets** or tokens 143 | - **Review security implications** of your changes 144 | - **Follow secure coding practices** 145 | - **Report security issues** privately (see SECURITY.md) 146 | 147 | ## Review Process 148 | 149 | 1. **Automated checks** must pass (linting, building, etc.) 150 | 2. **Manual review** by maintainers 151 | 3. **Testing** of new functionality 152 | 4. **Documentation** review 153 | 5. **Merge** once approved 154 | 155 | ## Recognition 156 | 157 | Contributors will be recognized in: 158 | - **README.md** contributors section 159 | - **Release notes** for significant contributions 160 | - **GitHub contributors** page 161 | 162 | ## Getting Help 163 | 164 | - **GitHub Issues**: For bugs and feature requests 165 | - **GitHub Discussions**: For questions and general discussion 166 | - **Documentation**: Check existing docs first 167 | 168 | ## Types of Contributions We're Looking For 169 | 170 | - 🐛 **Bug fixes** 171 | - ✨ **New MCP tools** for Microsoft Fabric 172 | - 📚 **Documentation improvements** 173 | - 🧪 **Test coverage** improvements 174 | - 🔧 **Performance optimizations** 175 | - 🎨 **UI/UX improvements** for Claude Desktop integration 176 | - 🌐 **Internationalization** support 177 | - 📦 **Packaging and distribution** improvements 178 | 179 | ## What We're NOT Looking For 180 | 181 | - **Breaking changes** without discussion 182 | - **Large refactors** without approval 183 | - **Unrelated feature creep** 184 | - **Code that doesn't follow project conventions** 185 | 186 | --- 187 | 188 | Thank you for contributing! 
🚀 189 | 190 | *This contributing guide is inspired by open source best practices and tailored for this MCP server project.* 191 | -------------------------------------------------------------------------------- /src/capacity-tools.ts: -------------------------------------------------------------------------------- 1 | import { z } from 'zod'; 2 | import type { McpServer } from "@modelcontextprotocol/sdk/server/mcp.js"; 3 | import type { AuthMethod } from './auth-client.js'; 4 | 5 | // Reuse executeApiCall via injection to avoid circular imports 6 | export function registerCapacityTools( 7 | server: McpServer, 8 | authConfig: { defaultWorkspaceId?: string; method: AuthMethod }, 9 | executeApiCall: <T>( 10 | bearerToken: string | undefined, 11 | workspaceId: string, 12 | operation: string, 13 | apiCall: (_client: any) => Promise<{ status: 'success' | 'error'; data?: T; error?: string }>, 14 | simulationParams?: Record<string, unknown> 15 | ) => Promise<{ status: 'success' | 'error'; data?: T; error?: string }>, 16 | ) { 17 | // Schemas local (keep lightweight – mirrors definitions in index.ts but isolated) 18 | const ListCapacitiesSchema = z.object({ 19 | bearerToken: z.string().optional().describe("Optional bearer token if not using configured authentication") 20 | }); 21 | 22 | const AssignWorkspaceToCapacitySchema = z.object({ 23 | bearerToken: z.string().optional().describe("Optional bearer token if not using configured authentication"), 24 | capacityId: z.string().min(1).describe("Target capacity ID"), 25 | workspaceId: z.string().min(1).describe("Workspace ID to assign to capacity") 26 | }); 27 | 28 | const UnassignWorkspaceFromCapacitySchema = z.object({ 29 | bearerToken: z.string().optional().describe("Optional bearer token if not using configured authentication"), 30 | workspaceId: z.string().min(1).describe("Workspace ID to unassign from capacity") 31 | }); 32 | 33 | const ListCapacityWorkspacesSchema = z.object({ 34 | bearerToken: z.string().optional().describe("Optional bearer token if not using configured authentication"), 35 | capacityId: z.string().min(1).describe("Capacity ID to list workspaces for") 36 | }); 37 | 38 | // Register tools (idempotent guard to prevent duplicate registration if index.ts already has them) 39 | const existingTools: any[] = (server as any).listTools ? (server as any).listTools() : []; 40 | const existing = new Set(existingTools.map((t: any) => t.name)); 41 | if (existing.has("fabric_list_capacities")) { 42 | return; // Already registered in main file 43 | } 44 | 45 | server.tool( 46 | "fabric_list_capacities", 47 | "List all available Fabric capacities", 48 | ListCapacitiesSchema.shape, 49 | async ({ bearerToken }) => { 50 | const result = await executeApiCall( 51 | bearerToken, 52 | authConfig.defaultWorkspaceId || "global", 53 | "list-capacities", 54 | (client) => client.listCapacities(), 55 | {} 56 | ); 57 | 58 | if (result.status === 'error') { 59 | return { content: [{ type: 'text', text: `Error listing capacities: ${result.error}` }] }; 60 | } 61 | 62 | const capacities: any[] = Array.isArray(result.data) ? (result.data as any[]) : []; 63 | if (capacities.length === 0) { 64 | return { content: [{ type: 'text', text: 'No capacities found in your tenant.' }] }; 65 | } 66 | 67 | const list = capacities.map((c, i) => `${i + 1}. 
${c.displayName} (${c.sku})\n ID: ${c.id}\n State: ${c.state}\n Region: ${c.region}`).join('\n\n'); 68 | 69 | return { 70 | content: [{ type: 'text', text: `🏗️ Found ${capacities.length} Fabric Capacities:\n\n${list}\n\nUse a capacity ID in other operations (assignment, listing workspaces).` }] 71 | }; 72 | } 73 | ); 74 | 75 | server.tool( 76 | "fabric_assign_workspace_to_capacity", 77 | "Assign a workspace to a dedicated Fabric capacity", 78 | AssignWorkspaceToCapacitySchema.shape, 79 | async ({ bearerToken, capacityId, workspaceId }) => { 80 | const result = await executeApiCall( 81 | bearerToken, 82 | authConfig.defaultWorkspaceId || "global", 83 | "assign-workspace-to-capacity", 84 | (client) => client.assignWorkspaceToCapacity(capacityId, workspaceId), 85 | { capacityId, workspaceId } 86 | ); 87 | 88 | if (result.status === 'error') { 89 | return { content: [{ type: 'text', text: `Error assigning workspace to capacity: ${result.error}` }] }; 90 | } 91 | 92 | return { 93 | content: [{ type: 'text', text: `✅ Workspace ${workspaceId} assigned to capacity ${capacityId}.` }] 94 | }; 95 | } 96 | ); 97 | 98 | server.tool( 99 | "fabric_unassign_workspace_from_capacity", 100 | "Unassign a workspace from its capacity (move to shared capacity)", 101 | UnassignWorkspaceFromCapacitySchema.shape, 102 | async ({ bearerToken, workspaceId }) => { 103 | const result = await executeApiCall( 104 | bearerToken, 105 | authConfig.defaultWorkspaceId || "global", 106 | "unassign-workspace-from-capacity", 107 | (client) => client.unassignWorkspaceFromCapacity(workspaceId), 108 | { workspaceId } 109 | ); 110 | 111 | if (result.status === 'error') { 112 | return { content: [{ type: 'text', text: `Error unassigning workspace from capacity: ${result.error}` }] }; 113 | } 114 | 115 | return { 116 | content: [{ type: 'text', text: `✅ Workspace ${workspaceId} moved to shared capacity.` }] 117 | }; 118 | } 119 | ); 120 | 121 | server.tool( 122 | "fabric_list_capacity_workspaces", 123 | "List all workspaces assigned to a specific capacity", 124 | ListCapacityWorkspacesSchema.shape, 125 | async ({ bearerToken, capacityId }) => { 126 | const result = await executeApiCall( 127 | bearerToken, 128 | authConfig.defaultWorkspaceId || "global", 129 | "list-capacity-workspaces", 130 | (client) => client.listCapacityWorkspaces(capacityId), 131 | { capacityId } 132 | ); 133 | 134 | if (result.status === 'error') { 135 | return { content: [{ type: 'text', text: `Error listing capacity workspaces: ${result.error}` }] }; 136 | } 137 | 138 | const workspaces: any[] = Array.isArray(result.data) ? (result.data as any[]) : []; 139 | if (workspaces.length === 0) { 140 | return { content: [{ type: 'text', text: `No workspaces found in capacity ${capacityId}.` }] }; 141 | } 142 | 143 | const list = workspaces.map((w, i) => `${i + 1}. ${w.name} (${w.type})\n ID: ${w.id}\n State: ${w.state}`).join('\n\n'); 144 | return { 145 | content: [{ type: 'text', text: `🏗️ Workspaces in Capacity ${capacityId} (${workspaces.length}):\n\n${list}` }] 146 | }; 147 | } 148 | ); 149 | } 150 | -------------------------------------------------------------------------------- /python-wrapper/build_package.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python3 2 | """ 3 | Build script for creating the PyPI package with embedded Node.js server. 4 | 5 | This script: 6 | 1. Builds the TypeScript/Node.js MCP server 7 | 2. Copies the built server into the Python package 8 | 3. 
Creates the distributable Python package 9 | """ 10 | 11 | import os 12 | import sys 13 | import shutil 14 | import subprocess 15 | import json 16 | from pathlib import Path 17 | 18 | 19 | def run_command(cmd, cwd=None, check=True): 20 | """Run a shell command and return the result.""" 21 | print(f"Running: {' '.join(cmd) if isinstance(cmd, list) else cmd}") 22 | try: 23 | result = subprocess.run( 24 | cmd, 25 | cwd=cwd, 26 | shell=isinstance(cmd, str), 27 | check=check, 28 | capture_output=True, 29 | text=True 30 | ) 31 | if result.stdout: 32 | print(result.stdout) 33 | return result 34 | except subprocess.CalledProcessError as e: 35 | print(f"Error running command: {e}") 36 | if e.stderr: 37 | print(f"Error output: {e.stderr}") 38 | if check: 39 | sys.exit(1) 40 | return e 41 | 42 | 43 | def build_nodejs_server(): 44 | """Build the Node.js MCP server.""" 45 | print("🔨 Building Node.js MCP server...") 46 | 47 | # Get paths 48 | project_root = Path(__file__).parent.parent 49 | wrapper_root = Path(__file__).parent 50 | 51 | # Run npm install in the main project 52 | print("📦 Installing Node.js dependencies...") 53 | run_command(["npm", "install"], cwd=project_root) 54 | 55 | # Build the TypeScript project 56 | print("🏗️ Building TypeScript...") 57 | run_command(["npm", "run", "build"], cwd=project_root) 58 | 59 | # Verify build directory exists 60 | build_dir = project_root / "build" 61 | if not build_dir.exists(): 62 | print("❌ Build directory not found!") 63 | sys.exit(1) 64 | 65 | # Copy built server to Python package 66 | server_dest = wrapper_root / "fabric_analytics_mcp" / "server" 67 | if server_dest.exists(): 68 | shutil.rmtree(server_dest) 69 | 70 | print("📋 Copying server files to Python package...") 71 | server_dest.mkdir(parents=True) 72 | 73 | # Copy build directory 74 | shutil.copytree(build_dir, server_dest / "build") 75 | 76 | # Copy package.json (needed for metadata) 77 | shutil.copy(project_root / "package.json", server_dest / "package.json") 78 | 79 | # Copy node_modules (essential dependencies only) 80 | node_modules_src = project_root / "node_modules" 81 | if node_modules_src.exists(): 82 | node_modules_dest = server_dest / "node_modules" 83 | print("📦 Copying essential Node.js modules...") 84 | 85 | # Copy only production dependencies 86 | with open(project_root / "package.json", 'r') as f: 87 | package_json = json.load(f) 88 | 89 | dependencies = package_json.get("dependencies", {}) 90 | for dep_name in dependencies: 91 | dep_src = node_modules_src / dep_name 92 | if dep_src.exists(): 93 | dep_dest = node_modules_dest / dep_name 94 | dep_dest.parent.mkdir(parents=True, exist_ok=True) 95 | if dep_src.is_dir(): 96 | shutil.copytree(dep_src, dep_dest, 97 | ignore=shutil.ignore_patterns('*.md', '*.txt', 'test*', 'example*')) 98 | else: 99 | shutil.copy2(dep_src, dep_dest) 100 | 101 | print("✅ Node.js server build complete!") 102 | 103 | 104 | def build_python_package(): 105 | """Build the Python package.""" 106 | print("🐍 Building Python package...") 107 | 108 | wrapper_root = Path(__file__).parent 109 | 110 | # Clean previous builds 111 | for dir_name in ["build", "dist", "*.egg-info"]: 112 | for path in wrapper_root.glob(dir_name): 113 | if path.is_dir(): 114 | shutil.rmtree(path) 115 | else: 116 | path.unlink() 117 | 118 | # Build source distribution 119 | print("📦 Creating source distribution...") 120 | run_command([sys.executable, "setup.py", "sdist"], cwd=wrapper_root) 121 | 122 | # Build wheel distribution 123 | print("🎡 Creating wheel distribution...") 124 | 
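    # Note: "setup.py bdist_wheel" assumes the third-party "wheel" package is
    # installed in the build environment (pip install wheel); without it,
    # setuptools aborts with "error: invalid command 'bdist_wheel'".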
run_command([sys.executable, "setup.py", "bdist_wheel"], cwd=wrapper_root) 125 | 126 | print("✅ Python package build complete!") 127 | 128 | # Show built files 129 | dist_dir = wrapper_root / "dist" 130 | if dist_dir.exists(): 131 | print("\n📦 Built packages:") 132 | for file in dist_dir.iterdir(): 133 | print(f" - {file.name}") 134 | 135 | 136 | def validate_package(): 137 | """Validate the built package.""" 138 | print("🔍 Validating package...") 139 | 140 | wrapper_root = Path(__file__).parent 141 | 142 | # Check if server files exist 143 | server_dir = wrapper_root / "fabric_analytics_mcp" / "server" 144 | if not server_dir.exists(): 145 | print("❌ Server directory not found!") 146 | return False 147 | 148 | index_js = server_dir / "build" / "index.js" 149 | if not index_js.exists(): 150 | print("❌ Server index.js not found!") 151 | return False 152 | 153 | package_json = server_dir / "package.json" 154 | if not package_json.exists(): 155 | print("❌ package.json not found!") 156 | return False 157 | 158 | print("✅ Package validation passed!") 159 | return True 160 | 161 | 162 | def main(): 163 | """Main build script.""" 164 | print("🚀 Building Microsoft Fabric Analytics MCP Server PyPI Package") 165 | print("=" * 70) 166 | 167 | try: 168 | # Build Node.js server 169 | build_nodejs_server() 170 | 171 | # Validate 172 | if not validate_package(): 173 | sys.exit(1) 174 | 175 | # Build Python package 176 | build_python_package() 177 | 178 | print("\n🎉 Build complete!") 179 | print("\n📋 Next steps:") 180 | print(" 1. Test the package: pip install dist/*.whl") 181 | print(" 2. Upload to PyPI: twine upload dist/*") 182 | 183 | except KeyboardInterrupt: 184 | print("\n❌ Build cancelled by user") 185 | sys.exit(1) 186 | except Exception as e: 187 | print(f"\n❌ Build failed: {e}") 188 | sys.exit(1) 189 | 190 | 191 | if __name__ == "__main__": 192 | main() 193 | -------------------------------------------------------------------------------- /src/azure-openai-analyzer.ts: -------------------------------------------------------------------------------- 1 | import fetch from 'node-fetch'; 2 | import fs from 'fs'; 3 | import path from 'path'; 4 | 5 | interface AzureOpenAIConfig { 6 | azureOpenAI: { 7 | apiKey: string; 8 | endpoint: string; 9 | apiVersion: string; 10 | deploymentName: string; 11 | model: string; 12 | maxTokens: number; 13 | temperature: number; 14 | }; 15 | analysis: { 16 | enableLLMAnalysis: boolean; 17 | analysisTypes: string[]; 18 | }; 19 | } 20 | 21 | interface OpenAIResponse { 22 | choices: Array<{ 23 | message: { 24 | content: string; 25 | }; 26 | }>; 27 | } 28 | 29 | export class AzureOpenAIAnalyzer { 30 | private config: AzureOpenAIConfig; 31 | 32 | constructor(configPath?: string) { 33 | const configFile = configPath || path.join(process.cwd(), 'azure-openai-config.json'); 34 | this.config = JSON.parse(fs.readFileSync(configFile, 'utf8')); 35 | } 36 | 37 | async analyzeSparkLogs(logs: string[], analysisType: string = 'comprehensive'): Promise<string> { 38 | if (!this.config.analysis.enableLLMAnalysis) { 39 | return "LLM analysis is disabled in configuration"; 40 | } 41 | 42 | const prompt = this.buildSparkAnalysisPrompt(logs, analysisType); 43 | return await this.callAzureOpenAI(prompt); 44 | } 45 | 46 | async analyzeStatementPerformance(statementData: any, analysisType: string = 'optimization'): Promise<string> { 47 | if (!this.config.analysis.enableLLMAnalysis) { 48 | return "LLM analysis is disabled in configuration"; 49 | } 50 | 51 | const prompt = 
this.buildStatementAnalysisPrompt(statementData, analysisType); 52 | return await this.callAzureOpenAI(prompt); 53 | } 54 | 55 | async analyzeExecutionHistory(sessions: any[], timeRange: string, analysisType: string = 'trends'): Promise<string> { 56 | if (!this.config.analysis.enableLLMAnalysis) { 57 | return "LLM analysis is disabled in configuration"; 58 | } 59 | 60 | const prompt = this.buildHistoryAnalysisPrompt(sessions, timeRange, analysisType); 61 | return await this.callAzureOpenAI(prompt); 62 | } 63 | 64 | private buildSparkAnalysisPrompt(logs: string[], analysisType: string): string { 65 | const logSample = logs.slice(0, 50).join('\n'); // First 50 lines 66 | 67 | return `You are a Spark performance expert. Analyze the following Spark session logs and provide insights based on the analysis type: ${analysisType}. 68 | 69 | Spark Session Logs: 70 | ${logSample} 71 | 72 | Please provide: 73 | 1. Performance bottlenecks identified 74 | 2. Memory usage patterns 75 | 3. Error analysis (if any) 76 | 4. Optimization recommendations 77 | 5. Resource utilization insights 78 | 79 | Focus on actionable recommendations for improving Spark job performance.`; 80 | } 81 | 82 | private buildStatementAnalysisPrompt(statementData: any, analysisType: string): string { 83 | return `You are a Spark SQL optimization expert. Analyze the following Spark statement execution and provide ${analysisType} insights. 84 | 85 | Statement Details: 86 | - Code: ${statementData.code} 87 | - State: ${statementData.state} 88 | - Execution Time: ${statementData.executionTime || 'Unknown'} 89 | - Output Type: ${statementData.outputType || 'Unknown'} 90 | 91 | Please provide: 92 | 1. Query optimization opportunities 93 | 2. Performance analysis 94 | 3. Best practices recommendations 95 | 4. Potential issues and solutions 96 | 5. Resource optimization suggestions 97 | 98 | Focus on specific, actionable improvements for this Spark statement.`; 99 | } 100 | 101 | private buildHistoryAnalysisPrompt(sessions: any[], timeRange: string, analysisType: string): string { 102 | const sessionSummary = sessions.map(s => ({ 103 | id: s.id, 104 | state: s.state, 105 | kind: s.kind, 106 | duration: s.duration || 'Unknown' 107 | })); 108 | 109 | return `You are a Spark operations analyst. Analyze the following session execution history over ${timeRange} and provide ${analysisType} insights. 110 | 111 | Session History Summary: 112 | ${JSON.stringify(sessionSummary, null, 2)} 113 | 114 | Please provide: 115 | 1. Performance trends analysis 116 | 2. Failure pattern identification 117 | 3. Resource utilization trends 118 | 4. Operational recommendations 119 | 5. Capacity planning insights 120 | 121 | Focus on trends and patterns that can help optimize overall Spark operations.`; 122 | } 123 | 124 | private async callAzureOpenAI(prompt: string): Promise<string> { 125 | const { azureOpenAI } = this.config; 126 | 127 | const url = `${azureOpenAI.endpoint}openai/deployments/${azureOpenAI.deploymentName}/chat/completions?api-version=${azureOpenAI.apiVersion}`; 128 | 129 | const requestBody = { 130 | messages: [ 131 | { 132 | role: "system", 133 | content: "You are an expert Apache Spark performance analyst and optimization consultant. Provide detailed, actionable insights and recommendations."
134 | }, 135 | { 136 | role: "user", 137 | content: prompt 138 | } 139 | ], 140 | max_tokens: azureOpenAI.maxTokens, 141 | temperature: azureOpenAI.temperature, 142 | top_p: 0.95, 143 | frequency_penalty: 0, 144 | presence_penalty: 0 145 | }; 146 | 147 | try { 148 | const response = await fetch(url, { 149 | method: 'POST', 150 | headers: { 151 | 'Content-Type': 'application/json', 152 | 'api-key': azureOpenAI.apiKey 153 | }, 154 | body: JSON.stringify(requestBody) 155 | }); 156 | 157 | if (!response.ok) { 158 | const errorText = await response.text(); 159 | throw new Error(`Azure OpenAI API error: ${response.status} - ${errorText}`); 160 | } 161 | 162 | const data = await response.json() as OpenAIResponse; 163 | return data.choices[0]?.message?.content || "No response from Azure OpenAI"; 164 | } catch (error) { 165 | console.error('Azure OpenAI API call failed:', error); 166 | return `Error calling Azure OpenAI: ${error instanceof Error ? error.message : String(error)}`; 167 | } 168 | } 169 | 170 | async testConnection(): Promise<{ success: boolean; message: string }> { 171 | try { 172 | const testPrompt = "Hello, this is a test connection to Azure OpenAI. Please respond with 'Connection successful' and today's date."; 173 | const response = await this.callAzureOpenAI(testPrompt); 174 | 175 | if (response.includes('Error calling Azure OpenAI')) { 176 | return { 177 | success: false, 178 | message: response 179 | }; 180 | } 181 | 182 | return { 183 | success: true, 184 | message: `✅ Azure OpenAI connection successful! Response: ${response}` 185 | }; 186 | } catch (error) { 187 | return { 188 | success: false, 189 | message: `❌ Connection test failed: ${error instanceof Error ? error.message : String(error)}` 190 | }; 191 | } 192 | } 193 | } 194 | -------------------------------------------------------------------------------- /test-before-push.ps1: -------------------------------------------------------------------------------- 1 | # Comprehensive Pre-Push Testing Script 2 | # This script runs all necessary tests before pushing to Git 3 | 4 | Write-Host "🚀 Starting comprehensive pre-push testing..." -ForegroundColor Green 5 | Write-Host "===============================================" -ForegroundColor Green 6 | 7 | $errors = @() 8 | $warnings = @() 9 | 10 | # Function to log errors 11 | function Log-Error($message) { 12 | $script:errors += $message 13 | Write-Host "❌ ERROR: $message" -ForegroundColor Red 14 | } 15 | 16 | # Function to log warnings 17 | function Log-Warning($message) { 18 | $script:warnings += $message 19 | Write-Host "⚠️ WARNING: $message" -ForegroundColor Yellow 20 | } 21 | 22 | # Function to log success 23 | function Log-Success($message) { 24 | Write-Host "✅ $message" -ForegroundColor Green 25 | } 26 | 27 | # 1. Check Node.js and npm versions 28 | Write-Host "`n📋 Step 1: Environment Check" -ForegroundColor Cyan 29 | try { 30 | $nodeVersion = node --version 31 | $npmVersion = npm --version 32 | Log-Success "Node.js version: $nodeVersion" 33 | Log-Success "npm version: $npmVersion" 34 | } catch { 35 | Log-Error "Node.js or npm not found. Please install Node.js 18+ and npm." 36 | } 37 | 38 | # 2. Install dependencies 39 | Write-Host "`n📦 Step 2: Installing Dependencies" -ForegroundColor Cyan 40 | try { 41 | npm install --silent 42 | if ($LASTEXITCODE -eq 0) { 43 | Log-Success "Dependencies installed successfully" 44 | } else { 45 | Log-Error "Failed to install dependencies" 46 | } 47 | } catch { 48 | Log-Error "Failed to install dependencies: $_" 49 | } 50 | 51 | # 3. 
TypeScript compilation 52 | Write-Host "`n🔨 Step 3: TypeScript Compilation" -ForegroundColor Cyan 53 | try { 54 | npm run build --silent 55 | if ($LASTEXITCODE -eq 0) { 56 | Log-Success "TypeScript compilation successful" 57 | } else { 58 | Log-Error "TypeScript compilation failed" 59 | } 60 | } catch { 61 | Log-Error "TypeScript compilation failed: $_" 62 | } 63 | 64 | # 4. ESLint check 65 | Write-Host "`n🔍 Step 4: Code Linting" -ForegroundColor Cyan 66 | try { 67 | npm run lint --silent 68 | if ($LASTEXITCODE -eq 0) { 69 | Log-Success "Linting passed" 70 | } else { 71 | Log-Warning "Linting issues found - consider running 'npm run lint:fix'" 72 | } 73 | } catch { 74 | Log-Warning "ESLint check failed: $_" 75 | } 76 | 77 | # 5. Run Jest tests 78 | Write-Host "`n🧪 Step 5: Running Tests" -ForegroundColor Cyan 79 | try { 80 | npm test --silent 81 | if ($LASTEXITCODE -eq 0) { 82 | Log-Success "All tests passed" 83 | } else { 84 | Log-Error "Some tests failed" 85 | } 86 | } catch { 87 | Log-Error "Test execution failed: $_" 88 | } 89 | 90 | # 6. Check critical files exist 91 | Write-Host "`n📄 Step 6: File Structure Check" -ForegroundColor Cyan 92 | $criticalFiles = @( 93 | "src/index.ts", 94 | "src/fabric-client.ts", 95 | "package.json", 96 | "tsconfig.json", 97 | "README.md", 98 | "build/index.js" 99 | ) 100 | 101 | foreach ($file in $criticalFiles) { 102 | if (Test-Path $file) { 103 | Log-Success "Found: $file" 104 | } else { 105 | Log-Error "Missing critical file: $file" 106 | } 107 | } 108 | 109 | # 7. Check package.json for required fields 110 | Write-Host "`n📋 Step 7: Package.json Validation" -ForegroundColor Cyan 111 | try { 112 | $packageJson = Get-Content "package.json" | ConvertFrom-Json 113 | 114 | $requiredFields = @("name", "version", "description", "main", "scripts", "dependencies") 115 | foreach ($field in $requiredFields) { 116 | if ($packageJson.$field) { 117 | Log-Success "package.json has required field: $field" 118 | } else { 119 | Log-Error "package.json missing required field: $field" 120 | } 121 | } 122 | } catch { 123 | Log-Error "Failed to validate package.json: $_" 124 | } 125 | 126 | # 8. Docker build test (optional) 127 | Write-Host "`n🐳 Step 8: Docker Build Test" -ForegroundColor Cyan 128 | try { 129 | if (Get-Command docker -ErrorAction SilentlyContinue) { 130 | docker build -t fabric-analytics-mcp-test . --quiet 131 | if ($LASTEXITCODE -eq 0) { 132 | Log-Success "Docker build successful" 133 | # Clean up test image 134 | docker rmi fabric-analytics-mcp-test --force | Out-Null 135 | } else { 136 | Log-Warning "Docker build failed" 137 | } 138 | } else { 139 | Log-Warning "Docker not found - skipping Docker build test" 140 | } 141 | } catch { 142 | Log-Warning "Docker build test failed: $_" 143 | } 144 | 145 | # 9. Git status check 146 | Write-Host "`n📝 Step 9: Git Status Check" -ForegroundColor Cyan 147 | try { 148 | $gitStatus = git status --porcelain 149 | if ($gitStatus) { 150 | Log-Success "Git changes detected:" 151 | git status --short 152 | } else { 153 | Log-Warning "No Git changes detected" 154 | } 155 | } catch { 156 | Log-Warning "Git status check failed: $_" 157 | } 158 | 159 | # 10. 
Security check - scan for sensitive data 160 | Write-Host "`n🔐 Step 10: Security Scan" -ForegroundColor Cyan 161 | $sensitivePatterns = @( 162 | "password\s*=", 163 | "secret\s*=", 164 | "token\s*=", 165 | "key\s*=", 166 | "Bearer [A-Za-z0-9\-\._~\+\/]+=*" 167 | ) 168 | 169 | $securityIssues = @() 170 | Get-ChildItem -Recurse -Include "*.ts", "*.js", "*.json" | Where-Object { $_.FullName -notmatch '\\(node_modules|build)\\' } | ForEach-Object { 171 | $content = Get-Content $_.FullName -Raw 172 | foreach ($pattern in $sensitivePatterns) { 173 | if ($content -match $pattern) { 174 | $securityIssues += "$($_.Name): Found potential sensitive data pattern" 175 | } 176 | } 177 | } 178 | 179 | if ($securityIssues.Count -eq 0) { 180 | Log-Success "No obvious sensitive data patterns found" 181 | } else { 182 | foreach ($issue in $securityIssues) { 183 | Log-Warning $issue 184 | } 185 | } 186 | 187 | # Summary 188 | Write-Host "`n📊 TEST SUMMARY" -ForegroundColor Magenta 189 | Write-Host "===============" -ForegroundColor Magenta 190 | 191 | if ($errors.Count -eq 0) { 192 | Write-Host "🎉 ALL CHECKS PASSED! Ready to push to Git." -ForegroundColor Green 193 | Write-Host "Run these commands to push:" -ForegroundColor Green 194 | Write-Host " git add ." -ForegroundColor White 195 | Write-Host " git commit -m `"Add comprehensive notebook management features`"" -ForegroundColor White 196 | Write-Host " git push origin master" -ForegroundColor White 197 | } else { 198 | Write-Host "❌ ERRORS FOUND - DO NOT PUSH YET" -ForegroundColor Red 199 | Write-Host "Errors to fix:" -ForegroundColor Red 200 | foreach ($err in $errors) { 201 | Write-Host " - $err" -ForegroundColor Red 202 | } 203 | } 204 | 205 | if ($warnings.Count -gt 0) { 206 | Write-Host "`nWarnings (consider addressing):" -ForegroundColor Yellow 207 | foreach ($warning in $warnings) { 208 | Write-Host " - $warning" -ForegroundColor Yellow 209 | } 210 | } 211 | 212 | Write-Host "`n✅ Pre-push testing completed!" -ForegroundColor Green 213 | -------------------------------------------------------------------------------- /python-wrapper/README.md: -------------------------------------------------------------------------------- 1 | # Microsoft Fabric Analytics MCP Server - Python Package 2 | 3 | [![PyPI version](https://badge.fury.io/py/fabric-analytics-mcp.svg)](https://badge.fury.io/py/fabric-analytics-mcp) 4 | [![Python Support](https://img.shields.io/pypi/pyversions/fabric-analytics-mcp.svg)](https://pypi.org/project/fabric-analytics-mcp/) 5 | [![License: MIT](https://img.shields.io/badge/License-MIT-yellow.svg)](https://opensource.org/licenses/MIT) 6 | 7 | **Easy PyPI installation for Microsoft Fabric Analytics MCP Server** 8 | 9 | This Python package provides a convenient wrapper around the Microsoft Fabric Analytics MCP Server, enabling seamless installation and integration with AI assistants like Claude, GitHub Copilot, and other MCP-compatible clients.
10 | 11 | ## 🚀 Quick Start 12 | 13 | ```bash 14 | # Install from PyPI 15 | pip install fabric-analytics-mcp 16 | 17 | # Start the server 18 | fabric-analytics-mcp start 19 | 20 | # Validate installation 21 | fabric-analytics-mcp validate 22 | ``` 23 | 24 | ## ✨ Features 25 | 26 | - **41+ Microsoft Fabric Tools** - Complete analytics toolkit 27 | - **Easy Installation** - Simple `pip install` command 28 | - **Cross-Platform** - Windows, macOS, Linux support 29 | - **Multiple Auth Methods** - Bearer token, Service Principal, Interactive 30 | - **Workspace Management** - Easy discovery and management 31 | - **Spark Integration** - Job monitoring and session management 32 | - **Notebook Support** - Execution and management 33 | - **Production Ready** - Battle-tested and reliable 34 | 35 | ## 📋 Requirements 36 | 37 | - **Python 3.8+** 38 | - **Node.js 18+** (automatically validated) 39 | - **Microsoft Fabric Access** (with appropriate permissions) 40 | 41 | ## 🔧 Installation 42 | 43 | ### Option 1: Direct PyPI Installation (Recommended) 44 | 45 | ```bash 46 | pip install fabric-analytics-mcp 47 | ``` 48 | 49 | ### Option 2: Development Installation 50 | 51 | ```bash 52 | git clone https://github.com/santhoshravindran7/Fabric-Analytics-MCP 53 | cd Fabric-Analytics-MCP/python-wrapper 54 | pip install -e . 55 | ``` 56 | 57 | ## ⚙️ Configuration 58 | 59 | ### Environment Variables 60 | 61 | ```bash 62 | export FABRIC_AUTH_METHOD=bearer_token 63 | export FABRIC_CLIENT_ID=your-client-id 64 | export FABRIC_CLIENT_SECRET=your-client-secret 65 | export FABRIC_TENANT_ID=your-tenant-id 66 | export FABRIC_DEFAULT_WORKSPACE_ID=your-workspace-id 67 | ``` 68 | 69 | ### Claude Desktop Configuration 70 | 71 | Add to your `claude_desktop_config.json`: 72 | 73 | ```json 74 | { 75 | "mcpServers": { 76 | "fabric-analytics": { 77 | "command": "fabric-analytics-mcp", 78 | "args": ["start"], 79 | "env": { 80 | "FABRIC_AUTH_METHOD": "bearer_token" 81 | } 82 | } 83 | } 84 | } 85 | ``` 86 | 87 | ### GitHub Copilot Configuration 88 | 89 | For VS Code with GitHub Copilot: 90 | 91 | ```json 92 | { 93 | "github.copilot.mcp.servers": { 94 | "fabric-analytics": { 95 | "command": "fabric-analytics-mcp", 96 | "args": ["start"], 97 | "env": { 98 | "FABRIC_AUTH_METHOD": "bearer_token" 99 | } 100 | } 101 | } 102 | } 103 | ``` 104 | 105 | ## 🛠️ Usage 106 | 107 | ### Command Line Interface 108 | 109 | ```bash 110 | # Start the MCP server 111 | fabric-analytics-mcp start 112 | 113 | # Start with specific configuration 114 | fabric-analytics-mcp start --auth-method service_principal --workspace-id <workspace-id> 115 | 116 | # Validate installation 117 | fabric-analytics-mcp validate 118 | 119 | # Show configuration help 120 | fabric-analytics-mcp config 121 | 122 | # Get help 123 | fabric-analytics-mcp --help 124 | ``` 125 | 126 | ### Python API 127 | 128 | ```python 129 | from fabric_analytics_mcp import FabricMCPServer 130 | 131 | # Start server programmatically 132 | config = { 133 | 'FABRIC_AUTH_METHOD': 'bearer_token', 134 | 'FABRIC_DEFAULT_WORKSPACE_ID': 'your-workspace-id' 135 | } 136 | 137 | with FabricMCPServer(config) as server: 138 | # Server is running 139 | tools = server.list_tools() 140 | print(f"Available tools: {len(tools['result']['tools'])}") 141 | ``` 142 | 143 | ## 🏢 Available Tools 144 | 145 | The server provides 41+ tools for Microsoft Fabric analytics: 146 | 147 | ### Workspace Management 148 | - `fabric_list_workspaces` - List all accessible workspaces 149 | - `fabric_find_workspace` - Find workspace by name 150 | - 
`fabric_create_workspace` - Create new workspace 151 | 152 | ### Item Management 153 | - `list-fabric-items` - List workspace items 154 | - `create-fabric-item` - Create new items 155 | - `update-fabric-item` - Update existing items 156 | - `delete-fabric-item` - Delete items 157 | 158 | ### Notebook Operations 159 | - `create-fabric-notebook` - Create notebooks 160 | - `execute-fabric-notebook` - Run notebooks 161 | - `get-fabric-notebook-definition` - Get notebook content 162 | 163 | ### Spark Integration 164 | - `submit-spark-job` - Submit Spark jobs 165 | - `get-job-status` - Monitor job status 166 | - `create-livy-session` - Create interactive sessions 167 | - `execute-livy-statement` - Run Spark code 168 | 169 | ### Monitoring & Analytics 170 | - `get-spark-monitoring-dashboard` - Performance insights 171 | - `analyze-livy-session-logs` - Log analysis 172 | - `get-workspace-spark-applications` - Application monitoring 173 | 174 | [View complete tool list →](https://github.com/santhoshravindran7/Fabric-Analytics-MCP#-available-tools) 175 | 176 | ## 🔐 Authentication 177 | 178 | ### Bearer Token (Recommended for Development) 179 | ```bash 180 | export FABRIC_AUTH_METHOD=bearer_token 181 | # Get token from Fabric portal 182 | ``` 183 | 184 | ### Service Principal (Recommended for Production) 185 | ```bash 186 | export FABRIC_AUTH_METHOD=service_principal 187 | export FABRIC_CLIENT_ID=your-app-id 188 | export FABRIC_CLIENT_SECRET=your-secret 189 | export FABRIC_TENANT_ID=your-tenant-id 190 | ``` 191 | 192 | ### Interactive Login 193 | ```bash 194 | export FABRIC_AUTH_METHOD=interactive 195 | # Opens browser for authentication 196 | ``` 197 | 198 | ## 🔍 Validation 199 | 200 | Ensure everything is working correctly: 201 | 202 | ```bash 203 | # Validate installation 204 | fabric-analytics-mcp validate 205 | 206 | # Test server startup 207 | fabric-analytics-mcp start --validate 208 | 209 | # Check tool availability 210 | echo '{"jsonrpc": "2.0", "id": 1, "method": "tools/list"}' | fabric-analytics-mcp start 211 | ``` 212 | 213 | ## 📚 Documentation 214 | 215 | - **[Full Documentation](https://github.com/santhoshravindran7/Fabric-Analytics-MCP)** 216 | - **[API Reference](https://github.com/santhoshravindran7/Fabric-Analytics-MCP/blob/master/docs/)** 217 | - **[Examples](https://github.com/santhoshravindran7/Fabric-Analytics-MCP/blob/master/EXAMPLES.md)** 218 | - **[Troubleshooting](https://github.com/santhoshravindran7/Fabric-Analytics-MCP/blob/master/TROUBLESHOOTING.md)** 219 | 220 | ## 🤝 Support 221 | 222 | - **Issues**: [GitHub Issues](https://github.com/santhoshravindran7/Fabric-Analytics-MCP/issues) 223 | - **Discussions**: [GitHub Discussions](https://github.com/santhoshravindran7/Fabric-Analytics-MCP/discussions) 224 | - **Email**: santhoshravindran7@gmail.com 225 | 226 | ## 📄 License 227 | 228 | This project is licensed under the MIT License - see the [LICENSE](https://github.com/santhoshravindran7/Fabric-Analytics-MCP/blob/master/LICENSE) file for details. 229 | 230 | ## 🙏 Contributing 231 | 232 | Contributions are welcome! Please read our [Contributing Guide](https://github.com/santhoshravindran7/Fabric-Analytics-MCP/blob/master/CONTRIBUTING.md) for details. 
233 | 234 | --- 235 | 236 | **Developed by [Santhosh Ravindran](https://github.com/santhoshravindran7)** ✨ 237 | -------------------------------------------------------------------------------- /src/migration/spark-pool-tools.ts: -------------------------------------------------------------------------------- 1 | /** 2 | * Spark Pool Management Tools 3 | * Handles Synapse Spark pool discovery and Fabric Spark pool conversion 4 | */ 5 | 6 | import { execSync } from 'child_process'; 7 | 8 | export interface SynapseSparkPool { 9 | id: string; 10 | name: string; 11 | nodeSize: string; 12 | nodeCount: number; 13 | autoScale: { 14 | enabled: boolean; 15 | minNodeCount?: number; 16 | maxNodeCount?: number; 17 | }; 18 | autoPause: { 19 | enabled: boolean; 20 | delayInMinutes?: number; 21 | }; 22 | sparkVersion: string; 23 | dynamicExecutorAllocation?: { 24 | enabled: boolean; 25 | minExecutors?: number; 26 | maxExecutors?: number; 27 | }; 28 | } 29 | 30 | export interface FabricSparkPoolRecommendation { 31 | synapsePoolName: string; 32 | recommendedConfig: { 33 | driverCores: number; 34 | driverMemory: string; 35 | executorCores: number; 36 | executorMemory: string; 37 | dynamicAllocation: boolean; 38 | minExecutors?: number; 39 | maxExecutors?: number; 40 | }; 41 | notes: string[]; 42 | } 43 | 44 | // Synapse node size to vCore mapping 45 | const NODE_SIZE_VCORES: Record<string, number> = { 46 | 'Small': 4, 47 | 'Medium': 8, 48 | 'Large': 16, 49 | 'XLarge': 32, 50 | 'XXLarge': 64 51 | }; 52 | 53 | export class SparkPoolManager { 54 | private async getAzureAccessToken(): Promise<string> { 55 | const tokenOutput = execSync( 56 | 'az account get-access-token --resource https://management.azure.com --query accessToken -o tsv', 57 | { encoding: 'utf-8' } 58 | ); 59 | return tokenOutput.trim(); 60 | } 61 | 62 | /** 63 | * List all Spark pools in a Synapse workspace 64 | */ 65 | public async listSynapseSparkPools( 66 | subscriptionId: string, 67 | resourceGroup: string, 68 | workspaceName: string 69 | ): Promise<SynapseSparkPool[]> { 70 | const token = await this.getAzureAccessToken(); 71 | const apiVersion = '2021-06-01'; 72 | 73 | const url = `https://management.azure.com/subscriptions/${subscriptionId}/resourceGroups/${resourceGroup}/providers/Microsoft.Synapse/workspaces/${workspaceName}/bigDataPools?api-version=${apiVersion}`; 74 | 75 | const response = await fetch(url, { 76 | headers: { 'Authorization': `Bearer ${token}` } 77 | }); 78 | 79 | if (!response.ok) { 80 | throw new Error(`Failed to list Synapse Spark pools: ${response.status} ${await response.text()}`); 81 | } 82 | 83 | const data = await response.json(); 84 | const pools = data.value || []; 85 | 86 | return pools.map((pool: any) => ({ 87 | id: pool.id, 88 | name: pool.name, 89 | nodeSize: pool.properties?.nodeSize || 'Medium', 90 | nodeCount: pool.properties?.nodeCount || 3, 91 | autoScale: { 92 | enabled: pool.properties?.autoScale?.enabled || false, 93 | minNodeCount: pool.properties?.autoScale?.minNodeCount, 94 | maxNodeCount: pool.properties?.autoScale?.maxNodeCount 95 | }, 96 | autoPause: { 97 | enabled: pool.properties?.autoPause?.enabled || false, 98 | delayInMinutes: pool.properties?.autoPause?.delayInMinutes 99 | }, 100 | sparkVersion: pool.properties?.sparkVersion || '3.3', 101 | dynamicExecutorAllocation: { 102 | enabled: pool.properties?.dynamicExecutorAllocation?.enabled || false, 103 | minExecutors: pool.properties?.dynamicExecutorAllocation?.minExecutors, 104 | maxExecutors: pool.properties?.dynamicExecutorAllocation?.maxExecutors 105 | } 106 | })); 107 | } 108 | 
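  // Usage sketch (illustrative placeholder values; assumes a prior `az login`):
  //   const mgr = new SparkPoolManager();
  //   const pools = await mgr.listSynapseSparkPools('<subscription-id>', '<resource-group>', '<synapse-workspace>');
  //   const recs = mgr.convertSynapseToFabricPools(pools, 'F64'); // conversion method below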
109 | /** 110 | * Convert Synapse Spark pools to Fabric Spark recommendations 111 | * Formula: 1 Synapse VCore = 2 Fabric VCores per CU 112 | * Fabric capacities get 3X burst factor 113 | */ 114 | public convertSynapseToFabricPools( 115 | synapsePools: SynapseSparkPool[], 116 | capacitySku: string 117 | ): FabricSparkPoolRecommendation[] { 118 | const recommendations: FabricSparkPoolRecommendation[] = []; 119 | 120 | for (const pool of synapsePools) { 121 | const nodeVCores = NODE_SIZE_VCORES[pool.nodeSize] || 8; // Default to Medium 122 | const synapseVCores = pool.autoScale.enabled 123 | ? (pool.autoScale.maxNodeCount || pool.nodeCount) * nodeVCores 124 | : pool.nodeCount * nodeVCores; 125 | 126 | // Convert to Fabric VCores: 1 Synapse VCore = 2 Fabric VCores 127 | const fabricVCores = synapseVCores * 2; 128 | 129 | // Determine executor configuration 130 | // Use 4 cores per executor as best practice 131 | const executorCores = 4; 132 | const maxExecutors = Math.floor(fabricVCores / executorCores); 133 | const minExecutors = pool.autoScale.enabled 134 | ? Math.floor(((pool.autoScale.minNodeCount || 1) * nodeVCores * 2) / executorCores) 135 | : Math.floor(maxExecutors / 2); 136 | 137 | const notes: string[] = []; 138 | notes.push(`Synapse: ${pool.nodeCount} × ${pool.nodeSize} nodes (${nodeVCores} vCores/node) = ${synapseVCores} total vCores`); 139 | notes.push(`Fabric conversion: ${synapseVCores} × 2 = ${fabricVCores} vCores required`); 140 | notes.push(`Capacity ${capacitySku} provides burst capacity for optimal performance`); 141 | 142 | if (pool.autoScale.enabled) { 143 | notes.push(`Synapse autoscale: ${pool.autoScale.minNodeCount}-${pool.autoScale.maxNodeCount} nodes`); 144 | notes.push(`Fabric recommendation: ${minExecutors}-${maxExecutors} executors with dynamic allocation`); 145 | } 146 | 147 | if (pool.autoPause.enabled) { 148 | notes.push(`Synapse auto-pause: ${pool.autoPause.delayInMinutes} minutes - configure similar timeout in Fabric`); 149 | } 150 | 151 | recommendations.push({ 152 | synapsePoolName: pool.name, 153 | recommendedConfig: { 154 | driverCores: 4, 155 | driverMemory: '28g', 156 | executorCores: executorCores, 157 | executorMemory: '28g', 158 | dynamicAllocation: pool.autoScale.enabled || pool.dynamicExecutorAllocation?.enabled || false, 159 | minExecutors: minExecutors, 160 | maxExecutors: maxExecutors 161 | }, 162 | notes 163 | }); 164 | } 165 | 166 | return recommendations; 167 | } 168 | 169 | /** 170 | * Validate if capacity can support the converted Spark pools 171 | */ 172 | public validateCapacityForPools( 173 | recommendations: FabricSparkPoolRecommendation[], 174 | capacitySparkVCores: number, 175 | capacityMaxBurst: number 176 | ): { canSupport: boolean; warnings: string[] } { 177 | const warnings: string[] = []; 178 | let canSupport = true; 179 | 180 | for (const rec of recommendations) { 181 | const requiredVCores = rec.recommendedConfig.maxExecutors! 
* rec.recommendedConfig.executorCores; 182 | 183 | if (requiredVCores > capacityMaxBurst) { 184 | canSupport = false; 185 | warnings.push( 186 | `❌ Pool '${rec.synapsePoolName}' requires ${requiredVCores} vCores but capacity only provides ${capacityMaxBurst} (with burst)` 187 | ); 188 | } else if (requiredVCores > capacitySparkVCores) { 189 | warnings.push( 190 | `⚠️ Pool '${rec.synapsePoolName}' requires ${requiredVCores} vCores, using burst capacity (base: ${capacitySparkVCores}, burst: ${capacityMaxBurst})` 191 | ); 192 | } else { 193 | warnings.push( 194 | `✅ Pool '${rec.synapsePoolName}' can run within base capacity (${requiredVCores}/${capacitySparkVCores} vCores)` 195 | ); 196 | } 197 | } 198 | 199 | return { canSupport, warnings }; 200 | } 201 | } 202 | -------------------------------------------------------------------------------- /docs/AZURE_CLI_AUTH.md: -------------------------------------------------------------------------------- 1 | # Azure CLI Authentication for Microsoft Fabric Analytics MCP 2 | 3 | This guide explains how to use Azure CLI authentication for easy local testing of the Microsoft Fabric Analytics MCP server. 4 | 5 | ## 🎯 **Why Azure CLI Authentication?** 6 | 7 | Azure CLI authentication provides the **easiest way** for users to test the MCP server locally by leveraging their existing Azure credentials without needing to: 8 | - Register Azure applications 9 | - Manage client secrets 10 | - Handle complex authentication flows 11 | 12 | ## 🚀 **Quick Start** 13 | 14 | ### 1. **Install Azure CLI** 15 | 16 | **Windows:** 17 | ```powershell 18 | # Using winget 19 | winget install Microsoft.AzureCLI 20 | 21 | # Or download from: https://aka.ms/installazurecliwindows 22 | ``` 23 | 24 | **macOS:** 25 | ```bash 26 | brew install azure-cli 27 | ``` 28 | 29 | **Linux:** 30 | ```bash 31 | curl -sL https://aka.ms/InstallAzureCLIDeb | sudo bash 32 | ``` 33 | 34 | ### 2. **Login to Azure** 35 | 36 | ```bash 37 | # Login with your Microsoft account 38 | az login 39 | 40 | # For specific tenant (if you have multiple) 41 | az login --tenant <tenant-id> 42 | 43 | # Verify login 44 | az account show 45 | ``` 46 | 47 | ### 3. **Test Authentication Setup** 48 | 49 | ```bash 50 | # Run the built-in test script 51 | npm run test:azure-cli 52 | ``` 53 | 54 | This will verify: 55 | - ✅ Azure CLI installation 56 | - ✅ Login status 57 | - ✅ Microsoft Fabric token acquisition 58 | - ✅ Power BI API access 59 | 60 | ### 4. **Run MCP Server with Azure CLI Auth** 61 | 62 | ```bash 63 | # Set authentication method 64 | export FABRIC_AUTH_METHOD=azure_cli 65 | 66 | # Start the server 67 | npm run start 68 | ``` 69 | 70 | **Windows PowerShell:** 71 | ```powershell 72 | $env:FABRIC_AUTH_METHOD="azure_cli" 73 | npm run start 74 | ``` 75 | 76 | ## 🔧 **Configuration Options** 77 | 78 | ### Environment Variables 79 | 80 | | Variable | Required | Description | Example | 81 | |----------|----------|-------------|---------| 82 | | `FABRIC_AUTH_METHOD` | Yes | Set to `azure_cli` | `azure_cli` | 83 | | `FABRIC_DEFAULT_WORKSPACE_ID` | Optional | Default workspace for operations | `12345678-1234-...` | 84 | 85 | ### Example Configuration 86 | 87 | **.env file:** 88 | ```bash 89 | # Authentication method 90 | FABRIC_AUTH_METHOD=azure_cli 91 | 92 | # Optional: Default workspace 93 | FABRIC_DEFAULT_WORKSPACE_ID=12345678-1234-1234-1234-123456789abc 94 | 95 | # Optional: Tenant ID for multi-tenant scenarios 96 | FABRIC_TENANT_ID=87654321-4321-4321-4321-210987654321 97 | ``` 98 | 99 | ## 🎭 **How It Works** 100 | 101 | 1. 
**Token Acquisition**: Uses `az account get-access-token` to get valid tokens 102 | 2. **Scope Management**: Automatically requests appropriate scopes: 103 | - `https://api.fabric.microsoft.com/.default` (Microsoft Fabric - primary) 104 | - `https://api.powerbi.com/.default` (Power BI) 105 | 3. **Token Caching**: Caches tokens until expiration for better performance 106 | 4. **Auto Refresh**: Automatically refreshes expired tokens 107 | 108 | ## 🔍 **Troubleshooting** 109 | 110 | ### **❌ "Azure CLI is not installed"** 111 | ```bash 112 | # Install Azure CLI (see installation section above) 113 | # Then verify: 114 | az --version 115 | ``` 116 | 117 | ### **❌ "Please run 'az login'"** 118 | ```bash 119 | az login 120 | # Follow browser prompts to complete authentication 121 | ``` 122 | 123 | ### **❌ "Authentication error - login may have expired"** 124 | ```bash 125 | az logout 126 | az login 127 | ``` 128 | 129 | ### **❌ "Failed to get access token"** 130 | This usually means permission issues: 131 | 132 | 1. **Check Fabric Access:** 133 | ```bash 134 | # Test if you can access Fabric 135 | az account get-access-token --scope "https://api.fabric.microsoft.com/.default" 136 | ``` 137 | 138 | 2. **Verify Permissions:** 139 | - Ensure your account has access to Microsoft Fabric 140 | - Check if you're in the correct tenant 141 | - Verify workspace permissions 142 | 143 | 3. **Try Specific Tenant:** 144 | ```bash 145 | az login --tenant <tenant-id> 146 | ``` 147 | 148 | ### **❌ "Multiple subscriptions found"** 149 | ```bash 150 | # Set default subscription 151 | az account set --subscription "<subscription-name-or-id>" 152 | 153 | # Or use specific subscription 154 | az login --subscription "<subscription-id>" 155 | ``` 156 | 157 | ## 🔒 **Security Considerations** 158 | 159 | ### **Permissions Required** 160 | Your Azure account needs: 161 | - **Microsoft Fabric Access**: Ability to access Fabric workspaces 162 | - **Power BI Access**: Read/write permissions to Power BI content 163 | - **Resource Access**: Permissions for specific workspaces/items you want to access 164 | 165 | ### **Token Scope** 166 | Azure CLI authentication uses these scopes: 167 | - `https://api.fabric.microsoft.com/.default` (Primary Fabric API) 168 | - `https://api.powerbi.com/.default` (Power BI API when needed) 169 | 170 | ### **Best Practices** 171 | - Use least-privilege accounts for testing 172 | - Regularly rotate Azure credentials 173 | - Use specific tenants when working with multiple organizations 174 | - Monitor token usage in Azure AD logs 175 | 176 | ## 🚀 **Example Usage** 177 | 178 | ### **1. List Workspaces** 179 | ```bash 180 | # Set auth method 181 | export FABRIC_AUTH_METHOD=azure_cli 182 | 183 | # Start MCP server 184 | npm run start 185 | ``` 186 | 187 | Then in your MCP client: 188 | ```json 189 | { 190 | "method": "tools/call", 191 | "params": { 192 | "name": "list-workspaces" 193 | } 194 | } 195 | ``` 196 | 197 | ### **2. Create Items** 198 | ```json 199 | { 200 | "method": "tools/call", 201 | "params": { 202 | "name": "create-fabric-item", 203 | "arguments": { 204 | "workspaceId": "your-workspace-id", 205 | "itemType": "Lakehouse", 206 | "displayName": "My Test Lakehouse" 207 | } 208 | } 209 | } 210 | ``` 211 | 212 | ### **3. Run Data Pipelines** 213 | ```json 214 | { 215 | "method": "tools/call", 216 | "params": { 217 | "name": "fabric_run_data_pipeline", 218 | "arguments": { 219 | "workspaceId": "your-workspace-id", 220 | "pipelineId": "your-pipeline-id" 221 | } 222 | } 223 | } 224 | ``` 225 | 226 | ## 🔄 **Token Lifecycle** 227 | 228 | 1. 
226 | ## 🔄 **Token Lifecycle**
227 | 
228 | 1. **First Request**: Acquires token using `az account get-access-token`
229 | 2. **Subsequent Requests**: Uses cached token if still valid
230 | 3. **Token Expiry**: Automatically refreshes when expired
231 | 4. **Error Handling**: Falls back to fresh authentication on failures
232 | 
233 | ## 📊 **Monitoring**
234 | 
235 | The MCP server provides helpful logging:
236 | 
237 | ```bash
238 | ✅ Azure CLI is installed
239 | ✅ Logged in as: user@company.com
240 |    Subscription: My Subscription (12345...)
241 |    Tenant: 87654321...
242 | ✅ Azure CLI authentication successful
243 | ```
244 | 
245 | ## 🎯 **Benefits**
246 | 
247 | - ✅ **Zero Configuration**: No app registration needed
248 | - ✅ **Familiar Flow**: Uses existing Azure login
249 | - ✅ **Secure**: Leverages Azure's security infrastructure
250 | - ✅ **Multi-tenant**: Works across different tenants
251 | - ✅ **Auto-refresh**: Handles token lifecycle automatically
252 | - ✅ **Debugging**: Clear error messages and troubleshooting
253 | 
254 | ## 🆚 **vs Other Auth Methods**
255 | 
256 | | Feature | Azure CLI | Service Principal | Interactive | Device Code |
257 | |---------|-----------|-------------------|-------------|-------------|
258 | | Setup Complexity | ⭐ Minimal | ⭐⭐⭐ Complex | ⭐⭐ Medium | ⭐⭐ Medium |
259 | | Local Testing | ✅ Perfect | ❌ Not ideal | ⚠️ OK | ⚠️ OK |
260 | | CI/CD | ❌ Not suitable | ✅ Perfect | ❌ Not suitable | ❌ Not suitable |
261 | | Security | ✅ User-based | ✅ App-based | ✅ User-based | ✅ User-based |
262 | | Convenience | ✅ Excellent | ❌ Poor | ⚠️ OK | ⚠️ OK |
263 | 
264 | **Recommendation**: Use Azure CLI for local development and testing, Service Principal for production/CI-CD.
265 | 
--------------------------------------------------------------------------------
/python-wrapper/fabric_analytics_mcp/server_manager.py:
--------------------------------------------------------------------------------
1 | """
2 | Server manager for Microsoft Fabric Analytics MCP Server
3 | 
4 | This module manages the Node.js MCP server process from Python.
5 | """
6 | 
7 | import subprocess
8 | import os
9 | import signal
10 | import threading
11 | import time
12 | from pathlib import Path
13 | from typing import Optional, Dict, Any
14 | 
15 | 
16 | class FabricMCPServer:
17 |     """Manages the Microsoft Fabric Analytics MCP Server process."""
18 | 
19 |     def __init__(self, config: Optional[Dict[str, Any]] = None):
20 |         """Initialize the server manager.
21 | 
22 |         Args:
23 |             config: Optional configuration dictionary
24 |         """
25 |         self.config = config or {}
26 |         self.process: Optional[subprocess.Popen] = None
27 |         self.is_running = False
28 |         self._monitor_thread: Optional[threading.Thread] = None
29 | 
30 |     def start(self, stdio: bool = True, timeout: int = 30) -> bool:
31 |         """Start the MCP server.
32 | 
33 |         Args:
34 |             stdio: Whether to use stdio transport (default for MCP)
35 |             timeout: Timeout in seconds to wait for server start
36 | 
37 |         Returns:
38 |             True if server started successfully, False otherwise
39 |         """
40 |         if self.is_running:
41 |             return True
42 | 
43 |         # Find Node.js executable
44 |         node_cmd = self._find_node()
45 |         if not node_cmd:
46 |             raise RuntimeError("Node.js not found. 
Please install Node.js 18+") 47 | 48 | # Get server path 49 | server_path = self._get_server_path() 50 | if not server_path.exists(): 51 | raise RuntimeError(f"MCP server not found at {server_path}") 52 | 53 | # Prepare environment 54 | env = os.environ.copy() 55 | for key, value in self.config.items(): 56 | env[key.upper()] = str(value) 57 | 58 | try: 59 | # Start the server process 60 | if stdio: 61 | # Standard MCP stdio transport 62 | self.process = subprocess.Popen( 63 | [node_cmd, str(server_path)], 64 | env=env, 65 | stdin=subprocess.PIPE, 66 | stdout=subprocess.PIPE, 67 | stderr=subprocess.PIPE, 68 | text=True, 69 | bufsize=0 70 | ) 71 | else: 72 | # HTTP transport (for testing/debugging) 73 | env['MCP_TRANSPORT'] = 'http' 74 | env['MCP_PORT'] = str(self.config.get('port', 3000)) 75 | self.process = subprocess.Popen( 76 | [node_cmd, str(server_path)], 77 | env=env 78 | ) 79 | 80 | # Wait a moment for startup 81 | time.sleep(2) 82 | 83 | # Check if process is still running 84 | if self.process.poll() is None: 85 | self.is_running = True 86 | self._start_monitor() 87 | return True 88 | else: 89 | self.process = None 90 | return False 91 | 92 | except Exception as e: 93 | raise RuntimeError(f"Failed to start MCP server: {e}") 94 | 95 | def stop(self, timeout: int = 10) -> bool: 96 | """Stop the MCP server. 97 | 98 | Args: 99 | timeout: Timeout in seconds to wait for graceful shutdown 100 | 101 | Returns: 102 | True if server stopped successfully, False otherwise 103 | """ 104 | if not self.is_running or not self.process: 105 | return True 106 | 107 | try: 108 | # Try graceful shutdown first 109 | self.process.terminate() 110 | 111 | # Wait for graceful shutdown 112 | try: 113 | self.process.wait(timeout=timeout) 114 | except subprocess.TimeoutExpired: 115 | # Force kill if necessary 116 | self.process.kill() 117 | self.process.wait() 118 | 119 | self.process = None 120 | self.is_running = False 121 | 122 | # Stop monitor thread 123 | if self._monitor_thread and self._monitor_thread.is_alive(): 124 | self._monitor_thread.join(timeout=5) 125 | 126 | return True 127 | 128 | except Exception: 129 | return False 130 | 131 | def send_request(self, request: Dict[str, Any]) -> Optional[Dict[str, Any]]: 132 | """Send a JSON-RPC request to the server. 133 | 134 | Args: 135 | request: JSON-RPC request dictionary 136 | 137 | Returns: 138 | Response dictionary or None if failed 139 | """ 140 | if not self.is_running or not self.process: 141 | return None 142 | 143 | try: 144 | import json 145 | 146 | # Send request 147 | request_json = json.dumps(request) + '\n' 148 | self.process.stdin.write(request_json) 149 | self.process.stdin.flush() 150 | 151 | # Read response 152 | response_line = self.process.stdout.readline() 153 | if response_line: 154 | return json.loads(response_line.strip()) 155 | 156 | except Exception: 157 | pass 158 | 159 | return None 160 | 161 | def list_tools(self) -> Optional[Dict[str, Any]]: 162 | """Get list of available tools from the server. 
163 | 164 | Returns: 165 | Tools list response or None if failed 166 | """ 167 | request = { 168 | "jsonrpc": "2.0", 169 | "id": 1, 170 | "method": "tools/list" 171 | } 172 | return self.send_request(request) 173 | 174 | def _find_node(self) -> Optional[str]: 175 | """Find Node.js executable.""" 176 | import shutil 177 | for cmd in ["node", "nodejs"]: 178 | if shutil.which(cmd): 179 | return cmd 180 | return None 181 | 182 | def _get_server_path(self) -> Path: 183 | """Get path to the MCP server.""" 184 | package_dir = Path(__file__).parent 185 | return package_dir / "server" / "build" / "index.js" 186 | 187 | def _start_monitor(self): 188 | """Start background thread to monitor server process.""" 189 | if self._monitor_thread and self._monitor_thread.is_alive(): 190 | return 191 | 192 | self._monitor_thread = threading.Thread( 193 | target=self._monitor_process, 194 | daemon=True 195 | ) 196 | self._monitor_thread.start() 197 | 198 | def _monitor_process(self): 199 | """Monitor the server process in background.""" 200 | while self.is_running and self.process: 201 | if self.process.poll() is not None: 202 | # Process has ended 203 | self.is_running = False 204 | break 205 | time.sleep(1) 206 | 207 | def __enter__(self): 208 | """Context manager entry.""" 209 | self.start() 210 | return self 211 | 212 | def __exit__(self, exc_type, exc_val, exc_tb): 213 | """Context manager exit.""" 214 | self.stop() 215 | 216 | def __del__(self): 217 | """Cleanup on deletion.""" 218 | if self.is_running: 219 | self.stop() 220 | -------------------------------------------------------------------------------- /AUTHENTICATION_SETUP.md: -------------------------------------------------------------------------------- 1 | # 🔐 **Authentication Configuration Guide** 2 | 3 | This guide shows how to configure different authentication methods for production deployment of the Microsoft Fabric Analytics MCP Server. 4 | 5 | ## 📋 **Supported Authentication Methods** 6 | 7 | ### **1. 🎫 Bearer Token Authentication** 8 | **Use Case**: Development, testing, or when you already have a valid token 9 | **Configuration**: Pass bearer token directly to tools 10 | 11 | ### **2. 🤖 Service Principal Authentication** ⭐ **RECOMMENDED FOR PRODUCTION** 12 | **Use Case**: Production deployment, automated systems, CI/CD pipelines 13 | **Configuration**: Environment variables for SPN credentials 14 | 15 | ### **3. 📱 Device Code Authentication** 16 | **Use Case**: Headless environments, remote servers 17 | **Configuration**: Interactive device code flow 18 | 19 | ### **4. 🌐 Interactive Authentication** 20 | **Use Case**: Development, testing on local machines 21 | **Configuration**: Browser-based authentication 22 | 23 | --- 24 | 25 | ## 🚀 **Production Setup: Service Principal Authentication** 26 | 27 | ### **Step 1: Create Azure AD Application** 28 | 29 | 1. **Go to Azure Portal** → Azure Active Directory → App registrations 30 | 2. **Click "New registration"** 31 | 3. **Fill out the form**: 32 | - Name: `Fabric-Analytics-MCP-Server` 33 | - Supported account types: `Accounts in this organizational directory only` 34 | - Redirect URI: Leave blank 35 | 4. **Click "Register"** 36 | 37 | ### **Step 2: Generate Client Secret** 38 | 39 | 1. **Go to your app** → Certificates & secrets 40 | 2. **Click "New client secret"** 41 | 3. **Add description**: `MCP Server Secret` 42 | 4. **Set expiration**: 24 months (recommended) 43 | 5. **Click "Add"** 44 | 6. **Copy the secret value** immediately (you won't see it again!) 
45 | 46 | ### **Step 3: Grant Microsoft Fabric Permissions** 47 | 48 | 1. **Go to your app** → API permissions 49 | 2. **Click "Add a permission"** 50 | 3. **Select "APIs my organization uses"** 51 | 4. **Search for "Power BI Service"** or **"Microsoft Fabric"** 52 | 5. **Select application permissions**: 53 | - `Dataset.ReadWrite.All` 54 | - `Workspace.ReadWrite.All` 55 | - `Item.ReadWrite.All` 56 | 6. **Click "Grant admin consent"** 57 | 58 | ### **Step 4: Environment Variables Configuration** 59 | 60 | Create these environment variables for production deployment: 61 | 62 | ```bash 63 | # Service Principal Configuration 64 | FABRIC_CLIENT_ID=your-app-client-id 65 | FABRIC_CLIENT_SECRET=your-app-client-secret 66 | FABRIC_TENANT_ID=your-tenant-id 67 | 68 | # Optional: Default workspace 69 | FABRIC_DEFAULT_WORKSPACE_ID=your-default-workspace-id 70 | 71 | # Authentication method (default: bearer) 72 | FABRIC_AUTH_METHOD=service_principal 73 | ``` 74 | 75 | ### **Step 5: Claude Desktop Configuration for Production** 76 | 77 | Update your Claude Desktop config to use environment variables: 78 | 79 | ```json 80 | { 81 | "mcpServers": { 82 | "fabric-analytics": { 83 | "command": "node", 84 | "args": ["C:\\path\\to\\your\\project\\build\\index.js"], 85 | "env": { 86 | "FABRIC_AUTH_METHOD": "service_principal", 87 | "FABRIC_CLIENT_ID": "your-app-client-id", 88 | "FABRIC_CLIENT_SECRET": "your-app-client-secret", 89 | "FABRIC_TENANT_ID": "your-tenant-id" 90 | } 91 | } 92 | } 93 | } 94 | ``` 95 | 96 | --- 97 | 98 | ## 🔧 **Authentication Method Configuration** 99 | 100 | ### **Method 1: Environment Variables (Recommended)** 101 | 102 | Set these in your system or deployment environment: 103 | 104 | ```bash 105 | # Windows (PowerShell) 106 | $env:FABRIC_AUTH_METHOD="service_principal" 107 | $env:FABRIC_CLIENT_ID="your-client-id" 108 | $env:FABRIC_CLIENT_SECRET="your-client-secret" 109 | $env:FABRIC_TENANT_ID="your-tenant-id" 110 | 111 | # Linux/macOS 112 | export FABRIC_AUTH_METHOD="service_principal" 113 | export FABRIC_CLIENT_ID="your-client-id" 114 | export FABRIC_CLIENT_SECRET="your-client-secret" 115 | export FABRIC_TENANT_ID="your-tenant-id" 116 | ``` 117 | 118 | ### **Method 2: Claude Desktop Environment** 119 | 120 | Configure directly in Claude Desktop config: 121 | 122 | ```json 123 | { 124 | "mcpServers": { 125 | "fabric-analytics": { 126 | "command": "node", 127 | "args": ["C:\\path\\to\\build\\index.js"], 128 | "env": { 129 | "FABRIC_AUTH_METHOD": "service_principal", 130 | "FABRIC_CLIENT_ID": "12345678-1234-1234-1234-123456789abc", 131 | "FABRIC_CLIENT_SECRET": "your-secret-here", 132 | "FABRIC_TENANT_ID": "87654321-4321-4321-4321-cba987654321" 133 | } 134 | } 135 | } 136 | } 137 | ``` 138 | 139 | ### **Method 3: Device Code Flow** 140 | 141 | For headless environments: 142 | 143 | ```json 144 | { 145 | "mcpServers": { 146 | "fabric-analytics": { 147 | "command": "node", 148 | "args": ["C:\\path\\to\\build\\index.js"], 149 | "env": { 150 | "FABRIC_AUTH_METHOD": "device_code", 151 | "FABRIC_CLIENT_ID": "your-client-id", 152 | "FABRIC_TENANT_ID": "your-tenant-id" 153 | } 154 | } 155 | } 156 | } 157 | ``` 158 | 159 | --- 160 | 161 | ## 🛡️ **Security Best Practices** 162 | 163 | ### **✅ DO:** 164 | - Use Service Principal authentication for production 165 | - Store secrets in environment variables or secure key vaults 166 | - Rotate client secrets regularly (every 6-12 months) 167 | - Use least-privilege permissions 168 | - Monitor authentication logs 169 | 170 | ### **❌ DON'T:** 171 | - 
Hardcode secrets in configuration files 172 | - Commit secrets to version control 173 | - Use personal bearer tokens in production 174 | - Share client secrets via email or chat 175 | - Use overly broad permissions 176 | 177 | --- 178 | 179 | ## 🧪 **Testing Authentication Setup** 180 | 181 | ### **Test Service Principal Authentication** 182 | 183 | Run this command to verify your SPN setup: 184 | 185 | ```bash 186 | # With environment variables set 187 | node -e " 188 | const { MicrosoftAuthClient } = require('./build/auth-client.js'); 189 | const client = new MicrosoftAuthClient({ 190 | clientId: process.env.FABRIC_CLIENT_ID 191 | }); 192 | client.authenticateWithServicePrincipal( 193 | process.env.FABRIC_CLIENT_ID, 194 | process.env.FABRIC_CLIENT_SECRET, 195 | process.env.FABRIC_TENANT_ID 196 | ).then(result => { 197 | console.log('✅ Authentication successful!'); 198 | console.log('Token expires:', result.expiresOn); 199 | }).catch(err => { 200 | console.error('❌ Authentication failed:', err.message); 201 | }); 202 | " 203 | ``` 204 | 205 | ### **Test with Claude Desktop** 206 | 207 | Try these queries after authentication setup: 208 | 209 | ``` 210 | "Test my Microsoft Fabric authentication setup" 211 | "List all items in my default workspace" 212 | "What authentication method am I using?" 213 | ``` 214 | 215 | --- 216 | 217 | ## 🔄 **Authentication Flow Diagram** 218 | 219 | ``` 220 | Claude Desktop → MCP Server → Auth Check → Microsoft Fabric API 221 | ↓ 222 | Environment Variables 223 | ↓ 224 | Service Principal Flow 225 | ↓ 226 | Access Token Cache 227 | ↓ 228 | API Request with Token 229 | ``` 230 | 231 | --- 232 | 233 | ## 📞 **Support & Troubleshooting** 234 | 235 | ### **Common Issues:** 236 | 237 | 1. **"Authentication failed"** 238 | - Verify client ID, secret, and tenant ID 239 | - Check API permissions are granted 240 | - Ensure admin consent is provided 241 | 242 | 2. **"Token expired"** 243 | - The MCP server handles token refresh automatically 244 | - Check if client secret has expired 245 | 246 | 3. **"Insufficient permissions"** 247 | - Verify Microsoft Fabric API permissions 248 | - Check workspace access rights 249 | 250 | ### **Debug Mode:** 251 | 252 | Enable debug logging by setting: 253 | 254 | ```bash 255 | export FABRIC_DEBUG_AUTH=true 256 | ``` 257 | 258 | This will show detailed authentication flow information. 
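For a standalone sanity check of the same client-credentials flow, a minimal sketch with `@azure/msal-node` is shown below. This is illustrative only and is not the server's own auth-client implementation; it simply exercises the identical service principal credentials and scope:

```typescript
import { ConfidentialClientApplication } from "@azure/msal-node";

const app = new ConfidentialClientApplication({
  auth: {
    clientId: process.env.FABRIC_CLIENT_ID!,
    clientSecret: process.env.FABRIC_CLIENT_SECRET!,
    authority: `https://login.microsoftonline.com/${process.env.FABRIC_TENANT_ID}`,
  },
});

async function main(): Promise<void> {
  // Client-credentials flow: an app-only token for the Fabric API.
  const result = await app.acquireTokenByClientCredential({
    scopes: ["https://api.fabric.microsoft.com/.default"],
  });
  console.log("✅ Authentication successful!");
  console.log("Token expires:", result?.expiresOn);
}

main().catch((err) => console.error("❌ Authentication failed:", err.message));
```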
259 | -------------------------------------------------------------------------------- /scripts/build-and-push.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | # Microsoft Fabric Analytics MCP Server - Docker Build and Push Script 4 | # This script builds the Docker image and pushes it to Azure Container Registry 5 | 6 | set -euo pipefail 7 | 8 | # Colors for output 9 | RED='\033[0;31m' 10 | GREEN='\033[0;32m' 11 | YELLOW='\033[1;33m' 12 | BLUE='\033[0;34m' 13 | NC='\033[0m' # No Color 14 | 15 | # Configuration 16 | SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)" 17 | PROJECT_ROOT="$(dirname "$SCRIPT_DIR")" 18 | IMAGE_NAME="fabric-analytics-mcp" 19 | VERSION="${VERSION:-latest}" 20 | 21 | # Azure Container Registry configuration 22 | ACR_NAME="${ACR_NAME:-}" 23 | AZURE_SUBSCRIPTION_ID="${AZURE_SUBSCRIPTION_ID:-}" 24 | RESOURCE_GROUP="${RESOURCE_GROUP:-fabric-mcp-rg}" 25 | 26 | # Function to print colored output 27 | print_status() { 28 | echo -e "${BLUE}[INFO]${NC} $1" 29 | } 30 | 31 | print_success() { 32 | echo -e "${GREEN}[SUCCESS]${NC} $1" 33 | } 34 | 35 | print_warning() { 36 | echo -e "${YELLOW}[WARNING]${NC} $1" 37 | } 38 | 39 | print_error() { 40 | echo -e "${RED}[ERROR]${NC} $1" 41 | } 42 | 43 | # Function to check prerequisites 44 | check_prerequisites() { 45 | print_status "Checking prerequisites..." 46 | 47 | # Check if Docker is installed and running 48 | if ! command -v docker &> /dev/null; then 49 | print_error "Docker is not installed or not in PATH" 50 | exit 1 51 | fi 52 | 53 | if ! docker info &> /dev/null; then 54 | print_error "Docker daemon is not running" 55 | exit 1 56 | fi 57 | 58 | # Check if Azure CLI is installed 59 | if ! command -v az &> /dev/null; then 60 | print_error "Azure CLI is not installed or not in PATH" 61 | exit 1 62 | fi 63 | 64 | # Check if logged into Azure 65 | if ! az account show &> /dev/null; then 66 | print_error "Not logged into Azure. Please run 'az login' first" 67 | exit 1 68 | fi 69 | 70 | print_success "Prerequisites check passed" 71 | } 72 | 73 | # Function to validate configuration 74 | validate_config() { 75 | print_status "Validating configuration..." 76 | 77 | if [[ -z "$ACR_NAME" ]]; then 78 | print_error "ACR_NAME environment variable is not set" 79 | print_status "Please set ACR_NAME to your Azure Container Registry name" 80 | exit 1 81 | fi 82 | 83 | if [[ -z "$AZURE_SUBSCRIPTION_ID" ]]; then 84 | print_warning "AZURE_SUBSCRIPTION_ID not set, using current subscription" 85 | AZURE_SUBSCRIPTION_ID=$(az account show --query id -o tsv) 86 | fi 87 | 88 | print_success "Configuration validation passed" 89 | } 90 | 91 | # Function to build Docker image 92 | build_image() { 93 | print_status "Building Docker image..." 94 | 95 | cd "$PROJECT_ROOT" 96 | 97 | # Build the image 98 | docker build \ 99 | --tag "${IMAGE_NAME}:${VERSION}" \ 100 | --tag "${IMAGE_NAME}:latest" \ 101 | --label "version=${VERSION}" \ 102 | --label "build-date=$(date -u +'%Y-%m-%dT%H:%M:%SZ')" \ 103 | --label "git-commit=$(git rev-parse --short HEAD 2>/dev/null || echo 'unknown')" \ 104 | --label "git-branch=$(git rev-parse --abbrev-ref HEAD 2>/dev/null || echo 'unknown')" \ 105 | . 106 | 107 | print_success "Docker image built successfully" 108 | } 109 | 110 | # Function to test the image 111 | test_image() { 112 | print_status "Testing Docker image..." 
113 | 
114 |     # Run a quick test to ensure the image starts properly
115 |     CONTAINER_ID=$(docker run -d --rm -p 3000:3000 "${IMAGE_NAME}:${VERSION}")
116 | 
117 |     # Wait a few seconds for the container to start
118 |     sleep 5
119 | 
120 |     # Check if container is still running (compare full, untruncated IDs)
121 |     if docker ps -q --no-trunc | grep -q "$CONTAINER_ID"; then
122 |         print_success "Image test passed"
123 |         docker stop "$CONTAINER_ID" || true
124 |     else
125 |         print_error "Image test failed - container exited"
126 |         docker logs "$CONTAINER_ID" || true
127 |         exit 1
128 |     fi
129 | }
130 | 
131 | # Function to login to Azure Container Registry
132 | acr_login() {
133 |     print_status "Logging into Azure Container Registry..."
134 | 
135 |     # Set the subscription
136 |     az account set --subscription "$AZURE_SUBSCRIPTION_ID"
137 | 
138 |     # Login to ACR
139 |     az acr login --name "$ACR_NAME"
140 | 
141 |     print_success "Successfully logged into ACR"
142 | }
143 | 
144 | # Function to tag and push image
145 | push_image() {
146 |     print_status "Tagging and pushing image to ACR..."
147 | 
148 |     local acr_url="${ACR_NAME}.azurecr.io"
149 |     local full_image_name="${acr_url}/${IMAGE_NAME}"
150 | 
151 |     # Tag the image for ACR
152 |     docker tag "${IMAGE_NAME}:${VERSION}" "${full_image_name}:${VERSION}"
153 |     docker tag "${IMAGE_NAME}:${VERSION}" "${full_image_name}:latest"
154 | 
155 |     # Push the images
156 |     docker push "${full_image_name}:${VERSION}"
157 |     docker push "${full_image_name}:latest"
158 | 
159 |     print_success "Image pushed successfully to ${full_image_name}:${VERSION}"
160 | 
161 |     # Output the full image name for use in deployment
162 |     echo ""
163 |     print_status "Image ready for deployment:"
164 |     echo "  ${full_image_name}:${VERSION}"
165 |     echo "  ${full_image_name}:latest"
166 | }
167 | 
168 | # Function to cleanup local images (optional)
169 | cleanup() {
170 |     if [[ "${CLEANUP_LOCAL:-false}" == "true" ]]; then
171 |         print_status "Cleaning up local images..."
172 | docker rmi "${IMAGE_NAME}:${VERSION}" "${IMAGE_NAME}:latest" || true 173 | print_success "Local images cleaned up" 174 | fi 175 | } 176 | 177 | # Function to display usage 178 | usage() { 179 | echo "Usage: $0 [OPTIONS]" 180 | echo "" 181 | echo "Options:" 182 | echo " -h, --help Show this help message" 183 | echo " -v, --version VERSION Set image version (default: latest)" 184 | echo " -t, --test Run image tests after build" 185 | echo " -c, --cleanup Cleanup local images after push" 186 | echo " --no-push Build only, don't push to registry" 187 | echo "" 188 | echo "Environment Variables:" 189 | echo " ACR_NAME Azure Container Registry name (required)" 190 | echo " AZURE_SUBSCRIPTION_ID Azure subscription ID (optional)" 191 | echo " RESOURCE_GROUP Azure resource group (default: fabric-mcp-rg)" 192 | echo " VERSION Image version (default: latest)" 193 | echo " CLEANUP_LOCAL Cleanup local images after push (default: false)" 194 | echo "" 195 | echo "Examples:" 196 | echo " ACR_NAME=myregistry $0" 197 | echo " ACR_NAME=myregistry $0 --version v1.0.0 --test" 198 | echo " ACR_NAME=myregistry $0 --no-push" 199 | } 200 | 201 | # Main function 202 | main() { 203 | local run_tests=false 204 | local push_to_registry=true 205 | 206 | # Parse command line arguments 207 | while [[ $# -gt 0 ]]; do 208 | case $1 in 209 | -h|--help) 210 | usage 211 | exit 0 212 | ;; 213 | -v|--version) 214 | VERSION="$2" 215 | shift 2 216 | ;; 217 | -t|--test) 218 | run_tests=true 219 | shift 220 | ;; 221 | -c|--cleanup) 222 | CLEANUP_LOCAL=true 223 | shift 224 | ;; 225 | --no-push) 226 | push_to_registry=false 227 | shift 228 | ;; 229 | *) 230 | print_error "Unknown option: $1" 231 | usage 232 | exit 1 233 | ;; 234 | esac 235 | done 236 | 237 | print_status "Starting Docker build and push process..." 238 | 239 | # Run the build process 240 | check_prerequisites 241 | validate_config 242 | build_image 243 | 244 | if [[ "$run_tests" == "true" ]]; then 245 | test_image 246 | fi 247 | 248 | if [[ "$push_to_registry" == "true" ]]; then 249 | acr_login 250 | push_image 251 | else 252 | print_status "Skipping push to registry (--no-push specified)" 253 | fi 254 | 255 | cleanup 256 | 257 | print_success "Build and push process completed successfully!" 258 | } 259 | 260 | # Run main function if script is executed directly 261 | if [[ "${BASH_SOURCE[0]}" == "${0}" ]]; then 262 | main "$@" 263 | fi 264 | -------------------------------------------------------------------------------- /MCP_TOOLS_USER_GUIDE.md: -------------------------------------------------------------------------------- 1 | # MCP Tools User Guide - Synapse to Fabric Migration 2 | 3 | ## Overview 4 | This guide shows how to use the Fabric Analytics MCP tools through GitHub Copilot to migrate from Azure Synapse Analytics to Microsoft Fabric. 5 | 6 | ## Available MCP Tools 7 | 8 | ### 1. **fabric_synapse_workspace_details** 9 | Get complete details about a Synapse workspace including notebooks, pipelines, Spark pools, and linked services. 
10 | 11 | **Example Copilot Prompt:** 12 | ``` 13 | Get me the details of my Synapse workspace 'samplesynapseworkspace' in resource group 'sparkpmteam_rc' 14 | ``` 15 | 16 | **What it does:** 17 | - ✅ Calls Azure Management API to get workspace metadata 18 | - ✅ Discovers all notebooks, pipelines, and Spark pools 19 | - ✅ Returns inventory for migration planning 20 | 21 | **Azure API Integration:** 22 | - Azure Resource Manager API (workspace metadata) 23 | - Synapse Management API (notebooks, pipelines) 24 | - Spark Pool Management API (pool configurations) 25 | 26 | --- 27 | 28 | ### 2. **fabric_synapse_compute_spend** 29 | Analyze compute spending for a Synapse workspace over a time period. 30 | 31 | **Example Copilot Prompt:** 32 | ``` 33 | Show me the compute spend for my Synapse workspace 'samplesynapseworkspace' over the last 30 days 34 | ``` 35 | 36 | **What it does:** 37 | - ✅ Calls Azure Cost Management API for actual spend data 38 | - ✅ Falls back to intelligent estimation based on pool configurations 39 | - ✅ Breaks down costs by Spark pools, storage, and other services 40 | 41 | **Azure API Integration:** 42 | - Azure Cost Management API (actual spend data) 43 | - Spark Pool API (for estimation if Cost API unavailable) 44 | 45 | --- 46 | 47 | ### 3. **fabric_recommend_fabric_capacity** 48 | Get intelligent Fabric capacity recommendations based on Synapse compute usage. 49 | 50 | **Example Copilot Prompt:** 51 | ``` 52 | What Fabric capacity should I use to migrate my Synapse workspace with 72 vCores? 53 | ``` 54 | 55 | **What it does:** 56 | - ✅ Fetches real-time Fabric pricing from Azure Retail Prices API 57 | - ✅ Calculates required Fabric vCores (1 Synapse vCore = 2 Fabric vCores) 58 | - ✅ Recommends optimal F-series SKU with confidence level 59 | - ✅ Shows cost comparison and potential savings 60 | 61 | **Azure API Integration:** 62 | - Azure Retail Prices API (real-time Fabric pricing) 63 | - Uses Capacity Unit (CU) pricing at $0.18/hour per CU 64 | - Calculates F-series pricing: F64 = 64 CUs × $0.18 = $11.52/hour 65 | 66 | --- 67 | 68 | ### 4. **fabric_migrate_spark_pools_to_fabric** 69 | Convert Synapse Spark pool configurations to Fabric equivalents and validate capacity. 70 | 71 | **Example Copilot Prompt:** 72 | ``` 73 | Migrate my Synapse Spark pools to Fabric F64 capacity 74 | ``` 75 | 76 | **What it does:** 77 | - ✅ Converts Synapse node sizes to Fabric executor configurations 78 | - ✅ Applies 1:2 vCore conversion ratio 79 | - ✅ Validates all pools fit within target capacity 80 | - ✅ Provides detailed migration recommendations 81 | 82 | **Conversion Logic:** 83 | - Synapse Small (4 vCores) → Fabric 8 vCores 84 | - Synapse Medium (8 vCores) → Fabric 16 vCores 85 | - Synapse Large (16 vCores) → Fabric 32 vCores 86 | - Auto-scale → Dynamic executor allocation 87 | 88 | --- 89 | 90 | ### 5. **fabric_migrate_synapse_to_fabric** (Full Migration) 91 | Execute complete end-to-end migration from Synapse to Fabric. 
92 | 
93 | **Example Copilot Prompt:**
94 | ```
95 | Migrate my Synapse workspace 'samplesynapseworkspace' to Fabric workspace '43f2c633-e5c1-4e1d-906e-789cd4f081a2'
96 | ```
97 | 
98 | **What it does:**
99 | - ✅ Discovers all Synapse assets
100 | - ✅ Transforms notebooks (mssparkutils → notebookutils)
101 | - ✅ Provisions notebooks to Fabric workspace
102 | - ✅ Creates lakehouse if needed
103 | - ✅ Generates comprehensive migration report
104 | 
105 | ---
106 | 
107 | ## Example End-to-End Migration Workflow
108 | 
109 | ### Step 1: Get Workspace Details
110 | **Copilot Prompt:**
111 | ```
112 | Show me all the assets in my Synapse workspace 'samplesynapseworkspace'
113 | ```
114 | 
115 | **Tool Used:** `fabric_synapse_workspace_details`
116 | 
117 | **Result:**
118 | - 2 notebooks discovered
119 | - 2 Spark pools (SampleSpark, sampleLargePool)
120 | - 72 total Synapse vCores
121 | 
122 | ---
123 | 
124 | ### Step 2: Analyze Compute Spend
125 | **Copilot Prompt:**
126 | ```
127 | What's my monthly compute spend for this Synapse workspace?
128 | ```
129 | 
130 | **Tool Used:** `fabric_synapse_compute_spend`
131 | 
132 | **Result:**
133 | - Total spend: $22,080/month (estimated)
134 | - Primary driver: Spark Compute
135 | - Period: Last 30 days
136 | 
137 | ---
138 | 
139 | ### Step 3: Get Capacity Recommendation
140 | **Copilot Prompt:**
141 | ```
142 | Recommend a Fabric capacity for migrating this workspace
143 | ```
144 | 
145 | **Tool Used:** `fabric_recommend_fabric_capacity`
146 | 
147 | **Result:**
148 | - **Recommended SKU: F64**
149 | - **Confidence: HIGH**
150 | - **Pricing Source: Azure Retail Prices API**
151 | - **Cost: $8,409.60/month**
152 | - **Savings: $13,670.40/month (61.9%)**
153 | 
154 | ---
155 | 
156 | ### Step 4: Validate Spark Pool Migration
157 | **Copilot Prompt:**
158 | ```
159 | Validate my Spark pools can run on Fabric F64 capacity
160 | ```
161 | 
162 | **Tool Used:** `fabric_migrate_spark_pools_to_fabric`
163 | 
164 | **Result:**
165 | - ✅ All pools can run within base capacity
166 | - ✅ SampleSpark: 48/128 vCores
167 | - ✅ sampleLargePool: 96/128 vCores
168 | - Detailed executor configurations provided
169 | 
170 | ---
171 | 
172 | ### Step 5: Execute Full Migration (Optional)
173 | **Copilot Prompt:**
174 | ```
175 | Migrate everything to my Fabric workspace with lakehouse 'SynapseMigration'
176 | ```
177 | 
178 | **Tool Used:** `fabric_migrate_synapse_to_fabric`
179 | 
180 | **Result:**
181 | - Notebooks transformed and provisioned
182 | - Lakehouse created
183 | - Migration report generated
184 | 
185 | ---
186 | 
187 | ## Azure API Integration Summary
188 | 
189 | ### APIs Used:
190 | | API | Purpose | Authentication |
191 | |-----|---------|----------------|
192 | | **Azure Resource Manager** | Workspace metadata | Azure CLI token |
193 | | **Azure Cost Management** | Actual spend data | Azure CLI token |
194 | | **Azure Retail Prices** | Real-time Fabric pricing | Public API (no auth) |
195 | | **Synapse Management** | Notebooks, pipelines | Azure CLI token |
196 | | **Spark Pool Management** | Pool configurations | Azure CLI token |
197 | 
198 | ### Pricing Accuracy:
199 | - ✅ **Real-time pricing** from Azure Retail Prices API
200 | - ✅ **Capacity Unit (CU) based:** $0.18/hour per CU
201 | - ✅ **F-series calculation:** F64 = 64 CUs × $0.18 = $11.52/hour
202 | - ✅ **Regional pricing** based on workspace location
203 | 
204 | ---
205 | 
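The capacity math behind these figures is easy to reproduce. A small illustrative TypeScript snippet (assuming a 730-hour month, i.e. 8,760 hours / 12, and the Step 2 spend estimate) recovers the exact numbers quoted in this guide:

```typescript
// Reproduce the F64 figures quoted above.
const CU_PRICE_PER_HOUR = 0.18; // pay-as-you-go price per Capacity Unit
const F64_CUS = 64;
const HOURS_PER_MONTH = 730;    // 8,760 hours / 12 months

const hourly = F64_CUS * CU_PRICE_PER_HOUR;        // 64 × $0.18 = $11.52
const monthly = hourly * HOURS_PER_MONTH;          // $11.52 × 730 = $8,409.60

const synapseSpend = 22_080;                       // estimated monthly spend (Step 2)
const savings = synapseSpend - monthly;            // $13,670.40
const savingsPct = (savings / synapseSpend) * 100; // ≈ 61.9%

console.log({
  hourly: hourly.toFixed(2),         // "11.52"
  monthly: monthly.toFixed(2),       // "8409.60"
  savings: savings.toFixed(2),       // "13670.40"
  savingsPct: savingsPct.toFixed(1), // "61.9"
});
```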
206 | ## Test Results
207 | 
208 | ### ✅ All MCP Tools Validated:
209 | 
210 | **Test Workspace:** `samplesynapseworkspace` (sparkpmteam_rc)
211 | 
212 | **Results:**
213 | - ✅ Workspace details retrieved via Azure Management API
214 | - ✅ Compute spend analyzed ($22,080/month estimated)
215 | - ✅ Real-time pricing fetched from Azure Retail Prices API
216 | - ✅ F64 capacity recommended with HIGH confidence
217 | - ✅ Spark pool migration validated (all pools fit)
218 | - ✅ Detailed migration recommendations generated
219 | 
220 | **Pricing Source:** Azure Retail Prices API ✅
221 | 
222 | ---
223 | 
224 | ## Benefits of MCP Tool Integration
225 | 
226 | ### For Users:
227 | - 🗣️ **Natural language interface** - Just ask Copilot
228 | - 📊 **Real-time data** - Actual Azure API responses
229 | - 💰 **Accurate pricing** - Live from Azure Retail Prices API
230 | - ✅ **Validation** - Know before you migrate
231 | - 📋 **Reports** - Comprehensive migration analysis
232 | 
233 | ### For Developers:
234 | - 🔧 **Modular tools** - Each tool is independent
235 | - 🔄 **Composable** - Chain tools together
236 | - 📦 **Testable** - Validated end-to-end
237 | - 🌐 **Azure-native** - Official API integration
238 | 
239 | ---
240 | 
241 | ## Next Steps
242 | 
243 | 1. **Test with your workspace:**
244 |    ```
245 |    Get details for my Synapse workspace '<your-workspace-name>'
246 |    ```
247 | 
248 | 2. **Get recommendations:**
249 |    ```
250 |    Recommend Fabric capacity for my workspace
251 |    ```
252 | 
253 | 3. **Validate migration:**
254 |    ```
255 |    Check if my Spark pools will work on F64
256 |    ```
257 | 
258 | 4. **Execute migration:**
259 |    ```
260 |    Migrate to Fabric workspace '<your-fabric-workspace-id>'
261 |    ```
262 | 
263 | ---
264 | 
265 | ## Support
266 | 
267 | For issues or questions:
268 | - Check test results: `node test-mcp-tools-e2e.mjs`
269 | - Review logs in terminal output
270 | - Verify Azure CLI authentication: `az account show`
271 | 
272 | ---
273 | 
274 | **Last Updated:** November 13, 2025
275 | **Status:** ✅ All tools working with Azure API integration
276 | 
--------------------------------------------------------------------------------
/scripts/install-unix.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 | # Microsoft Fabric Analytics MCP Server Installation Script for Linux/macOS
3 | 
4 | set -e
5 | 
6 | echo "🚀 Microsoft Fabric Analytics MCP Server Installer"
7 | printf '=%.0s' {1..60}; echo ""
8 | 
9 | # Colors for output
10 | RED='\033[0;31m'
11 | GREEN='\033[0;32m'
12 | YELLOW='\033[1;33m'
13 | BLUE='\033[0;34m'
14 | NC='\033[0m' # No Color
15 | 
16 | # Function to print colored output
17 | print_status() {
18 |     echo -e "${GREEN}✅ $1${NC}"
19 | }
20 | 
21 | print_warning() {
22 |     echo -e "${YELLOW}⚠️ $1${NC}"
23 | }
24 | 
25 | print_error() {
26 |     echo -e "${RED}❌ $1${NC}"
27 | }
28 | 
29 | print_info() {
30 |     echo -e "${BLUE}ℹ️ $1${NC}"
31 | }
32 | 
33 | # Check if command exists
34 | command_exists() {
35 |     command -v "$1" >/dev/null 2>&1
36 | }
37 | 
38 | # Detect operating system
39 | detect_os() {
40 |     if [[ "$OSTYPE" == "linux-gnu"* ]]; then
41 |         echo "linux"
42 |     elif [[ "$OSTYPE" == "darwin"* ]]; then
43 |         echo "macos"
44 |     else
45 |         echo "unknown"
46 |     fi
47 | }
48 | 
49 | # Install Python if not present
50 | install_python() {
51 |     if ! command_exists python3; then
52 |         print_warning "Python 3 not found. Installing..."
53 | 
54 |         OS=$(detect_os)
55 |         if [[ "$OS" == "linux" ]]; then
56 |             if command_exists apt-get; then
57 |                 sudo apt-get update
58 |                 sudo apt-get install -y python3 python3-pip
59 |             elif command_exists yum; then
60 |                 sudo yum install -y python3 python3-pip
61 |             elif command_exists dnf; then
62 |                 sudo dnf install -y python3 python3-pip
63 |             else
64 |                 print_error "Unable to install Python. Please install manually."
65 |                 exit 1
66 |             fi
67 |         elif [[ "$OS" == "macos" ]]; then
68 |             if command_exists brew; then
69 |                 brew install python3
70 |             else
71 |                 print_error "Homebrew not found. Please install Python 3 manually."
72 |                 exit 1
73 |             fi
74 |         fi
75 |     else
76 |         print_status "Python 3 found"
77 |     fi
78 | }
79 | 
80 | # Install Node.js if not present
81 | install_nodejs() {
82 |     if ! command_exists node; then
83 |         print_warning "Node.js not found. Installing..."
84 | 
85 |         OS=$(detect_os)
86 |         if [[ "$OS" == "linux" ]]; then
87 |             # Install Node.js via NodeSource (Debian/Ubuntu) or distro packages
88 |             if command_exists apt-get; then
89 |                 curl -fsSL https://deb.nodesource.com/setup_18.x | sudo -E bash -
90 |                 sudo apt-get install -y nodejs
91 |             elif command_exists yum; then
92 |                 sudo yum install -y nodejs npm
93 |             elif command_exists dnf; then
94 |                 sudo dnf install -y nodejs npm
95 |             fi
96 |         elif [[ "$OS" == "macos" ]]; then
97 |             if command_exists brew; then
98 |                 brew install node
99 |             else
100 |                 print_error "Homebrew not found. Please install Node.js manually."
101 |                 exit 1
102 |             fi
103 |         fi
104 |     else
105 |         print_status "Node.js found"
106 |     fi
107 | }
108 | 
109 | # Main installation function
110 | install_fabric_mcp() {
111 |     local method="$1"
112 | 
113 |     print_info "Installing Microsoft Fabric Analytics MCP Server..."
114 | 
115 |     case "$method" in
116 |         "python"|"pip")
117 |             print_info "Installing via Python/pip..."
118 |             install_python
119 | 
120 |             if command_exists pip3; then
121 |                 pip3 install fabric-analytics-mcp
122 |             elif command_exists pip; then
123 |                 pip install fabric-analytics-mcp
124 |             else
125 |                 python3 -m pip install fabric-analytics-mcp
126 |             fi
127 | 
128 |             print_status "Python package installed successfully!"
129 |             print_info "Test with: fabric-analytics-mcp validate"
130 |             ;;
131 | 
132 |         "npm")
133 |             print_info "Installing via NPM..."
134 |             install_nodejs
135 | 
136 |             npm install -g mcp-for-microsoft-fabric-analytics
137 | 
138 |             print_status "NPM package installed successfully!"
139 |             print_info "Test with: fabric-analytics"
140 |             ;;
141 | 
142 |         "source"|"git")
143 |             print_info "Installing from source..."
144 |             install_nodejs
145 | 
146 |             if [[ -d "Fabric-Analytics-MCP" ]]; then
147 |                 print_warning "Directory already exists. Updating..."
148 |                 cd Fabric-Analytics-MCP
149 |                 git pull
150 |             else
151 |                 git clone https://github.com/santhoshravindran7/Fabric-Analytics-MCP.git
152 |                 cd Fabric-Analytics-MCP
153 |             fi
154 | 
155 |             npm install
156 |             npm run build
157 | 
158 |             print_status "Source installation completed!"
159 |             print_info "Start with: npm start"
160 |             ;;
161 | 
162 |         *)
163 |             print_error "Unknown installation method: $method"
164 |             echo "Available methods: python, npm, source"
165 |             exit 1
166 |             ;;
167 |     esac
168 | }
169 | 
170 | # Configuration helper
171 | configure_environment() {
172 |     print_info "Setting up environment configuration..."
173 | 174 | cat << 'EOF' > fabric-mcp-env.sh 175 | #!/bin/bash 176 | # Microsoft Fabric Analytics MCP Server Environment Configuration 177 | 178 | # Required: Authentication method 179 | export FABRIC_AUTH_METHOD=bearer_token # Options: bearer_token, service_principal, interactive 180 | 181 | # Optional: Default workspace ID (can be discovered using tools) 182 | # export FABRIC_DEFAULT_WORKSPACE_ID=your-workspace-id 183 | 184 | # For Service Principal authentication: 185 | # export FABRIC_CLIENT_ID=your-client-id 186 | # export FABRIC_CLIENT_SECRET=your-client-secret 187 | # export FABRIC_TENANT_ID=your-tenant-id 188 | 189 | echo "✅ Microsoft Fabric MCP environment configured" 190 | echo "💡 Edit this file to set your specific configuration" 191 | EOF 192 | 193 | chmod +x fabric-mcp-env.sh 194 | print_status "Environment configuration created: fabric-mcp-env.sh" 195 | print_info "Run 'source fabric-mcp-env.sh' to load configuration" 196 | } 197 | 198 | # Claude Desktop configuration helper 199 | configure_claude() { 200 | print_info "Setting up Claude Desktop configuration..." 201 | 202 | # Detect Claude config path 203 | if [[ "$(detect_os)" == "macos" ]]; then 204 | CLAUDE_CONFIG_DIR="$HOME/Library/Application Support/Claude" 205 | else 206 | CLAUDE_CONFIG_DIR="$HOME/.config/claude" 207 | fi 208 | 209 | CLAUDE_CONFIG_FILE="$CLAUDE_CONFIG_DIR/claude_desktop_config.json" 210 | 211 | mkdir -p "$CLAUDE_CONFIG_DIR" 212 | 213 | if [[ -f "$CLAUDE_CONFIG_FILE" ]]; then 214 | print_warning "Claude config exists. Creating backup..." 215 | cp "$CLAUDE_CONFIG_FILE" "$CLAUDE_CONFIG_FILE.backup.$(date +%s)" 216 | fi 217 | 218 | cat << 'EOF' > "$CLAUDE_CONFIG_FILE" 219 | { 220 | "mcpServers": { 221 | "fabric-analytics": { 222 | "command": "fabric-analytics-mcp", 223 | "args": ["start"], 224 | "env": { 225 | "FABRIC_AUTH_METHOD": "bearer_token" 226 | } 227 | } 228 | } 229 | } 230 | EOF 231 | 232 | print_status "Claude Desktop configuration created" 233 | print_info "Restart Claude Desktop to apply changes" 234 | } 235 | 236 | # Main script logic 237 | main() { 238 | echo "🎯 Choose installation method:" 239 | echo "1) Python/pip (Recommended)" 240 | echo "2) NPM" 241 | echo "3) From source" 242 | echo "4) All methods" 243 | echo "" 244 | read -p "Enter choice (1-4): " choice 245 | 246 | case "$choice" in 247 | 1) 248 | install_fabric_mcp "python" 249 | ;; 250 | 2) 251 | install_fabric_mcp "npm" 252 | ;; 253 | 3) 254 | install_fabric_mcp "source" 255 | ;; 256 | 4) 257 | install_fabric_mcp "python" 258 | install_fabric_mcp "npm" 259 | ;; 260 | *) 261 | print_error "Invalid choice" 262 | exit 1 263 | ;; 264 | esac 265 | 266 | # Offer additional configuration 267 | echo "" 268 | read -p "Setup environment configuration? (y/N): " setup_env 269 | if [[ "$setup_env" =~ ^[Yy]$ ]]; then 270 | configure_environment 271 | fi 272 | 273 | read -p "Setup Claude Desktop configuration? (y/N): " setup_claude 274 | if [[ "$setup_claude" =~ ^[Yy]$ ]]; then 275 | configure_claude 276 | fi 277 | 278 | print_status "Installation completed successfully!" 279 | echo "" 280 | print_info "Next steps:" 281 | echo "1. Set up authentication (see environment configuration)" 282 | echo "2. Test installation: fabric-analytics-mcp validate" 283 | echo "3. 
Start using with Claude Desktop or other MCP clients" 284 | echo "" 285 | print_info "Documentation: https://github.com/santhoshravindran7/Fabric-Analytics-MCP" 286 | } 287 | 288 | # Run main function 289 | main "$@" 290 | --------------------------------------------------------------------------------